CombinedText stringlengths 4 3.42M |
|---|
extern crate hkg;
extern crate termion;
extern crate rustc_serialize;
extern crate chrono;
extern crate kuchiki;
extern crate hyper;
extern crate cancellation;
extern crate time;
extern crate url;
use url::Url;
use kuchiki::traits::*;
use kuchiki::NodeRef;
use std::default::Default;
use termion::input::TermRead;
use termion::raw::IntoRawMode;
use termion::{color, style};
use termion::event::Key;
use termion::terminal_size;
use rustc_serialize::json;
use rustc_serialize::json::Json;
use chrono::*;
use hkg::utility::cache;
use hkg::model::IconItem;
use hkg::model::ListTopicItem;
use hkg::model::ListTopicTitleItem;
use hkg::model::ListTopicAuthorItem;
use hkg::model::ShowItem;
use hkg::model::ShowReplyItem;
use hkg::model::UrlQueryItem;
use hkg::utility::client::*;
use std::path::Path;
// use std::io::prelude::*;
use std::fs::File;
use std::fs;
use std::io::{Error, ErrorKind};
use std::io::Cursor;
use std::io::BufReader;
use std::io::{Read, Write, Stdout, Stdin};
use std::io::{stdout, stdin};
use std::collections::HashMap;
use hyper::Client;
use std::sync::{Arc, Mutex};
use std::thread;
use cancellation::{CancellationToken, CancellationTokenSource, OperationCanceled};
use std::sync::mpsc::sync_channel;
use std::sync::mpsc::channel;
use std::sync::mpsc::Sender;
/// Which screen the main UI loop is currently rendering.
#[derive(PartialEq, Eq, Copy, Clone)]
enum Status {
// Initial state, before any page response has arrived.
Startup,
// Topic-list screen.
List,
// Single-post (thread) screen.
Show,
}
// Entry point of the hkg terminal forum browser.
// Puts the terminal into raw mode, spawns a background worker thread that
// serves page requests over an mpsc channel pair, then loops: poll for a
// worker response, render the current screen, and (when no request is in
// flight) block on keyboard input.
fn main() {
// Initialize 'em all.
let stdout = stdout();
let mut stdout = stdout.lock().into_raw_mode().unwrap();
// Clear the screen.
print!("{}", termion::clear::All); // stdout.clear().unwrap();
let title = String::from("高登");
// Icon manifest used by the Show screen renderer.
let icon_manifest_string = cache::readfile(String::from("data/icon.manifest.json"));
let icon_collection: Vec<IconItem> = json::decode(&icon_manifest_string).unwrap();
// let s = cache::readfile(String::from("data/topics.json"));
// let collection: Vec<ListTopicItem> = json::decode(&s).unwrap();
// NOTE(review): `collection` is never repopulated below — the Index
// response is only debug-printed and `state = Status::List` is still
// commented out, so the List screen always renders an empty list in
// this revision.
let collection: Vec<ListTopicItem> = vec![];
// initialize show with empty page
let mut show_item = ShowItem {
url_query: UrlQueryItem { channel: "".to_string(), message: String::from("") },
replies: vec![],
page: 0,
max_page: 0,
reply_count: String::from(""),
title: String::from(""),
};
let mut status = String::from("> ");
let mut state = Status::Startup;
let mut prev_state = state;
let mut prev_width = terminal_size().unwrap().0; //rustbox.width();
let mut index = hkg::screen::index::Index::new();
let mut show_icon_collection = &[icon_collection];
let mut show = hkg::screen::show::Show::new(show_icon_collection);
let mut builder = hkg::builder::Builder::new();
// let url = String::from("http://www.alexa.com/");
// let url = String::from("http://localhost:3000");
// let url = String::from("https://www.yahoo.com.hk/");
// Request/response channel pair between the UI loop and the worker.
let (tx_req, rx_req) = channel::<ChannelItem>();
let (tx_res, rx_res) = channel::<ChannelItem>();
// Worker thread: blocks on rx_req, fetches/caches each requested page
// under a cancellation source armed for 10 seconds, replies on tx_res.
let wclient = thread::spawn(move || {
let mut wr = WebResource::new();
let mut ct = CancellationTokenSource::new();
ct.cancel_after(std::time::Duration::new(10, 0));
loop {
match rx_req.recv() {
Ok(item) => {
let th = thread::current();
// First closure runs on cancellation (unparks this thread);
// the second performs the actual page request.
ct.run(|| {
th.unpark();
},
|| {
tx_res.send(page_request(&item, &mut wr, &ct)).unwrap();
});
if ct.is_canceled() {
// NOTE(review): Duration::new(0, 250) is 250 *nanoseconds* —
// confirm a longer back-off was not intended.
thread::park_timeout(std::time::Duration::new(0, 250));
// Err(OperationCanceled)
} else {
// Ok(())
}
}
Err(e) => {}
}
}
});
let mut is_web_requesting = false;
// topics request
let w = terminal_size().unwrap().0;
let status_message = list_page(&mut is_web_requesting, &tx_req);
status = format_status(status.clone(),
w as usize,
&status_message);
loop {
// show UI
if prev_state != state {
print!("{}", termion::clear::All); // stdout.clear().unwrap(); // hkg::screen::common::clear(&rustbox); // clear screen when switching state
prev_state = state;
}
// Non-blocking poll for a finished page from the worker thread.
match rx_res.try_recv() {
Ok(item) => {
match item.extra {
ChannelItemType::Show(extra) => {
// Parse the fetched HTML and rebuild the Show screen model.
let document = kuchiki::parse_html().from_utf8().one(item.result.as_bytes());
let posturl = get_posturl(&extra.postid, extra.page);
show_item = builder.show_item(&document, &posturl);
let w = terminal_size().unwrap().0 as usize; //rustbox.width();
status = format_status(status,
w,
&format!("[{}-{}:ROK][{}]",
show_item.url_query.message,
show_item.page,
is_web_requesting));
show.resetY(); // show.resetY();
print!("{}", termion::clear::All); // stdout.clear().unwrap(); // hkg::screen::common::clear(&rustbox);
state = Status::Show;
is_web_requesting = false;
},
ChannelItemType::Index(extra) => {
// Topic-list response: parsed, then only debug-printed here —
// the state transition to List is still commented out.
let document = kuchiki::parse_html().from_utf8().one(item.result.as_bytes());
let url = get_topic_bw_url();
let list_topic_items = builder.list_topic_items(&document);
let w = terminal_size().unwrap().0 as usize; //rustbox.width();
status = format_status(status,
w,
&format!("[TOPICS:ROK]"));
print!("{}", termion::clear::All); // stdout.clear().unwrap(); // hkg::screen::common::clear(&rustbox);
write!(stdout, "{}{}",
termion::cursor::Goto(1, 1),
color::Fg(color::White));
println!("{:?}", list_topic_items);
// write!(stdout, "{}{}{}",
// termion::cursor::Goto(1, (i + j + 1) as u16),
// color::Fg(color::White),
// &format!("[{}][{}] => {:?}", i, j, item.as_node())
// );
// state = Status::List;
is_web_requesting = false;
}
}
}
Err(e) => {}
}
// Render the screen matching the current state.
match state {
Status::Startup => {
},
Status::List => {
// list.print(&title, &collection);
index.print(&mut stdout, &collection);
}
Status::Show => {
// show.print(&title, &show_item);
show.print(&mut stdout, &title, &show_item);
}
}
let w = terminal_size().unwrap().0;
// NOTE(review): this closure is dead code — its only callers are the
// commented-out lines immediately below.
let timeFormat = |t: time::Tm| {
match t.strftime("%Y%m%d%H%M") {
Ok(s) => s.to_string(),
Err(e) => panic!(e)
}
};
// let (time1, time2) = (timeFormat(time::now()), timeFormat(time::now()));
// status = format_status(status.clone(), w as usize, &format!("now: {:?} {:?}", time1, time2));
print_status(&mut stdout, &status); // print_status(&rustbox, &status);
stdout.flush().unwrap(); // rustbox.present();
// Only read keys while no request is in flight; stdin.keys() blocks,
// and each handled key `break`s out so the outer loop re-renders.
if !is_web_requesting {
let stdin = stdin();
for c in stdin.keys() {
// Clear on terminal-width change so stale rows do not linger.
if prev_width != terminal_size().unwrap().0 {
print!("{}", termion::clear::All); // stdout.clear().unwrap(); //hkg::screen::common::clear(&rustbox);
prev_width = terminal_size().unwrap().0;
}
let w = terminal_size().unwrap().0;
match c.unwrap() {
// q: restore the terminal and quit.
Key::Char('q') => {
print!("{}{}{}", termion::clear::All, style::Reset, termion::cursor::Show); // stdout.clear().unwrap();
return
},
// Enter: on the List screen, request page 1 of the selected topic.
Key::Char('\n') => {
// status = format_status(status, w as usize, &format!("ENTER"));
status = format_status(status, w as usize, "ENTER");
match state {
Status::Startup => {},
Status::List => {
let i = index.get_selected_topic();
if i > 0 {
let topic_item = &collection[i - 1];
let postid = &topic_item.title.url_query.message;
let page = 1;
let status_message = show_page(&postid, page, &mut is_web_requesting, &tx_req);
status = format_status(status.clone(),
w as usize,
&get_show_page_status_message(postid, page, &status_message));
}
}
Status::Show => {}
}
break
},
Key::Alt(c) => {
status = format_status(status, w as usize, &format!("^{}", c));
break
},
Key::Ctrl(c) => {
status = format_status(status, w as usize, &format!("*{}", c));
break
},
// Left/Right: previous/next page of the open post.
Key::Left => {
status = format_status(status, w as usize, &format!("←"));
match state {
Status::Startup => {},
Status::List => {}
Status::Show => {
if show_item.page > 1 {
let postid = &show_item.url_query.message;
let page = &show_item.page - 1;
let status_message = show_page(&postid, page, &mut is_web_requesting, &tx_req);
status = format_status(status.clone(),
w as usize,
&get_show_page_status_message(postid, page, &status_message));
}
}
}
break
}
Key::Right => {
status = format_status(status, w as usize, &format!("→"));
match state {
Status::Startup => {},
Status::List => {}
Status::Show => {
if show_item.max_page > show_item.page {
let postid = &show_item.url_query.message;
let page = &show_item.page + 1;
let status_message = show_page(&postid, page, &mut is_web_requesting, &tx_req);
status = format_status(status.clone(),
w as usize,
&get_show_page_status_message(postid, page, &status_message));
}
}
}
break
},
// PageUp/PageDown: scroll the Show screen by a full body height.
Key::PageUp => {
status = format_status(status, w as usize, "↑");
match state {
Status::Startup => {},
Status::List => {
let tmp = index.get_selected_topic();
status = format_status(status, w as usize, &format!("{}", tmp));
if tmp > 1 {
index.select_topic(tmp - 1);
}
}
Status::Show => {
let bh = show.body_height();
if show.scrollUp(bh) {
print!("{}", termion::clear::All); // hkg::screen::common::clear(&rustbox);
}
}
}
break
},
Key::PageDown => {
status = format_status(status, w as usize, "↓");
match state {
Status::Startup => {},
Status::List => {}
Status::Show => {
let bh = show.body_height();
if show.scrollDown(bh) {
print!("{}", termion::clear::All); //hkg::screen::common::clear(&rustbox);
}
}
}
break
},
// Up/Down: move the List selection, or scroll Show by two rows.
Key::Up => {
status = format_status(status, w as usize, "↑");
match state {
Status::Startup => {},
Status::List => {
let tmp = index.get_selected_topic();
status = format_status(status, w as usize, &format!("{}", tmp));
if tmp > 1 {
index.select_topic(tmp - 1);
}
}
Status::Show => {
if show.scrollUp(2) {
print!("{}", termion::clear::All); // stdout.clear().unwrap(); // hkg::screen::common::clear(&rustbox);
}
}
}
break
},
Key::Down => {
status = format_status(status, w as usize, "↓");
match state {
Status::Startup => {},
Status::List => {
let tmp = index.get_selected_topic();
status = format_status(status, w as usize, &format!("{}", tmp));
if tmp < index.body_height() {
index.select_topic(tmp + 1);
}
}
Status::Show => {
if show.scrollDown(2) {
print!("{}", termion::clear::All); // stdout.clear().unwrap(); //hkg::screen::common::clear(&rustbox);
}
}
}
break
},
// Backspace: leave the Show screen, back to the List.
Key::Backspace => {
// status = format_status(status, w as usize, &format!("×"));
status = format_status(status, w as usize, "B");
match state {
Status::Startup => {},
Status::List => {}
Status::Show => {
state = Status::List;
print!("{}", termion::clear::All);
}
}
break
},
Key::Char(c) => { status = format_status(status, w as usize, &format!(" {}", c));break },
// Key::Invalid => {
// status = format_status(status, w as usize, &format!("???"));
// break
// },
_ => {},
}
}
}
}
}
// Read a previously cached page from `cache_path`/`file_name`.
// Returns the file contents, or the underlying I/O error as a string.
fn read_cache<P: AsRef<Path>, S: AsRef<Path>>(cache_path: P,
                                              file_name: S)
                                              -> Result<String, String> {
    let full_path = cache_path.as_ref().join(file_name);
    match File::open(full_path) {
        Ok(mut f) => {
            let mut body = String::new();
            match f.read_to_string(&mut body) {
                Ok(_) => Ok(body),
                Err(e) => Err(e.to_string()),
            }
        }
        Err(e) => Err(e.to_string()),
    }
}
// Persist `s` under `cache_path`/`file_name`, creating the directory
// tree first. Any I/O failure is reported as a string.
fn write_cache<P: AsRef<Path>, S: AsRef<Path>>(cache_path: P,
                                               file_name: S,
                                               s: String)
                                               -> Result<(), String> {
    let target = cache_path.as_ref().join(file_name);
    if let Err(e) = fs::create_dir_all(&cache_path) {
        return Err(e.to_string());
    }
    let mut out = match File::create(target) {
        Ok(f) => f,
        Err(e) => return Err(e.to_string()),
    };
    match out.write_all(s.as_bytes()) {
        Ok(()) => Ok(()),
        Err(e) => Err(e.to_string()),
    }
}
/// Build the absolute URL of one page of a forum post.
///
/// `postid` is the forum message id; `page` is the 1-based page number.
/// Takes `&str` (idiomatic; `&String` arguments still coerce), so the
/// call sites are unaffected.
fn get_posturl(postid: &str, page: usize) -> String {
    format!("http://forum1.hkgolden.com/view.aspx?type=BW&message={}&page={}",
            postid, page)
}
// URL of the black/white topic-list page.
fn get_topic_bw_url() -> String {
    String::from("http://forum1.hkgolden.com/topics_bw.htm")
}
/// Resolve one queued request: serve the page from the on-disk HTML
/// cache when present, otherwise fetch it over HTTP and cache the body.
/// Returns a `ChannelItem` carrying the raw HTML in `result`; the
/// `extra` tag mirrors the request so the UI loop can dispatch on it.
/// `ct` is currently unused here (cancellation is driven by the caller).
fn page_request(item: &ChannelItem,
                wr: &mut WebResource,
                ct: &CancellationTokenSource)
                -> ChannelItem {
    match item.extra.clone() {
        ChannelItemType::Show(extra) => {
            // Cache layout: data/html/<postid>/show_<page>.html
            let html_path = format!("data/html/{}/", extra.postid);
            let show_file_name = format!("show_{}.html", extra.page);
            let postid = extra.postid.clone();
            let (from_cache, result) = match read_cache(&html_path, &show_file_name) {
                Ok(cached) => (true, cached),
                Err(_) => {
                    // Cache miss: fetch the page over HTTP.
                    let posturl = get_posturl(&extra.postid, extra.page);
                    (false, wr.get(&posturl))
                }
            };
            if !from_cache {
                // Best-effort cache write; a failure only means the next
                // request re-downloads the page, so the error is dropped
                // deliberately (was an unused-Result warning before).
                let _ = write_cache(&html_path, &show_file_name, result.clone());
            }
            ChannelItem {
                extra: ChannelItemType::Show(ChannelShowItem { postid: postid, page: extra.page }),
                result: result,
            }
        },
        ChannelItemType::Index(_) => {
            // Topic lists are cached per minute: data/html/topics/<YYYYmmddHHMM>.html
            let time_format = |t: time::Tm| {
                match t.strftime("%Y%m%d%H%M") {
                    Ok(s) => s.to_string(),
                    // A bad format string is a programming bug, not a runtime
                    // condition — panic with the error's message.
                    Err(e) => panic!("{}", e),
                }
            };
            let time = time_format(time::now());
            let html_path = String::from("data/html/topics/");
            let file_name = format!("{}.html", time);
            let (from_cache, result) = match read_cache(&html_path, &file_name) {
                Ok(cached) => (true, cached),
                Err(_) => {
                    let url = get_topic_bw_url();
                    (false, wr.get(&url))
                }
            };
            if !from_cache {
                // Best-effort cache write, same rationale as above.
                let _ = write_cache(&html_path, &file_name, result.clone());
            }
            ChannelItem {
                extra: ChannelItemType::Index(ChannelIndexItem {}),
                result: result,
            }
        }
    }
}
// Queue a request for the topic-list page on the worker channel.
// Sets the in-flight flag on success; returns a short status tag
// ("SOK" or "SFAIL:<send error>") for the status bar.
fn list_page(is_web_requesting: &mut bool, tx_req: &Sender<ChannelItem>) -> String {
    let request = ChannelItem {
        extra: ChannelItemType::Index(ChannelIndexItem {}),
        result: String::new(),
    };
    match tx_req.send(request) {
        Ok(()) => {
            *is_web_requesting = true;
            String::from("SOK")
        }
        Err(e) => format!("SFAIL:{}", e),
    }
}
/// Queue a request for page `page` of post `postid` on the worker
/// channel. Sets the in-flight flag on success; returns "SOK" or
/// "SFAIL:<send error>" for the status bar.
fn show_page(postid: &String, page: usize, is_web_requesting: &mut bool, tx_req: &Sender<ChannelItem>) -> String {
    // (Removed a dead `get_posturl` call — its result was never used;
    // the worker builds the URL itself in `page_request`.)
    let ci = ChannelItem {
        extra: ChannelItemType::Show(ChannelShowItem { postid: postid.clone(), page: page }),
        result: String::from(""),
    };
    let status_message = match tx_req.send(ci) {
        Ok(()) => {
            *is_web_requesting = true;
            "SOK".to_string()
        }
        Err(e) => format!("{}:{}", "SFAIL", e),
    };
    status_message
}
/// Format the status-bar tag for a page request, e.g. "[6360604-2:SOK]".
/// Parameters widened from `&String` to `&str` (call sites coerce).
fn get_show_page_status_message(postid: &str, page: usize, status_message: &str) -> String {
    format!("[{}-{}:{}]", postid, page, status_message)
}
/// Draw `status` in bold white on the bottom terminal row and hide the
/// cursor. The caller flushes `stdout` afterwards.
fn print_status(stdout: &mut termion::raw::RawTerminal<std::io::StdoutLock>, status: &str) {
    // Only the height is needed to address the last row (the unused
    // width lookup was removed).
    let h = terminal_size().unwrap().1;
    write!(stdout,
           "{}{}{}{}{}{}",
           termion::cursor::Goto(1, h),
           color::Fg(color::White),
           style::Bold,
           status,
           style::Reset,
           termion::cursor::Hide)
        .unwrap(); // terminal write failure is fatal, matching flush().unwrap() in main
}
/// Append `s` to the running status line, resetting to the "> " prompt
/// once the accumulated line no longer fits the terminal width `w`.
///
/// NOTE(review): `len()` counts bytes, not display columns — multi-byte
/// text (e.g. "高登", arrow glyphs) trips the reset early; confirm that
/// is acceptable before changing the comparison.
fn format_status(status: String, w: usize, s: &str) -> String {
    if status.len() >= w {
        // Line full: start over from the prompt.
        format!("> {}", s)
    } else {
        format!("{}{}", status, s)
    }
}
// fn show_item_build_example(rustbox: &rustbox::RustBox, collection: &Vec<ListTopicItem>) {
//
// rustbox.print(1,
// 1,
// rustbox::RB_NORMAL,
// Color::White,
// Color::Black,
// &format!("before parse => {}", Local::now()));
//
// let mut builder = hkg::builder::Builder::new();
//
// let url = &collection[1].title.url;
// rustbox.print(1, 2, rustbox::RB_NORMAL, Color::White, Color::Black, url);
//
// let uqi = builder.url_query_item(&url);
// let postid = "6360604"; //uqi.message;
// let page = 1;
// let path = format!("data/html/{postid}/show_{page}.html",
// postid = postid,
// page = page);
//
// rustbox.print(1,
// 3,
// rustbox::RB_NORMAL,
// Color::White,
// Color::Black,
// &format!("path: {}", path));
//
// let show_item = match kuchiki::parse_html().from_utf8().from_file(&path) {
// Ok(document) => Some(builder.show_item(&document, &url)),
// Err(e) => None,
// };
//
// match show_item {
// Some(si) => {
//
// rustbox.print(1,
// 5,
// rustbox::RB_NORMAL,
// Color::White,
// Color::Black,
// &format!("url_query->message: {} title:{} reploy count: {} page: {} \
// max_page: {}",
// si.url_query.message,
// si.title,
// si.reply_count,
// si.page,
// si.max_page));
//
// for (index, item) in si.replies.iter().enumerate() {
// rustbox.print(1,
// index + 7,
// rustbox::RB_NORMAL,
// Color::White,
// Color::Black,
// &format!("{:<2}={:?}", index, item));
// }
// }
// _ => {}
// }
// }
main - apply builder list_topic_items
extern crate hkg;
extern crate termion;
extern crate rustc_serialize;
extern crate chrono;
extern crate kuchiki;
extern crate hyper;
extern crate cancellation;
extern crate time;
extern crate url;
use url::Url;
use kuchiki::traits::*;
use kuchiki::NodeRef;
use std::default::Default;
use termion::input::TermRead;
use termion::raw::IntoRawMode;
use termion::{color, style};
use termion::event::Key;
use termion::terminal_size;
use rustc_serialize::json;
use rustc_serialize::json::Json;
use chrono::*;
use hkg::utility::cache;
use hkg::model::IconItem;
use hkg::model::ListTopicItem;
use hkg::model::ListTopicTitleItem;
use hkg::model::ListTopicAuthorItem;
use hkg::model::ShowItem;
use hkg::model::ShowReplyItem;
use hkg::model::UrlQueryItem;
use hkg::utility::client::*;
use std::path::Path;
// use std::io::prelude::*;
use std::fs::File;
use std::fs;
use std::io::{Error, ErrorKind};
use std::io::Cursor;
use std::io::BufReader;
use std::io::{Read, Write, Stdout, Stdin};
use std::io::{stdout, stdin};
use std::collections::HashMap;
use hyper::Client;
use std::sync::{Arc, Mutex};
use std::thread;
use cancellation::{CancellationToken, CancellationTokenSource, OperationCanceled};
use std::sync::mpsc::sync_channel;
use std::sync::mpsc::channel;
use std::sync::mpsc::Sender;
/// Which screen the main UI loop is currently rendering.
#[derive(PartialEq, Eq, Copy, Clone)]
enum Status {
// Initial state, before any page response has arrived.
Startup,
// Topic-list screen.
List,
// Single-post (thread) screen.
Show,
}
// Entry point of the hkg terminal forum browser.
// Puts the terminal into raw mode, spawns a background worker thread that
// serves page requests over an mpsc channel pair, then loops: poll for a
// worker response, render the current screen, and (when no request is in
// flight) block on keyboard input.
fn main() {
// Initialize 'em all.
let stdout = stdout();
let mut stdout = stdout.lock().into_raw_mode().unwrap();
// Clear the screen.
print!("{}", termion::clear::All); // stdout.clear().unwrap();
let title = String::from("高登");
// Icon manifest used by the Show screen renderer.
let icon_manifest_string = cache::readfile(String::from("data/icon.manifest.json"));
let icon_collection: Vec<IconItem> = json::decode(&icon_manifest_string).unwrap();
// Topic list shown on the List screen; filled in when an Index
// response arrives from the worker below.
let mut list_topic_items: Vec<ListTopicItem> = vec![];
// initialize show with empty page
let mut show_item = ShowItem {
url_query: UrlQueryItem { channel: "".to_string(), message: String::from("") },
replies: vec![],
page: 0,
max_page: 0,
reply_count: String::from(""),
title: String::from(""),
};
let mut status = String::from("> ");
let mut state = Status::Startup;
let mut prev_state = state;
let mut prev_width = terminal_size().unwrap().0; //rustbox.width();
let mut index = hkg::screen::index::Index::new();
let mut show_icon_collection = &[icon_collection];
let mut show = hkg::screen::show::Show::new(show_icon_collection);
let mut builder = hkg::builder::Builder::new();
// let url = String::from("http://www.alexa.com/");
// let url = String::from("http://localhost:3000");
// let url = String::from("https://www.yahoo.com.hk/");
// Request/response channel pair between the UI loop and the worker.
let (tx_req, rx_req) = channel::<ChannelItem>();
let (tx_res, rx_res) = channel::<ChannelItem>();
// Worker thread: blocks on rx_req, fetches/caches each requested page
// under a cancellation source armed for 10 seconds, replies on tx_res.
let wclient = thread::spawn(move || {
let mut wr = WebResource::new();
let mut ct = CancellationTokenSource::new();
ct.cancel_after(std::time::Duration::new(10, 0));
loop {
match rx_req.recv() {
Ok(item) => {
let th = thread::current();
// First closure runs on cancellation (unparks this thread);
// the second performs the actual page request.
ct.run(|| {
th.unpark();
},
|| {
tx_res.send(page_request(&item, &mut wr, &ct)).unwrap();
});
if ct.is_canceled() {
// NOTE(review): Duration::new(0, 250) is 250 *nanoseconds* —
// confirm a longer back-off was not intended.
thread::park_timeout(std::time::Duration::new(0, 250));
// Err(OperationCanceled)
} else {
// Ok(())
}
}
Err(e) => {}
}
}
});
let mut is_web_requesting = false;
// topics request
let w = terminal_size().unwrap().0;
let status_message = list_page(&mut is_web_requesting, &tx_req);
status = format_status(status.clone(),
w as usize,
&status_message);
loop {
// show UI
if prev_state != state {
print!("{}", termion::clear::All); // stdout.clear().unwrap(); // hkg::screen::common::clear(&rustbox); // clear screen when switching state
prev_state = state;
}
// Non-blocking poll for a finished page from the worker thread.
match rx_res.try_recv() {
Ok(item) => {
match item.extra {
ChannelItemType::Show(extra) => {
// Parse the fetched HTML and rebuild the Show screen model.
let document = kuchiki::parse_html().from_utf8().one(item.result.as_bytes());
let posturl = get_posturl(&extra.postid, extra.page);
show_item = builder.show_item(&document, &posturl);
let w = terminal_size().unwrap().0 as usize; //rustbox.width();
status = format_status(status,
w,
&format!("[{}-{}:ROK][{}]",
show_item.url_query.message,
show_item.page,
is_web_requesting));
show.resetY(); // show.resetY();
print!("{}", termion::clear::All); // stdout.clear().unwrap(); // hkg::screen::common::clear(&rustbox);
state = Status::Show;
is_web_requesting = false;
},
ChannelItemType::Index(extra) => {
// Topic-list response: store the parsed topics and switch
// to the List screen.
let document = kuchiki::parse_html().from_utf8().one(item.result.as_bytes());
let url = get_topic_bw_url();
list_topic_items = builder.list_topic_items(&document);
let w = terminal_size().unwrap().0 as usize; //rustbox.width();
status = format_status(status,
w,
&format!("[TOPICS:ROK]"));
print!("{}", termion::clear::All); // stdout.clear().unwrap(); // hkg::screen::common::clear(&rustbox);
state = Status::List;
is_web_requesting = false;
}
}
}
Err(e) => {}
}
// Render the screen matching the current state.
match state {
Status::Startup => {
},
Status::List => {
// list.print(&title, &collection);
index.print(&mut stdout, &list_topic_items);
}
Status::Show => {
// show.print(&title, &show_item);
show.print(&mut stdout, &title, &show_item);
}
}
let w = terminal_size().unwrap().0;
// NOTE(review): this closure is dead code — its only callers are the
// commented-out lines immediately below.
let timeFormat = |t: time::Tm| {
match t.strftime("%Y%m%d%H%M") {
Ok(s) => s.to_string(),
Err(e) => panic!(e)
}
};
// let (time1, time2) = (timeFormat(time::now()), timeFormat(time::now()));
// status = format_status(status.clone(), w as usize, &format!("now: {:?} {:?}", time1, time2));
print_status(&mut stdout, &status); // print_status(&rustbox, &status);
stdout.flush().unwrap(); // rustbox.present();
// Only read keys while no request is in flight; stdin.keys() blocks,
// and each handled key `break`s out so the outer loop re-renders.
if !is_web_requesting {
let stdin = stdin();
for c in stdin.keys() {
// Clear on terminal-width change so stale rows do not linger.
if prev_width != terminal_size().unwrap().0 {
print!("{}", termion::clear::All); // stdout.clear().unwrap(); //hkg::screen::common::clear(&rustbox);
prev_width = terminal_size().unwrap().0;
}
let w = terminal_size().unwrap().0;
match c.unwrap() {
// q: restore the terminal and quit.
Key::Char('q') => {
print!("{}{}{}", termion::clear::All, style::Reset, termion::cursor::Show); // stdout.clear().unwrap();
return
},
// Enter: on the List screen, request page 1 of the selected topic.
Key::Char('\n') => {
// status = format_status(status, w as usize, &format!("ENTER"));
status = format_status(status, w as usize, "ENTER");
match state {
Status::Startup => {},
Status::List => {
let i = index.get_selected_topic();
if i > 0 {
// NOTE(review): assumes the selection index never exceeds
// list_topic_items.len(); out-of-range would panic here.
let topic_item = &list_topic_items[i - 1];
let postid = &topic_item.title.url_query.message;
let page = 1;
let status_message = show_page(&postid, page, &mut is_web_requesting, &tx_req);
status = format_status(status.clone(),
w as usize,
&get_show_page_status_message(postid, page, &status_message));
}
}
Status::Show => {}
}
break
},
Key::Alt(c) => {
status = format_status(status, w as usize, &format!("^{}", c));
break
},
Key::Ctrl(c) => {
status = format_status(status, w as usize, &format!("*{}", c));
break
},
// Left/Right: previous/next page of the open post.
Key::Left => {
status = format_status(status, w as usize, &format!("←"));
match state {
Status::Startup => {},
Status::List => {}
Status::Show => {
if show_item.page > 1 {
let postid = &show_item.url_query.message;
let page = &show_item.page - 1;
let status_message = show_page(&postid, page, &mut is_web_requesting, &tx_req);
status = format_status(status.clone(),
w as usize,
&get_show_page_status_message(postid, page, &status_message));
}
}
}
break
}
Key::Right => {
status = format_status(status, w as usize, &format!("→"));
match state {
Status::Startup => {},
Status::List => {}
Status::Show => {
if show_item.max_page > show_item.page {
let postid = &show_item.url_query.message;
let page = &show_item.page + 1;
let status_message = show_page(&postid, page, &mut is_web_requesting, &tx_req);
status = format_status(status.clone(),
w as usize,
&get_show_page_status_message(postid, page, &status_message));
}
}
}
break
},
// PageUp/PageDown: scroll the Show screen by a full body height.
Key::PageUp => {
status = format_status(status, w as usize, "↑");
match state {
Status::Startup => {},
Status::List => {
let tmp = index.get_selected_topic();
status = format_status(status, w as usize, &format!("{}", tmp));
if tmp > 1 {
index.select_topic(tmp - 1);
}
}
Status::Show => {
let bh = show.body_height();
if show.scrollUp(bh) {
print!("{}", termion::clear::All); // hkg::screen::common::clear(&rustbox);
}
}
}
break
},
Key::PageDown => {
status = format_status(status, w as usize, "↓");
match state {
Status::Startup => {},
Status::List => {}
Status::Show => {
let bh = show.body_height();
if show.scrollDown(bh) {
print!("{}", termion::clear::All); //hkg::screen::common::clear(&rustbox);
}
}
}
break
},
// Up/Down: move the List selection, or scroll Show by two rows.
Key::Up => {
status = format_status(status, w as usize, "↑");
match state {
Status::Startup => {},
Status::List => {
let tmp = index.get_selected_topic();
status = format_status(status, w as usize, &format!("{}", tmp));
if tmp > 1 {
index.select_topic(tmp - 1);
}
}
Status::Show => {
if show.scrollUp(2) {
print!("{}", termion::clear::All); // stdout.clear().unwrap(); // hkg::screen::common::clear(&rustbox);
}
}
}
break
},
Key::Down => {
status = format_status(status, w as usize, "↓");
match state {
Status::Startup => {},
Status::List => {
let tmp = index.get_selected_topic();
status = format_status(status, w as usize, &format!("{}", tmp));
if tmp < index.body_height() {
index.select_topic(tmp + 1);
}
}
Status::Show => {
if show.scrollDown(2) {
print!("{}", termion::clear::All); // stdout.clear().unwrap(); //hkg::screen::common::clear(&rustbox);
}
}
}
break
},
// Backspace: leave the Show screen, back to the List.
Key::Backspace => {
// status = format_status(status, w as usize, &format!("×"));
status = format_status(status, w as usize, "B");
match state {
Status::Startup => {},
Status::List => {}
Status::Show => {
state = Status::List;
print!("{}", termion::clear::All);
}
}
break
},
Key::Char(c) => { status = format_status(status, w as usize, &format!(" {}", c));break },
// Key::Invalid => {
// status = format_status(status, w as usize, &format!("???"));
// break
// },
_ => {},
}
}
}
}
}
// Read a previously cached page from `cache_path`/`file_name`.
// Returns the file contents, or the underlying I/O error as a string.
fn read_cache<P: AsRef<Path>, S: AsRef<Path>>(cache_path: P,
                                              file_name: S)
                                              -> Result<String, String> {
    let full_path = cache_path.as_ref().join(file_name);
    match File::open(full_path) {
        Ok(mut f) => {
            let mut body = String::new();
            match f.read_to_string(&mut body) {
                Ok(_) => Ok(body),
                Err(e) => Err(e.to_string()),
            }
        }
        Err(e) => Err(e.to_string()),
    }
}
// Persist `s` under `cache_path`/`file_name`, creating the directory
// tree first. Any I/O failure is reported as a string.
fn write_cache<P: AsRef<Path>, S: AsRef<Path>>(cache_path: P,
                                               file_name: S,
                                               s: String)
                                               -> Result<(), String> {
    let target = cache_path.as_ref().join(file_name);
    if let Err(e) = fs::create_dir_all(&cache_path) {
        return Err(e.to_string());
    }
    let mut out = match File::create(target) {
        Ok(f) => f,
        Err(e) => return Err(e.to_string()),
    };
    match out.write_all(s.as_bytes()) {
        Ok(()) => Ok(()),
        Err(e) => Err(e.to_string()),
    }
}
/// Build the absolute URL of one page of a forum post.
///
/// `postid` is the forum message id; `page` is the 1-based page number.
/// Takes `&str` (idiomatic; `&String` arguments still coerce), so the
/// call sites are unaffected.
fn get_posturl(postid: &str, page: usize) -> String {
    format!("http://forum1.hkgolden.com/view.aspx?type=BW&message={}&page={}",
            postid, page)
}
// URL of the black/white topic-list page.
fn get_topic_bw_url() -> String {
    String::from("http://forum1.hkgolden.com/topics_bw.htm")
}
/// Resolve one queued request: serve the page from the on-disk HTML
/// cache when present, otherwise fetch it over HTTP and cache the body.
/// Returns a `ChannelItem` carrying the raw HTML in `result`; the
/// `extra` tag mirrors the request so the UI loop can dispatch on it.
/// `ct` is currently unused here (cancellation is driven by the caller).
fn page_request(item: &ChannelItem,
                wr: &mut WebResource,
                ct: &CancellationTokenSource)
                -> ChannelItem {
    match item.extra.clone() {
        ChannelItemType::Show(extra) => {
            // Cache layout: data/html/<postid>/show_<page>.html
            let html_path = format!("data/html/{}/", extra.postid);
            let show_file_name = format!("show_{}.html", extra.page);
            let postid = extra.postid.clone();
            let (from_cache, result) = match read_cache(&html_path, &show_file_name) {
                Ok(cached) => (true, cached),
                Err(_) => {
                    // Cache miss: fetch the page over HTTP.
                    let posturl = get_posturl(&extra.postid, extra.page);
                    (false, wr.get(&posturl))
                }
            };
            if !from_cache {
                // Best-effort cache write; a failure only means the next
                // request re-downloads the page, so the error is dropped
                // deliberately (was an unused-Result warning before).
                let _ = write_cache(&html_path, &show_file_name, result.clone());
            }
            ChannelItem {
                extra: ChannelItemType::Show(ChannelShowItem { postid: postid, page: extra.page }),
                result: result,
            }
        },
        ChannelItemType::Index(_) => {
            // Topic lists are cached per minute: data/html/topics/<YYYYmmddHHMM>.html
            let time_format = |t: time::Tm| {
                match t.strftime("%Y%m%d%H%M") {
                    Ok(s) => s.to_string(),
                    // A bad format string is a programming bug, not a runtime
                    // condition — panic with the error's message.
                    Err(e) => panic!("{}", e),
                }
            };
            let time = time_format(time::now());
            let html_path = String::from("data/html/topics/");
            let file_name = format!("{}.html", time);
            let (from_cache, result) = match read_cache(&html_path, &file_name) {
                Ok(cached) => (true, cached),
                Err(_) => {
                    let url = get_topic_bw_url();
                    (false, wr.get(&url))
                }
            };
            if !from_cache {
                // Best-effort cache write, same rationale as above.
                let _ = write_cache(&html_path, &file_name, result.clone());
            }
            ChannelItem {
                extra: ChannelItemType::Index(ChannelIndexItem {}),
                result: result,
            }
        }
    }
}
// Queue a request for the topic-list page on the worker channel.
// Sets the in-flight flag on success; returns a short status tag
// ("SOK" or "SFAIL:<send error>") for the status bar.
fn list_page(is_web_requesting: &mut bool, tx_req: &Sender<ChannelItem>) -> String {
    let request = ChannelItem {
        extra: ChannelItemType::Index(ChannelIndexItem {}),
        result: String::new(),
    };
    match tx_req.send(request) {
        Ok(()) => {
            *is_web_requesting = true;
            String::from("SOK")
        }
        Err(e) => format!("SFAIL:{}", e),
    }
}
/// Queue a request for page `page` of post `postid` on the worker
/// channel. Sets the in-flight flag on success; returns "SOK" or
/// "SFAIL:<send error>" for the status bar.
fn show_page(postid: &String, page: usize, is_web_requesting: &mut bool, tx_req: &Sender<ChannelItem>) -> String {
    // (Removed a dead `get_posturl` call — its result was never used;
    // the worker builds the URL itself in `page_request`.)
    let ci = ChannelItem {
        extra: ChannelItemType::Show(ChannelShowItem { postid: postid.clone(), page: page }),
        result: String::from(""),
    };
    let status_message = match tx_req.send(ci) {
        Ok(()) => {
            *is_web_requesting = true;
            "SOK".to_string()
        }
        Err(e) => format!("{}:{}", "SFAIL", e),
    };
    status_message
}
/// Format the status-bar tag for a page request, e.g. "[6360604-2:SOK]".
/// Parameters widened from `&String` to `&str` (call sites coerce).
fn get_show_page_status_message(postid: &str, page: usize, status_message: &str) -> String {
    format!("[{}-{}:{}]", postid, page, status_message)
}
/// Draw `status` in bold white on the bottom terminal row and hide the
/// cursor. The caller flushes `stdout` afterwards.
fn print_status(stdout: &mut termion::raw::RawTerminal<std::io::StdoutLock>, status: &str) {
    // Only the height is needed to address the last row (the unused
    // width lookup was removed).
    let h = terminal_size().unwrap().1;
    write!(stdout,
           "{}{}{}{}{}{}",
           termion::cursor::Goto(1, h),
           color::Fg(color::White),
           style::Bold,
           status,
           style::Reset,
           termion::cursor::Hide)
        .unwrap(); // terminal write failure is fatal, matching flush().unwrap() in main
}
/// Append `s` to the running status line, resetting to the "> " prompt
/// once the accumulated line no longer fits the terminal width `w`.
///
/// NOTE(review): `len()` counts bytes, not display columns — multi-byte
/// text (e.g. "高登", arrow glyphs) trips the reset early; confirm that
/// is acceptable before changing the comparison.
fn format_status(status: String, w: usize, s: &str) -> String {
    if status.len() >= w {
        // Line full: start over from the prompt.
        format!("> {}", s)
    } else {
        format!("{}{}", status, s)
    }
}
// fn show_item_build_example(rustbox: &rustbox::RustBox, collection: &Vec<ListTopicItem>) {
//
// rustbox.print(1,
// 1,
// rustbox::RB_NORMAL,
// Color::White,
// Color::Black,
// &format!("before parse => {}", Local::now()));
//
// let mut builder = hkg::builder::Builder::new();
//
// let url = &collection[1].title.url;
// rustbox.print(1, 2, rustbox::RB_NORMAL, Color::White, Color::Black, url);
//
// let uqi = builder.url_query_item(&url);
// let postid = "6360604"; //uqi.message;
// let page = 1;
// let path = format!("data/html/{postid}/show_{page}.html",
// postid = postid,
// page = page);
//
// rustbox.print(1,
// 3,
// rustbox::RB_NORMAL,
// Color::White,
// Color::Black,
// &format!("path: {}", path));
//
// let show_item = match kuchiki::parse_html().from_utf8().from_file(&path) {
// Ok(document) => Some(builder.show_item(&document, &url)),
// Err(e) => None,
// };
//
// match show_item {
// Some(si) => {
//
// rustbox.print(1,
// 5,
// rustbox::RB_NORMAL,
// Color::White,
// Color::Black,
// &format!("url_query->message: {} title:{} reploy count: {} page: {} \
// max_page: {}",
// si.url_query.message,
// si.title,
// si.reply_count,
// si.page,
// si.max_page));
//
// for (index, item) in si.replies.iter().enumerate() {
// rustbox.print(1,
// index + 7,
// rustbox::RB_NORMAL,
// Color::White,
// Color::Black,
// &format!("{:<2}={:?}", index, item));
// }
// }
// _ => {}
// }
// }
|
#![allow(dead_code)]
extern crate time;
use std::os::{num_cpus};
use std::num::{Float};
use std::sync::{Arc};
use std::thread::{Thread};
use color::{ColorRGB};
use geometry::{Point3D, Vector3D, Direction3D, Ray3D};
use image::{PPMImage};
use material::{MaterialBuilder};
use scene::{Scene, SceneLight, Sphere, Plane, Camera};
use table::{Table, AsTable};
mod color;
mod geometry;
mod image;
mod material;
mod scene;
mod table;
static EXAMPLE_TO_RUN: u32 = 3;
fn main() {
let start = time::precise_time_ns();
let camera;
let mut scene;
let scene_setup_start = time::precise_time_ns();
if EXAMPLE_TO_RUN == 1 {
//----------------------------------------------------------------------
// Scratchapixel Tutorial
//----------------------------------------------------------------------
let background_color = ColorRGB::from_rgb(2.0, 2.0, 2.0);
scene = Scene::new(&background_color, 1.0, 5);
let ground_sphere = Box::new(Sphere::new(&Point3D::from_xyz(0.0, -10004.0, 20.0), 10000.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.20, 0.20, 0.20))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(0.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(ground_sphere);
let sphere1 = Box::new(Sphere::new(&Point3D::from_xyz(0.0, 0.0, 20.0), 4.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(1.00, 0.32, 0.36))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(1.0)
.refraction(0.5)
.refractive_index(1.1)
.to_material()
));
scene.add_object(sphere1);
let sphere2 = Box::new(Sphere::new(&Point3D::from_xyz(5.0, -1.0, 15.0), 2.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.90, 0.76, 0.46))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(1.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(sphere2);
let sphere3 = Box::new(Sphere::new(&Point3D::from_xyz(5.0, 0.0, 25.0), 3.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.65, 0.77, 0.97))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(1.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(sphere3);
let sphere4 = Box::new(Sphere::new(&Point3D::from_xyz(-5.5, 0.0, 15.0), 3.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.90, 0.90, 0.90))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(1.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(sphere4);
let light_source = Box::new(SceneLight::new(&Point3D::from_xyz(0.0, 20.0, 30.0), 3.0, &ColorRGB::from_rgb(3.0, 3.0, 3.0)));
scene.add_light_source(light_source);
let image_dimensions = (640, 480);
let field_of_view: f32 = 30.0;
camera = Camera::from_fov(image_dimensions, field_of_view, 1.0, Point3D::origin(), &Point3D::from_xyz(0.0, 0.0, 1.0));
} else if EXAMPLE_TO_RUN == 2 {
//----------------------------------------------------------------------
// flipcode Tutorial, version 1 & version 2
//----------------------------------------------------------------------
scene = Scene::new(ColorRGB::black(), 1.0, 5);
let ground_plane = Box::new(Plane::from_d_vector(4.4, &Vector3D::from_xyz(0.0, 1.0, 0.0), &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.4, 0.3, 0.3))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(0.0)
.to_material()
));
scene.add_object(ground_plane);
let big_sphere = Box::new(Sphere::new(&Point3D::from_xyz(1.0, -0.8, 3.0), 2.5, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.7, 0.7, 0.7))
.diffuse(0.2)
.specular(0.8)
.shininess(20)
.reflection(0.6)
.to_material()
));
scene.add_object(big_sphere);
let small_sphere = Box::new(Sphere::new(&Point3D::from_xyz(-5.5, -0.5, 7.0), 2.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.7, 0.7, 1.0))
.diffuse(0.1)
.specular(0.9)
.shininess(20)
.reflection(1.0)
.to_material()
));
scene.add_object(small_sphere);
let light_source1 = Box::new(SceneLight::new(&Point3D::from_xyz(0.0, 5.0, 5.0), 0.1, &ColorRGB::from_rgb(0.6, 0.6, 0.6)));
scene.add_light_source(light_source1);
let light_source2 = Box::new(SceneLight::new(&Point3D::from_xyz(2.0, 5.0, 1.0), 0.1, &ColorRGB::from_rgb(0.7, 0.7, 0.9)));
scene.add_light_source(light_source2);
let image_dimensions = (800, 600);
camera = Camera::from_dimensions(image_dimensions, (8.0, 6.0), 5.0, &Point3D::from_xyz(0.0, 0.0, -5.0), &Point3D::from_xyz(0.0, 0.0, 1.0));
} else {
//----------------------------------------------------------------------
// flipcode Tutorial, version 3
//----------------------------------------------------------------------
scene = Scene::new(ColorRGB::black(), 1.0, 5);
let ground_plane = Box::new(Plane::from_d_vector(4.4, &Vector3D::from_xyz(0.0, 1.0, 0.0), &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.4, 0.3, 0.3))
.diffuse(1.0)
.specular(0.8)
.shininess(20)
.reflection(0.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(ground_plane);
let big_sphere = Box::new(Sphere::new(&Point3D::from_xyz(2.0, 0.8, 3.0), 2.5, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.7, 0.7, 1.0))
.diffuse(0.2)
.specular(0.8)
.shininess(20)
.reflection(0.2)
.refraction(0.8)
.refractive_index(1.3)
.to_material()
));
scene.add_object(big_sphere);
let small_sphere = Box::new(Sphere::new(&Point3D::from_xyz(-5.5, -0.5, 7.0), 2.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.7, 0.7, 1.0))
.diffuse(0.1)
.specular(0.8)
.shininess(20)
.reflection(0.5)
.refraction(0.0)
.refractive_index(1.3)
.to_material()
));
scene.add_object(small_sphere);
let light_source1 = Box::new(SceneLight::new(&Point3D::from_xyz(0.0, 5.0, 5.0), 0.1, &ColorRGB::from_rgb(0.4, 0.4, 0.4)));
scene.add_light_source(light_source1);
let light_source2 = Box::new(SceneLight::new(&Point3D::from_xyz(-3.0, 5.0, 1.0), 0.1, &ColorRGB::from_rgb(0.6, 0.6, 0.8)));
scene.add_light_source(light_source2);
let extra_sphere = Box::new(Sphere::new(&Point3D::from_xyz(-1.5, -3.8, 1.0), 1.5, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(1.0, 0.4, 0.4))
.diffuse(0.2)
.specular(0.8)
.shininess(20)
.reflection(0.0)
.refraction(0.8)
.refractive_index(1.5)
.to_material()
));
scene.add_object(extra_sphere);
let back_plane = Box::new(Plane::from_d_vector(12.0, &Vector3D::from_xyz(0.4, 0.0, -1.0), &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.5, 0.3, 0.5))
.diffuse(0.6)
.specular(0.0)
.shininess(0)
.reflection(0.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(back_plane);
let ceiling_plane = Box::new(Plane::from_d_vector(7.4, &Vector3D::from_xyz(0.0, -1.0, 0.0), &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.4, 0.7, 0.7))
.diffuse(0.5)
.specular(0.0)
.shininess(0)
.reflection(0.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(ceiling_plane);
for x in 0..8 {
for y in 0..7 {
let grid_sphere = Box::new(Sphere::new(&Point3D::from_xyz(-4.5 + (x as f32) * 1.5, -4.3 + (y as f32) * 1.5, 10.0), 0.3, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.3, 1.0, 0.4))
.diffuse(0.6)
.specular(0.6)
.shininess(20)
.reflection(0.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(grid_sphere);
}
}
let image_dimensions = (800, 600);
camera = Camera::from_dimensions(image_dimensions, (8.0, 6.0), 5.0, &Point3D::from_xyz(0.0, 0.0, -5.0), &Point3D::from_xyz(0.0, 0.0, 1.0));
}
let scene_setup_end = time::precise_time_ns();
let elapsed = (scene_setup_end - scene_setup_start) / 1000000;
println!("Scene Setup : {}", elapsed);
let pixel_table = render(Arc::new(scene), Arc::new(camera));
let image_saving_start = time::precise_time_ns();
let image = PPMImage::new(format!("example{}.ppm", EXAMPLE_TO_RUN).as_slice());
let result = image.save(&pixel_table);
let image_saving_end = time::precise_time_ns();
let elapsed = (image_saving_end - image_saving_start) / 1000000;
println!("Image Saving : {}", elapsed);
match result {
Ok(_) => println!("Image rendered successfully"),
Err(e) => println!("Image rendering failed:\n{}", e)
}
//--------------------------------------------------------------------------
let end = time::precise_time_ns();
let elapsed = (end - start) / 1000000;
println!("Elapsed time: {}", elapsed);
}
fn render(scene: Arc<Scene>, camera: Arc<Camera>) -> Table<ColorRGB> {
let dimensions = camera.get_image_dimensions();
let (width, height) = dimensions;
let mut pixel_table = Table::from_elem(dimensions, *ColorRGB::black());
// Initial Pixel Coloring
// let intital_coloring_start = time::precise_time_ns();
// for (index, value) in pixel_table.iter_mut().enumerate_2d() {
// let ray = camera.get_primary_ray(index);
// let result = scene.trace(&ray, 0);
// let result_color = ColorRGB::from_rgb(
// result.color.red.min(1.0),
// result.color.green.min(1.0),
// result.color.blue.min(1.0)
// );
// *value = result_color;
// }
// let initial_coloring_end = time::precise_time_ns();
let thread_setup_start = time::precise_time_ns();
let num_threads = num_cpus();
let total_pixels = width * height;
let pixels_per_thread = if total_pixels % num_threads > 0 {
total_pixels / num_threads + 1
} else {
total_pixels / num_threads
};
// Initial Pixel Coloring
let initial_coloring_threads = (0..num_threads).map(|thread_index| {
let local_camera = camera.clone();
let local_scene = scene.clone();
Thread::scoped(move|| {
let start_index = pixels_per_thread * thread_index;
let num_pixels = if thread_index != num_threads-1 {
pixels_per_thread
} else {
total_pixels - start_index
};
let mut local_table = Vec::with_capacity(num_pixels);
for (index, _) in (0..num_pixels).as_table(dimensions).enumerate_2d_from_index(start_index) {
let ray = local_camera.get_primary_ray(index);
let result = local_scene.trace(&ray, 0);
let result_color = ColorRGB::from_rgb(
result.color.red.min(1.0),
result.color.green.min(1.0),
result.color.blue.min(1.0)
);
local_table.push(result_color);
}
local_table
})
}).collect::<Vec<_>>();
let thread_setup_end = time::precise_time_ns();
// Collect the colored pixels back into the original table.
let thread_waiting_start = time::precise_time_ns();
let initial_coloring = initial_coloring_threads.into_iter().flat_map(|f| {
match f.join() {
Ok(local_table) => local_table.into_iter(),
Err(e) => panic!(e)
}
}).collect::<Vec<_>>();
let thread_waiting_end = time::precise_time_ns();
let pixel_combining_start = time::precise_time_ns();
for (pixel, color) in pixel_table.iter_mut().zip(initial_coloring.iter()) {
*pixel = *color;
}
let pixel_combining_end = time::precise_time_ns();
// Edge Detection
let edge_detection_start = time::precise_time_ns();
let mut is_edge = Table::from_elem(dimensions, false);
for (index, value) in is_edge.iter_mut().enumerate_2d() {
let (row, column) = index;
if row != 0 && column != 0 && row != height-1 && column != width-1 {
let p1 = pixel_table[(row - 1, column - 1)];
let p2 = pixel_table[(row - 1, column)];
let p3 = pixel_table[(row - 1, column + 1)];
let p4 = pixel_table[(row, column - 1)];
let p6 = pixel_table[(row, column + 1)];
let p7 = pixel_table[(row + 1, column - 1)];
let p8 = pixel_table[(row + 1, column)];
let p9 = pixel_table[(row + 1, column + 1)];
let r = calculate_gradient(p1.red, p2.red, p3.red, p4.red, p6.red, p7.red, p8.red, p9.red);
let g = calculate_gradient(p1.green, p2.green, p3.green, p4.green, p6.green, p7.green, p8.green, p9.green);
let b = calculate_gradient(p1.blue, p2.blue, p3.blue, p4.blue, p6.blue, p7.blue, p8.blue, p9.blue);
if (r + b + g) > 0.5 {
*value = true;
} else {
*value = false;
}
}
}
let edge_detection_end = time::precise_time_ns();
// Anti-aliasing
let anti_aliasing_start = time::precise_time_ns();
let sub_width = 3;
let sub_height = 3;
let sub_size = (sub_width * sub_height) as f32;
let mut sub_rays = Table::from_elem((sub_width, sub_height), Ray3D::new(Point3D::origin(), Direction3D::unit_x()));
for (index, value) in pixel_table.iter_mut().enumerate_2d() {
if is_edge[index] {
let mut pixel_color = *ColorRGB::black();
camera.get_sub_rays(index, &mut sub_rays);
for sub_ray in sub_rays.iter() {
let result = scene.trace(sub_ray, 0);
pixel_color = ColorRGB::from_rgb(
pixel_color.red + result.color.red / sub_size,
pixel_color.green + result.color.green / sub_size,
pixel_color.blue + result.color.blue / sub_size
);
}
*value = pixel_color;
}
}
let anti_aliasing_end = time::precise_time_ns();
// let elapsed = (initial_coloring_end - intital_coloring_start) / 1000000;
// println!("Initial Coloring: {}", elapsed);
let elapsed = (thread_setup_end - thread_setup_start) / 1000000;
println!("Thread Setup : {}", elapsed);
let elapsed = (thread_waiting_end - thread_waiting_start) / 1000000;
println!("Thread Waiting : {}", elapsed);
let elapsed = (pixel_combining_end - pixel_combining_start) / 1000000;
println!("Pixel Combining : {}", elapsed);
let elapsed = (edge_detection_end - edge_detection_start) / 1000000;
println!("Edge Detection : {}", elapsed);
let elapsed = (anti_aliasing_end - anti_aliasing_start) / 1000000;
println!("Anti-aliasing : {}", elapsed);
pixel_table
}
fn calculate_gradient(p1: f32, p2: f32, p3: f32, p4: f32, p6: f32, p7: f32, p8: f32, p9: f32) -> f32
{
let gx = (p3 + 2.0 * p6 + p9) - (p1 + 2.0 * p4 + p7);
let gy = (p1 + 2.0 * p2 + p3) - (p7 + 2.0 * p8 + p9);
(gx*gx + gy*gy).sqrt()
}
Replaces a `mut Vec` with a call to `collect()`.
#![allow(dead_code)]
extern crate time;
use std::os::{num_cpus};
use std::num::{Float};
use std::sync::{Arc};
use std::thread::{Thread};
use color::{ColorRGB};
use geometry::{Point3D, Vector3D, Direction3D, Ray3D};
use image::{PPMImage};
use material::{MaterialBuilder};
use scene::{Scene, SceneLight, Sphere, Plane, Camera};
use table::{Table, AsTable};
mod color;
mod geometry;
mod image;
mod material;
mod scene;
mod table;
static EXAMPLE_TO_RUN: u32 = 3;
fn main() {
let start = time::precise_time_ns();
let camera;
let mut scene;
let scene_setup_start = time::precise_time_ns();
if EXAMPLE_TO_RUN == 1 {
//----------------------------------------------------------------------
// Scratchapixel Tutorial
//----------------------------------------------------------------------
let background_color = ColorRGB::from_rgb(2.0, 2.0, 2.0);
scene = Scene::new(&background_color, 1.0, 5);
let ground_sphere = Box::new(Sphere::new(&Point3D::from_xyz(0.0, -10004.0, 20.0), 10000.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.20, 0.20, 0.20))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(0.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(ground_sphere);
let sphere1 = Box::new(Sphere::new(&Point3D::from_xyz(0.0, 0.0, 20.0), 4.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(1.00, 0.32, 0.36))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(1.0)
.refraction(0.5)
.refractive_index(1.1)
.to_material()
));
scene.add_object(sphere1);
let sphere2 = Box::new(Sphere::new(&Point3D::from_xyz(5.0, -1.0, 15.0), 2.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.90, 0.76, 0.46))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(1.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(sphere2);
let sphere3 = Box::new(Sphere::new(&Point3D::from_xyz(5.0, 0.0, 25.0), 3.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.65, 0.77, 0.97))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(1.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(sphere3);
let sphere4 = Box::new(Sphere::new(&Point3D::from_xyz(-5.5, 0.0, 15.0), 3.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.90, 0.90, 0.90))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(1.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(sphere4);
let light_source = Box::new(SceneLight::new(&Point3D::from_xyz(0.0, 20.0, 30.0), 3.0, &ColorRGB::from_rgb(3.0, 3.0, 3.0)));
scene.add_light_source(light_source);
let image_dimensions = (640, 480);
let field_of_view: f32 = 30.0;
camera = Camera::from_fov(image_dimensions, field_of_view, 1.0, Point3D::origin(), &Point3D::from_xyz(0.0, 0.0, 1.0));
} else if EXAMPLE_TO_RUN == 2 {
//----------------------------------------------------------------------
// flipcode Tutorial, version 1 & version 2
//----------------------------------------------------------------------
scene = Scene::new(ColorRGB::black(), 1.0, 5);
let ground_plane = Box::new(Plane::from_d_vector(4.4, &Vector3D::from_xyz(0.0, 1.0, 0.0), &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.4, 0.3, 0.3))
.diffuse(1.0)
.specular(0.0)
.shininess(0)
.reflection(0.0)
.to_material()
));
scene.add_object(ground_plane);
let big_sphere = Box::new(Sphere::new(&Point3D::from_xyz(1.0, -0.8, 3.0), 2.5, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.7, 0.7, 0.7))
.diffuse(0.2)
.specular(0.8)
.shininess(20)
.reflection(0.6)
.to_material()
));
scene.add_object(big_sphere);
let small_sphere = Box::new(Sphere::new(&Point3D::from_xyz(-5.5, -0.5, 7.0), 2.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.7, 0.7, 1.0))
.diffuse(0.1)
.specular(0.9)
.shininess(20)
.reflection(1.0)
.to_material()
));
scene.add_object(small_sphere);
let light_source1 = Box::new(SceneLight::new(&Point3D::from_xyz(0.0, 5.0, 5.0), 0.1, &ColorRGB::from_rgb(0.6, 0.6, 0.6)));
scene.add_light_source(light_source1);
let light_source2 = Box::new(SceneLight::new(&Point3D::from_xyz(2.0, 5.0, 1.0), 0.1, &ColorRGB::from_rgb(0.7, 0.7, 0.9)));
scene.add_light_source(light_source2);
let image_dimensions = (800, 600);
camera = Camera::from_dimensions(image_dimensions, (8.0, 6.0), 5.0, &Point3D::from_xyz(0.0, 0.0, -5.0), &Point3D::from_xyz(0.0, 0.0, 1.0));
} else {
//----------------------------------------------------------------------
// flipcode Tutorial, version 3
//----------------------------------------------------------------------
scene = Scene::new(ColorRGB::black(), 1.0, 5);
let ground_plane = Box::new(Plane::from_d_vector(4.4, &Vector3D::from_xyz(0.0, 1.0, 0.0), &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.4, 0.3, 0.3))
.diffuse(1.0)
.specular(0.8)
.shininess(20)
.reflection(0.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(ground_plane);
let big_sphere = Box::new(Sphere::new(&Point3D::from_xyz(2.0, 0.8, 3.0), 2.5, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.7, 0.7, 1.0))
.diffuse(0.2)
.specular(0.8)
.shininess(20)
.reflection(0.2)
.refraction(0.8)
.refractive_index(1.3)
.to_material()
));
scene.add_object(big_sphere);
let small_sphere = Box::new(Sphere::new(&Point3D::from_xyz(-5.5, -0.5, 7.0), 2.0, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.7, 0.7, 1.0))
.diffuse(0.1)
.specular(0.8)
.shininess(20)
.reflection(0.5)
.refraction(0.0)
.refractive_index(1.3)
.to_material()
));
scene.add_object(small_sphere);
let light_source1 = Box::new(SceneLight::new(&Point3D::from_xyz(0.0, 5.0, 5.0), 0.1, &ColorRGB::from_rgb(0.4, 0.4, 0.4)));
scene.add_light_source(light_source1);
let light_source2 = Box::new(SceneLight::new(&Point3D::from_xyz(-3.0, 5.0, 1.0), 0.1, &ColorRGB::from_rgb(0.6, 0.6, 0.8)));
scene.add_light_source(light_source2);
let extra_sphere = Box::new(Sphere::new(&Point3D::from_xyz(-1.5, -3.8, 1.0), 1.5, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(1.0, 0.4, 0.4))
.diffuse(0.2)
.specular(0.8)
.shininess(20)
.reflection(0.0)
.refraction(0.8)
.refractive_index(1.5)
.to_material()
));
scene.add_object(extra_sphere);
let back_plane = Box::new(Plane::from_d_vector(12.0, &Vector3D::from_xyz(0.4, 0.0, -1.0), &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.5, 0.3, 0.5))
.diffuse(0.6)
.specular(0.0)
.shininess(0)
.reflection(0.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(back_plane);
let ceiling_plane = Box::new(Plane::from_d_vector(7.4, &Vector3D::from_xyz(0.0, -1.0, 0.0), &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.4, 0.7, 0.7))
.diffuse(0.5)
.specular(0.0)
.shininess(0)
.reflection(0.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(ceiling_plane);
for x in 0..8 {
for y in 0..7 {
let grid_sphere = Box::new(Sphere::new(&Point3D::from_xyz(-4.5 + (x as f32) * 1.5, -4.3 + (y as f32) * 1.5, 10.0), 0.3, &MaterialBuilder::new()
.color(&ColorRGB::from_rgb(0.3, 1.0, 0.4))
.diffuse(0.6)
.specular(0.6)
.shininess(20)
.reflection(0.0)
.refraction(0.0)
.refractive_index(0.0)
.to_material()
));
scene.add_object(grid_sphere);
}
}
let image_dimensions = (800, 600);
camera = Camera::from_dimensions(image_dimensions, (8.0, 6.0), 5.0, &Point3D::from_xyz(0.0, 0.0, -5.0), &Point3D::from_xyz(0.0, 0.0, 1.0));
}
let scene_setup_end = time::precise_time_ns();
let elapsed = (scene_setup_end - scene_setup_start) / 1000000;
println!("Scene Setup : {}", elapsed);
let pixel_table = render(Arc::new(scene), Arc::new(camera));
let image_saving_start = time::precise_time_ns();
let image = PPMImage::new(format!("example{}.ppm", EXAMPLE_TO_RUN).as_slice());
let result = image.save(&pixel_table);
let image_saving_end = time::precise_time_ns();
let elapsed = (image_saving_end - image_saving_start) / 1000000;
println!("Image Saving : {}", elapsed);
match result {
Ok(_) => println!("Image rendered successfully"),
Err(e) => println!("Image rendering failed:\n{}", e)
}
//--------------------------------------------------------------------------
let end = time::precise_time_ns();
let elapsed = (end - start) / 1000000;
println!("Elapsed time: {}", elapsed);
}
fn render(scene: Arc<Scene>, camera: Arc<Camera>) -> Table<ColorRGB> {
let dimensions = camera.get_image_dimensions();
let (width, height) = dimensions;
let mut pixel_table = Table::from_elem(dimensions, *ColorRGB::black());
// Initial Pixel Coloring
// let intital_coloring_start = time::precise_time_ns();
// for (index, value) in pixel_table.iter_mut().enumerate_2d() {
// let ray = camera.get_primary_ray(index);
// let result = scene.trace(&ray, 0);
// let result_color = ColorRGB::from_rgb(
// result.color.red.min(1.0),
// result.color.green.min(1.0),
// result.color.blue.min(1.0)
// );
// *value = result_color;
// }
// let initial_coloring_end = time::precise_time_ns();
let thread_setup_start = time::precise_time_ns();
let num_threads = num_cpus();
let total_pixels = width * height;
let pixels_per_thread = if total_pixels % num_threads > 0 {
total_pixels / num_threads + 1
} else {
total_pixels / num_threads
};
// Initial Pixel Coloring
let initial_coloring_threads = (0..num_threads).map(|thread_index| {
let local_camera = camera.clone();
let local_scene = scene.clone();
Thread::scoped(move|| {
let start_index = pixels_per_thread * thread_index;
let num_pixels = if thread_index != num_threads-1 {
pixels_per_thread
} else {
total_pixels - start_index
};
(0..num_pixels).as_table(dimensions).enumerate_2d_from_index(start_index).map(|(index, _)| {
let ray = local_camera.get_primary_ray(index);
let result = local_scene.trace(&ray, 0);
ColorRGB::from_rgb(
result.color.red.min(1.0),
result.color.green.min(1.0),
result.color.blue.min(1.0)
)
}).collect::<Vec<_>>()
})
}).collect::<Vec<_>>();
let thread_setup_end = time::precise_time_ns();
let thread_waiting_start = time::precise_time_ns();
let initial_coloring = initial_coloring_threads.into_iter().flat_map(|f| {
match f.join() {
Ok(local_table) => local_table.into_iter(),
Err(e) => panic!(e)
}
}).collect::<Vec<_>>();
let thread_waiting_end = time::precise_time_ns();
// Collect the colored pixels back into the original table.
let pixel_combining_start = time::precise_time_ns();
for (pixel, color) in pixel_table.iter_mut().zip(initial_coloring.iter()) {
*pixel = *color;
}
let pixel_combining_end = time::precise_time_ns();
// Edge Detection
let edge_detection_start = time::precise_time_ns();
let mut is_edge = Table::from_elem(dimensions, false);
for (index, value) in is_edge.iter_mut().enumerate_2d() {
let (row, column) = index;
if row != 0 && column != 0 && row != height-1 && column != width-1 {
let p1 = pixel_table[(row - 1, column - 1)];
let p2 = pixel_table[(row - 1, column)];
let p3 = pixel_table[(row - 1, column + 1)];
let p4 = pixel_table[(row, column - 1)];
let p6 = pixel_table[(row, column + 1)];
let p7 = pixel_table[(row + 1, column - 1)];
let p8 = pixel_table[(row + 1, column)];
let p9 = pixel_table[(row + 1, column + 1)];
let r = calculate_gradient(p1.red, p2.red, p3.red, p4.red, p6.red, p7.red, p8.red, p9.red);
let g = calculate_gradient(p1.green, p2.green, p3.green, p4.green, p6.green, p7.green, p8.green, p9.green);
let b = calculate_gradient(p1.blue, p2.blue, p3.blue, p4.blue, p6.blue, p7.blue, p8.blue, p9.blue);
if (r + b + g) > 0.5 {
*value = true;
} else {
*value = false;
}
}
}
let edge_detection_end = time::precise_time_ns();
// Anti-aliasing
let anti_aliasing_start = time::precise_time_ns();
let sub_width = 3;
let sub_height = 3;
let sub_size = (sub_width * sub_height) as f32;
let mut sub_rays = Table::from_elem((sub_width, sub_height), Ray3D::new(Point3D::origin(), Direction3D::unit_x()));
for (index, value) in pixel_table.iter_mut().enumerate_2d() {
if is_edge[index] {
let mut pixel_color = *ColorRGB::black();
camera.get_sub_rays(index, &mut sub_rays);
for sub_ray in sub_rays.iter() {
let result = scene.trace(sub_ray, 0);
pixel_color = ColorRGB::from_rgb(
pixel_color.red + result.color.red / sub_size,
pixel_color.green + result.color.green / sub_size,
pixel_color.blue + result.color.blue / sub_size
);
}
*value = pixel_color;
}
}
let anti_aliasing_end = time::precise_time_ns();
// let elapsed = (initial_coloring_end - intital_coloring_start) / 1000000;
// println!("Initial Coloring: {}", elapsed);
let elapsed = (thread_setup_end - thread_setup_start) / 1000000;
println!("Thread Setup : {}", elapsed);
let elapsed = (thread_waiting_end - thread_waiting_start) / 1000000;
println!("Thread Waiting : {}", elapsed);
let elapsed = (pixel_combining_end - pixel_combining_start) / 1000000;
println!("Pixel Combining : {}", elapsed);
let elapsed = (edge_detection_end - edge_detection_start) / 1000000;
println!("Edge Detection : {}", elapsed);
let elapsed = (anti_aliasing_end - anti_aliasing_start) / 1000000;
println!("Anti-aliasing : {}", elapsed);
pixel_table
}
fn calculate_gradient(p1: f32, p2: f32, p3: f32, p4: f32, p6: f32, p7: f32, p8: f32, p9: f32) -> f32
{
let gx = (p3 + 2.0 * p6 + p9) - (p1 + 2.0 * p4 + p7);
let gy = (p1 + 2.0 * p2 + p3) - (p7 + 2.0 * p8 + p9);
(gx*gx + gy*gy).sqrt()
}
|
extern crate delta_l;
extern crate getopts;
pub mod utils;
mod chatter;
use chatter::{Chatter, Flags};
pub use chatter::Colour::*;
use std::env;
use std::io;
use getopts::Options;
const HELLO: &'static str = "Welcome to gochatde an encrypted terminal chat client using delta-l encryption.";
const USAGE: &'static str = r"
USAGE:
chatde-rs [OPTIONS] ip[:PORT]
";
fn main() {
let mut options = Options::new();
options.optflag("", "color", "Use colour.");
options.optflag("z", "gzip", "Use compression.");
options.optflag("c", "checksum", "Don't use checksum.");
options.optflag("D", "debug", "Enable debug info");
options.optopt("p", "passphrase", "Use a passphrase", "PASSPHRASE");
let matches = options.parse(env::args().skip(1)).unwrap();
let pass = matches.opt_str("passphrase").unwrap_or("".to_owned());
let chatter = Chatter{flags: Flags{
use_colour : matches.opt_present("color"),
use_compress: matches.opt_present("gzip"),
use_checksum: !matches.opt_present("checksum"),
debug : matches.opt_present("debug"),
}};
let args = matches.free;
if chatter.flags.use_compress{
chatter.set_colour(YellowSlashBrown).unwrap_or(());
println!("WARN: Compression hasn't been implemented yet! See issue #1");
chatter.reset_colour().unwrap_or(());
}
if args.len() == 1{
let ip = utils::parse_addr(&args[0]).unwrap();
println!("{}", HELLO);
match chatter.chat_mode(ip, &pass){
Ok(()) => {
chatter.set_colour(Blue).unwrap_or(());
println!("BYE!");
chatter.reset_colour().unwrap_or(());
},
Err(e) => return handle_error(chatter, e)
}
}else{
chatter.set_colour(Red).unwrap_or(());
println!("Incorrect uasge!\n");
print!("{}", USAGE);
chatter.reset_colour().unwrap_or(());
}
}
fn handle_error(chatter: Chatter, err: io::Error){
chatter.set_colour(Red).unwrap_or(());
match err.kind(){
_ => println!("Unexpected error occured: {:?}", err)
}
chatter.reset_colour().unwrap_or(());
}
Fix error when too many free arguments are given
extern crate delta_l;
extern crate getopts;
pub mod utils;
mod chatter;
use chatter::{Chatter, Flags};
pub use chatter::Colour::*;
use std::env;
use std::io;
use getopts::Options;
const HELLO: &'static str = "Welcome to gochatde an encrypted terminal chat client using delta-l encryption.";
const USAGE: &'static str = r"
USAGE:
chatde-rs [OPTIONS] ip[:PORT]
";
fn main() {
let mut options = Options::new();
options.optflag("", "color", "Use colour.");
options.optflag("z", "gzip", "Use compression.");
options.optflag("c", "checksum", "Don't use checksum.");
options.optflag("D", "debug", "Enable debug info");
options.optopt("p", "passphrase", "Use a passphrase", "PASSPHRASE");
let matches = match options.parse(env::args().skip(1)){
Ok(m) => m,
Err(_) => return incorrect_usage()
};
let pass = matches.opt_str("passphrase").unwrap_or("".to_owned());
let chatter = Chatter{flags: Flags{
use_colour : matches.opt_present("color"),
use_compress: matches.opt_present("gzip"),
use_checksum: !matches.opt_present("checksum"),
debug : matches.opt_present("debug"),
}};
let args = matches.free;
if chatter.flags.use_compress{
chatter.set_colour(YellowSlashBrown).unwrap_or(());
println!("WARN: Compression hasn't been implemented yet! See issue #1");
chatter.reset_colour().unwrap_or(());
}
if args.len() == 1{
let ip = utils::parse_addr(&args[0]).unwrap();
println!("{}", HELLO);
match chatter.chat_mode(ip, &pass){
Ok(()) => {
chatter.set_colour(Blue).unwrap_or(());
println!("BYE!");
chatter.reset_colour().unwrap_or(());
},
Err(e) => handle_error(chatter, e)
}
}else{
incorrect_usage();
}
}
fn incorrect_usage(){
println!("Incorrect uasge!\n");
print!("{}", USAGE);
}
fn handle_error(chatter: Chatter, err: io::Error) -> !{
chatter.set_colour(Red).unwrap_or(());
match err.kind(){
_ => println!("Unexpected error occured: {:?}", err)
}
chatter.reset_colour().unwrap_or(());
std::process::exit(1);
}
|
extern crate discord;
extern crate regex;
use std::collections::{HashSet, HashMap};
use std::env;
use std::time::{Duration, Instant};
use discord::Discord;
use discord::model::{Event, ChannelId, Message, UserId};
use regex::Regex;
const TIMEOUT: u64 = 5 * 60; // 5 minutes
fn handle_message(message: Message,
greps: &mut Vec<(Regex, UserId)>,
timeouts: &mut HashMap<(UserId, ChannelId), Instant>)
-> Option<String> {
let channel = message.channel_id;
let content = message.content;
let author = message.author;
if author.bot {
return None;
}
if content == "!grephelp" {
Some(include_str!("help.md").into())
} else if content.starts_with("!grep") {
Some(match Regex::new(&content[6..]) {
Ok(regex) => {
if greps.iter()
.any(|&(ref regex, id)| id == author.id && regex.as_str() == &content[6..]) {
"Regex already exists".into()
} else {
greps.push((regex, author.id));
"Regex Added".into()
}
}
Err(error) => format!("Invalid regex. {}", error),
})
} else if content.starts_with("!ungrep") {
let mut removals = false;
greps.retain(|&(ref regex, id)| {
if id == author.id && regex.as_str() == &content[8..] {
removals = true;
false
} else {
true
}
});
if removals {
Some(format!("Refex {} removed", &content[8..]))
} else {
Some(format!("Regex {} was not found", &content[8..]))
}
} else if content == "!mygreps" {
Some(greps.iter()
.filter(|&&(_, id)| id == author.id)
.map(|&(ref regex, _)| regex)
.fold(String::new(),
|string, regex| format!("{}\n{}", string, regex)))
} else {
let users: HashSet<_> = greps.iter()
.filter(|&&(ref regex, _)| regex.is_match(&content))
.map(|&(_, id)| id)
.filter(|&id| id != author.id)
.filter(|&id| match timeouts.get(&(id, channel)) {
Some(instant) => instant.elapsed() > Duration::from_secs(TIMEOUT),
None => true,
})
.collect();
if !users.is_empty() {
Some(users.into_iter()
.inspect(|&id| {
timeouts.insert((id, channel), Instant::now());
})
.fold("Hey!".into(),
|string, id| format!("{} {}", string, id.mention())))
} else {
None
}
}
}
fn main() {
// state
let mut greps = Vec::new();
let mut timeouts = HashMap::new();
// api
let discord = Discord::from_bot_token(&env::var("DISCORD_BOT_TOKEN")
.expect("DISCORD_BOT_TOKEN not set"))
.expect("Login Failed");
let mut connection = match discord.connect() {
Ok((connection, _)) => connection,
Err(e) => panic!("Unable to connect to discord API: {}", e),
};
// generic fun stuff
connection.set_game_name("Talk to me with !grephelp".to_string());
// main loop time
while let Ok(event) = connection.recv_event() {
if let Event::MessageCreate(message) = event {
let channel = message.channel_id;
if let Some(content) = handle_message(message, &mut greps, &mut timeouts) {
let _ = discord.send_message(channel, &content, "", false);
}
}
}
}
refactored to remove hardcoded string indexes, and to be safer around poorly formatted input
extern crate discord;
extern crate regex;
use std::collections::{HashSet, HashMap};
use std::env;
use std::time::{Duration, Instant};
use discord::Discord;
use discord::model::{Event, ChannelId, Message, UserId};
use regex::Regex;
const TIMEOUT: u64 = 5 * 60; // 5 minutes
/// Dispatches one incoming message and returns the reply to send, if any.
///
/// Commands (all scoped to the message author):
/// * `!grephelp`  — returns the bundled help text.
/// * `!mygreps`   — lists the author's registered patterns.
/// * `!grep P`    — registers regex `P` for the author.
/// * `!ungrep P`  — removes the author's previously registered regex `P`.
/// Any other content is matched against all registered patterns; the owners
/// of matching patterns are mentioned, rate-limited per (user, channel).
fn handle_message(message: Message,
                  greps: &mut Vec<(Regex, UserId)>,
                  timeouts: &mut HashMap<(UserId, ChannelId), Instant>)
                  -> Option<String> {
    let channel = message.channel_id;
    let content = message.content;
    let author = message.author;
    // Never react to other bots (avoids feedback loops).
    if author.bot {
        return None;
    }
    if content == "!grephelp" {
        // Help text is compiled into the binary.
        Some(include_str!("help.md").into())
    } else if content == "!mygreps" {
        // The author's own patterns, one per line (leading newline included).
        Some(greps.iter()
            .filter(|&&(_, id)| id == author.id)
            .map(|&(ref regex, _)| regex)
            .fold(String::new(),
                  |string, regex| format!("{}\n{}", string, regex)))
    } else if content.starts_with("!grep ") {
        // splitn(2, ' ') keeps the full remainder (spaces included) as the
        // pattern; nth(1) is None only for a bare "!grep " with no text.
        content.splitn(2, ' ').nth(1).map(|pattern| match Regex::new(pattern) {
            Ok(regex) => {
                // Reject duplicates (same source text, same owner).
                if greps.iter()
                    .any(|&(ref regex, id)| id == author.id && regex.as_str() == pattern) {
                    "Regex already exists".into()
                } else {
                    greps.push((regex, author.id));
                    "Regex added".into()
                }
            }
            Err(error) => format!("Invalid regex. {}", error),
        })
    } else if content.starts_with("!ungrep ") {
        content.splitn(2, ' ').nth(1).map(|pattern| {
            // retain() drops every entry owned by the author whose source
            // text matches, recording whether anything was removed.
            let mut removals = false;
            greps.retain(|&(ref regex, id)| {
                if id == author.id && regex.as_str() == pattern {
                    removals = true;
                    false
                } else {
                    true
                }
            });
            if removals {
                format!("Regex {} removed", pattern)
            } else {
                format!("Regex {} was not found", pattern)
            }
        })
    } else {
        // Plain message: find owners of matching patterns. The author is
        // excluded, and users already pinged in this channel within TIMEOUT
        // seconds are skipped.
        let users: HashSet<_> = greps.iter()
            .filter(|&&(ref regex, _)| regex.is_match(&content))
            .map(|&(_, id)| id)
            .filter(|&id| id != author.id)
            .filter(|&id| match timeouts.get(&(id, channel)) {
                Some(instant) => instant.elapsed() > Duration::from_secs(TIMEOUT),
                None => true,
            })
            .collect();
        if !users.is_empty() {
            // Mention everyone and refresh their rate-limit timestamps.
            Some(users.into_iter()
                .inspect(|&id| {
                    timeouts.insert((id, channel), Instant::now());
                })
                .fold("Hey!".into(),
                      |string, id| format!("{} {}", string, id.mention())))
        } else {
            None
        }
    }
}
/// Entry point: connects to Discord and feeds every incoming message through
/// `handle_message`, sending back any reply it produces.
fn main() {
    // state
    let mut greps = Vec::new();        // registered (regex, owner) pairs
    let mut timeouts = HashMap::new(); // last mention time per (user, channel)
    // api
    let discord = Discord::from_bot_token(&env::var("DISCORD_BOT_TOKEN")
            .expect("DISCORD_BOT_TOKEN not set"))
        .expect("Login Failed");
    let mut connection = match discord.connect() {
        Ok((connection, _)) => connection,
        Err(e) => panic!("Unable to connect to discord API: {}", e),
    };
    // generic fun stuff
    connection.set_game_name("Talk to me with !grephelp".to_string());
    // main loop time
    while let Ok(event) = connection.recv_event() {
        if let Event::MessageCreate(message) = event {
            // Remember the channel before the message is moved into the handler.
            let channel = message.channel_id;
            if let Some(content) = handle_message(message, &mut greps, &mut timeouts) {
                // Send failures are deliberately ignored (best effort).
                let _ = discord.send_message(channel, &content, "", false);
            }
        }
    }
}
|
#![allow(dead_code)]
#![allow(unused_variables)]
#![feature(custom_attribute)]
#![feature(custom_derive, plugin)]
#![plugin(serde_macros)]
extern crate hyper;
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate serde;
extern crate serde_json;
extern crate clap;
mod card;
mod player;
mod game_status;
mod deal;
mod card_strategy;
mod try_from;
mod error;
use player::Player;
use player::PlayerName;
use player::Password;
use card_strategy::DefensiveCardStrategy;
use clap::App;
#[allow(dead_code)]
fn main() {
    // Logging must be ready before anything else emits records.
    env_logger::init().unwrap();

    // Describe and parse the command line.
    let matches = App::new("hearts")
        .version("0.0.1")
        .author("Derek Williams <derek@nebvin.ca>")
        .about("Plays hearts for RBS Code Comp Nov 2015")
        .args_from_usage(
            "-u --user=<USER> 'Sets the player name'
             -p --password=<PASSWORD> 'Sets the player password'
             -s --server=<SERVER> 'Sets the hearts server'")
        .get_matches();

    // All three options are declared required (<...>), so clap guarantees
    // values are present and these unwraps cannot fail.
    let name = PlayerName::new(matches.value_of("USER").unwrap());
    let pass = Password::new(matches.value_of("PASSWORD").unwrap());
    let host = matches.value_of("SERVER").unwrap();

    info!("Start Game");
    Player::new(name, pass, host, DefensiveCardStrategy).play();
}
// NOTE(review): never constructed anywhere in this file; `main` reads its
// configuration from clap instead. Presumably meant to hold the same three
// values (-u/-p/-s) — confirm before removing.
struct Settings {
    username: String,
    password: String,
    hostname: String,
}
Remove the unused Settings struct.
#![allow(dead_code)]
#![allow(unused_variables)]
#![feature(custom_attribute)]
#![feature(custom_derive, plugin)]
#![plugin(serde_macros)]
extern crate hyper;
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate serde;
extern crate serde_json;
extern crate clap;
mod card;
mod player;
mod game_status;
mod deal;
mod card_strategy;
mod try_from;
mod error;
use player::Player;
use player::PlayerName;
use player::Password;
use card_strategy::DefensiveCardStrategy;
use clap::App;
#[allow(dead_code)]
/// Entry point: parses the required -u/-p/-s options, then starts a player
/// using the defensive card strategy.
fn main() {
    // Logging must be initialized before any log macros run.
    env_logger::init().unwrap();
    let cli_options = App::new("hearts")
        .version("0.0.1")
        .author("Derek Williams <derek@nebvin.ca>")
        .about("Plays hearts for RBS Code Comp Nov 2015")
        .args_from_usage(
            "-u --user=<USER> 'Sets the player name'
             -p --password=<PASSWORD> 'Sets the player password'
             -s --server=<SERVER> 'Sets the hearts server'")
        .get_matches();
    // All three options are declared required (<...>), so clap guarantees
    // values are present and the unwraps cannot fail.
    let player_name = PlayerName::new(cli_options.value_of("USER").unwrap());
    let password = Password::new(cli_options.value_of("PASSWORD").unwrap());
    let server = cli_options.value_of("SERVER").unwrap();
    info!("Start Game");
    // Settings.init();
    let player = Player::new(player_name, password, server, DefensiveCardStrategy);
    player.play();
}
|
use std::env;
use std::fs;
use std::io::{self, ErrorKind, Read, BufWriter};
use std::path::Path;
use std::process::exit;
use std::time::{Duration, Instant};
use getopts::Options;
use ppbert::prelude::*;
use ppbert::parsers::*;
use ppbert::pp::*;
const PROG_NAME: &str = "ppbert";
/// How the parser for an input file is selected.
#[derive(Clone, Copy)]
enum ParserChoice {
    ByExtension,  // choose by file extension (default)
    ForceBert1,   // --bert1
    ForceBert2,   // --bert2
    ForceDiskLog, // --disk-log
}
/// Reads option `opt` as a usize, falling back to `default` when absent;
/// exits the process with an error message when the value is not a number.
fn opt_usize(m: &getopts::Matches, opt: &str, default: usize) -> usize {
    m.opt_get_default(opt, default).unwrap_or_else(|_| {
        eprintln!("'{}' must be a number", opt);
        exit(1)
    })
}
/// Entry point: parses command-line flags, then parses and pretty-prints
/// every input file (stdin when none is given).
fn main() {
    let mut opts = Options::new();
    opts.optflag("V", "version", "display version");
    opts.optflag("h", "help", "display this help");
    opts.optopt("i", "indent", "indent with NUM spaces", "NUM");
    opts.optopt("m", "per-line", "print at most NUM basic terms per line", "NUM");
    opts.optflag("p", "parse", "parse only, not pretty print");
    opts.optflag("1", "bert1", "force ppbert to use regular BERT parser");
    opts.optflag("2", "bert2", "force ppbert to use BERT2 parser");
    opts.optflag("d", "disk-log", "force ppbert to use DiskLog parser");
    opts.optflag("v", "verbose", "show diagnostics on stderr");
    opts.optflag("j", "json", "print as JSON");
    opts.optflag("t", "transform-proplists", "convert proplists to JSON objects");
    let mut matches = match opts.parse(env::args().skip(1)) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}: {}", PROG_NAME, e);
            eprintln!("{}", opts.usage(&format!("{} {}", PROG_NAME, VERSION)));
            exit(1);
        }
    };
    if matches.opt_present("help") {
        println!("{}", opts.usage(&format!("{} {}", PROG_NAME, VERSION)));
        exit(0);
    }
    if matches.opt_present("version") {
        println!("{} {}", PROG_NAME, VERSION);
        exit(0);
    }
    // If no files to process, use stdin.
    if matches.free.is_empty() {
        matches.free.push("-".to_owned());
    }
    let indent_width = opt_usize(&matches, "indent", 2);
    let max_per_line = opt_usize(&matches, "per-line", 6);
    let parse_only = matches.opt_present("parse");
    let json = matches.opt_present("json");
    let transform_proplists = matches.opt_present("transform-proplists");
    let verbose = matches.opt_present("verbose");
    // Explicit parser flags override extension-based detection.
    let parser_choice =
        if matches.opt_present("bert1") {
            ParserChoice::ForceBert1
        } else if matches.opt_present("bert2") {
            ParserChoice::ForceBert2
        } else if matches.opt_present("disk-log") {
            ParserChoice::ForceDiskLog
        } else {
            ParserChoice::ByExtension
        };
    // Warn about the incompatible flag combination once, instead of once per
    // input file as before.
    if transform_proplists && !json {
        eprintln!("{}: --transform-proplists is only valid with the --json flag", PROG_NAME);
    }
    let mut return_code = 0;
    for file in &matches.free {
        // handle_file consumes the pretty printer, so a fresh one is boxed per
        // file from the flags decided above.
        let pp: Box<dyn PrettyPrinter> = if json {
            Box::new(JsonPrettyPrinter::new(transform_proplists))
        } else {
            Box::new(ErlangPrettyPrinter::new(indent_width, max_per_line))
        };
        let res = handle_file(file, parse_only, verbose, parser_choice, pp);
        match res {
            Ok(()) => (),
            Err(ref e) => {
                // A broken pipe (e.g. `ppbert f | head`) is a normal way to
                // stop; anything else marks the run as failed.
                if broken_pipe(e) {
                    break;
                }
                return_code = 1;
                eprintln!("{}: {}: {}", PROG_NAME, file, e);
            }
        }
    }
    exit(return_code);
}
/// True when `err` wraps an I/O error caused by a closed output pipe
/// (e.g. piping into `head`); such errors end processing silently.
fn broken_pipe(err: &BertError) -> bool {
    if let BertError::IoError(ref ioerr) = *err {
        ioerr.kind() == ErrorKind::BrokenPipe
    } else {
        false
    }
}
/// Reads the entire contents of `filename` into memory; "-" means stdin.
fn read_bytes(filename: &str) -> Result<Vec<u8>> {
    if filename == "-" {
        let stdin = io::stdin();
        let mut handle = stdin.lock();
        let mut buf: Vec<u8> = Vec::with_capacity(4096);
        handle.read_to_end(&mut buf)?;
        Ok(buf)
    } else {
        // fs::read sizes its buffer from file metadata.
        Ok(fs::read(filename)?)
    }
}
/// Picks a parser based on `filename`'s extension, defaulting to the plain
/// BERT parser (with a warning on stderr) when the extension is unknown.
fn parser_from_ext(filename: &str, bytes: Vec<u8>) -> Box<Parser> {
    let ext: Option<&str> =
        Path::new(filename)
            .extension()
            .and_then(|x| x.to_str());
    match ext {
        Some("bert") | Some("bert1") => Box::new(Bert1Parser::new(bytes)),
        Some("bert2") => Box::new(Bert2Parser::new(bytes)),
        Some("log") => Box::new(DiskLogParser::new(bytes)),
        _ => {
            eprintln!("{}: cannot find an appropriate parser for {}; using BERT",
                      PROG_NAME, filename);
            Box::new(Bert1Parser::new(bytes))
        },
    }
}
/// Parses `filename` (stdin when "-") with the parser selected by
/// `parser_choice` and pretty-prints every term it contains; `parse_only`
/// skips printing. With `verbose`, read/parse/print timings go to stderr.
fn handle_file(
    filename: &str,
    parse_only: bool,
    verbose: bool,
    parser_choice: ParserChoice,
    pp: Box<dyn PrettyPrinter>,
) -> Result<()> {
    // Read file or stdin into buffer
    let now = Instant::now();
    let bytes = read_bytes(filename)?;
    let read_dur = now.elapsed();
    let mut parser: Box<Parser> = match parser_choice {
        ParserChoice::ForceBert1 => Box::new(Bert1Parser::new(bytes)),
        ParserChoice::ForceBert2 => Box::new(Bert2Parser::new(bytes)),
        ParserChoice::ForceDiskLog => Box::new(DiskLogParser::new(bytes)),
        ParserChoice::ByExtension => parser_from_ext(filename, bytes),
    };
    let mut parse_dur = Duration::new(0, 0);
    let mut pp_dur = Duration::new(0, 0);
    loop {
        let now = Instant::now();
        // An input may contain many consecutive terms; None means clean end.
        let term = match parser.next() {
            Some(term) => term?,
            None => break,
        };
        parse_dur += now.elapsed();
        if !parse_only {
            let now = Instant::now();
            // NOTE(review): a fresh BufWriter over stdout is built per term —
            // confirm whether pp.write requires ownership, else hoisting it
            // out of the loop would cut syscalls.
            let stdout = BufWriter::new(io::stdout());
            pp.write(&term, Box::new(stdout))?;
            pp_dur += now.elapsed();
        }
    }
    if verbose {
        eprintln!("{}: {} read time: {:?}", PROG_NAME, filename, read_dur);
        eprintln!("{}: {} parse time: {:?}", PROG_NAME, filename, parse_dur);
        if !parse_only {
            eprintln!("{}: {} print time: {:?}", PROG_NAME, filename, pp_dur);
        }
    }
    return Ok(());
}
Move pretty-printer creation outside the per-file loop.
use std::env;
use std::fs;
use std::io::{self, ErrorKind, Read, BufWriter};
use std::path::Path;
use std::process::exit;
use std::time::{Duration, Instant};
use getopts::Options;
use ppbert::prelude::*;
use ppbert::parsers::*;
use ppbert::pp::*;
const PROG_NAME: &str = "ppbert";
/// How the parser for an input file is selected.
#[derive(Clone, Copy)]
enum ParserChoice {
    ByExtension,  // choose by file extension (default)
    ForceBert1,   // --bert1
    ForceBert2,   // --bert2
    ForceDiskLog, // --disk-log
}
/// Reads option `opt` as a usize, using `default` when absent; exits the
/// process with an error message when the value is not a valid number.
fn opt_usize(m: &getopts::Matches, opt: &str, default: usize) -> usize {
    match m.opt_get_default(opt, default) {
        Ok(n) => n,
        Err(_) => {
            eprintln!("'{}' must be a number", opt);
            exit(1);
        }
    }
}
/// Entry point: parses flags, builds one pretty printer, and processes every
/// input file (stdin when none is given).
fn main() {
    let mut opts = Options::new();
    opts.optflag("V", "version", "display version");
    opts.optflag("h", "help", "display this help");
    opts.optopt("i", "indent", "indent with NUM spaces", "NUM");
    opts.optopt("m", "per-line", "print at most NUM basic terms per line", "NUM");
    opts.optflag("p", "parse", "parse only, not pretty print");
    opts.optflag("1", "bert1", "force ppbert to use regular BERT parser");
    opts.optflag("2", "bert2", "force ppbert to use BERT2 parser");
    opts.optflag("d", "disk-log", "force ppbert to use DiskLog parser");
    opts.optflag("v", "verbose", "show diagnostics on stderr");
    opts.optflag("j", "json", "print as JSON");
    opts.optflag("t", "transform-proplists", "convert proplists to JSON objects");
    let mut matches = match opts.parse(env::args().skip(1)) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}: {}", PROG_NAME, e);
            eprintln!("{}", opts.usage(&format!("{} {}", PROG_NAME, VERSION)));
            exit(1);
        }
    };
    if matches.opt_present("help") {
        println!("{}", opts.usage(&format!("{} {}", PROG_NAME, VERSION)));
        exit(0);
    }
    if matches.opt_present("version") {
        println!("{} {}", PROG_NAME, VERSION);
        exit(0);
    }
    // If no files to process, use stdin.
    if matches.free.is_empty() {
        matches.free.push("-".to_owned());
    }
    let indent_width = opt_usize(&matches, "indent", 2);
    let max_per_line = opt_usize(&matches, "per-line", 6);
    let parse_only = matches.opt_present("parse");
    let json = matches.opt_present("json");
    let transform_proplists = matches.opt_present("transform-proplists");
    let verbose = matches.opt_present("verbose");
    // Explicit parser flags override extension-based detection.
    let parser_choice =
        if matches.opt_present("bert1") {
            ParserChoice::ForceBert1
        } else if matches.opt_present("bert2") {
            ParserChoice::ForceBert2
        } else if matches.opt_present("disk-log") {
            ParserChoice::ForceDiskLog
        } else {
            ParserChoice::ByExtension
        };
    // The printer is decided once and shared (by reference) by every file.
    let pp: Box<dyn PrettyPrinter> = match (json, transform_proplists) {
        (true, false) => Box::new(JsonPrettyPrinter::new(false)),
        (true, true) => Box::new(JsonPrettyPrinter::new(true)),
        (false, false) => Box::new(ErlangPrettyPrinter::new(indent_width, max_per_line)),
        (false, true) => {
            // Incompatible flags: warn, then fall back to the Erlang printer.
            eprintln!("{}: --transform-proplists is only valid with the --json flag", PROG_NAME);
            Box::new(ErlangPrettyPrinter::new(indent_width, max_per_line))
        }
    };
    let mut return_code = 0;
    for file in &matches.free {
        let res = handle_file(file, parse_only, verbose, parser_choice, &pp);
        match res {
            Ok(()) => (),
            Err(ref e) => {
                // A broken pipe ends processing quietly; other errors mark the
                // run failed and continue with the next file.
                if broken_pipe(e) {
                    break;
                }
                return_code = 1;
                eprintln!("{}: {}: {}", PROG_NAME, file, e);
            }
        }
    }
    exit(return_code);
}
/// True when `err` is an I/O error caused by a broken output pipe (e.g. the
/// consumer of our stdout went away); callers treat this as a normal stop.
fn broken_pipe(err: &BertError) -> bool {
    match *err {
        BertError::IoError(ref ioerr) =>
            ioerr.kind() == ErrorKind::BrokenPipe,
        _ => false
    }
}
/// Reads the entire contents of `filename` into memory; "-" means stdin.
fn read_bytes(filename: &str) -> Result<Vec<u8>> {
    if filename == "-" {
        let stdin = io::stdin();
        let mut stdin = stdin.lock();
        let mut buf: Vec<u8> = Vec::with_capacity(4096);
        stdin.read_to_end(&mut buf)?;
        return Ok(buf);
    } else {
        // fs::read sizes its buffer from file metadata.
        let buf = fs::read(filename)?;
        return Ok(buf);
    }
}
/// Picks a parser based on `filename`'s extension, defaulting to the plain
/// BERT parser (with a warning on stderr) when the extension is unknown.
fn parser_from_ext(filename: &str, bytes: Vec<u8>) -> Box<Parser> {
    let ext = Path::new(filename).extension().and_then(|e| e.to_str());
    match ext {
        Some("bert") | Some("bert1") => Box::new(Bert1Parser::new(bytes)),
        Some("bert2") => Box::new(Bert2Parser::new(bytes)),
        Some("log") => Box::new(DiskLogParser::new(bytes)),
        _ => {
            eprintln!("{}: cannot find an appropriate parser for {}; using BERT",
                      PROG_NAME, filename);
            Box::new(Bert1Parser::new(bytes))
        }
    }
}
/// Parses `filename` (stdin when "-") with the parser selected by
/// `parser_choice` and pretty-prints every term it contains; `parse_only`
/// skips printing. With `verbose`, read/parse/print timings go to stderr.
///
/// The printer is taken as `&dyn PrettyPrinter` rather than the clippy
/// `borrowed_box` anti-pattern `&Box<dyn PrettyPrinter>`; call sites passing
/// `&boxed_pp` still compile via deref coercion.
fn handle_file(
    filename: &str,
    parse_only: bool,
    verbose: bool,
    parser_choice: ParserChoice,
    pp: &dyn PrettyPrinter,
) -> Result<()> {
    // Read file or stdin into buffer
    let now = Instant::now();
    let bytes = read_bytes(filename)?;
    let read_dur = now.elapsed();
    let mut parser: Box<Parser> = match parser_choice {
        ParserChoice::ForceBert1 => Box::new(Bert1Parser::new(bytes)),
        ParserChoice::ForceBert2 => Box::new(Bert2Parser::new(bytes)),
        ParserChoice::ForceDiskLog => Box::new(DiskLogParser::new(bytes)),
        ParserChoice::ByExtension => parser_from_ext(filename, bytes),
    };
    let mut parse_dur = Duration::new(0, 0);
    let mut pp_dur = Duration::new(0, 0);
    loop {
        let now = Instant::now();
        // An input may contain many consecutive terms; None means clean end.
        let term = match parser.next() {
            Some(term) => term?,
            None => break,
        };
        parse_dur += now.elapsed();
        if !parse_only {
            let now = Instant::now();
            let stdout = BufWriter::new(io::stdout());
            pp.write(&term, Box::new(stdout))?;
            pp_dur += now.elapsed();
        }
    }
    if verbose {
        eprintln!("{}: {} read time: {:?}", PROG_NAME, filename, read_dur);
        eprintln!("{}: {} parse time: {:?}", PROG_NAME, filename, parse_dur);
        if !parse_only {
            eprintln!("{}: {} print time: {:?}", PROG_NAME, filename, pp_dur);
        }
    }
    Ok(())
}
|
extern crate byteorder;
extern crate slab;
#[macro_use]
extern crate futures;
#[macro_use]
extern crate tokio_core;
use std::env;
use std::net::SocketAddr;
use std::rc::Rc;
use std::cell::{Cell, RefCell};
use byteorder::{LittleEndian, ByteOrder};
use slab::Slab;
use futures::{Async, Poll, Future};
use futures::stream::Stream;
use tokio_core::io::{read_exact, write_all, Io};
use tokio_core::net::TcpListener;
use tokio_core::reactor::Core;
/// Adapts a reader of `[len: u8][body]` frames into a Stream of frame bodies.
struct ReadStream<R> where R: ::std::io::Read {
    reader: R,
    buffer: Vec<u8>,       // body bytes of the frame currently being read
    pos: usize,            // how many body bytes have arrived so far
    frame_end: Option<u8>, // body length from the header byte, once read
}
impl <R> ReadStream<R> where R: ::std::io::Read {
    /// Wraps `reader`, starting before any frame header has been read.
    fn new(reader: R) -> ReadStream<R> {
        ReadStream {
            reader: reader,
            buffer: Vec::new(),
            pos: 0,
            frame_end: None,
        }
    }
}
impl <R> Stream for ReadStream<R> where R: ::std::io::Read {
    type Item = Vec<u8>;
    type Error = ::std::io::Error;
    /// Yields one frame body per item; frames are `[len: u8][body: len bytes]`.
    ///
    /// Fixes two defects in the previous version: `frame_end` was never reset
    /// after a completed frame (so the next poll sliced a freshly-emptied
    /// buffer and panicked), and an EOF in the middle of a frame spun forever.
    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        loop {
            if let Some(frame_end) = self.frame_end {
                let frame_end = frame_end as usize;
                if self.pos < frame_end {
                    let n = try_nb!(self.reader.read(&mut self.buffer[self.pos..frame_end]));
                    if n == 0 {
                        // EOF before the frame body completed: hard error
                        // instead of a busy loop.
                        return Err(::std::io::Error::new(
                            ::std::io::ErrorKind::UnexpectedEof,
                            "unexpected EOF inside a frame"))
                    }
                    self.pos += n;
                }
                if self.pos == frame_end {
                    // Frame complete: reset all state so the next poll reads a
                    // fresh header byte.
                    self.pos = 0;
                    self.frame_end = None;
                    let result = ::std::mem::replace(&mut self.buffer, Vec::new());
                    return Ok(Async::Ready(Some(result)))
                }
            } else {
                let mut buf = [0u8];
                let n = try_nb!(self.reader.read(&mut buf));
                if n == 0 { // EOF between frames: clean end of stream.
                    return Ok(Async::Ready(None))
                }
                self.frame_end = Some(buf[0]);
                self.buffer = vec![0; buf[0] as usize];
            }
        }
    }
}
/// Entry point of the pub/sub broker: accepts TCP connections; the first byte
/// classifies each peer as publisher (0) or subscriber (1).
pub fn main() {
    let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
    let addr = addr.parse::<SocketAddr>().unwrap();
    let mut l = Core::new().unwrap();
    let handle = l.handle();
    // Create a TCP listener which will listen for incoming connections
    let socket = TcpListener::bind(&addr, &handle).unwrap();
    // Once we've got the TCP listener, inform that we have it
    println!("Listening on: {}", addr);
    // One write-queue sender per connected subscriber.
    let subscribers: Rc<RefCell<Slab<::write_queue::Sender, usize>>> =
        Rc::new(RefCell::new(Slab::with_capacity(1024)));
    let done = socket.incoming().for_each(move |(socket, _addr)| {
        // what's the spec?
        // first byte: 0 means publisher, 1 means subscriber.
        let subscribers = subscribers.clone();
        let header = [0u8; 1];
        let future = read_exact(socket, header).and_then(move |(socket, header)| -> Box<Future<Item=(),Error=::std::io::Error>> {
            println!("OK {:?}", header);
            match header[0] {
                0 => {
                    // publisher
                    let num_read = Rc::new(Cell::new(0u64));
                    let num_read1 = num_read.clone();
                    let done = futures::lazy(|| Ok(socket.split()))
                        .and_then(|(reader, writer)| {
                            ReadStream::new(reader).for_each(move |buf| {
                                num_read1.set(num_read1.get() + 1);
                                // Forward the frame, dropping it for slow
                                // subscribers (5+ frames backlogged).
                                for ref mut sender in subscribers.borrow_mut().iter_mut() {
                                    if sender.len() < 5 {
                                        drop(sender.send(buf.clone()));
                                    }
                                }
                                Ok(())
                            }).and_then(move |()| {
                                // On publisher EOF, reply with the total frame
                                // count as a little-endian u64.
                                let mut word = [0u8; 8];
                                <LittleEndian as ByteOrder>::write_u64(&mut word, num_read.get());
                                write_all(writer, word).map(|_| ())
                            })
                        });
                    Box::new(done)
                }
                1 => {
                    // subscriber
                    let (sender, queue) = ::write_queue::write_queue(socket);
                    // Grow the slab (doubling its capacity) when it is full so
                    // insert() below cannot fail; the previous version hit
                    // unimplemented!() here.
                    if !subscribers.borrow().has_available() {
                        let len = subscribers.borrow().len();
                        subscribers.borrow_mut().reserve_exact(len);
                    }
                    let idx = match subscribers.borrow_mut().insert(sender) {
                        Ok(idx) => idx,
                        Err(_) => unreachable!(), // space was just reserved
                    };
                    Box::new(queue.then(move |_| {
                        // Connection closed: forget this subscriber.
                        subscribers.borrow_mut().remove(idx).unwrap();
                        Ok(())
                    }))
                }
                _ => {
                    // error
                    unimplemented!()
                }
            }
        }).map_err(|e| {
            println!("error: {}", e);
        });
        handle.spawn(future);
        // frame format: first byte is length of body. Then there is body.
        Ok(())
    });
    l.run(done).unwrap();
}
/// A single-writer queue of length-prefixed messages with per-message
/// completion notification.
pub mod write_queue {
    use std::collections::VecDeque;
    use std::rc::Rc;
    use std::cell::RefCell;
    use futures::{self, task, Async, Future, Poll, Complete, Oneshot};
    use tokio_core::io::WriteAll;
    // Writer state machine: one-byte length header, then the body, then idle
    // until the next message is queued.
    enum State<W> where W: ::std::io::Write {
        WritingHeader(W, Vec<u8>, Complete<Vec<u8>>),
        Writing(WriteAll<W, Vec<u8>>, Complete<Vec<u8>>),
        BetweenWrites(W),
        Empty,
    }
    /// A queue of messages being written.
    pub struct WriteQueue<W> where W: ::std::io::Write {
        inner: Rc<RefCell<Inner>>,
        state: State<W>,
    }
    // Shared between the WriteQueue future and all of its Senders.
    struct Inner {
        queue: VecDeque<(Vec<u8>, Complete<Vec<u8>>)>,
        sender_count: usize,      // live Senders; the queue resolves at zero
        task: Option<task::Task>, // parked queue task, woken by send()
    }
    /// Handle used to enqueue messages onto a WriteQueue.
    pub struct Sender {
        inner: Rc<RefCell<Inner>>,
    }
    impl Clone for Sender {
        fn clone(&self) -> Sender {
            self.inner.borrow_mut().sender_count += 1;
            Sender { inner: self.inner.clone() }
        }
    }
    impl Drop for Sender {
        fn drop(&mut self) {
            self.inner.borrow_mut().sender_count -= 1;
        }
    }
    /// Creates a queue writing to `writer`, plus one Sender feeding it.
    pub fn write_queue<W>(writer: W) -> (Sender, WriteQueue<W>)
        where W: ::std::io::Write
    {
        let inner = Rc::new(RefCell::new(Inner {
            queue: VecDeque::new(),
            task: None,
            sender_count: 1,
        }));
        let queue = WriteQueue {
            inner: inner.clone(),
            state: State::BetweenWrites(writer),
        };
        let sender = Sender { inner: inner };
        (sender, queue)
    }
    impl Sender {
        /// Enqueues a message to be written.
        pub fn send(&mut self, message: Vec<u8>) -> Oneshot<Vec<u8>> {
            let (complete, oneshot) = futures::oneshot();
            self.inner.borrow_mut().queue.push_back((message, complete));
            // Wake the queue future if it parked itself waiting for work.
            match self.inner.borrow_mut().task.take() {
                Some(t) => t.unpark(),
                None => (),
            }
            oneshot
        }
        /// Returns the number of messages queued to be written, not including any in-progress write.
        pub fn len(&mut self) -> usize {
            self.inner.borrow().queue.len()
        }
    }
    // Outcome of one poll step; applied to `state` after the borrow ends.
    enum IntermediateState<W> where W: ::std::io::Write {
        WriteHeaderDone,
        WriteDone(Vec<u8>, W),
        StartWrite(Vec<u8>, Complete<Vec<u8>>),
        Resolve,
    }
    impl <W> Future for WriteQueue<W> where W: ::std::io::Write {
        type Item = W; // Resolves when all senders have been dropped and all messages written.
        type Error = ::std::io::Error;
        fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
            loop {
                // First decide the transition while `self.state` is borrowed...
                let next = match self.state {
                    State::WritingHeader(ref mut write, ref buf, ref mut _complete) => {
                        // The header is the body length in a single byte.
                        let n = try_nb!(write.write(&[buf.len() as u8]));
                        match n {
                            0 => unimplemented!(), // TODO return error premature EOF
                            1 => IntermediateState::WriteHeaderDone,
                            _ => unreachable!(),
                        }
                    }
                    State::Writing(ref mut write, ref mut _complete) => {
                        let (w, m) = try_ready!(Future::poll(write));
                        IntermediateState::WriteDone(m, w)
                    }
                    State::BetweenWrites(ref mut _writer) => {
                        let front = self.inner.borrow_mut().queue.pop_front();
                        match front {
                            Some((m, complete)) => {
                                IntermediateState::StartWrite(m, complete)
                            }
                            None => {
                                let count = self.inner.borrow().sender_count;
                                if count == 0 {
                                    IntermediateState::Resolve
                                } else {
                                    // Nothing to do: park and wait for send().
                                    self.inner.borrow_mut().task = Some(task::park());
                                    return Ok(Async::NotReady)
                                }
                            }
                        }
                    }
                    State::Empty => unreachable!(),
                };
                // ...then apply it, temporarily swapping in State::Empty so the
                // old state's contents can be moved out.
                match next {
                    IntermediateState::WriteHeaderDone => {
                        let new_state = match ::std::mem::replace(&mut self.state, State::Empty) {
                            State::WritingHeader(writer, buf, complete) => {
                                State::Writing(::tokio_core::io::write_all(writer, buf), complete)
                            }
                            _ => unreachable!(),
                        };
                        self.state = new_state;
                    }
                    IntermediateState::WriteDone(m, w) => {
                        match ::std::mem::replace(&mut self.state, State::BetweenWrites(w)) {
                            State::Writing(_, complete) => {
                                // Hand the message buffer back to the sender.
                                complete.complete(m)
                            }
                            _ => unreachable!(),
                        }
                    }
                    IntermediateState::StartWrite(m, c) => {
                        let new_state = match ::std::mem::replace(&mut self.state, State::Empty) {
                            State::BetweenWrites(w) => {
                                State::WritingHeader(w, m, c)
                            }
                            _ => unreachable!(),
                        };
                        self.state = new_state;
                    }
                    IntermediateState::Resolve => {
                        match ::std::mem::replace(&mut self.state, State::Empty) {
                            State::BetweenWrites(w) => {
                                return Ok(Async::Ready(w))
                            }
                            _ => unreachable!(),
                        }
                    }
                }
            }
        }
    }
}
Grow the slab when it runs out of capacity instead of hitting unimplemented!().
extern crate byteorder;
extern crate slab;
#[macro_use]
extern crate futures;
#[macro_use]
extern crate tokio_core;
use std::env;
use std::net::SocketAddr;
use std::rc::Rc;
use std::cell::{Cell, RefCell};
use byteorder::{LittleEndian, ByteOrder};
use slab::Slab;
use futures::{Async, Poll, Future};
use futures::stream::Stream;
use tokio_core::io::{read_exact, write_all, Io};
use tokio_core::net::TcpListener;
use tokio_core::reactor::Core;
/// Adapts a reader of `[len: u8][body]` frames into a Stream of frame bodies.
struct ReadStream<R> where R: ::std::io::Read {
    reader: R,
    buffer: Vec<u8>,       // body bytes of the frame currently being read
    pos: usize,            // how many body bytes have arrived so far
    frame_end: Option<u8>, // body length from the header byte, once read
}
impl <R> ReadStream<R> where R: ::std::io::Read {
    /// Wraps `reader`, starting before any frame header has been read.
    fn new(reader: R) -> ReadStream<R> {
        ReadStream {
            reader: reader,
            buffer: Vec::new(),
            pos: 0,
            frame_end: None,
        }
    }
}
impl <R> Stream for ReadStream<R> where R: ::std::io::Read {
    type Item = Vec<u8>;
    type Error = ::std::io::Error;
    /// Yields one frame body per item; frames are `[len: u8][body: len bytes]`.
    ///
    /// Fixes two defects in the previous version: `frame_end` was never reset
    /// after a completed frame (so the next poll sliced a freshly-emptied
    /// buffer and panicked), and an EOF in the middle of a frame spun forever.
    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        loop {
            if let Some(frame_end) = self.frame_end {
                let frame_end = frame_end as usize;
                if self.pos < frame_end {
                    let n = try_nb!(self.reader.read(&mut self.buffer[self.pos..frame_end]));
                    if n == 0 {
                        // EOF before the frame body completed: hard error
                        // instead of a busy loop.
                        return Err(::std::io::Error::new(
                            ::std::io::ErrorKind::UnexpectedEof,
                            "unexpected EOF inside a frame"))
                    }
                    self.pos += n;
                }
                if self.pos == frame_end {
                    // Frame complete: reset all state so the next poll reads a
                    // fresh header byte.
                    self.pos = 0;
                    self.frame_end = None;
                    let result = ::std::mem::replace(&mut self.buffer, Vec::new());
                    return Ok(Async::Ready(Some(result)))
                }
            } else {
                let mut buf = [0u8];
                let n = try_nb!(self.reader.read(&mut buf));
                if n == 0 { // EOF between frames: clean end of stream.
                    return Ok(Async::Ready(None))
                }
                self.frame_end = Some(buf[0]);
                self.buffer = vec![0; buf[0] as usize];
            }
        }
    }
}
/// Entry point of the pub/sub broker: accepts TCP connections; the first byte
/// classifies each peer as publisher (0) or subscriber (1).
pub fn main() {
    let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
    let addr = addr.parse::<SocketAddr>().unwrap();
    let mut l = Core::new().unwrap();
    let handle = l.handle();
    // Create a TCP listener which will listen for incoming connections
    let socket = TcpListener::bind(&addr, &handle).unwrap();
    // Once we've got the TCP listener, inform that we have it
    println!("Listening on: {}", addr);
    // One write-queue sender per connected subscriber.
    let subscribers: Rc<RefCell<Slab<::write_queue::Sender, usize>>> =
        Rc::new(RefCell::new(Slab::with_capacity(1024)));
    let done = socket.incoming().for_each(move |(socket, _addr)| {
        // what's the spec?
        // first byte: 0 means publisher, 1 means subscriber.
        let subscribers = subscribers.clone();
        let header = [0u8; 1];
        let future = read_exact(socket, header).and_then(move |(socket, header)| -> Box<Future<Item=(),Error=::std::io::Error>> {
            println!("OK {:?}", header);
            match header[0] {
                0 => {
                    // publisher
                    let num_read = Rc::new(Cell::new(0u64));
                    let num_read1 = num_read.clone();
                    let done = futures::lazy(|| Ok(socket.split()))
                        .and_then(|(reader, writer)| {
                            ReadStream::new(reader).for_each(move |buf| {
                                num_read1.set(num_read1.get() + 1);
                                // Forward the frame, dropping it for slow
                                // subscribers (5+ frames backlogged).
                                for ref mut sender in subscribers.borrow_mut().iter_mut() {
                                    if sender.len() < 5 {
                                        drop(sender.send(buf.clone()));
                                    }
                                }
                                Ok(())
                            }).and_then(move |()| {
                                // On publisher EOF, reply with the total frame
                                // count as a little-endian u64.
                                let mut word = [0u8; 8];
                                <LittleEndian as ByteOrder>::write_u64(&mut word, num_read.get());
                                write_all(writer, word).map(|_| ())
                            })
                        });
                    Box::new(done)
                }
                1 => {
                    // subscriber
                    let (sender, queue) = ::write_queue::write_queue(socket);
                    // Grow the slab (doubling its capacity) when it is full so
                    // insert() below cannot fail.
                    if !subscribers.borrow().has_available() {
                        let len = subscribers.borrow().len();
                        subscribers.borrow_mut().reserve_exact(len);
                    }
                    let idx = match subscribers.borrow_mut().insert(sender) {
                        Ok(idx) => idx,
                        Err(_) => unreachable!(),
                    };
                    Box::new(queue.then(move |_| {
                        // Connection closed: forget this subscriber.
                        subscribers.borrow_mut().remove(idx).unwrap();
                        Ok(())
                    }))
                }
                _ => {
                    // error
                    unimplemented!()
                }
            }
        }).map_err(|e| {
            println!("error: {}", e);
        });
        handle.spawn(future);
        // frame format: first byte is length of body. Then there is body.
        Ok(())
    });
    l.run(done).unwrap();
}
/// A single-writer queue of length-prefixed messages with per-message
/// completion notification.
pub mod write_queue {
    use std::collections::VecDeque;
    use std::rc::Rc;
    use std::cell::RefCell;
    use futures::{self, task, Async, Future, Poll, Complete, Oneshot};
    use tokio_core::io::WriteAll;
    // Writer state machine: one-byte length header, then the body, then idle
    // until the next message is queued.
    enum State<W> where W: ::std::io::Write {
        WritingHeader(W, Vec<u8>, Complete<Vec<u8>>),
        Writing(WriteAll<W, Vec<u8>>, Complete<Vec<u8>>),
        BetweenWrites(W),
        Empty,
    }
    /// A queue of messages being written.
    pub struct WriteQueue<W> where W: ::std::io::Write {
        inner: Rc<RefCell<Inner>>,
        state: State<W>,
    }
    // Shared between the WriteQueue future and all of its Senders.
    struct Inner {
        queue: VecDeque<(Vec<u8>, Complete<Vec<u8>>)>,
        sender_count: usize,      // live Senders; the queue resolves at zero
        task: Option<task::Task>, // parked queue task, woken by send()
    }
    /// Handle used to enqueue messages onto a WriteQueue.
    pub struct Sender {
        inner: Rc<RefCell<Inner>>,
    }
    impl Clone for Sender {
        fn clone(&self) -> Sender {
            self.inner.borrow_mut().sender_count += 1;
            Sender { inner: self.inner.clone() }
        }
    }
    impl Drop for Sender {
        fn drop(&mut self) {
            self.inner.borrow_mut().sender_count -= 1;
        }
    }
    /// Creates a queue writing to `writer`, plus one Sender feeding it.
    pub fn write_queue<W>(writer: W) -> (Sender, WriteQueue<W>)
        where W: ::std::io::Write
    {
        let inner = Rc::new(RefCell::new(Inner {
            queue: VecDeque::new(),
            task: None,
            sender_count: 1,
        }));
        let queue = WriteQueue {
            inner: inner.clone(),
            state: State::BetweenWrites(writer),
        };
        let sender = Sender { inner: inner };
        (sender, queue)
    }
    impl Sender {
        /// Enqueues a message to be written.
        pub fn send(&mut self, message: Vec<u8>) -> Oneshot<Vec<u8>> {
            let (complete, oneshot) = futures::oneshot();
            self.inner.borrow_mut().queue.push_back((message, complete));
            // Wake the queue future if it parked itself waiting for work.
            match self.inner.borrow_mut().task.take() {
                Some(t) => t.unpark(),
                None => (),
            }
            oneshot
        }
        /// Returns the number of messages queued to be written, not including any in-progress write.
        pub fn len(&mut self) -> usize {
            self.inner.borrow().queue.len()
        }
    }
    // Outcome of one poll step; applied to `state` after the borrow ends.
    enum IntermediateState<W> where W: ::std::io::Write {
        WriteHeaderDone,
        WriteDone(Vec<u8>, W),
        StartWrite(Vec<u8>, Complete<Vec<u8>>),
        Resolve,
    }
    impl <W> Future for WriteQueue<W> where W: ::std::io::Write {
        type Item = W; // Resolves when all senders have been dropped and all messages written.
        type Error = ::std::io::Error;
        fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
            loop {
                // First decide the transition while `self.state` is borrowed...
                let next = match self.state {
                    State::WritingHeader(ref mut write, ref buf, ref mut _complete) => {
                        // The header is the body length in a single byte.
                        let n = try_nb!(write.write(&[buf.len() as u8]));
                        match n {
                            0 => unimplemented!(), // TODO return error premature EOF
                            1 => IntermediateState::WriteHeaderDone,
                            _ => unreachable!(),
                        }
                    }
                    State::Writing(ref mut write, ref mut _complete) => {
                        let (w, m) = try_ready!(Future::poll(write));
                        IntermediateState::WriteDone(m, w)
                    }
                    State::BetweenWrites(ref mut _writer) => {
                        let front = self.inner.borrow_mut().queue.pop_front();
                        match front {
                            Some((m, complete)) => {
                                IntermediateState::StartWrite(m, complete)
                            }
                            None => {
                                let count = self.inner.borrow().sender_count;
                                if count == 0 {
                                    IntermediateState::Resolve
                                } else {
                                    // Nothing to do: park and wait for send().
                                    self.inner.borrow_mut().task = Some(task::park());
                                    return Ok(Async::NotReady)
                                }
                            }
                        }
                    }
                    State::Empty => unreachable!(),
                };
                // ...then apply it, temporarily swapping in State::Empty so the
                // old state's contents can be moved out.
                match next {
                    IntermediateState::WriteHeaderDone => {
                        let new_state = match ::std::mem::replace(&mut self.state, State::Empty) {
                            State::WritingHeader(writer, buf, complete) => {
                                State::Writing(::tokio_core::io::write_all(writer, buf), complete)
                            }
                            _ => unreachable!(),
                        };
                        self.state = new_state;
                    }
                    IntermediateState::WriteDone(m, w) => {
                        match ::std::mem::replace(&mut self.state, State::BetweenWrites(w)) {
                            State::Writing(_, complete) => {
                                // Hand the message buffer back to the sender.
                                complete.complete(m)
                            }
                            _ => unreachable!(),
                        }
                    }
                    IntermediateState::StartWrite(m, c) => {
                        let new_state = match ::std::mem::replace(&mut self.state, State::Empty) {
                            State::BetweenWrites(w) => {
                                State::WritingHeader(w, m, c)
                            }
                            _ => unreachable!(),
                        };
                        self.state = new_state;
                    }
                    IntermediateState::Resolve => {
                        match ::std::mem::replace(&mut self.state, State::Empty) {
                            State::BetweenWrites(w) => {
                                return Ok(Async::Ready(w))
                            }
                            _ => unreachable!(),
                        }
                    }
                }
            }
        }
    }
}
|
use std::io;
use std::env;
/// Average structural features of a text sample, used to compare two sources.
struct MeanFeatures {
    wordlen: i32, // The length of each of these
    sentlen: i32, // characteristics will be averaged
    paralen: i32, // and compared with other source.
}
/// Frequency counts of punctuation and marker words in a text sample.
struct FreqFeatures {
    comma: i32,
    semicolon: i32,
    quote: i32,
    bangs: i32,
    dashes: i32,
    ands: i32,
    buts: i32,
    however: i32,
    condition: i32, // Did not want to create confusion with if.
    thats: i32,
    more: i32,
    musts: i32,
    mights: i32,
    thises: i32,
    very: i32,
}
fn main() {
    // Collect the command-line arguments (index 0 is the program name).
    let args = env::args().collect::<Vec<String>>();
}
Added command-line argument handling code.
#![allow(dead_code)]
use std::io;
use std::env;
use std::fs::File;
use std::error::Error;
use std::path::Path;
/// Average structural features of a text sample, used to compare two sources.
struct MeanFeatures {
    wordlen: i32, // The length of each of these characteristics will be
    sentlen: i32, // averaged and compared with other source.
    //paralen: i32, // Removing this for now.
}
/// Frequency counts of punctuation and marker words in a text sample.
struct FreqFeatures {
    comma: i32,
    semicolon: i32,
    quote: i32,
    bangs: i32,
    dashes: i32,
    ands: i32,
    buts: i32,
    however: i32,
    condition: i32, // Prevent confusion with if.
    thats: i32,
    more: i32,
    musts: i32,
    mights: i32,
    thises: i32,
    very: i32,
}
/// Opens the two source files named on the command line.
///
/// Exits with a usage message when fewer than two paths are given, instead of
/// panicking with an opaque index-out-of-bounds error; replaces the deprecated
/// `Error::description` with the error's `Display` output.
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() < 3 {
        eprintln!("Usage: {} <file1> <file2>",
                  args.get(0).map(String::as_str).unwrap_or("author-analysis"));
        std::process::exit(1);
    }
    let file1 = File::open(&args[1])
        .unwrap_or_else(|why| panic!("Couldn't open {}: {}", &args[1], why));
    let file2 = File::open(&args[2])
        .unwrap_or_else(|why| panic!("Couldn't open {}: {}", &args[2], why));
} |
use std::io::BufReader;
use std::io::Write;
use std::io::BufRead;
use std::io::{self};
use std::net::TcpStream;
use std::fs::File;
extern crate chrono;
use chrono::*;
extern crate stats;
use stats::median;
extern crate clap;
use clap::{Arg, App, ArgMatches};
mod percentile;
use percentile::percentile;
mod http_status;
mod request_response;
use request_response::*;
// http://stackoverflow.com/a/27590832/376138
// `println!`-style macro that writes to stderr instead of stdout, panicking
// only if stderr itself cannot be written to.
macro_rules! println_stderr(
    ($($arg:tt)*) => { {
        let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
        r.expect("failed printing to stderr");
    } }
);
/// Opens `path` and wraps it in a `BufReader` for line-by-line
/// consumption. Panics with the offending path when the open fails.
fn open_logfile(path: &str) -> BufReader<File> {
    match File::open(path) {
        Ok(f) => BufReader::new(f),
        Err(err) => panic!("Could not open file {}: {}", path, err),
    }
}
/// Parses the log file at `path` into request ("->") and response
/// ("<-") records.
///
/// * `time_filter` — when present, only requests no older than this
///   duration (relative to now) are kept.
/// * `exclude_term` — when present, any line containing the term is
///   skipped entirely.
///
/// Unparseable lines are reported on stderr and skipped. The returned
/// responses are sorted by id.
pub fn parse_logfile(path: &str, time_filter: Option<Duration>, exclude_term: Option<&str>) -> Result<(Vec<Request>,Vec<Response>), &'static str> {
    let f = open_logfile(path);
    let mut requests: Vec<Request> = Vec::new();
    let mut responses: Vec<Response> = Vec::new();
    for line in f.lines() {
        let line_value = &line.unwrap();
        // Drop the whole line when it contains the excluded term.
        if exclude_term.map_or(false, |term| line_value.contains(term)) {
            continue;
        }
        if line_value.contains("->") {
            match Request::new_from_log_line(&line_value, None) {
                Ok(r) => {
                    // No filter means "keep everything"; otherwise keep only
                    // requests inside [now - filter, now]. (The original
                    // redundantly re-checked `is_some()` and called
                    // `UTC::now()` twice, risking a slightly inconsistent
                    // window.)
                    let keep = match time_filter {
                        None => true,
                        Some(filter) => {
                            let now = UTC::now().with_timezone(&r.time.timezone());
                            r.is_between_times(now - filter, now)
                        }
                    };
                    if keep {
                        requests.push(r);
                    }
                },
                Err(err) => println_stderr!("Skipped a line: {}", err)
            }
        }
        if line_value.contains("<-") {
            match Response::new_from_log_line(&line_value, None) {
                Ok(r) => responses.push(r),
                Err(err) => println_stderr!("Skipped a line: {}", err)
            }
        }
    }
    // Downstream pairing expects responses ordered by id.
    responses.sort_by_key(|r| r.id);
    Ok((requests, responses))
}
/// Aggregate statistics over the response times (in milliseconds) of
/// all matched request/response pairs.
#[derive(Eq, PartialEq)]
#[derive(Debug)]
pub struct RequestLogAnalyzerResult {
    count: usize,        // number of pairs analyzed
    max: usize,          // slowest response time
    min: usize,          // fastest response time
    avg: usize,          // integer mean of the times
    median: usize,       // statistical median
    percentile90: usize, // 90th percentile
}
pub fn analyze(request_response_pairs: &Vec<RequestResponsePair>) -> Option<RequestLogAnalyzerResult> {
if request_response_pairs.len() == 0 {
return None;
}
let times: Vec<i64> = request_response_pairs.iter()
.map(|rr: &RequestResponsePair| -> i64 {rr.response.response_time.num_milliseconds() })
.collect();
let sum: usize = times.iter().sum::<i64>() as usize;
let avg: usize = sum / times.len();
let max: usize = *times.iter().max().unwrap() as usize;
let min: usize = *times.iter().min().unwrap() as usize;
let percentile90: usize = percentile(×, 0.9) as usize;
let median = median(times.into_iter()).unwrap() as usize;
Some(RequestLogAnalyzerResult {
count: request_response_pairs.len().into(),
max: max,
min: min,
avg: avg,
median: median,
percentile90: percentile90,
})
}
/// Prints the analysis result to stdout, one `key:\tvalue` line per
/// metric, in the same fixed order as before.
fn render_terminal(result: RequestLogAnalyzerResult) {
    let rows = [
        ("count", result.count),
        ("time.avg", result.avg),
        ("time.min", result.min),
        ("time.median", result.median),
        ("time.90percent", result.percentile90),
        ("time.max", result.max),
    ];
    for &(label, value) in rows.iter() {
        println!("{}:\t{}", label, value);
    }
}
/// Writes the analysis result to `stream` in Graphite plaintext
/// format: `[<prefix>.]<key> <value> <timestamp>\n` per metric.
pub fn render_graphite<T: Write>(result: RequestLogAnalyzerResult, time: DateTime<FixedOffset>, prefix: Option<&str>, mut stream: T) {
    // Resolve the optional key prefix once; empty strings mean "no prefix".
    let (prefix_text, prefix_separator) = match prefix {
        Some(p) => (p, "."),
        None => ("", ""),
    };
    let metrics = [
        ("requests.count", result.count),
        ("requests.time.max", result.max),
        ("requests.time.min", result.min),
        ("requests.time.avg", result.avg),
        ("requests.time.median", result.median),
        ("requests.time.90percent", result.percentile90),
    ];
    for &(key, value) in metrics.iter() {
        // Write results are intentionally ignored, matching the
        // original best-effort contract.
        let _ = stream.write(
            format!("{}{}{} {} {}\n", prefix_text, prefix_separator, key, value, time.timestamp())
                .as_bytes()
        );
    }
}
/// Declares the command-line interface (via clap) and parses this
/// invocation's arguments.
fn parse_args<'a>() -> ArgMatches<'a> {
    App::new("Request.log Analyzer")
        // Positional: path of the log file to analyze (mandatory).
        .arg(Arg::with_name("filename")
            .index(1)
            .value_name("FILE")
            .required(true)
            .help("Log file to analyze")
            .takes_value(true))
        .arg(Arg::with_name("time_filter_minutes")
            .value_name("MINUTES")
            .short("t")
            .help("Limit to the last n minutes")
            .takes_value(true))
        .arg(Arg::with_name("include_term")
            .value_name("TERM")
            .long("include")
            .help("Only includes lines that contain this term")
            .takes_value(true))
        .arg(Arg::with_name("exclude_term")
            .value_name("TERM")
            .long("exclude")
            .help("Excludes lines that contain this term")
            .takes_value(true))
        // Graphite output options; when a server is given, results are
        // sent over TCP instead of printed to stdout.
        .arg(Arg::with_name("graphite-server")
            .value_name("GRAPHITE_SERVER")
            .long("graphite-server")
            .help("Send values to this Graphite server instead of stdout")
            .takes_value(true))
        .arg(Arg::with_name("graphite-port")
            .value_name("GRAPHITE_PORT")
            .long("graphite-port")
            .takes_value(true)
            .default_value("2003"))
        .arg(Arg::with_name("graphite-prefix")
            .value_name("GRAPHITE_PREFIX")
            .long("graphite-prefix")
            .help("Prefix for Graphite key, e.g. 'servers.prod.publisher1'")
            .takes_value(true))
        .get_matches()
}
/// Entry point: parse CLI arguments, read and pair the log lines,
/// then render the analysis either to a Graphite server or stdout.
fn main() {
    let args = parse_args();
    // "filename" is a required argument, so unwrap cannot fail here.
    let filename = args.value_of("filename").unwrap();
    // NOTE(review): a non-numeric -t value panics on parse().unwrap().
    let time_filter = match args.value_of("time_filter_minutes") {
        Some(minutes) => Some(Duration::minutes(minutes.parse().unwrap())),
        None => None
    };
    let lines = parse_logfile(filename, time_filter, args.value_of("exclude_term"));
    let (requests, responses) = lines.unwrap();
    // NOTE(review): indexing requests[0] panics when no request lines
    // matched — confirm whether an empty log should be handled here.
    let time_zone = &requests[0].time.timezone();
    let pairs: Vec<RequestResponsePair> = pair_requests_responses(requests, responses)
        .into_iter()
        .filter(|rr| rr.matches_include_filter())
        .collect();
    if args.is_present("graphite-server") {
        // graphite-port has a default value, so unwrap is safe; a
        // non-numeric port still panics on parse().unwrap().
        let stream = TcpStream::connect(
            (
                args.value_of("graphite-server").unwrap(),
                args.value_of("graphite-port").unwrap().parse().unwrap()
            )
        ).unwrap();
        match analyze(&pairs) {
            Some(result) => render_graphite(result, UTC::now().with_timezone(time_zone), args.value_of("graphite-prefix"), stream),
            None => println!("No matching log lines in file.")
        }
    } else {
        match analyze(&pairs) {
            Some(result) => render_terminal(result),
            None => println!("No matching log lines in file.")
        }
    }
}
#[cfg(test)]
mod tests {
    // Fixture-driven tests: each parses a small log file from
    // src/test/ and asserts on the parsed records, statistics, or
    // rendered output.
    use super::*;
    use request_response::*;
    extern crate chrono;
    use chrono::*;
    use std::str;
    use std::io::prelude::Write;
    use std::io::{self};
    #[test]
    fn test_parse_logfile() {
        let lines = parse_logfile("src/test/simple-1.log", None, None);
        let (requests, responses) = lines.unwrap();
        assert_eq!(requests.len(), 2);
        assert_eq!(responses.len(), 2);
    }
    #[test]
    fn test_open_logfile_time_filter() {
        let time_filter: Duration = Duration::minutes(1);
        let lines = parse_logfile("src/test/simple-1.log", Some(time_filter), None);
        let (requests, _) = lines.unwrap();
        assert_eq!(requests.len(), 0);
        let time_filter: Duration = Duration::minutes(52560000); // 100 years
        let lines = parse_logfile("src/test/simple-1.log", Some(time_filter), None);
        let (requests, _) = lines.unwrap();
        assert_eq!(requests.len(), 2);
    }
    #[test]
    fn test_parse_logfile_exlude_term_in_request_line() {
        let lines = parse_logfile("src/test/simple-1.log", None, Some("other.html"));
        let (requests, _) = lines.unwrap();
        assert_eq!(requests.len(), 1);
        assert_eq!(requests[0].id, 1);
    }
    #[test]
    fn test_parse_logfile_exlude_term_in_response_line() {
        let lines = parse_logfile("src/test/simple-1.log", None, Some("text/html"));
        let (_, responses) = lines.unwrap();
        assert_eq!(responses.len(), 0);
    }
    #[test]
    fn test_parse_logfile_exlude_term_given_but_not_found() {
        let lines = parse_logfile("src/test/simple-1.log", None, Some("term that does not exist"));
        let (requests, responses) = lines.unwrap();
        assert_eq!(requests.len(), 2);
        assert_eq!(responses.len(), 2);
    }
    #[test]
    fn test_parse_logfile_ignore_broken_lines() {
        let lines = parse_logfile("src/test/broken.log", None, None);
        let (requests, responses) = lines.unwrap();
        assert_eq!(requests.len(), 1);
        assert_eq!(responses.len(), 1);
    }
    #[test]
    fn test_pair_requests_resonses() {
        let lines = parse_logfile("src/test/simple-1.log", None, None);
        let (requests, responses) = lines.unwrap();
        let result = pair_requests_responses(requests, responses);
        assert_eq!(result.len(), 2);
        assert_eq!(result[0].request.id, result[0].response.id);
        assert_eq!(result[1].request.id, result[1].response.id);
    }
    #[test]
    fn test_request_log_analyzer_result() {
        let lines = parse_logfile("src/test/response-time-calculations.log", None, None);
        let (requests, responses) = lines.unwrap();
        let request_response_pairs = pair_requests_responses(requests, responses);
        let result = analyze(&request_response_pairs);
        let expected = Some(RequestLogAnalyzerResult {
            count: 3,
            max: 100,
            min: 1,
            avg: 37,
            median: 10,
            percentile90: 100,
        });
        assert_eq!(result, expected);
    }
    #[test]
    fn test_request_log_analyze_none_matching() {
        let lines = parse_logfile("src/test/simple-1.log", Some(Duration::minutes(0)), None);
        let (requests, responses) = lines.unwrap();
        let request_response_pairs = pair_requests_responses(requests, responses);
        let result = analyze(&request_response_pairs);
        let expected = None;
        assert_eq!(result, expected);
    }
    #[test]
    fn test_90_percentile_calculation() {
        let lines = parse_logfile("src/test/percentile.log", None, None);
        let (requests, responses) = lines.unwrap();
        let request_response_pairs = pair_requests_responses(requests, responses);
        let result: RequestLogAnalyzerResult = analyze(&request_response_pairs).unwrap();
        assert_eq!(result.percentile90, 9);
    }
    // Captures every write() payload so Graphite output can be asserted.
    struct MockTcpStream {
        write_calls: Vec<String>,
    }
    impl Write for MockTcpStream {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            self.write_calls.push(str::from_utf8(buf).unwrap().to_string());
            Ok(1)
        }
        fn flush(&mut self) -> io::Result<()> { Ok(()) }
    }
    #[test]
    fn test_render_graphite() {
        let mut mock_tcp_stream = MockTcpStream {
            write_calls: vec![]
        };
        render_graphite(RequestLogAnalyzerResult {
            count: 3,
            max: 100,
            min: 1,
            avg: 37,
            median: 10,
            percentile90: 100,
        },
        DateTime::parse_from_str("22/Sep/2016:22:41:59 +0200", "%d/%b/%Y:%H:%M:%S %z").unwrap(),
        None,
        &mut mock_tcp_stream
        );
        assert_eq!(&mock_tcp_stream.write_calls[0], "requests.count 3 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[1], "requests.time.max 100 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[2], "requests.time.min 1 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[3], "requests.time.avg 37 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[4], "requests.time.median 10 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[5], "requests.time.90percent 100 1474576919\n");
    }
    #[test]
    fn test_render_graphite_prefix() {
        let mut mock_tcp_stream = MockTcpStream {
            write_calls: vec![]
        };
        render_graphite(RequestLogAnalyzerResult {
            count: 3,
            max: 100,
            min: 1,
            avg: 37,
            median: 10,
            percentile90: 100,
        },
        DateTime::parse_from_str("22/Sep/2016:22:41:59 +0200", "%d/%b/%Y:%H:%M:%S %z").unwrap(),
        Some("my.prefix"),
        &mut mock_tcp_stream
        );
        assert_eq!(&mock_tcp_stream.write_calls[0], "my.prefix.requests.count 3 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[1], "my.prefix.requests.time.max 100 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[2], "my.prefix.requests.time.min 1 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[3], "my.prefix.requests.time.avg 37 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[4], "my.prefix.requests.time.median 10 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[5], "my.prefix.requests.time.90percent 100 1474576919\n");
    }
}
Removed the unused `std::io` self-import
use std::io::BufReader;
use std::io::Write;
use std::io::BufRead;
use std::net::TcpStream;
use std::fs::File;
extern crate chrono;
use chrono::*;
extern crate stats;
use stats::median;
extern crate clap;
use clap::{Arg, App, ArgMatches};
mod percentile;
use percentile::percentile;
mod http_status;
mod request_response;
use request_response::*;
// http://stackoverflow.com/a/27590832/376138
// Works like `println!` but targets stderr; panics if stderr cannot
// be written to.
macro_rules! println_stderr(
    ($($arg:tt)*) => { {
        let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
        r.expect("failed printing to stderr");
    } }
);
/// Opens `path` and wraps it in a `BufReader` for line-by-line
/// consumption. Panics with the offending path when the open fails.
fn open_logfile(path: &str) -> BufReader<File> {
    match File::open(path) {
        Ok(f) => BufReader::new(f),
        Err(err) => panic!("Could not open file {}: {}", path, err),
    }
}
/// Parses the log file at `path` into request ("->") and response
/// ("<-") records.
///
/// * `time_filter` — when present, only requests no older than this
///   duration (relative to now) are kept.
/// * `exclude_term` — when present, any line containing the term is
///   skipped entirely.
///
/// Unparseable lines are reported on stderr and skipped. The returned
/// responses are sorted by id.
pub fn parse_logfile(path: &str, time_filter: Option<Duration>, exclude_term: Option<&str>) -> Result<(Vec<Request>,Vec<Response>), &'static str> {
    let f = open_logfile(path);
    let mut requests: Vec<Request> = Vec::new();
    let mut responses: Vec<Response> = Vec::new();
    for line in f.lines() {
        let line_value = &line.unwrap();
        // Drop the whole line when it contains the excluded term.
        if exclude_term.map_or(false, |term| line_value.contains(term)) {
            continue;
        }
        if line_value.contains("->") {
            match Request::new_from_log_line(&line_value, None) {
                Ok(r) => {
                    // No filter means "keep everything"; otherwise keep only
                    // requests inside [now - filter, now]. (The original
                    // redundantly re-checked `is_some()` and called
                    // `UTC::now()` twice, risking a slightly inconsistent
                    // window.)
                    let keep = match time_filter {
                        None => true,
                        Some(filter) => {
                            let now = UTC::now().with_timezone(&r.time.timezone());
                            r.is_between_times(now - filter, now)
                        }
                    };
                    if keep {
                        requests.push(r);
                    }
                },
                Err(err) => println_stderr!("Skipped a line: {}", err)
            }
        }
        if line_value.contains("<-") {
            match Response::new_from_log_line(&line_value, None) {
                Ok(r) => responses.push(r),
                Err(err) => println_stderr!("Skipped a line: {}", err)
            }
        }
    }
    // Downstream pairing expects responses ordered by id.
    responses.sort_by_key(|r| r.id);
    Ok((requests, responses))
}
/// Aggregate statistics over the response times (in milliseconds) of
/// all matched request/response pairs.
#[derive(Eq, PartialEq)]
#[derive(Debug)]
pub struct RequestLogAnalyzerResult {
    count: usize,        // number of pairs analyzed
    max: usize,          // slowest response time
    min: usize,          // fastest response time
    avg: usize,          // integer mean of the times
    median: usize,       // statistical median
    percentile90: usize, // 90th percentile
}
pub fn analyze(request_response_pairs: &Vec<RequestResponsePair>) -> Option<RequestLogAnalyzerResult> {
if request_response_pairs.len() == 0 {
return None;
}
let times: Vec<i64> = request_response_pairs.iter()
.map(|rr: &RequestResponsePair| -> i64 {rr.response.response_time.num_milliseconds() })
.collect();
let sum: usize = times.iter().sum::<i64>() as usize;
let avg: usize = sum / times.len();
let max: usize = *times.iter().max().unwrap() as usize;
let min: usize = *times.iter().min().unwrap() as usize;
let percentile90: usize = percentile(×, 0.9) as usize;
let median = median(times.into_iter()).unwrap() as usize;
Some(RequestLogAnalyzerResult {
count: request_response_pairs.len().into(),
max: max,
min: min,
avg: avg,
median: median,
percentile90: percentile90,
})
}
/// Prints the analysis result to stdout, one `key:\tvalue` line per
/// metric, in the same fixed order as before.
fn render_terminal(result: RequestLogAnalyzerResult) {
    let rows = [
        ("count", result.count),
        ("time.avg", result.avg),
        ("time.min", result.min),
        ("time.median", result.median),
        ("time.90percent", result.percentile90),
        ("time.max", result.max),
    ];
    for &(label, value) in rows.iter() {
        println!("{}:\t{}", label, value);
    }
}
/// Writes the analysis result to `stream` in Graphite plaintext
/// format: `[<prefix>.]<key> <value> <timestamp>\n` per metric.
pub fn render_graphite<T: Write>(result: RequestLogAnalyzerResult, time: DateTime<FixedOffset>, prefix: Option<&str>, mut stream: T) {
    // Resolve the optional key prefix once; empty strings mean "no prefix".
    let (prefix_text, prefix_separator) = match prefix {
        Some(p) => (p, "."),
        None => ("", ""),
    };
    let metrics = [
        ("requests.count", result.count),
        ("requests.time.max", result.max),
        ("requests.time.min", result.min),
        ("requests.time.avg", result.avg),
        ("requests.time.median", result.median),
        ("requests.time.90percent", result.percentile90),
    ];
    for &(key, value) in metrics.iter() {
        // Write results are intentionally ignored, matching the
        // original best-effort contract.
        let _ = stream.write(
            format!("{}{}{} {} {}\n", prefix_text, prefix_separator, key, value, time.timestamp())
                .as_bytes()
        );
    }
}
/// Declares the command-line interface (via clap) and parses this
/// invocation's arguments.
fn parse_args<'a>() -> ArgMatches<'a> {
    App::new("Request.log Analyzer")
        // Positional: path of the log file to analyze (mandatory).
        .arg(Arg::with_name("filename")
            .index(1)
            .value_name("FILE")
            .required(true)
            .help("Log file to analyze")
            .takes_value(true))
        .arg(Arg::with_name("time_filter_minutes")
            .value_name("MINUTES")
            .short("t")
            .help("Limit to the last n minutes")
            .takes_value(true))
        .arg(Arg::with_name("include_term")
            .value_name("TERM")
            .long("include")
            .help("Only includes lines that contain this term")
            .takes_value(true))
        .arg(Arg::with_name("exclude_term")
            .value_name("TERM")
            .long("exclude")
            .help("Excludes lines that contain this term")
            .takes_value(true))
        // Graphite output options; when a server is given, results are
        // sent over TCP instead of printed to stdout.
        .arg(Arg::with_name("graphite-server")
            .value_name("GRAPHITE_SERVER")
            .long("graphite-server")
            .help("Send values to this Graphite server instead of stdout")
            .takes_value(true))
        .arg(Arg::with_name("graphite-port")
            .value_name("GRAPHITE_PORT")
            .long("graphite-port")
            .takes_value(true)
            .default_value("2003"))
        .arg(Arg::with_name("graphite-prefix")
            .value_name("GRAPHITE_PREFIX")
            .long("graphite-prefix")
            .help("Prefix for Graphite key, e.g. 'servers.prod.publisher1'")
            .takes_value(true))
        .get_matches()
}
/// Entry point: parse CLI arguments, read and pair the log lines,
/// then render the analysis either to a Graphite server or stdout.
fn main() {
    let args = parse_args();
    // "filename" is a required argument, so unwrap cannot fail here.
    let filename = args.value_of("filename").unwrap();
    // NOTE(review): a non-numeric -t value panics on parse().unwrap().
    let time_filter = match args.value_of("time_filter_minutes") {
        Some(minutes) => Some(Duration::minutes(minutes.parse().unwrap())),
        None => None
    };
    let lines = parse_logfile(filename, time_filter, args.value_of("exclude_term"));
    let (requests, responses) = lines.unwrap();
    // NOTE(review): indexing requests[0] panics when no request lines
    // matched — confirm whether an empty log should be handled here.
    let time_zone = &requests[0].time.timezone();
    let pairs: Vec<RequestResponsePair> = pair_requests_responses(requests, responses)
        .into_iter()
        .filter(|rr| rr.matches_include_filter())
        .collect();
    if args.is_present("graphite-server") {
        // graphite-port has a default value, so unwrap is safe; a
        // non-numeric port still panics on parse().unwrap().
        let stream = TcpStream::connect(
            (
                args.value_of("graphite-server").unwrap(),
                args.value_of("graphite-port").unwrap().parse().unwrap()
            )
        ).unwrap();
        match analyze(&pairs) {
            Some(result) => render_graphite(result, UTC::now().with_timezone(time_zone), args.value_of("graphite-prefix"), stream),
            None => println!("No matching log lines in file.")
        }
    } else {
        match analyze(&pairs) {
            Some(result) => render_terminal(result),
            None => println!("No matching log lines in file.")
        }
    }
}
#[cfg(test)]
mod tests {
    // Fixture-driven tests: each parses a small log file from
    // src/test/ and asserts on the parsed records, statistics, or
    // rendered output.
    use super::*;
    use request_response::*;
    extern crate chrono;
    use chrono::*;
    use std::str;
    use std::io::prelude::Write;
    use std::io::{self};
    #[test]
    fn test_parse_logfile() {
        let lines = parse_logfile("src/test/simple-1.log", None, None);
        let (requests, responses) = lines.unwrap();
        assert_eq!(requests.len(), 2);
        assert_eq!(responses.len(), 2);
    }
    #[test]
    fn test_open_logfile_time_filter() {
        let time_filter: Duration = Duration::minutes(1);
        let lines = parse_logfile("src/test/simple-1.log", Some(time_filter), None);
        let (requests, _) = lines.unwrap();
        assert_eq!(requests.len(), 0);
        let time_filter: Duration = Duration::minutes(52560000); // 100 years
        let lines = parse_logfile("src/test/simple-1.log", Some(time_filter), None);
        let (requests, _) = lines.unwrap();
        assert_eq!(requests.len(), 2);
    }
    #[test]
    fn test_parse_logfile_exlude_term_in_request_line() {
        let lines = parse_logfile("src/test/simple-1.log", None, Some("other.html"));
        let (requests, _) = lines.unwrap();
        assert_eq!(requests.len(), 1);
        assert_eq!(requests[0].id, 1);
    }
    #[test]
    fn test_parse_logfile_exlude_term_in_response_line() {
        let lines = parse_logfile("src/test/simple-1.log", None, Some("text/html"));
        let (_, responses) = lines.unwrap();
        assert_eq!(responses.len(), 0);
    }
    #[test]
    fn test_parse_logfile_exlude_term_given_but_not_found() {
        let lines = parse_logfile("src/test/simple-1.log", None, Some("term that does not exist"));
        let (requests, responses) = lines.unwrap();
        assert_eq!(requests.len(), 2);
        assert_eq!(responses.len(), 2);
    }
    #[test]
    fn test_parse_logfile_ignore_broken_lines() {
        let lines = parse_logfile("src/test/broken.log", None, None);
        let (requests, responses) = lines.unwrap();
        assert_eq!(requests.len(), 1);
        assert_eq!(responses.len(), 1);
    }
    #[test]
    fn test_pair_requests_resonses() {
        let lines = parse_logfile("src/test/simple-1.log", None, None);
        let (requests, responses) = lines.unwrap();
        let result = pair_requests_responses(requests, responses);
        assert_eq!(result.len(), 2);
        assert_eq!(result[0].request.id, result[0].response.id);
        assert_eq!(result[1].request.id, result[1].response.id);
    }
    #[test]
    fn test_request_log_analyzer_result() {
        let lines = parse_logfile("src/test/response-time-calculations.log", None, None);
        let (requests, responses) = lines.unwrap();
        let request_response_pairs = pair_requests_responses(requests, responses);
        let result = analyze(&request_response_pairs);
        let expected = Some(RequestLogAnalyzerResult {
            count: 3,
            max: 100,
            min: 1,
            avg: 37,
            median: 10,
            percentile90: 100,
        });
        assert_eq!(result, expected);
    }
    #[test]
    fn test_request_log_analyze_none_matching() {
        let lines = parse_logfile("src/test/simple-1.log", Some(Duration::minutes(0)), None);
        let (requests, responses) = lines.unwrap();
        let request_response_pairs = pair_requests_responses(requests, responses);
        let result = analyze(&request_response_pairs);
        let expected = None;
        assert_eq!(result, expected);
    }
    #[test]
    fn test_90_percentile_calculation() {
        let lines = parse_logfile("src/test/percentile.log", None, None);
        let (requests, responses) = lines.unwrap();
        let request_response_pairs = pair_requests_responses(requests, responses);
        let result: RequestLogAnalyzerResult = analyze(&request_response_pairs).unwrap();
        assert_eq!(result.percentile90, 9);
    }
    // Captures every write() payload so Graphite output can be asserted.
    struct MockTcpStream {
        write_calls: Vec<String>,
    }
    impl Write for MockTcpStream {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            self.write_calls.push(str::from_utf8(buf).unwrap().to_string());
            Ok(1)
        }
        fn flush(&mut self) -> io::Result<()> { Ok(()) }
    }
    #[test]
    fn test_render_graphite() {
        let mut mock_tcp_stream = MockTcpStream {
            write_calls: vec![]
        };
        render_graphite(RequestLogAnalyzerResult {
            count: 3,
            max: 100,
            min: 1,
            avg: 37,
            median: 10,
            percentile90: 100,
        },
        DateTime::parse_from_str("22/Sep/2016:22:41:59 +0200", "%d/%b/%Y:%H:%M:%S %z").unwrap(),
        None,
        &mut mock_tcp_stream
        );
        assert_eq!(&mock_tcp_stream.write_calls[0], "requests.count 3 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[1], "requests.time.max 100 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[2], "requests.time.min 1 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[3], "requests.time.avg 37 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[4], "requests.time.median 10 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[5], "requests.time.90percent 100 1474576919\n");
    }
    #[test]
    fn test_render_graphite_prefix() {
        let mut mock_tcp_stream = MockTcpStream {
            write_calls: vec![]
        };
        render_graphite(RequestLogAnalyzerResult {
            count: 3,
            max: 100,
            min: 1,
            avg: 37,
            median: 10,
            percentile90: 100,
        },
        DateTime::parse_from_str("22/Sep/2016:22:41:59 +0200", "%d/%b/%Y:%H:%M:%S %z").unwrap(),
        Some("my.prefix"),
        &mut mock_tcp_stream
        );
        assert_eq!(&mock_tcp_stream.write_calls[0], "my.prefix.requests.count 3 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[1], "my.prefix.requests.time.max 100 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[2], "my.prefix.requests.time.min 1 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[3], "my.prefix.requests.time.avg 37 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[4], "my.prefix.requests.time.median 10 1474576919\n");
        assert_eq!(&mock_tcp_stream.write_calls[5], "my.prefix.requests.time.90percent 100 1474576919\n");
    }
}
|
#[macro_use]
extern crate lazy_static;
extern crate regex;
use std::thread;
use std::sync::mpsc::{channel, Sender};
use std::path::Path;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::io::SeekFrom;
mod models;
/// Writes Hearthstone's `log.config` (enabling Achievements and Power
/// logging to file and console) at the default Windows install
/// location, but only if no config exists yet.
fn init_log() -> io::Result<()> {
    let log_config = b"[Achievements]
LogLevel=1
FilePrinting=true
ConsolePrinting=true
ScreenPrinting=false
[Power]
LogLevel=1
FilePrinting=true
ConsolePrinting=true
ScreenPrinting=false";
    let hs_dir = Path::new(r"C:\Program Files (x86)\Hearthstone\log.config");
    // Never overwrite a user's existing configuration.
    if !hs_dir.exists() {
        let mut handle = File::create(hs_dir)?;
        handle.write_all(log_config)?;
    }
    Ok(())
}
fn tail_log(tx: Sender<models::Play>) -> io::Result<()> {
let hearthstone_path = Path::new(r"C:\Program Files (x86)\Hearthstone\Logs\Power.log");
let mut handle = io::BufReader::new(File::open(hearthstone_path)?);
handle.seek(SeekFrom::End(0))?;
loop {
let mut b = String::new();
match handle.read_line(&mut b) {
Ok(0) => {
continue;
}
Ok(_) => {
match parse_log_line(&b) {
Some(play) => tx.send(play).unwrap(),
_ => (),
}
}
Err(err) => println!("Error!, {}", err),
}
}
}
/// Extracts a card play from one Power.log line.
///
/// Returns `None` for game-complete markers and for lines that do not
/// match the card-update pattern (or that carry an empty cardId).
fn parse_log_line(line: &str) -> Option<models::Play> {
    lazy_static! {
        // Regexes are compiled once on first use and cached.
        static ref CARD_UPDATE_PATTERN: regex::Regex = regex::Regex::new(
            r"^.*id=(?P<id>\d*) .*cardId=(?P<card_id>[a-zA-Z0-9_]*) .*player=(?P<player>\d*)")
            .unwrap();
        static ref GAME_COMPLETE_PATTERN: regex::Regex = regex::Regex::new(
            r"^.*TAG_CHANGE Entity=GameEntity tag=STATE value=COMPLETE.*$")
            .unwrap();
    }
    if GAME_COMPLETE_PATTERN.is_match(line) {
        return None;
    }
    CARD_UPDATE_PATTERN
        .captures(line)
        .and_then(|group| {
            let id = group.name("id").map(|m| m.as_str());
            let card_id = group.name("card_id").map(|m| m.as_str());
            let player = group.name("player").map(|m| m.as_str());
            match (id, card_id, player) {
                // Skip matches with an empty cardId (presumably entities
                // whose card is not revealed — TODO confirm).
                (Some(id), Some(card_id), Some(player)) if card_id != "" => {
                    Some(models::Play {
                        id: id.to_string(),
                        card_id: card_id.to_string(),
                        player: player.to_string(),
                    })
                }
                _ => None,
            }
        })
}
/// Entry point: ensure logging is configured, spawn the log-tailing
/// thread, and print every game-state change it reports.
fn main() {
    println!("Initializing log config");
    init_log().unwrap();
    println!("Initialized log config");
    let (tx, rx) = channel();
    println!("Spawning log thread");
    thread::spawn(|| tail_log(tx));
    println!("Spawned log thread");
    println!("Start receiving events");
    let mut game_state = models::GameState::default();
    // recv() blocks; the loop ends only if the sender side hangs up.
    while let Ok(play) = rx.recv() {
        let updated = game_state.handle_play(play);
        if updated {
            println!("New state: {:?}", game_state);
            println!();
        }
    }
}
Sleep log thread when no new bytes are written
#[macro_use]
extern crate lazy_static;
extern crate regex;
use std::thread;
use std::sync::mpsc::{channel, Sender};
use std::path::Path;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::io::SeekFrom;
use std::time::Duration;
mod models;
/// Writes Hearthstone's `log.config` (enabling Achievements and Power
/// logging to file and console) at the default Windows install
/// location, but only if no config exists yet.
fn init_log() -> io::Result<()> {
    let log_config = b"[Achievements]
LogLevel=1
FilePrinting=true
ConsolePrinting=true
ScreenPrinting=false
[Power]
LogLevel=1
FilePrinting=true
ConsolePrinting=true
ScreenPrinting=false";
    let hs_dir = Path::new(r"C:\Program Files (x86)\Hearthstone\log.config");
    // Never overwrite a user's existing configuration.
    if !hs_dir.exists() {
        let mut handle = File::create(hs_dir)?;
        handle.write_all(log_config)?;
    }
    Ok(())
}
/// Tails Hearthstone's Power.log forever, sending each parsed play to
/// `tx`. Reading starts at the current end of file so only lines
/// appended after startup are reported.
fn tail_log(tx: Sender<models::Play>) -> io::Result<()> {
    let hearthstone_path = Path::new(r"C:\Program Files (x86)\Hearthstone\Logs\Power.log");
    let mut handle = io::BufReader::new(File::open(hearthstone_path)?);
    handle.seek(SeekFrom::End(0))?;
    loop {
        let mut b = String::new();
        match handle.read_line(&mut b) {
            Ok(0) => {
                // EOF: back off briefly instead of busy-spinning.
                thread::sleep(Duration::from_millis(250));
            }
            Ok(_) => {
                match parse_log_line(&b) {
                    Some(play) => tx.send(play).unwrap(),
                    _ => (),
                }
            }
            Err(err) => println!("Error!, {}", err),
        }
    }
}
/// Extracts a card play from one Power.log line.
///
/// Returns `None` for game-complete markers and for lines that do not
/// match the card-update pattern (or that carry an empty cardId).
fn parse_log_line(line: &str) -> Option<models::Play> {
    lazy_static! {
        // Regexes are compiled once on first use and cached.
        static ref CARD_UPDATE_PATTERN: regex::Regex = regex::Regex::new(
            r"^.*id=(?P<id>\d*) .*cardId=(?P<card_id>[a-zA-Z0-9_]*) .*player=(?P<player>\d*)")
            .unwrap();
        static ref GAME_COMPLETE_PATTERN: regex::Regex = regex::Regex::new(
            r"^.*TAG_CHANGE Entity=GameEntity tag=STATE value=COMPLETE.*$")
            .unwrap();
    }
    if GAME_COMPLETE_PATTERN.is_match(line) {
        return None;
    }
    CARD_UPDATE_PATTERN
        .captures(line)
        .and_then(|group| {
            let id = group.name("id").map(|m| m.as_str());
            let card_id = group.name("card_id").map(|m| m.as_str());
            let player = group.name("player").map(|m| m.as_str());
            match (id, card_id, player) {
                // Skip matches with an empty cardId (presumably entities
                // whose card is not revealed — TODO confirm).
                (Some(id), Some(card_id), Some(player)) if card_id != "" => {
                    Some(models::Play {
                        id: id.to_string(),
                        card_id: card_id.to_string(),
                        player: player.to_string(),
                    })
                }
                _ => None,
            }
        })
}
/// Entry point: ensure logging is configured, spawn the log-tailing
/// thread, and print every game-state change it reports.
fn main() {
    println!("Initializing log config");
    init_log().unwrap();
    println!("Initialized log config");
    let (tx, rx) = channel();
    println!("Spawning log thread");
    thread::spawn(|| tail_log(tx));
    println!("Spawned log thread");
    println!("Start receiving events");
    let mut game_state = models::GameState::default();
    // recv() blocks; the loop ends only if the sender side hangs up.
    while let Ok(play) = rx.recv() {
        let updated = game_state.handle_play(play);
        if updated {
            println!("New state: {:?}", game_state);
            println!();
        }
    }
}
|
extern crate telegram_bot;
use telegram_bot::*;
/// Entry point: long-polls Telegram for updates and answers the /huy
/// command.
///
/// BUG FIX: previously a single listener error ended the process; the
/// listener is now restarted in a loop so transient network/API
/// failures are logged and polling resumes.
fn main() {
    let api = Api::from_env("TELEGRAM_BOT_TOKEN").unwrap();
    let mut listener = api.listener(ListeningMethod::LongPoll(None));
    loop {
        let res = listener.listen(|u| {
            if let Some(m) = u.message {
                match m.msg {
                    MessageType::Text(t) => {
                        // First token is the command, the remainder (if
                        // any) is the argument.
                        let mut values = t.splitn(2, " ");
                        if let Some(c) = values.next() {
                            match c {
                                "/huy" => { try!(api.send_message(m.chat.id(), format!("{} думает, {} хуй", m.from.first_name, values.next().unwrap_or("Костя")), None, None, None)); },
                                _ => {}
                            }
                        }
                    },
                    _ => {}
                }
            }
            Ok(ListeningAction::Continue)
        });
        // Also fixes the "occured" typo in the log message.
        if let Err(e) = res {
            println!("An error occurred: {}", e);
        }
    }
}
Made the listener run in an infinite loop, restarting after errors
extern crate telegram_bot;
use telegram_bot::*;
/// Entry point: long-polls Telegram for updates forever, answering
/// the /huy command; listener errors are logged and polling resumes.
fn main() {
    let api = Api::from_env("TELEGRAM_BOT_TOKEN").unwrap();
    let mut listener = api.listener(ListeningMethod::LongPoll(None));
    loop {
        let res = listener.listen(|u| {
            if let Some(m) = u.message {
                match m.msg {
                    MessageType::Text(t) => {
                        // First token is the command, the remainder (if
                        // any) is the argument.
                        let mut values = t.splitn(2, " ");
                        if let Some(c) = values.next() {
                            match c {
                                "/huy" => { try!(api.send_message(m.chat.id(), format!("{} думает, {} хуй", m.from.first_name, values.next().unwrap_or("Костя")), None, None, None)); },
                                _ => {}
                            }
                        }
                    },
                    _ => {}
                }
            }
            Ok(ListeningAction::Continue)
        });
        if let Err(e) = res {
            println!("An error occured: {}", e);
        }
    }
}
|
#[macro_use] extern crate conrod;
extern crate find_folder;
extern crate piston_window;
extern crate netlion;
use std::thread;
use std::sync::{Arc, Mutex};
use conrod::{Labelable, Positionable, Sizeable, Theme, Widget, Canvas, Text, TextBox, DropDownList, Button};
use conrod::color::{Color, Colorable};
use piston_window::{EventLoop, Glyphs, PistonWindow, UpdateEvent, WindowSettings};
use netlion::*;
/// Conrod is backend agnostic. Here, we define the `piston_window` backend to use for our `Ui`.
type Backend = (<piston_window::G2d<'static> as conrod::Graphics>::Texture, Glyphs);
type Ui = conrod::Ui<Backend>;
type UiCell<'a> = conrod::UiCell<'a, Backend>;
/// Entry point: build the window and UI, then run the event loop,
/// laying out the port text box, protocol drop-down, start button,
/// and the result text area on every update.
fn main() {
    // Construct the window.
    let mut window: PistonWindow = WindowSettings::new("netlion", [800, 600])
        .exit_on_esc(true).build().unwrap();
    // construct our `Ui`.
    let mut ui = {
        let assets = find_folder::Search::KidsThenParents(3, 5)
            .for_folder("conrod").unwrap();
        let font_path = assets.join("assets/fonts/NotoSans/NotoSans-Regular.ttf");
        let theme = Theme::default();
        let glyph_cache = Glyphs::new(&font_path, window.factory.clone());
        Ui::new(glyph_cache.unwrap(), theme)
    };
    let port = &mut String::from("8080");
    let mut net_mode = String::from("tcp");
    let options = &mut vec![String::from("udp"), String::from("tcp")];
    let mut sel_option = Some(1);
    // Shared between the UI thread and the listener threads spawned by
    // `start`.
    let text = Arc::new(Mutex::new(String::from("Welcome to netlion:\n")));
    window.set_ups(60);
    // Poll events from the window.
    while let Some(event) = window.next() {
        ui.handle_event(&event);
        event.update(|_| ui.set_widgets(|mut ui| {
            let ui = &mut ui;
            // Generate the ID for the Button COUNTER.
            widget_ids!(CANVAS, TEXT_BOX, START, PROTO_LIST, RESULT);
            // Create a background canvas upon which we'll place the button.
            Canvas::new().pad(40.0).set(CANVAS, ui);
            TextBox::new(port)
                .top_left_of(CANVAS)
                .w_h(200.0, 40.0)
                .react(|s: &mut String|{println!("react: {}", s)})
                .set(TEXT_BOX, ui);
            // Draw the button and increment `count` if pressed.
            DropDownList::new(options, &mut sel_option)
                .right_from(TEXT_BOX, 10.0)
                .w_h(150.0, 40.0)
                .react(|selected_idx: &mut Option<usize>, new_idx: usize, s: &str| {
                    *selected_idx = Some(new_idx);
                    net_mode = String::from(s);
                })
                .set(PROTO_LIST, ui);
            Button::new()
                .right_from(PROTO_LIST, 10.0)
                .w_h(80.0, 40.0)
                .label(&String::from("Start"))
                .react(|| {
                    let text = text.clone();
                    start(port, text);
                })
                .set(START, ui);
            // NOTE(review): positioned at the top-left of the text box,
            // which overlaps it — confirm this placement is intended.
            Text::new(text.lock().unwrap().as_str())
                // .below(TEXT_BOX)
                .top_left_of(TEXT_BOX)
                .color(Color::Rgba(0.5, 0.5, 0.5, 1.0))
                .align_text_left()
                .line_spacing(10.0)
                .set(RESULT, ui);
        }));
        window.draw_2d(&event, |c, g| ui.draw_if_changed(c, g));
    }
}
/// Spawns a background thread that listens for TCP connections on
/// 0.0.0.0:`port`, feeding received data into the shared `text`
/// buffer.
///
/// NOTE(review): the UI's protocol drop-down (`net_mode`) is never
/// consulted here — selecting "udp" still starts a TCP listener.
fn start(port: &String, text: Arc<Mutex<String>>) {
    let addr = String::from("0.0.0.0:") + port;
    println!("Starting listener on port {}", port);
    // `text` is already owned by value; move it into the thread
    // directly instead of performing the redundant clone the original
    // did.
    thread::spawn(move || listen_tcp(addr.as_str(), text));
}
Fix alignment of the text area
#[macro_use] extern crate conrod;
extern crate find_folder;
extern crate piston_window;
extern crate netlion;
use std::thread;
use std::sync::{Arc, Mutex};
use conrod::{Labelable, Positionable, Sizeable, Theme, Widget, Canvas, Text, TextBox, DropDownList, Button};
use conrod::color::{Color, Colorable};
use piston_window::{EventLoop, Glyphs, PistonWindow, UpdateEvent, WindowSettings};
use netlion::*;
/// Conrod is backend agnostic. Here, we define the `piston_window` backend to use for our `Ui`.
type Backend = (<piston_window::G2d<'static> as conrod::Graphics>::Texture, Glyphs);
type Ui = conrod::Ui<Backend>;
type UiCell<'a> = conrod::UiCell<'a, Backend>;
/// Entry point: build the window and UI, then run the event loop,
/// laying out the port text box, protocol drop-down, start button,
/// and the result text area on every update.
fn main() {
    // Construct the window.
    let mut window: PistonWindow = WindowSettings::new("netlion", [800, 600])
        .exit_on_esc(true).build().unwrap();
    // construct our `Ui`.
    let mut ui = {
        let assets = find_folder::Search::KidsThenParents(3, 5)
            .for_folder("conrod").unwrap();
        let font_path = assets.join("assets/fonts/NotoSans/NotoSans-Regular.ttf");
        let theme = Theme::default();
        let glyph_cache = Glyphs::new(&font_path, window.factory.clone());
        Ui::new(glyph_cache.unwrap(), theme)
    };
    let port = &mut String::from("8080");
    let mut net_mode = String::from("tcp");
    let options = &mut vec![String::from("udp"), String::from("tcp")];
    let mut sel_option = Some(1);
    // Shared between the UI thread and the listener threads spawned by
    // `start`.
    let text = Arc::new(Mutex::new(String::from("Welcome to netlion:\n")));
    window.set_ups(60);
    // Poll events from the window.
    while let Some(event) = window.next() {
        ui.handle_event(&event);
        event.update(|_| ui.set_widgets(|mut ui| {
            let ui = &mut ui;
            // Generate the ID for the Button COUNTER.
            widget_ids!(CANVAS, TEXT_BOX, START, PROTO_LIST, RESULT);
            // Create a background canvas upon which we'll place the button.
            Canvas::new().pad(40.0).set(CANVAS, ui);
            TextBox::new(port)
                .top_left_of(CANVAS)
                .w_h(200.0, 40.0)
                .react(|s: &mut String|{println!("react: {}", s)})
                .set(TEXT_BOX, ui);
            // Draw the button and increment `count` if pressed.
            DropDownList::new(options, &mut sel_option)
                .right_from(TEXT_BOX, 10.0)
                .w_h(150.0, 40.0)
                .react(|selected_idx: &mut Option<usize>, new_idx: usize, s: &str| {
                    *selected_idx = Some(new_idx);
                    net_mode = String::from(s);
                })
                .set(PROTO_LIST, ui);
            Button::new()
                .right_from(PROTO_LIST, 10.0)
                .w_h(80.0, 40.0)
                .label(&String::from("Start"))
                .react(|| {
                    let text = text.clone();
                    start(port, text);
                })
                .set(START, ui);
            // Result area sits below the text box so it no longer
            // overlaps the input row.
            Text::new(text.lock().unwrap().as_str())
                .down_from(TEXT_BOX, 10.0)
                .color(Color::Rgba(0.5, 0.5, 0.5, 1.0))
                .align_text_left()
                .line_spacing(10.0)
                .set(RESULT, ui);
        }));
        window.draw_2d(&event, |c, g| ui.draw_if_changed(c, g));
    }
}
/// Launch a detached thread that listens for TCP data on the given port
/// (all interfaces) and appends whatever it receives to `text`.
fn start(port: &String, text: Arc<Mutex<String>>) {
    let addr = format!("0.0.0.0:{}", port);
    let buffer = text.clone();
    println!("Starting listener on port {}", port);
    thread::spawn(move || listen_tcp(addr.as_str(), buffer));
}
|
use std::fs::File;
use std::io::{Read, Stdin, Result};
/// One Brainfuck operation. The numeric payloads would allow run-length
/// folding (e.g. `MoveRight(3)`), though the parser currently emits 1.
#[derive(Debug, Clone, PartialEq)]
enum Instruction {
    MoveLeft(usize),   // '<' : move data pointer left n cells
    MoveRight(usize),  // '>' : move data pointer right n cells
    Inc(u8),           // '+' : wrapping-add n to the current cell
    Dec(u8),           // '-' : wrapping-sub n from the current cell
    Input,             // ',' : read one byte from stdin into the cell
    Output,            // '.' : print the current cell as a char
    LoopEntry(usize),  // '[' : holds the index of the matching ']'
    LoopExit(usize),   // ']' : holds the index of the matching '['
}
use Instruction::*;
/// A parsed Brainfuck program: a flat instruction list with loop
/// brackets pre-resolved to absolute instruction indices.
#[derive(Debug)]
struct Program {
    instructions: Vec<Instruction>
}
impl Program {
    /// Parse Brainfuck source text into a `Program`.
    ///
    /// Non-command characters are skipped. Brackets are resolved during
    /// parsing: `LoopEntry` stores the index of its matching `]`,
    /// `LoopExit` the index of its matching `[`.
    ///
    /// # Panics
    /// Panics on an unmatched `[` or `]`.
    ///
    /// NOTE(review): each bracket rescans the token stream for its match,
    /// so parsing is O(n^2) for deeply bracketed programs.
    pub fn new(text: String) -> Program
    {
        // "lex" token stream - aka skip whitespace and any other
        // non-command characters
        let token = text.chars().filter(|c| {
            *c == '>' || *c == '<' || *c == '+' || *c == '-' ||
            *c == '.' || *c == ',' || *c == '[' || *c == ']'
        }).collect::<Vec<_>>();
        Program {
            instructions : token.iter().enumerate().map(|(idx, &c)| {
                match c {
                    '<' => MoveLeft(1),
                    '>' => MoveRight(1),
                    '+' => Inc(1),
                    '-' => Dec(1),
                    '.' => Output,
                    ',' => Input,
                    '[' => {
                        // Scan forward for the matching ']' at the same
                        // nesting depth.
                        let mut bracket_nesting = 1;
                        let mut pc = idx + 1;
                        while (bracket_nesting > 0) && (pc < token.len()) {
                            match token[pc] {
                                '[' => bracket_nesting += 1,
                                ']' => bracket_nesting -= 1,
                                _ => (),
                            };
                            pc += 1;
                        }
                        if 0 == bracket_nesting {
                            // The loop stepped one past the ']'; back up so
                            // pc is the index of the matching bracket.
                            pc -= 1;
                        }
                        else {
                            // was: "unmachted" (typo)
                            panic!("unmatched '[' at pc={:}", idx);
                        }
                        LoopEntry(pc)
                    },
                    ']' => {
                        // Scan backward for the matching '['.
                        let mut bracket_nesting = 1;
                        let mut pc = idx;
                        while (bracket_nesting > 0) && (pc > 0) {
                            pc -= 1;
                            match token[pc] {
                                '[' => bracket_nesting -= 1,
                                ']' => bracket_nesting += 1,
                                _ => (),
                            };
                        }
                        if 0 != bracket_nesting {
                            // was: "unmachted" (typo)
                            panic!("unmatched ']' at pc={:}", idx);
                        }
                        LoopExit(pc)
                    },
                    c => panic!("Unknown instruction {:?} at pc={:}", c, idx),
                }
            }).collect::<Vec<_>>()
        }
    }
    /// Interpret the program on a fresh 30,000-cell `u8` tape.
    ///
    /// Cell arithmetic wraps; '.' prints the cell as a raw char, ','
    /// reads one byte from stdin (panics if stdin cannot be read).
    /// NOTE(review): the data pointer is not bounds-checked by
    /// MoveLeft/MoveRight — out-of-range programs will panic on indexing.
    pub fn interp(&self) {
        let mut memory = vec![0u8; 30000];
        let mut pc:usize = 0;
        let mut dataptr:usize = 0;
        let mut stdin = std::io::stdin();
        // Blocking single-byte read backing the ',' instruction.
        fn get_char(stdin: &mut Stdin) -> u8 {
            let mut buf = [0u8; 1];
            match stdin.read(&mut buf) {
                Err(_) => panic!("Cannot read from stdin"),
                Ok(_) => buf[0]
            }
        }
        while pc < self.instructions.len() {
            match self.instructions[pc] {
                MoveLeft(offset) => dataptr -= offset,
                MoveRight(offset) => dataptr += offset,
                Inc(increment) => memory[dataptr] = memory[dataptr].wrapping_add(increment),
                Dec(decrement) => memory[dataptr] = memory[dataptr].wrapping_sub(decrement),
                Output => print!("{:}", memory[dataptr] as char),
                Input => memory[dataptr] = get_char(&mut stdin),
                // Jump targets point at the matching bracket instruction;
                // the pc += 1 below then steps past it.
                LoopEntry(target) => if 0 == memory[dataptr] {
                    pc = target;
                },
                LoopExit(target) => if 0 != memory[dataptr] {
                    pc = target;
                },
            }
            pc += 1;
        }
    }
}
/// Read the file named by `fname` and parse it as a Brainfuck program.
/// I/O errors are propagated; non-UTF-8 content panics.
fn load_program(fname: String) -> Result<Program> {
    let mut bytes: Vec<u8> = Vec::new();
    File::open(fname)?.read_to_end(&mut bytes)?;
    let source = String::from_utf8(bytes).unwrap();
    Ok(Program::new(source))
}
/// Entry point: load and run the bundled Mandelbrot example, panicking
/// if the file cannot be read.
fn main() {
    match load_program("examples/mandelbrot.bf".to_string()) {
        Ok(ref mut p) => {
            //println!("{:?}", p);
            p.interp();
        },
        Err(err) => panic!("Cannot read file because {:?}", err),
    }
}
simplified parser
use std::fs::File;
use std::io::{Read, Stdin, Result};
/// One Brainfuck operation. The numeric payloads would allow run-length
/// folding (e.g. `MoveRight(3)`), though the parser currently emits 1.
#[derive(Debug, Clone, PartialEq)]
enum Instruction {
    MoveLeft(usize),   // '<' : move data pointer left n cells
    MoveRight(usize),  // '>' : move data pointer right n cells
    Inc(u8),           // '+' : wrapping-add n to the current cell
    Dec(u8),           // '-' : wrapping-sub n from the current cell
    Input,             // ',' : read one byte from stdin into the cell
    Output,            // '.' : print the current cell as a char
    LoopEntry(usize),  // '[' : holds the index of the matching ']'
    LoopExit(usize),   // ']' : holds the index of the matching '['
}
use Instruction::*;
/// A parsed Brainfuck program with loop brackets pre-resolved to
/// absolute instruction indices.
#[derive(Debug)]
struct Program {
    instructions: Vec<Instruction>
}
impl Program {
    /// Parse Brainfuck source into a `Program` in a single pass.
    ///
    /// A `[` emits a `LoopEntry` placeholder and pushes its index on
    /// `bracket_stack`; the matching `]` records a relocation so the
    /// placeholder can be patched with the `]`'s index afterwards.
    ///
    /// # Panics
    /// Panics on unbalanced brackets or unknown characters surviving the
    /// lexing filter (the latter branch is unreachable in practice).
    pub fn new(text: String) -> Program {
        // open brackets while parsing
        let mut bracket_stack = Vec::new();
        // idx to patch with index of LoopEntry to patch in
        let mut relocs = Vec::new();
        // "lex" token stream - aka skip whitespace
        let token = text.chars().filter(move |c| {
            *c == '>' || *c == '<' || *c == '+' || *c == '-' ||
            *c == '.' || *c == ',' || *c == '[' || *c == ']'
        });
        let mut instructions = token.enumerate().map(|(idx, c)| {
            match c {
                '<' => MoveLeft(1),
                '>' => MoveRight(1),
                '+' => Inc(1),
                '-' => Dec(1),
                '.' => Output,
                ',' => Input,
                '[' => {
                    bracket_stack.push(idx);
                    // value must be patched later
                    LoopEntry(std::usize::MAX)
                },
                ']' => {
                    if let Some(loop_entry) = bracket_stack.pop() {
                        relocs.push((loop_entry, idx));
                        LoopExit(loop_entry)
                    }
                    else {
                        panic!("Unbalanced {:?} at pc={:}", c, idx);
                    }
                },
                c => panic!("Unknown instruction {:?} at pc={:}", c, idx),
            }
        }).collect::<Vec<_>>();
        // Any bracket still open at this point was never closed.
        if let Some(unbalanced_idx) = bracket_stack.pop() {
            panic!("Unbalanced {:?} at pc={:}", '[', unbalanced_idx);
        }
        // Patch each LoopEntry placeholder with its matching ']' index.
        for (idx, value) in relocs {
            if idx >= instructions.len() || LoopEntry(std::usize::MAX) != instructions[idx] {
                panic!("Unexpected instruction {:?} at pc={:} for reloc", instructions[idx], idx);
            }
            else {
                instructions[idx] = LoopEntry(value);
            }
        }
        Program {
            instructions : instructions
        }
    }
    /// Interpret the program on a fresh 30,000-cell `u8` tape. Cell
    /// arithmetic wraps; '.' prints the cell as a char, ',' reads one
    /// byte from stdin (panicking if stdin cannot be read).
    pub fn interp(&self) {
        let mut memory = vec![0u8; 30000];
        let mut pc:usize = 0;
        let mut dataptr:usize = 0;
        let mut stdin = std::io::stdin();
        // Blocking single-byte read backing the ',' instruction.
        fn get_char(stdin: &mut Stdin) -> u8 {
            let mut buf = [0u8; 1];
            match stdin.read(&mut buf) {
                Err(_) => panic!("Cannot read from stdin"),
                Ok(_) => buf[0]
            }
        }
        while pc < self.instructions.len() {
            match self.instructions[pc] {
                MoveLeft(offset) => dataptr -= offset,
                MoveRight(offset) => dataptr += offset,
                Inc(increment) => memory[dataptr] = memory[dataptr].wrapping_add(increment),
                Dec(decrement) => memory[dataptr] = memory[dataptr].wrapping_sub(decrement),
                Output => print!("{:}", memory[dataptr] as char),
                Input => memory[dataptr] = get_char(&mut stdin),
                // Jump targets point at the matching bracket; the pc += 1
                // below then steps past it.
                LoopEntry(target) => if 0 == memory[dataptr] {
                    pc = target;
                },
                LoopExit(target) => if 0 != memory[dataptr] {
                    pc = target;
                },
            }
            pc += 1;
        }
    }
}
/// Read the file named by `fname` and parse it as a Brainfuck program.
/// I/O errors are propagated; non-UTF-8 content panics.
fn load_program(fname: String) -> Result<Program> {
    let mut bytes: Vec<u8> = Vec::new();
    File::open(fname)?.read_to_end(&mut bytes)?;
    let source = String::from_utf8(bytes).unwrap();
    Ok(Program::new(source))
}
/// Entry point: load and run the bundled Mandelbrot example, panicking
/// if the file cannot be read.
fn main() {
    match load_program("examples/mandelbrot.bf".to_string()) {
        Ok(ref mut p) => {
            //println!("{:?}", p);
            p.interp();
        },
        Err(err) => panic!("Cannot read file because {:?}", err),
    }
}
|
extern crate fuse;
extern crate libc;
extern crate time;
extern crate hyper;
extern crate rustc_serialize;
extern crate clap;
use std::collections::BTreeMap;
use std::path::Path;
use std::env;
use std::io::prelude::Read;
use libc::{ENOENT, ENOSYS};
use time::Timespec;
use fuse::{FileAttr, FileType, Filesystem, Request, ReplyAttr, ReplyData, ReplyEntry, ReplyDirectory, ReplyOpen};
use hyper::client::{Client, Response};
use rustc_serialize::json::Json;
use clap::{App, Arg};
/// In-memory FUSE filesystem exposing Twitch's current "top games" list
/// as entries in a single flat directory.
struct TwitchFileSystem {
    attrs: BTreeMap<u64, FileAttr>,  // inode number -> file attributes
    inodes: BTreeMap<String, u64>    // entry name -> inode number
}
impl TwitchFileSystem {
    /// Build the filesystem: inode 1 is the root directory, plus one
    /// entry per game in Twitch's "top games" response (inodes 2..).
    ///
    /// Blocks on an HTTPS request at construction time; a network failure
    /// panics (`expect`), malformed JSON logs and yields an empty tree.
    fn new() -> TwitchFileSystem {
        let mut attrs = BTreeMap::new();
        let mut inodes = BTreeMap::new();
        let ts = time::now().to_timespec();
        // Root directory attributes (inode 1).
        let attr = FileAttr {
            ino: 1,
            size: 0,
            blocks: 0,
            atime: ts,
            mtime: ts,
            ctime: ts,
            crtime: ts,
            kind: FileType::Directory,
            perm: 0o755,
            nlink: 0,
            uid: 0,
            gid: 0,
            rdev: 0,
            flags: 0
        };
        attrs.insert(1, attr);
        inodes.insert("/".to_owned(), 1);
        let mut body = String::new();
        // NOTE(review): the Result of read_to_string is discarded; a failed
        // read leaves `body` empty and falls into the Err branch below.
        Client::new()
            .get("https://api.twitch.tv/kraken/games/top")
            .send()
            .expect("Couldn't load twitch")
            .read_to_string(&mut body);
        match Json::from_str(&body) {
            Ok(data) => {
                let games = data.find("top").unwrap().as_array().unwrap();
                for (i, game) in games.iter().enumerate() {
                    // One entry per game, inodes numbered from 2.
                    // NOTE(review): kind is Directory here but readdir
                    // lists these entries as RegularFile — confirm intent.
                    let attr = FileAttr {
                        ino: i as u64 + 2,
                        size: 0,
                        blocks: 0,
                        atime: ts,
                        mtime: ts,
                        ctime: ts,
                        crtime: ts,
                        kind: FileType::Directory,
                        perm: 0o644,
                        nlink: 0,
                        uid: 0,
                        gid: 0,
                        rdev: 0,
                        flags: 0
                    };
                    let name = game
                        .find_path(&["game", "name"])
                        .unwrap()
                        .as_string()
                        .unwrap();
                    attrs.insert(attr.ino, attr);
                    inodes.insert(name.to_owned(), attr.ino);
                }
            },
            Err(_) => println!("Twitch returned invalid json")
        }
        TwitchFileSystem {attrs: attrs, inodes: inodes}
    }
}
impl Filesystem for TwitchFileSystem {
    /// Return the attributes for `ino`, or ENOENT when unknown.
    fn getattr(&mut self, _req: &Request, ino: u64, reply: ReplyAttr) {
        match self.attrs.get(&ino) {
            Some(attr) => {
                let ttl = Timespec::new(1, 0);
                reply.attr(&ttl, attr);
            },
            None => reply.error(ENOENT)
        }
    }
    /// Resolve a name to an entry. `parent` is ignored — names are
    /// globally unique in this single-level tree.
    fn lookup(&mut self, _req: &Request, parent: u64, name: &Path, reply: ReplyEntry) {
        let inode = match self.inodes.get(name.to_str().unwrap()) {
            Some(inode) => inode,
            None => {
                reply.error(ENOENT);
                return;
            }
        };
        match self.attrs.get(inode) {
            Some(attr) => {
                let ttl = Timespec::new(1, 0);
                reply.entry(&ttl, attr, 0);
            },
            None => reply.error(ENOENT),
        };
    }
    /// List the directory. Each entry's inode doubles as its readdir
    /// offset. NOTE(review): `ino` is not checked, so every directory
    /// returns the same listing — confirm intent.
    fn readdir(&mut self, _req: &Request, ino: u64, fh: u64, offset: u64, mut reply: ReplyDirectory) {
        if offset == 0 {
            for (game, &inode) in &self.inodes {
                if inode == 1 { continue; }
                let offset = inode;
                reply.add(inode, offset, FileType::RegularFile, &Path::new(game));
            }
            reply.add(1, 0, FileType::Directory, &Path::new("."));
            reply.add(1, 1, FileType::Directory, &Path::new(".."));
        } else if offset == 1 {
            reply.add(1, 0, FileType::Directory, &Path::new("u wot m8"));
        }
        reply.ok();
    }
    /// Every file reads back the fixed string "test"; offset/size ignored.
    fn read(&mut self, _req: &Request, ino: u64, fh: u64, offset: u64, size: u32, reply: ReplyData) {
        reply.data("test".as_bytes());
    }
}
/// Parse the mountpoint argument and mount the filesystem; blocks until
/// the filesystem is unmounted.
fn main() {
    let matches = App::new("twitch-fs")
        .version(option_env!("CARGO_PKG_VERSION").unwrap_or("unknown version"))
        .arg(Arg::with_name("mountpoint")
             .index(1)
             .required(true))
        .get_matches();
    // unwrap() is safe here because the argument is set as required
    let mountpoint = matches.value_of_os("mountpoint").unwrap();
    let fs = TwitchFileSystem::new();
    fuse::mount(fs , &mountpoint, &[])
}
Make sure specified mountpoint is actually a directory
extern crate fuse;
extern crate libc;
extern crate time;
extern crate hyper;
extern crate rustc_serialize;
extern crate clap;
use std::collections::BTreeMap;
use std::path::Path;
use std::env;
use std::io::prelude::Read;
use libc::{ENOENT, ENOSYS};
use time::Timespec;
use fuse::{FileAttr, FileType, Filesystem, Request, ReplyAttr, ReplyData, ReplyEntry, ReplyDirectory, ReplyOpen};
use hyper::client::{Client, Response};
use rustc_serialize::json::Json;
use clap::{App, Arg};
/// In-memory FUSE filesystem exposing Twitch's current "top games" list
/// as entries in a single flat directory.
struct TwitchFileSystem {
    attrs: BTreeMap<u64, FileAttr>,  // inode number -> file attributes
    inodes: BTreeMap<String, u64>    // entry name -> inode number
}
impl TwitchFileSystem {
    /// Build the filesystem: inode 1 is the root directory, plus one
    /// entry per game in Twitch's "top games" response (inodes 2..).
    ///
    /// Blocks on an HTTPS request at construction time; a network failure
    /// panics (`expect`), malformed JSON logs and yields an empty tree.
    fn new() -> TwitchFileSystem {
        let mut attrs = BTreeMap::new();
        let mut inodes = BTreeMap::new();
        let ts = time::now().to_timespec();
        // Root directory attributes (inode 1).
        let attr = FileAttr {
            ino: 1,
            size: 0,
            blocks: 0,
            atime: ts,
            mtime: ts,
            ctime: ts,
            crtime: ts,
            kind: FileType::Directory,
            perm: 0o755,
            nlink: 0,
            uid: 0,
            gid: 0,
            rdev: 0,
            flags: 0
        };
        attrs.insert(1, attr);
        inodes.insert("/".to_owned(), 1);
        let mut body = String::new();
        // NOTE(review): the Result of read_to_string is discarded; a failed
        // read leaves `body` empty and falls into the Err branch below.
        Client::new()
            .get("https://api.twitch.tv/kraken/games/top")
            .send()
            .expect("Couldn't load twitch")
            .read_to_string(&mut body);
        match Json::from_str(&body) {
            Ok(data) => {
                let games = data.find("top").unwrap().as_array().unwrap();
                for (i, game) in games.iter().enumerate() {
                    // One entry per game, inodes numbered from 2.
                    // NOTE(review): kind is Directory here but readdir
                    // lists these entries as RegularFile — confirm intent.
                    let attr = FileAttr {
                        ino: i as u64 + 2,
                        size: 0,
                        blocks: 0,
                        atime: ts,
                        mtime: ts,
                        ctime: ts,
                        crtime: ts,
                        kind: FileType::Directory,
                        perm: 0o644,
                        nlink: 0,
                        uid: 0,
                        gid: 0,
                        rdev: 0,
                        flags: 0
                    };
                    let name = game
                        .find_path(&["game", "name"])
                        .unwrap()
                        .as_string()
                        .unwrap();
                    attrs.insert(attr.ino, attr);
                    inodes.insert(name.to_owned(), attr.ino);
                }
            },
            Err(_) => println!("Twitch returned invalid json")
        }
        TwitchFileSystem {attrs: attrs, inodes: inodes}
    }
}
impl Filesystem for TwitchFileSystem {
    /// Return the attributes for `ino`, or ENOENT when unknown.
    fn getattr(&mut self, _req: &Request, ino: u64, reply: ReplyAttr) {
        match self.attrs.get(&ino) {
            Some(attr) => {
                let ttl = Timespec::new(1, 0);
                reply.attr(&ttl, attr);
            },
            None => reply.error(ENOENT)
        }
    }
    /// Resolve a name to an entry. `parent` is ignored — names are
    /// globally unique in this single-level tree.
    fn lookup(&mut self, _req: &Request, parent: u64, name: &Path, reply: ReplyEntry) {
        let inode = match self.inodes.get(name.to_str().unwrap()) {
            Some(inode) => inode,
            None => {
                reply.error(ENOENT);
                return;
            }
        };
        match self.attrs.get(inode) {
            Some(attr) => {
                let ttl = Timespec::new(1, 0);
                reply.entry(&ttl, attr, 0);
            },
            None => reply.error(ENOENT),
        };
    }
    /// List the directory. Each entry's inode doubles as its readdir
    /// offset. NOTE(review): `ino` is not checked, so every directory
    /// returns the same listing — confirm intent.
    fn readdir(&mut self, _req: &Request, ino: u64, fh: u64, offset: u64, mut reply: ReplyDirectory) {
        if offset == 0 {
            for (game, &inode) in &self.inodes {
                if inode == 1 { continue; }
                let offset = inode;
                reply.add(inode, offset, FileType::RegularFile, &Path::new(game));
            }
            reply.add(1, 0, FileType::Directory, &Path::new("."));
            reply.add(1, 1, FileType::Directory, &Path::new(".."));
        } else if offset == 1 {
            reply.add(1, 0, FileType::Directory, &Path::new("u wot m8"));
        }
        reply.ok();
    }
    /// Every file reads back the fixed string "test"; offset/size ignored.
    fn read(&mut self, _req: &Request, ino: u64, fh: u64, offset: u64, size: u32, reply: ReplyData) {
        reply.data("test".as_bytes());
    }
}
/// `clap` validator: succeed iff `mountpoint` names an existing directory.
///
/// Note that `Path::is_dir` also returns `false` when the path cannot be
/// accessed at all (e.g. permission denied), not only for non-directories.
fn is_valid_dir(mountpoint: String) -> Result<(), String> {
    // Idiomatic boolean branch instead of `match` on a bool.
    if Path::new(&mountpoint).is_dir() {
        Ok(())
    } else {
        Err("Mountpoint must be a directory".to_string())
    }
}
/// Parse and validate the mountpoint argument, then mount the
/// filesystem; blocks until it is unmounted.
fn main() {
    let matches = App::new("twitch-fs")
        .version(option_env!("CARGO_PKG_VERSION").unwrap_or("unknown version"))
        .arg(Arg::with_name("mountpoint")
             // Reject anything that is not an existing directory up front.
             .validator(is_valid_dir)
             .index(1)
             .required(true))
        .get_matches();
    // unwrap() is safe here because the argument is set as required
    let mountpoint = matches.value_of_os("mountpoint").unwrap();
    let fs = TwitchFileSystem::new();
    fuse::mount(fs , &mountpoint, &[])
}
|
use std::fmt;
use std::str::FromStr;
/// A leaf value: a floating-point number or a symbol name.
#[derive(Debug, PartialEq)]
enum Atom {
    Number(f64),
    Symbol(String)
}
/// An s-expression: an atom or a parenthesised list of s-expressions.
#[derive(Debug, PartialEq)]
enum Sexp {
    Atom(Atom),
    List(Vec<Sexp>)
}
/// Error from `Atom::from_str`: the token is not a number and not a
/// valid symbol (symbols must not start with a digit).
#[derive(Debug, PartialEq)]
enum ParseAtomError {
    IncorrectSymbolName
}
/// Render an atom: numbers print via their `Display`; symbols are
/// prefixed with a single quote (e.g. `'name`).
impl fmt::Display for Atom {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let rendered = match *self {
            Atom::Number(n) => format!("{}", n),
            Atom::Symbol(ref name) => format!("'{}", name),
        };
        f.write_str(&rendered)
    }
}
impl FromStr for Atom {
    type Err = ParseAtomError;
    /// Numbers take precedence: anything `f64` can parse becomes a
    /// `Number`; otherwise the token is a `Symbol` provided its first
    /// character is not a digit.
    fn from_str(s: &str) -> Result<Atom, ParseAtomError> {
        if let Ok(f) = s.parse::<f64>() {
            return Ok(Atom::Number(f));
        }
        match s.chars().next() {
            Some(c) if !c.is_numeric() => Ok(Atom::Symbol(s.to_string())),
            _ => Err(ParseAtomError::IncorrectSymbolName),
        }
    }
}
/// Split source text into tokens: parentheses become standalone tokens,
/// newlines act as spaces, and empty fragments are dropped.
fn tokenize(s: &str) -> Vec<String> {
    let spaced = s
        .replace("(", " ( ")
        .replace(")", " ) ")
        .replace("\n", " ");
    spaced
        .split(' ')
        .filter(|tok| !tok.is_empty())
        .map(str::to_string)
        .collect()
}
/// State of the hand-rolled parser state machine in `read`.
#[derive(Debug, PartialEq)]
enum ReadState {
    StartRead,       // before the first token of a top-level expression
    OpenList,        // just consumed '('
    AtomRead(Atom),  // just parsed an atom, pending append to current list
    CloseList,       // just consumed ')'
    EndRead,         // input exhausted (or an unexpected token was seen)
}
/// Read errors. Currently uninhabited: `read` never returns `Err`.
#[derive(Debug, PartialEq)]
enum ReadError {
}
/// Parse a token stream into a sequence of top-level s-expressions using
/// an explicit state machine.
///
/// NOTE(review): a '(' appearing inside a list is fed to `Atom::from_str`
/// and becomes `Symbol("(")` — nested lists are not supported. An invalid
/// atom panics via `.ok().unwrap()`, and a top-level token other than
/// '(' silently ends reading.
fn read(tokens: Vec<String>) -> Result<Vec<Sexp>, ReadError> {
    let mut state = ReadState::StartRead;
    let mut iter = tokens.iter();
    let mut result = vec![];
    loop {
        match state {
            ReadState::StartRead => {
                // Expect the opening '(' of a top-level expression.
                match iter.next() {
                    Some(s) if *s == "(" => {
                        state = ReadState::OpenList;
                        result.push(Sexp::List(vec![]));
                    },
                    None => {
                        state = ReadState::EndRead;
                    },
                    _ => {
                        state = ReadState::EndRead;
                    }
                }
            },
            ReadState::OpenList => {
                // Inside a freshly opened list: either close it or read an atom.
                match iter.next() {
                    Some(s) if *s == ")" => {
                        state = ReadState::CloseList;
                    },
                    Some(s) => {
                        state = ReadState::AtomRead(s.parse::<Atom>().ok().unwrap());
                    },
                    _ => {
                        state = ReadState::EndRead;
                    }
                }
            },
            ReadState::CloseList => {
                // After a ')' only a new top-level '(' continues parsing.
                match iter.next() {
                    Some(s) if *s == "(" => {
                        state = ReadState::OpenList;
                        result.push(Sexp::List(vec![]));
                    },
                    _ => {
                        state = ReadState::EndRead;
                    }
                }
            },
            ReadState::AtomRead(atom) => {
                // Append the parsed atom to the list currently being built.
                if let Some(Sexp::List(mut current_list)) = result.pop() {
                    current_list.push(Sexp::Atom(atom));
                    result.push(Sexp::List(current_list));
                }
                match iter.next() {
                    Some(s) if *s == ")" => {
                        state = ReadState::CloseList;
                    },
                    Some(s) => {
                        state = ReadState::AtomRead(s.parse::<Atom>().ok().unwrap());
                    },
                    _ => {
                        state = ReadState::EndRead;
                    }
                }
            },
            ReadState::EndRead => {
                return Ok(result)
            }
        }
    }
}
#[cfg_attr(test, allow(dead_code))]
// Intentionally empty: this file is exercised through its unit tests.
fn main() {
}
// Unit tests for atom parsing, tokenisation and the reader.
#[cfg(test)]
mod tests {
    use super::Atom::{self, Number, Symbol};
    use super::Sexp;
    use super::ParseAtomError::IncorrectSymbolName;
    use super::{tokenize, read};
    #[test]
    fn test_parse_integer() {
        assert_eq!(Number(64f64), "64".parse::<Atom>().ok().unwrap())
    }
    #[test]
    fn test_parse_float() {
        assert_eq!(Number(64.5), "64.5".parse::<Atom>().ok().unwrap())
    }
    #[test]
    fn test_parse_symbol() {
        assert_eq!(Symbol("name".to_string()), "name".parse::<Atom>().ok().unwrap())
    }
    #[test]
    fn test_parse_incorrect_symbol_starting_with_digit() {
        assert_eq!(IncorrectSymbolName, "6name".parse::<Atom>().err().unwrap())
    }
    #[test]
    fn test_tokenize_dense_expression() {
        let expected_result = ["(", "def", "a", "1", ")"]
            .iter()
            .map(|s| { s.to_string() })
            .collect();
        assert_eq!(expected_result, tokenize("(def a 1)"))
    }
    #[test]
    fn test_tokenize_sparse_expression() {
        // Extra spaces and newlines must not produce empty tokens.
        let expected_result = ["(", "def", "a", "1", ")"]
            .iter()
            .map(|s| { s.to_string() })
            .collect();
        assert_eq!(expected_result, tokenize(" ( \n def a\n1) \n"))
    }
    #[test]
    fn test_read_empty() {
        let expected_result = vec![];
        assert_eq!(expected_result, read(tokenize("")).ok().unwrap())
    }
    #[test]
    fn test_read_single_expression() {
        let expected_result = vec![Sexp::List(vec![Sexp::Atom(Symbol("def".to_string())),
                                                   Sexp::Atom(Symbol("a".to_string())),
                                                   Sexp::Atom(Number(1f64))])];
        assert_eq!(expected_result, read(tokenize("(def a 1)")).ok().unwrap())
    }
    #[test]
    fn test_read_multiple_expression() {
        let expected_result = vec![Sexp::List(vec![Sexp::Atom(Symbol("def".to_string())),
                                                   Sexp::Atom(Symbol("a".to_string())),
                                                   Sexp::Atom(Number(1f64))]),
                                   Sexp::List(vec![Sexp::Atom(Symbol("def".to_string())),
                                                   Sexp::Atom(Symbol("b".to_string())),
                                                   Sexp::Atom(Number(2f64))]),
                                   Sexp::List(vec![Sexp::Atom(Symbol("+".to_string())),
                                                   Sexp::Atom(Symbol("a".to_string())),
                                                   Sexp::Atom(Symbol("b".to_string()))])];
        assert_eq!(expected_result, read(tokenize("(def a 1)(def b 2)(+ a b)")).ok().unwrap())
    }
}
Remove Display impl for Atom
use std::str::FromStr;
/// A leaf value: a floating-point number or a symbol name.
#[derive(Debug, PartialEq)]
enum Atom {
    Number(f64),
    Symbol(String)
}
/// An s-expression: an atom or a parenthesised list of s-expressions.
#[derive(Debug, PartialEq)]
enum Sexp {
    Atom(Atom),
    List(Vec<Sexp>)
}
/// Error from `Atom::from_str`: the token is not a number and not a
/// valid symbol (symbols must not start with a digit).
#[derive(Debug, PartialEq)]
enum ParseAtomError {
    IncorrectSymbolName
}
impl FromStr for Atom {
    type Err = ParseAtomError;
    /// Numbers take precedence: anything `f64` can parse becomes a
    /// `Number`; otherwise the token is a `Symbol` provided its first
    /// character is not a digit.
    fn from_str(s: &str) -> Result<Atom, ParseAtomError> {
        if let Ok(f) = s.parse::<f64>() {
            return Ok(Atom::Number(f));
        }
        match s.chars().next() {
            Some(c) if !c.is_numeric() => Ok(Atom::Symbol(s.to_string())),
            _ => Err(ParseAtomError::IncorrectSymbolName),
        }
    }
}
/// Split source text into tokens: parentheses become standalone tokens,
/// newlines act as spaces, and empty fragments are dropped.
fn tokenize(s: &str) -> Vec<String> {
    let spaced = s
        .replace("(", " ( ")
        .replace(")", " ) ")
        .replace("\n", " ");
    spaced
        .split(' ')
        .filter(|tok| !tok.is_empty())
        .map(str::to_string)
        .collect()
}
/// State of the hand-rolled parser state machine in `read`.
#[derive(Debug, PartialEq)]
enum ReadState {
    StartRead,       // before the first token of a top-level expression
    OpenList,        // just consumed '('
    AtomRead(Atom),  // just parsed an atom, pending append to current list
    CloseList,       // just consumed ')'
    EndRead,         // input exhausted (or an unexpected token was seen)
}
/// Read errors. Currently uninhabited: `read` never returns `Err`.
#[derive(Debug, PartialEq)]
enum ReadError {
}
/// Parse a token stream into a sequence of top-level s-expressions using
/// an explicit state machine.
///
/// NOTE(review): a '(' appearing inside a list is fed to `Atom::from_str`
/// and becomes `Symbol("(")` — nested lists are not supported. An invalid
/// atom panics via `.ok().unwrap()`, and a top-level token other than
/// '(' silently ends reading.
fn read(tokens: Vec<String>) -> Result<Vec<Sexp>, ReadError> {
    let mut state = ReadState::StartRead;
    let mut iter = tokens.iter();
    let mut result = vec![];
    loop {
        match state {
            ReadState::StartRead => {
                // Expect the opening '(' of a top-level expression.
                match iter.next() {
                    Some(s) if *s == "(" => {
                        state = ReadState::OpenList;
                        result.push(Sexp::List(vec![]));
                    },
                    None => {
                        state = ReadState::EndRead;
                    },
                    _ => {
                        state = ReadState::EndRead;
                    }
                }
            },
            ReadState::OpenList => {
                // Inside a freshly opened list: either close it or read an atom.
                match iter.next() {
                    Some(s) if *s == ")" => {
                        state = ReadState::CloseList;
                    },
                    Some(s) => {
                        state = ReadState::AtomRead(s.parse::<Atom>().ok().unwrap());
                    },
                    _ => {
                        state = ReadState::EndRead;
                    }
                }
            },
            ReadState::CloseList => {
                // After a ')' only a new top-level '(' continues parsing.
                match iter.next() {
                    Some(s) if *s == "(" => {
                        state = ReadState::OpenList;
                        result.push(Sexp::List(vec![]));
                    },
                    _ => {
                        state = ReadState::EndRead;
                    }
                }
            },
            ReadState::AtomRead(atom) => {
                // Append the parsed atom to the list currently being built.
                if let Some(Sexp::List(mut current_list)) = result.pop() {
                    current_list.push(Sexp::Atom(atom));
                    result.push(Sexp::List(current_list));
                }
                match iter.next() {
                    Some(s) if *s == ")" => {
                        state = ReadState::CloseList;
                    },
                    Some(s) => {
                        state = ReadState::AtomRead(s.parse::<Atom>().ok().unwrap());
                    },
                    _ => {
                        state = ReadState::EndRead;
                    }
                }
            },
            ReadState::EndRead => {
                return Ok(result)
            }
        }
    }
}
#[cfg_attr(test, allow(dead_code))]
// Intentionally empty: this file is exercised through its unit tests.
fn main() {
}
// Unit tests for atom parsing, tokenisation and the reader.
#[cfg(test)]
mod tests {
    use super::Atom::{self, Number, Symbol};
    use super::Sexp;
    use super::ParseAtomError::IncorrectSymbolName;
    use super::{tokenize, read};
    #[test]
    fn test_parse_integer() {
        assert_eq!(Number(64f64), "64".parse::<Atom>().ok().unwrap())
    }
    #[test]
    fn test_parse_float() {
        assert_eq!(Number(64.5), "64.5".parse::<Atom>().ok().unwrap())
    }
    #[test]
    fn test_parse_symbol() {
        assert_eq!(Symbol("name".to_string()), "name".parse::<Atom>().ok().unwrap())
    }
    #[test]
    fn test_parse_incorrect_symbol_starting_with_digit() {
        assert_eq!(IncorrectSymbolName, "6name".parse::<Atom>().err().unwrap())
    }
    #[test]
    fn test_tokenize_dense_expression() {
        let expected_result = ["(", "def", "a", "1", ")"]
            .iter()
            .map(|s| { s.to_string() })
            .collect();
        assert_eq!(expected_result, tokenize("(def a 1)"))
    }
    #[test]
    fn test_tokenize_sparse_expression() {
        // Extra spaces and newlines must not produce empty tokens.
        let expected_result = ["(", "def", "a", "1", ")"]
            .iter()
            .map(|s| { s.to_string() })
            .collect();
        assert_eq!(expected_result, tokenize(" ( \n def a\n1) \n"))
    }
    #[test]
    fn test_read_empty() {
        let expected_result = vec![];
        assert_eq!(expected_result, read(tokenize("")).ok().unwrap())
    }
    #[test]
    fn test_read_single_expression() {
        let expected_result = vec![Sexp::List(vec![Sexp::Atom(Symbol("def".to_string())),
                                                   Sexp::Atom(Symbol("a".to_string())),
                                                   Sexp::Atom(Number(1f64))])];
        assert_eq!(expected_result, read(tokenize("(def a 1)")).ok().unwrap())
    }
    #[test]
    fn test_read_multiple_expression() {
        let expected_result = vec![Sexp::List(vec![Sexp::Atom(Symbol("def".to_string())),
                                                   Sexp::Atom(Symbol("a".to_string())),
                                                   Sexp::Atom(Number(1f64))]),
                                   Sexp::List(vec![Sexp::Atom(Symbol("def".to_string())),
                                                   Sexp::Atom(Symbol("b".to_string())),
                                                   Sexp::Atom(Number(2f64))]),
                                   Sexp::List(vec![Sexp::Atom(Symbol("+".to_string())),
                                                   Sexp::Atom(Symbol("a".to_string())),
                                                   Sexp::Atom(Symbol("b".to_string()))])];
        assert_eq!(expected_result, read(tokenize("(def a 1)(def b 2)(+ a b)")).ok().unwrap())
    }
}
|
extern crate mio;
use mio::*;
use mio::net::{SockAddr};
use mio::net::tcp::{TcpSocket, TcpAcceptor};
use mio::buf::{ByteBuf, MutByteBuf, SliceBuf};
/// Event-driven TCP listener on 127.0.0.1:13265 built on an early mio
/// API: accepts one connection (Token(2)) and prints whatever it
/// receives to stdout.
fn main() {
    const SERVER: Token = Token(1);
    let addr = SockAddr::parse("127.0.0.1:13265").unwrap();
    // Bind and listen with a backlog of 256 pending connections.
    let server = TcpSocket::v4().unwrap()
        .bind(&addr).unwrap()
        .listen(256).unwrap();
    let mut event_loop = EventLoop::<(), ()>::new().unwrap();
    event_loop.register(&server, SERVER).unwrap();
    struct TcpHandler {
        conn: Option<TcpSocket>,  // the single accepted connection, if any
        sock: TcpAcceptor         // the listening socket
    };
    impl TcpHandler {
        /// Accept a pending connection and register it for read events.
        /// NOTE(review): a second accept overwrites `self.conn` — the
        /// previous connection is dropped.
        fn accept(&mut self, event_loop: &mut EventLoop<(), ()>) {
            let conn = self.sock.accept();
            let sock = conn.unwrap().unwrap();
            let tok = Token(2);
            self.conn = Some(sock);
            match self.conn {
                Some(ref c) => { event_loop.register_opt(c, tok, Interest::readable(), PollOpt::edge() | PollOpt::oneshot()); }
                None => { }
            }
        }
    }
    impl Handler<(), ()> for TcpHandler {
        /// Dispatch read events: the server token accepts; any other
        /// token reads up to 2048 bytes and echoes them to stdout.
        fn readable(&mut self, event_loop: &mut EventLoop<(), ()>, token: Token, _: ReadHint) {
            match token {
                SERVER => {
                    self.accept(event_loop);
                }
                tok => {
                    println!("tok: {}", tok.as_usize());
                    let mut read_buf = ByteBuf::mut_with_capacity(2048);
                    let mut interest = Interest::readable();
                    match self.conn {
                        Some(ref c) => {
                            match c.read(&mut read_buf) {
                                Ok(NonBlock::WouldBlock) => {
                                    panic!("We just got readable, but were unable to read from the socket?");
                                }
                                Ok(NonBlock::Ready(r)) => {
                                    // Flip to a readable view and copy out the bytes.
                                    let mut buf = read_buf.flip();
                                    let mut sl = [0; 2048];
                                    buf.read_slice(&mut sl);
                                    print!("{}", String::from_utf8(sl.to_vec()).unwrap());
                                    // self.interest.remove(Interest::readable());
                                    // self.interest.insert(Interest::writable());
                                }
                                Err(e) => {
                                    // Any read error stops the event loop.
                                    event_loop.shutdown();
                                    // println!("not implemented; client err={:?}", e);
                                    // interest = Interest::hup();
                                    // self.interest.remove(Interest::readable());
                                }
                            }
                            // Re-arm the oneshot registration for the next read.
                            event_loop.reregister(c, tok, interest, PollOpt::edge() | PollOpt::oneshot());
                        },
                        None => { }
                    }
                }
            }
        }
    }
    let mut tcp_server = TcpHandler {
        conn: None,
        sock: server
    };
    let _ = event_loop.run(&mut tcp_server);
}
More simplification, adding comments.
/*
 * Simple TCP listener, similar to `nc -l 13265`.
 * Will listen for TCP connections on port 13265, accept a single
 * connection, and print whatever it receives to stdout.
 */
extern crate mio;
use mio::*;
use mio::net::{SockAddr};
use mio::net::tcp::{TcpSocket, TcpAcceptor};
use mio::buf::{ByteBuf};
/// Single-connection TCP listener on 127.0.0.1:13265 built on an early
/// mio API; received bytes are printed to stdout, and a client EOF
/// shuts the loop down.
fn main() {
    const SERVER: Token = Token(1);
    let addr = SockAddr::parse("127.0.0.1:13265").unwrap();
    // Listen on `addr`, with a connection backlog of just 1, since we only want to handle one
    // connection. See `man listen`.
    let server = TcpSocket::v4().unwrap()
        .bind(&addr).unwrap()
        .listen(1).unwrap();
    let mut event_loop = EventLoop::<(), ()>::new().unwrap();
    event_loop.register(&server, SERVER).unwrap();
    struct TcpHandler {
        conn: Option<TcpSocket>, // Will store our single socket connection, once established.
        sock: TcpAcceptor // Will store our server.
    };
    impl TcpHandler {
        /// Accept at most one connection and register it for reads.
        fn accept(&mut self, event_loop: &mut EventLoop<(), ()>) {
            match self.conn {
                Some(_) => {
                    // If we already have a connection, do nothing.
                    return;
                },
                None => {}
            }
            let conn = self.sock.accept();
            let sock = conn.unwrap().unwrap();
            let tok = Token(2);
            // The following code will move `sock` into our `tcp_server` struct,
            // so we won't be able to access it directly anymore.
            self.conn = Some(sock);
            match self.conn {
                Some(ref c) => {
                    // Register an interest read events with the event_loop. the `edge` option
                    // stipulates that we will only be notified when a read event happens, rather
                    // than as long as reading is possible.
                    // See http://en.wikipedia.org/wiki/Epoll
                    let _ = event_loop.register_opt(c, tok, Interest::readable(), PollOpt::edge());
                }
                None => { }
            }
        }
    }
    impl Handler<(), ()> for TcpHandler {
        /// Dispatch read events by token: SERVER accepts, Token(2) reads.
        fn readable(&mut self, event_loop: &mut EventLoop<(), ()>, token: Token, _: ReadHint) {
            match token {
                SERVER => {
                    // Call `accept` on our `tcp_handler`.
                    self.accept(event_loop);
                }
                Token(2) => {
                    // Artificial limitation -- we'll only read up to 2048 bytes at a time.
                    let mut read_buf = ByteBuf::mut_with_capacity(2048);
                    match self.conn {
                        Some(ref c) => {
                            match c.read(&mut read_buf) {
                                Ok(NonBlock::WouldBlock) => {
                                    panic!("Read operation would block, bailing cuz this shouldn't happen.");
                                }
                                Ok(NonBlock::Ready(_)) => {
                                    // `_` would be the number of bytes read.
                                    // `flip` will return a `ByteBuf` on which we can call
                                    // `read_slice` to get the data available to be read.
                                    // See http://carllerche.github.io/bytes/bytes/struct.ByteBuf.html
                                    let mut buf = read_buf.flip();
                                    let mut sl = [0; 2048];
                                    buf.read_slice(&mut sl);
                                    // Assuming what was written was encoded as UTF8, print what
                                    // was read to STDOUT.
                                    print!("{}", String::from_utf8(sl.to_vec()).unwrap());
                                }
                                Err(e) => {
                                    if e.is_eof() {
                                        println!("Client closed connection, shutting down.");
                                        event_loop.shutdown();
                                    }
                                    else {
                                        panic!(e);
                                    }
                                }
                            }
                        },
                        None => { }
                    }
                },
                _ => { panic!("received token we can't handle".to_string()) }
            }
        }
    }
    let mut tcp_server = TcpHandler {
        conn: None,
        sock: server
    };
    let _ = event_loop.run(&mut tcp_server);
}
|
#![feature(net)]
#![feature(std_misc)]
use std::thread;
use std::sync::mpsc::{channel, Sender, Receiver};
use std::net::UdpSocket;
use std::net::IpAddr;
use std::old_io::timer;
use std::time::duration::Duration;
// ================================================================================================
/// Crate version string taken from Cargo metadata at compile time.
const CRTP_VERSION : &'static str = env!("CARGO_PKG_VERSION");
/// Newtype wrappers for RTP identifiers and timestamps (RFC 3550 terms).
struct SSRC(u32);
struct RtpTimestamp(u32);
struct NtpTimestamp(u64);
/// Sender-info section of an RTCP Sender Report.
struct SenderInfo {
    ntp_ts : u64, // FIXME: should be NtpTimestamp,
    rtp_ts : u32, // FIXME: should be RtpTimestamp,
    pckt_count : u32,
    byte_count : u32
}
/// One reception-report block of an RTCP SR/RR packet.
struct ReportBlock {
    ssrc : SSRC,
    fract_lost : u8,
    cumul_lost : u32,
    ext_seq : u32,
    jitter : u32,
    lsr : u32,
    dlsr : u32
}
/// One source-description chunk of an RTCP SDES packet; all items are
/// optional except the chunk's SSRC.
struct SdesChunk {
    ssrc : SSRC,
    cname : Option<String>,
    name : Option<String>,
    email : Option<String>,
    phone : Option<String>,
    loc : Option<String>,
    tool : Option<String>,
    note : Option<String>
}
/// A single parsed RTCP packet, tagged by packet type.
enum RtcpPacket {
    SR(SSRC, Vec<ReportBlock>, SenderInfo),
    RR(SSRC, Vec<ReportBlock>),
    SDES(Vec<SdesChunk>),
    BYE(Vec<SSRC>, String),
}
/// A compound RTCP datagram: one or more RTCP packets back to back.
struct CompoundRtcpPacket {
    packets : Vec<RtcpPacket>
}
/// Placeholder for a parsed RTP data packet (no fields yet).
struct RtpPacket;
// ================================================================================================
/// Channel endpoints wiring an `RtpSession` to its RTP and RTCP sockets.
struct RtpSessionParameters {
    rtp_tx : Sender<RtpPacket>,
    rtp_rx : Receiver<RtpPacket>,
    rtcp_tx : Sender<CompoundRtcpPacket>,
    rtcp_rx : Receiver<CompoundRtcpPacket>
}
/// Counters accumulated over a session's lifetime.
struct RtpSessionStatistics {
    pckt_count : u64
}
/// An RTP session: the channel parameters plus our own SSRC.
struct RtpSession {
    parameters : RtpSessionParameters,
    ssrc : u32
}
impl RtpSession {
    /// Create a session around pre-wired RTP/RTCP channel endpoints.
    /// The SSRC is currently fixed at 0 (see FIXME).
    pub fn new(params : RtpSessionParameters) -> RtpSession {
        RtpSession {
            parameters : params,
            ssrc : 0 // FIXME
        }
    }
    /// Run the session. Currently a stub that immediately returns
    /// zeroed statistics.
    pub fn run(&mut self) -> RtpSessionStatistics {
        let stats = RtpSessionStatistics{pckt_count : 0};
        stats
    }
}
// ================================================================================================
/// Parse an RTP data packet from `buf[..buflen]`.
/// Stub: logs and always returns `None` for now.
fn parse_rtp_packet(buf : &mut [u8], buflen : usize) -> Option<RtpPacket> {
    println!("parse_rtp_packet");
    None
}
/// UDP endpoint for RTP data, bridged to the application via channels.
struct RtpSocket {
    local_addr : IpAddr,
    local_port : u16
}
impl RtpSocket {
    /// Bind the UDP port and spawn receive/send threads. Returns
    /// `(to_net, from_net)`: the sender the app uses to transmit and the
    /// receiver it reads parsed packets from.
    pub fn run(&self) -> (Sender<RtpPacket>, Receiver<RtpPacket>) {
        let rx_socket = UdpSocket::bind(&(self.local_addr, self.local_port)).unwrap();
        let tx_socket = rx_socket.try_clone().unwrap();
        let (to_app, from_net) = channel::<RtpPacket>();
        let (to_net, from_app) = channel::<RtpPacket>();
        thread::spawn(move || {
            // The receiving thread
            loop {
                let mut buf = [0; 1500];
                let (buflen, sender) = rx_socket.recv_from(&mut buf).unwrap();
                match parse_rtp_packet(&mut buf, buflen) {
                    Some(packet) => to_app.send(packet).unwrap(),
                    None => {
                        println!("Unable to parse packet")
                    }
                }
            }
        });
        thread::spawn(move || {
            // The sending thread
            // NOTE(review): receives a single packet then exits — presumably
            // this should loop; confirm intent.
            let packet = from_app.recv().unwrap();
            // FIXME: send the packet
        });
        (to_net, from_net)
    }
}
// ================================================================================================
// Per-type RTCP sub-packet parsers (SR=200, RR=201, SDES=202, BYE=203,
// APP=204). All are unimplemented stubs for now.
fn parse_sr(p : bool, rc : u8, len : usize, packet : &[u8]) -> Option<RtcpPacket> {
    unimplemented!();
}
fn parse_rr(p : bool, rc : u8, len : usize, packet : &[u8]) -> Option<RtcpPacket> {
    unimplemented!();
}
fn parse_sdes(p : bool, rc : u8, len : usize, packet : &[u8]) -> Option<RtcpPacket> {
    unimplemented!();
}
fn parse_bye(p : bool, rc : u8, len : usize, packet : &[u8]) -> Option<RtcpPacket> {
    unimplemented!();
}
fn parse_app(p : bool, rc : u8, len : usize, packet : &[u8]) -> Option<RtcpPacket> {
    unimplemented!();
}
/// Parse a (possibly compound) RTCP datagram from `buf[..buflen]`.
///
/// Walks the buffer sub-packet by sub-packet, validating the common
/// header (version 2, declared length within bounds) and dispatching on
/// the packet-type byte. Currently always returns `None` because the
/// compound packet is never assembled (see FIXMEs).
fn parse_rtcp_packet(buf : &mut [u8], buflen : usize) -> Option<CompoundRtcpPacket> {
    if buflen < 4 {
        println!("parse_rtcp_packet: packet is too short to be RTCP");
        return None;
    }
    // FIXME: create a compound packet object
    let mut offset = 0;
    while offset != buflen {
        if offset + 3 >= buflen {
            println!("parse_rtcp_packet: packet is too short");
            return None;
        }
        // Common RTCP header (RFC 3550 §6.4): V(2) P(1) RC(5) PT(8) length(16).
        let v = (buf[offset] >> 6) & 0x03;
        let p = ((buf[offset] >> 5) & 0x01) == 1;
        let rc = buf[offset] & 0x1f;
        let pt = buf[offset + 1];
        // Big-endian 16-bit length, counted in 32-bit words minus one.
        // (The previous masks 0xff00/0x0fff were redundant/wrong for u8 input.)
        let len = ((buf[offset + 2] as usize) << 8) | (buf[offset + 3] as usize);
        // Total sub-packet size is 4 * (len + 1) bytes. The old check used
        // 4 * len and could let the slice below run past the buffer.
        if offset + (4 * (len + 1)) > buflen {
            println!("parse_rtcp_packet: packet is too long");
            return None;
        }
        if v != 2 {
            println!("parse_rtcp_packet: version number mismatch (v={})", v);
            return None;
        }
        let packet = &buf[offset..offset + (4 * (len + 1))];
        let parsed_packet = match pt {
            200 => parse_sr(p, rc, len, packet),
            201 => parse_rr(p, rc, len, packet),
            202 => parse_sdes(p, rc, len, packet),
            203 => parse_bye(p, rc, len, packet),
            204 => parse_app(p, rc, len, packet),
            _ => {
                println!("parse_rtcp_packet: unknown packet type (pt={})", pt);
                return None;
            }
        };
        // FIXME: append parsed_packet to the compound packet
        offset += 4 + (4 * len);
    }
    None // FIXME: return the compound packet
}
// Local endpoint configuration for the RTCP (control) UDP socket.
struct RtcpSocket {
  local_addr : IpAddr, // address to bind
  local_port : u16     // UDP port for RTCP
}
impl RtcpSocket {
  /// Bind the RTCP socket and spawn a receiver and a sender thread.
  ///
  /// Returns `(to_net, from_net)` channel endpoints for the application.
  pub fn run(&self) -> (Sender<CompoundRtcpPacket>, Receiver<CompoundRtcpPacket>) {
    let rx_socket = UdpSocket::bind(&(self.local_addr, self.local_port)).unwrap();
    let tx_socket = rx_socket.try_clone().unwrap();
    let (to_app, from_net) = channel::<CompoundRtcpPacket>();
    let (to_net, from_app) = channel::<CompoundRtcpPacket>();
    thread::spawn(move || {
      // The receiving thread: read datagrams, parse, forward to the app.
      loop {
        let mut buf = [0; 1500];
        let (buflen, _sender) = rx_socket.recv_from(&mut buf).unwrap();
        match parse_rtcp_packet(&mut buf, buflen) {
          Some(packet) => to_app.send(packet).unwrap(),
          None => {
            println!("Unable to parse packet")
          }
        }
      }
    });
    thread::spawn(move || {
      // The sending thread. Previously this received a single packet and
      // then exited; loop until the application drops its Sender.
      let _tx_socket = tx_socket; // keep the tx socket alive for this thread
      while let Ok(_packet) = from_app.recv() {
        // FIXME: serialize `_packet` and send it via `_tx_socket`
      }
    });
    (to_net, from_net)
  }
}
// ================================================================================================
// Entry point: bind RTP/RTCP sockets on ports 3000/3001, start an RTP
// session over them, then idle so the socket threads keep running.
fn main() {
  println!("CRTP v{}", CRTP_VERSION);
  let rtp_socket = RtpSocket{local_addr: IpAddr::new_v4(0,0,0,0), local_port : 3000};
  let (rtp_tx, rtp_rx) = rtp_socket.run();
  let rtcp_socket = RtcpSocket{local_addr: IpAddr::new_v4(0,0,0,0), local_port : 3001};
  let (rtcp_tx, rtcp_rx) = rtcp_socket.run();
  let session_parameters = RtpSessionParameters {
    rtp_tx : rtp_tx,
    rtp_rx : rtp_rx,
    rtcp_tx : rtcp_tx,
    rtcp_rx : rtcp_rx
  };
  let mut session = RtpSession::new(session_parameters);
  let session_statistics = session.run();
  // Sleep to keep the process (and the spawned threads) alive.
  timer::sleep(Duration::hours(1));
}
// ================================================================================================
// vim: set ts=2 sw=2 tw=0 et ai:
Implement parse_sr() and parse_rr(). Not yet tested.
#![feature(net)]
#![feature(std_misc)]
use std::thread;
use std::sync::mpsc::{channel, Sender, Receiver};
use std::net::UdpSocket;
use std::net::IpAddr;
use std::old_io::timer;
use std::time::duration::Duration;
// ================================================================================================
const CRTP_VERSION : &'static str = env!("CARGO_PKG_VERSION");
// 32-bit synchronization source (SSRC) identifier.
struct SSRC(u32);
// 32-bit RTP media-clock timestamp.
struct RtpTimestamp(u32);
// 64-bit NTP-format timestamp (presumably 32.32 fixed point — TODO confirm).
struct NtpTimestamp(u64);
// Sender-info section of an RTCP Sender Report: timestamps plus cumulative
// counts for the sending source (see parse_sr for the wire offsets).
struct SenderInfo {
  ntp_ts : u64, // NTP-format wallclock timestamp; FIXME: should be NtpTimestamp
  rtp_ts : u32, // same instant in RTP clock units; FIXME: should be RtpTimestamp
  pckt_count : u32, // cumulative packets sent by this source
  byte_count : u32  // cumulative octets sent — TODO confirm payload-only
}
// One reception report block from an SR/RR packet (24 bytes on the wire;
// field offsets are decoded in parse_report_block).
struct ReportBlock {
  ssrc : SSRC,       // source this block reports on
  fract_lost : u8,   // fraction lost since the previous report
  cumul_lost : u32,  // cumulative packets lost (24-bit on the wire)
  ext_seq : u32,     // extended highest sequence number received
  jitter : u32,      // interarrival jitter estimate
  lsr : u32,         // "last SR" timestamp echoed from the sender
  dlsr : u32         // delay since that last SR
}
// One SDES chunk: a source identifier plus whichever optional description
// items the sender included.
struct SdesChunk {
  ssrc : SSRC,
  cname : Option<String>, // canonical endpoint identifier
  name : Option<String>,
  email : Option<String>,
  phone : Option<String>,
  loc : Option<String>,
  tool : Option<String>,
  note : Option<String>
}
// A single parsed RTCP packet.
// NOTE(review): there is no APP variant even though parse_app() exists, so
// APP packets currently cannot be represented.
enum RtcpPacket {
  SR(SSRC, Vec<ReportBlock>, SenderInfo), // sender report
  RR(SSRC, Vec<ReportBlock>),             // receiver report
  SDES(Vec<SdesChunk>),                   // source description
  BYE(Vec<SSRC>, String),                 // goodbye, with reason text
}
// A compound RTCP packet: the individual packets carried in one datagram.
struct CompoundRtcpPacket {
  packets : Vec<RtcpPacket>
}
struct RtpPacket;
// ================================================================================================
// Channel endpoints a session uses to exchange packets with the RTP and
// RTCP socket threads (see RtpSocket::run / RtcpSocket::run).
struct RtpSessionParameters {
  rtp_tx : Sender<RtpPacket>,            // outgoing RTP
  rtp_rx : Receiver<RtpPacket>,          // incoming RTP
  rtcp_tx : Sender<CompoundRtcpPacket>,  // outgoing RTCP
  rtcp_rx : Receiver<CompoundRtcpPacket> // incoming RTCP
}
// Summary statistics returned when a session finishes.
struct RtpSessionStatistics {
  pckt_count : u64 // total packets handled
}
// An RTP session bound to a pair of socket channel sets.
struct RtpSession {
  parameters : RtpSessionParameters,
  ssrc : u32 // our own SSRC; currently always 0 (see FIXME in new())
}
impl RtpSession {
  /// Create a session that will exchange packets over the given channels.
  pub fn new(params : RtpSessionParameters) -> RtpSession {
    RtpSession {
      parameters : params,
      ssrc : 0 // FIXME: should be chosen randomly
    }
  }

  /// Drive the session. Currently a stub returning zeroed statistics.
  pub fn run(&mut self) -> RtpSessionStatistics {
    RtpSessionStatistics { pckt_count : 0 }
  }
}
// ================================================================================================
// Parse one RTP packet from the first `buflen` bytes of `buf`.
// TODO: RTP header parsing is not implemented yet; this stub just logs
// the call and reports failure so the caller drops the datagram.
fn parse_rtp_packet(buf : &mut [u8], buflen : usize) -> Option<RtpPacket> {
  println!("parse_rtp_packet");
  None
}
// Local endpoint configuration for the RTP (media) UDP socket.
struct RtpSocket {
  local_addr : IpAddr, // address to bind (0.0.0.0 binds all interfaces)
  local_port : u16     // UDP port for RTP
}
impl RtpSocket {
  /// Bind the RTP socket and spawn a receiver and a sender thread.
  ///
  /// Returns `(to_net, from_net)`: the application pushes outgoing packets
  /// into `to_net` and pulls parsed incoming packets from `from_net`.
  pub fn run(&self) -> (Sender<RtpPacket>, Receiver<RtpPacket>) {
    let rx_socket = UdpSocket::bind(&(self.local_addr, self.local_port)).unwrap();
    let tx_socket = rx_socket.try_clone().unwrap();
    let (to_app, from_net) = channel::<RtpPacket>();
    let (to_net, from_app) = channel::<RtpPacket>();
    thread::spawn(move || {
      // The receiving thread: read datagrams, parse, forward to the app.
      loop {
        let mut buf = [0; 1500];
        let (buflen, _sender) = rx_socket.recv_from(&mut buf).unwrap();
        match parse_rtp_packet(&mut buf, buflen) {
          Some(packet) => to_app.send(packet).unwrap(),
          None => {
            println!("Unable to parse packet")
          }
        }
      }
    });
    thread::spawn(move || {
      // The sending thread. Previously this received a single packet and
      // then exited; loop until the application drops its Sender.
      let _tx_socket = tx_socket; // keep the tx socket alive for this thread
      while let Ok(_packet) = from_app.recv() {
        // FIXME: serialize `_packet` and send it via `_tx_socket`
      }
    });
    (to_net, from_net)
  }
}
// ================================================================================================
// Read a big-endian u32 from `packet` starting at `offset`.
// Fix: byte 1 was shifted by 15 instead of 16, corrupting bits 16..24.
// The masks were redundant (a u8 widened to u32 has no stray bits) and are
// dropped.
fn parse_be_u32(packet : &[u8], offset : usize) -> u32 {
  ((packet[offset    ] as u32) << 24) |
  ((packet[offset + 1] as u32) << 16) |
  ((packet[offset + 2] as u32) <<  8) |
   (packet[offset + 3] as u32)
}
// Read a big-endian u64 from `packet` starting at `offset`.
// Fix: byte 0 was shifted by 46 instead of 56, so (after masking with
// 0xff00000000000000) the most significant byte was always lost.
fn parse_be_u64(packet : &[u8], offset : usize) -> u64 {
  ((packet[offset    ] as u64) << 56) |
  ((packet[offset + 1] as u64) << 48) |
  ((packet[offset + 2] as u64) << 40) |
  ((packet[offset + 3] as u64) << 32) |
  ((packet[offset + 4] as u64) << 24) |
  ((packet[offset + 5] as u64) << 16) |
  ((packet[offset + 6] as u64) <<  8) |
   (packet[offset + 7] as u64)
}
// Decode one 24-byte SR/RR report block starting at `offset`.
fn parse_report_block(packet : &[u8], offset : usize) -> ReportBlock {
  ReportBlock {
    ssrc : SSRC(parse_be_u32(packet, offset)), // source being reported on
    fract_lost : packet[offset + 4],
    // low 24 bits of the word at +4, i.e. bytes offset+5 .. offset+8
    cumul_lost : parse_be_u32(packet, offset + 4) & 0x00ffffff,
    ext_seq : parse_be_u32(packet, offset + 8),
    jitter : parse_be_u32(packet, offset + 12),
    lsr : parse_be_u32(packet, offset + 16),
    dlsr : parse_be_u32(packet, offset + 20),
  }
}
// Parse an RTCP Sender Report (PT=200). `len` is the RTCP length field:
// packet length in 32-bit words minus one (header word excluded).
fn parse_sr(p : bool, rc : u8, len : usize, packet : &[u8]) -> Option<RtcpPacket> {
  // An SR needs 1 word for the sender SSRC, 5 words of sender info, and
  // 6 words per report block. The old `len < 7` check rejected a valid
  // minimal SR (len == 6) and never validated `rc` against `len`.
  if len < 6 + (6 * (rc as usize)) {
    println!("parse_sr: packet is too short to be an SR");
    return None;
  }
  let ssrc = SSRC(parse_be_u32(packet, 4));
  let si = SenderInfo {
    ntp_ts : parse_be_u64(packet, 8),
    rtp_ts : parse_be_u32(packet, 16),
    pckt_count : parse_be_u32(packet, 20),
    byte_count : parse_be_u32(packet, 24)
  };
  let mut rr_list : Vec<ReportBlock> = Vec::new();
  // Iterate in usize: the old `i * 24` in u8 overflowed (panic in debug
  // builds) for rc >= 11, even though rc can legitimately reach 31.
  for i in 0..(rc as usize) {
    rr_list.push(parse_report_block(packet, 28 + (i * 24)));
  }
  Some(RtcpPacket::SR(ssrc, rr_list, si))
}
// Parse an RTCP Receiver Report (PT=201). `len` is the RTCP length field:
// packet length in 32-bit words minus one (header word excluded).
fn parse_rr(p : bool, rc : u8, len : usize, packet : &[u8]) -> Option<RtcpPacket> {
  // 1 word for the reporter's SSRC plus 6 words per report block; the old
  // `len < 1` check never validated `rc` against `len`.
  if len < 1 + (6 * (rc as usize)) {
    // Fix: this message previously said "parse_sr".
    println!("parse_rr: packet is too short to be an RR");
    return None;
  }
  let ssrc = SSRC(parse_be_u32(packet, 4));
  let mut rr_list : Vec<ReportBlock> = Vec::new();
  // usize arithmetic: `i * 24` in u8 overflowed for rc >= 11.
  for i in 0..(rc as usize) {
    rr_list.push(parse_report_block(packet, 8 + (i * 24)));
  }
  Some(RtcpPacket::RR(ssrc, rr_list))
}
// TODO: parse an RTCP Source Description packet (PT=202). Not yet implemented.
fn parse_sdes(p : bool, rc : u8, len : usize, packet : &[u8]) -> Option<RtcpPacket> {
  unimplemented!();
}
// TODO: parse an RTCP BYE packet (PT=203). Not yet implemented.
fn parse_bye(p : bool, rc : u8, len : usize, packet : &[u8]) -> Option<RtcpPacket> {
  unimplemented!();
}
// TODO: parse an RTCP APP packet (PT=204). Not yet implemented.
// NOTE(review): RtcpPacket has no APP variant yet, so this cannot return one.
fn parse_app(p : bool, rc : u8, len : usize, packet : &[u8]) -> Option<RtcpPacket> {
  unimplemented!();
}
// Parse a compound RTCP packet: one or more individual RTCP packets packed
// back-to-back in the first `buflen` bytes of `buf`.
fn parse_rtcp_packet(buf : &mut [u8], buflen : usize) -> Option<CompoundRtcpPacket> {
  if buflen < 4 {
    println!("parse_rtcp_packet: packet is too short to be RTCP");
    return None;
  }
  // FIXME: create a compound packet object
  let mut offset = 0;
  while offset < buflen {
    if offset + 3 >= buflen {
      println!("parse_rtcp_packet: packet is too short");
      return None;
    }
    let v  = (buf[offset] >> 6) & 0x03;        // version
    let p  = ((buf[offset] >> 5) & 0x01) == 1; // padding flag
    let rc = buf[offset] & 0x1f;               // report count / subtype
    let pt = buf[offset + 1];                  // packet type
    // 16-bit big-endian length field: packet length in 32-bit words minus
    // one. (Old code masked the low byte with 0x0fff, which was harmless
    // but wrong; a u8 can only contribute 8 bits anyway.)
    let len = ((buf[offset + 2] as usize) << 8) | (buf[offset + 3] as usize);
    // The packet occupies 4 * (len + 1) bytes including its header; the old
    // check only required 4 * len bytes, so the slice below could run past
    // the end of the valid data.
    if offset + (4 * (len + 1)) > buflen {
      println!("parse_rtcp_packet: packet is too long");
      return None;
    }
    if v != 2 {
      println!("parse_rtcp_packet: version number mismatch (v={})", v);
      return None;
    }
    let packet = &buf[offset..offset + (4 * (len + 1))];
    let parsed_packet = match pt {
      200 => parse_sr(p, rc, len, packet),
      201 => parse_rr(p, rc, len, packet),
      202 => parse_sdes(p, rc, len, packet),
      203 => parse_bye(p, rc, len, packet),
      204 => parse_app(p, rc, len, packet),
      _ => {
        println!("parse_rtcp_packet: unknown packet type (pt={})", pt);
        return None;
      }
    };
    // FIXME: append parsed_packet to the compound packet
    let _ = parsed_packet;
    offset += 4 * (len + 1);
  }
  None // FIXME: return the compound packet
}
// Local endpoint configuration for the RTCP (control) UDP socket.
struct RtcpSocket {
  local_addr : IpAddr, // address to bind
  local_port : u16     // UDP port for RTCP
}
impl RtcpSocket {
  /// Bind the RTCP socket and spawn a receiver and a sender thread.
  ///
  /// Returns `(to_net, from_net)` channel endpoints for the application.
  pub fn run(&self) -> (Sender<CompoundRtcpPacket>, Receiver<CompoundRtcpPacket>) {
    let rx_socket = UdpSocket::bind(&(self.local_addr, self.local_port)).unwrap();
    let tx_socket = rx_socket.try_clone().unwrap();
    let (to_app, from_net) = channel::<CompoundRtcpPacket>();
    let (to_net, from_app) = channel::<CompoundRtcpPacket>();
    thread::spawn(move || {
      // The receiving thread: read datagrams, parse, forward to the app.
      loop {
        let mut buf = [0; 1500];
        let (buflen, _sender) = rx_socket.recv_from(&mut buf).unwrap();
        match parse_rtcp_packet(&mut buf, buflen) {
          Some(packet) => to_app.send(packet).unwrap(),
          None => {
            println!("Unable to parse packet")
          }
        }
      }
    });
    thread::spawn(move || {
      // The sending thread. Previously this received a single packet and
      // then exited; loop until the application drops its Sender.
      let _tx_socket = tx_socket; // keep the tx socket alive for this thread
      while let Ok(_packet) = from_app.recv() {
        // FIXME: serialize `_packet` and send it via `_tx_socket`
      }
    });
    (to_net, from_net)
  }
}
// ================================================================================================
// Entry point: bind RTP/RTCP sockets on ports 3000/3001, start an RTP
// session over them, then idle so the socket threads keep running.
fn main() {
  println!("CRTP v{}", CRTP_VERSION);
  let rtp_socket = RtpSocket{local_addr: IpAddr::new_v4(0,0,0,0), local_port : 3000};
  let (rtp_tx, rtp_rx) = rtp_socket.run();
  let rtcp_socket = RtcpSocket{local_addr: IpAddr::new_v4(0,0,0,0), local_port : 3001};
  let (rtcp_tx, rtcp_rx) = rtcp_socket.run();
  let session_parameters = RtpSessionParameters {
    rtp_tx : rtp_tx,
    rtp_rx : rtp_rx,
    rtcp_tx : rtcp_tx,
    rtcp_rx : rtcp_rx
  };
  let mut session = RtpSession::new(session_parameters);
  let session_statistics = session.run();
  // Sleep to keep the process (and the spawned threads) alive.
  timer::sleep(Duration::hours(1));
}
// ================================================================================================
// vim: set ts=2 sw=2 tw=0 et ai:
|
mod color;
mod middlewares;
mod util;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::env;
use std::fs;
use std::io::{self, Read, Seek, SeekFrom};
use std::net::IpAddr;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use clap::crate_version;
use htmlescape::encode_minimal;
use iron::headers;
use iron::headers::{AcceptEncoding, ContentEncoding, Encoding, QualityItem};
use iron::method;
use iron::modifiers::Redirect;
use iron::status;
use iron::{Chain, Handler, Iron, IronError, IronResult, Request, Response, Set};
use iron_cors::CorsMiddleware;
use lazy_static::lazy_static;
use mime_guess as mime_types;
use multipart::server::{Multipart, SaveResult};
use open;
use path_dedot::ParseDot;
use percent_encoding::percent_decode;
use pretty_bytes::converter::convert;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use termcolor::{Color, ColorSpec};
use color::{build_spec, Printer};
use util::{
enable_string, encode_link_path, error_io2iron, error_resp, now_string,
system_time_to_date_time, StringError, ROOT_LINK,
};
use middlewares::{AuthChecker, CompressionHandler, RequestLogger};
// Query-string values accepted by the `order` parameter of directory sorting.
const ORDER_ASC: &str = "asc";
const ORDER_DESC: &str = "desc";
const DEFAULT_ORDER: &str = ORDER_DESC;
lazy_static! {
    // Fields a directory listing may be sorted by (see list_directory).
    static ref SORT_FIELDS: Vec<&'static str> = vec!["name", "modified", "size"];
}
/// Entry point: parse CLI options, print the startup banner, assemble the
/// Iron middleware chain around `MainHandler`, and serve HTTP(S).
///
/// Fix: user-facing message typo "Openning" -> "Opening".
fn main() {
    // ---- CLI definition; each validator rejects bad values up front ----
    let matches = clap::App::new("Simple HTTP(s) Server")
        .setting(clap::AppSettings::ColoredHelp)
        .version(crate_version!())
        .arg(clap::Arg::with_name("root")
             .index(1)
             .validator(|s| {
                 match fs::metadata(s) {
                     Ok(metadata) => {
                         if metadata.is_dir() { Ok(()) } else {
                             Err("Not directory".to_owned())
                         }
                     },
                     Err(e) => Err(e.to_string())
                 }
             })
             .help("Root directory"))
        .arg(clap::Arg::with_name("index")
             .short("i")
             .long("index")
             .help("Enable automatic render index page [index.html, index.htm]"))
        .arg(clap::Arg::with_name("upload")
             .short("u")
             .long("upload")
             .help("Enable upload files. (multiple select) (CSRF token required)"))
        .arg(clap::Arg::with_name("redirect").long("redirect")
             .takes_value(true)
             .validator(|url_string| iron::Url::parse(url_string.as_str()).map(|_| ()))
             .help("takes a URL to redirect to using HTTP 301 Moved Permanently"))
        .arg(clap::Arg::with_name("nosort")
             .long("nosort")
             .help("Disable directory entries sort (by: name, modified, size)"))
        .arg(clap::Arg::with_name("nocache")
             .long("nocache")
             .help("Disable http cache"))
        .arg(clap::Arg::with_name("norange")
             .long("norange")
             .help("Disable header::Range support (partial request)"))
        .arg(clap::Arg::with_name("cert")
             .long("cert")
             .takes_value(true)
             .validator(|s| {
                 match fs::metadata(s) {
                     Ok(metadata) => {
                         if metadata.is_file() { Ok(()) } else {
                             Err("Not a regular file".to_owned())
                         }
                     },
                     Err(e) => Err(e.to_string())
                 }
             })
             .help("TLS/SSL certificate (pkcs#12 format)"))
        .arg(clap::Arg::with_name("cors")
             .long("cors")
             .help("Enable CORS via the \"Access-Control-Allow-Origin\" header"))
        .arg(clap::Arg::with_name("certpass")
             .long("certpass")
             .takes_value(true)
             .help("TLS/SSL certificate password"))
        .arg(clap::Arg::with_name("upload_size_limit")
             .short("l")
             .long("upload-size-limit")
             .takes_value(true)
             .default_value("8000000")
             .value_name("NUM")
             .validator(|s| {
                 match s.parse::<u64>() {
                     Ok(_) => Ok(()),
                     Err(e) => Err(e.to_string())
                 }})
             .help("Upload file size limit [bytes]"))
        .arg(clap::Arg::with_name("ip")
             .long("ip")
             .takes_value(true)
             .default_value("0.0.0.0")
             .validator(|s| {
                 match IpAddr::from_str(&s) {
                     Ok(_) => Ok(()),
                     Err(e) => Err(e.to_string())
                 }
             })
             .help("IP address to bind"))
        .arg(clap::Arg::with_name("port")
             .short("p")
             .long("port")
             .takes_value(true)
             .default_value("8000")
             .validator(|s| {
                 match s.parse::<u16>() {
                     Ok(_) => Ok(()),
                     Err(e) => Err(e.to_string())
                 }
             })
             .help("Port number"))
        .arg(clap::Arg::with_name("auth")
             .short("a")
             .long("auth")
             .takes_value(true)
             .validator(|s| {
                 let parts = s.splitn(2, ':').collect::<Vec<&str>>();
                 if parts.len() < 2 || parts.len() >= 2 && parts[1].is_empty() {
                     Err("no password found".to_owned())
                 } else if parts[0].is_empty() {
                     Err("no username found".to_owned())
                 } else {
                     Ok(())
                 }
             })
             .help("HTTP Basic Auth (username:password)"))
        .arg(clap::Arg::with_name("compress")
             .short("c")
             .long("compress")
             .multiple(true)
             .value_delimiter(",")
             .takes_value(true)
             .help("Enable file compression: gzip/deflate\n    Example: -c=js,d.ts\n    Note: disabled on partial request!"))
        .arg(clap::Arg::with_name("threads")
             .short("t")
             .long("threads")
             .takes_value(true)
             .default_value("3")
             .validator(|s| {
                 match s.parse::<u8>() {
                     Ok(v) => {
                         if v > 0 { Ok(()) } else {
                             Err("Not positive number".to_owned())
                         }
                     }
                     Err(e) => Err(e.to_string())
                 }
             })
             .help("How many worker threads"))
        .arg(clap::Arg::with_name("try-file-404")
             .long("try-file")
             .visible_alias("try-file-404")
             .takes_value(true)
             .value_name("PATH")
             .validator(|s| {
                 match fs::metadata(s) {
                     Ok(metadata) => {
                         if metadata.is_file() { Ok(()) } else {
                             Err("Not a file".to_owned())
                         }
                     },
                     Err(e) => Err(e.to_string())
                 }
             })
             .help("serve this file (server root relative) in place of missing files (useful for single page apps)"))
        .arg(clap::Arg::with_name("silent")
             .long("silent")
             .short("s")
             .takes_value(false)
             .help("Disable all outputs"))
        .arg(clap::Arg::with_name("open")
             .long("open")
             .short("o")
             .help("Open the page in the default browser"))
        .get_matches();

    // ---- Extract options (validators above make the unwraps safe) ----
    let root = matches
        .value_of("root")
        .map(|s| PathBuf::from(s).canonicalize().unwrap())
        .unwrap_or_else(|| env::current_dir().unwrap());
    let index = matches.is_present("index");
    let upload_arg = matches.is_present("upload");
    let redirect_to = matches
        .value_of("redirect")
        .map(iron::Url::parse)
        .map(Result::unwrap);
    let sort = !matches.is_present("nosort");
    let cache = !matches.is_present("nocache");
    let range = !matches.is_present("norange");
    let cert = matches.value_of("cert");
    let certpass = matches.value_of("certpass");
    let cors = matches.is_present("cors");
    let ip = matches.value_of("ip").unwrap();
    let port = matches.value_of("port").unwrap().parse::<u16>().unwrap();
    let upload_size_limit = matches
        .value_of("upload_size_limit")
        .unwrap()
        .parse::<u64>()
        .unwrap();
    let auth = matches.value_of("auth");
    let compress = matches.values_of_lossy("compress");
    let threads = matches.value_of("threads").unwrap().parse::<u8>().unwrap();
    let try_file_404 = matches.value_of("try-file-404");

    let printer = Printer::new();
    let color_blue = Some(build_spec(Some(Color::Blue), false));
    let color_red = Some(build_spec(Some(Color::Red), false));
    let addr = format!("{}:{}", ip, port);
    let compression_exts = compress
        .clone()
        .unwrap_or_default()
        .iter()
        .map(|s| format!("*.{}", s))
        .collect::<Vec<String>>();
    let compression_string = if compression_exts.is_empty() {
        "disabled".to_owned()
    } else {
        format!("{:?}", compression_exts)
    };

    // --open: best-effort launch of the default browser.
    let open = matches.is_present("open");
    if open {
        let host = format!("http://{}", &addr);
        match open::that(&host) {
            Ok(_) => println!("Opening {} in default browser", &host),
            Err(err) => eprintln!("Unable to open in default browser {}", err.to_string()),
        }
    }

    let silent = matches.is_present("silent");
    // Uploads get a random per-run CSRF token embedded in the upload form.
    let upload: Option<Upload> = if upload_arg {
        let token: String = thread_rng()
            .sample_iter(&Alphanumeric)
            .take(8)
            .map(char::from)
            .collect();
        Some(Upload { csrf_token: token })
    } else {
        None
    };

    // ---- Startup banner ----
    if !silent {
        printer
            .println_out(
                r#"     Index: {}, Cache: {}, Cors: {}, Range: {}, Sort: {}, Threads: {}
          Upload: {}, CSRF Token: {}
            Auth: {}, Compression: {}
           https: {}, Cert: {}, Cert-Password: {}
            Root: {},
      TryFile404: {}
         Address: {}
    ======== [{}] ========"#,
                &vec![
                    enable_string(index),
                    enable_string(cache),
                    enable_string(cors),
                    enable_string(range),
                    enable_string(sort),
                    threads.to_string(),
                    enable_string(upload_arg),
                    (if upload.is_some() {
                        upload.as_ref().unwrap().csrf_token.as_str()
                    } else {
                        ""
                    })
                    .to_string(),
                    auth.unwrap_or("disabled").to_string(),
                    compression_string,
                    (if cert.is_some() {
                        "enabled"
                    } else {
                        "disabled"
                    })
                    .to_string(),
                    cert.unwrap_or("").to_owned(),
                    certpass.unwrap_or("").to_owned(),
                    root.to_str().unwrap().to_owned(),
                    try_file_404.unwrap_or("").to_owned(),
                    format!(
                        "{}://{}",
                        if cert.is_some() { "https" } else { "http" },
                        addr
                    ),
                    now_string(),
                ]
                .iter()
                .map(|s| (s.as_str(), &color_blue))
                .collect::<Vec<(&str, &Option<ColorSpec>)>>(),
            )
            .unwrap();
    }

    // ---- Middleware chain around the main handler ----
    let mut chain = Chain::new(MainHandler {
        root,
        index,
        upload,
        cache,
        range,
        redirect_to,
        sort,
        compress: compress
            .clone()
            .map(|exts| exts.iter().map(|s| format!(".{}", s)).collect()),
        try_file_404: try_file_404.map(PathBuf::from),
        upload_size_limit,
    });
    if cors {
        chain.link_around(CorsMiddleware::with_allow_any());
    }
    if let Some(auth) = auth {
        match AuthChecker::new(auth) {
            Ok(auth_checker) => {
                chain.link_before(auth_checker);
            }
            Err(e) => {
                printer.print_err("{}", &[(&*e, &color_red)]).unwrap();
                return;
            }
        }
    }
    if let Some(ref exts) = compress {
        if !exts.is_empty() {
            chain.link_after(CompressionHandler);
        }
    }
    if !silent {
        chain.link_after(RequestLogger {
            printer: Printer::new(),
        });
    }

    // ---- Serve (HTTPS only when compiled with the "tls" feature) ----
    let mut server = Iron::new(chain);
    server.threads = threads as usize;
    #[cfg(feature = "tls")]
    let rv = if let Some(cert) = cert {
        use hyper_native_tls::NativeTlsServer;
        let ssl = NativeTlsServer::new(cert, certpass.unwrap_or("")).unwrap();
        server.https(&addr, ssl)
    } else {
        server.http(&addr)
    };
    #[cfg(not(feature = "tls"))]
    let rv = if cert.is_some() {
        printer
            .println_err(
                "{}: TLS support is not enabled during compilation of simple-http-server",
                &[("ERROR", &Some(build_spec(Some(Color::Red), true)))],
            )
            .unwrap();
        std::process::exit(1)
    } else {
        server.http(&addr)
    };
    if let Err(e) = rv {
        printer
            .println_err(
                "{}: Can not bind on {}, {}",
                &[
                    ("ERROR", &Some(build_spec(Some(Color::Red), true))),
                    (addr.as_str(), &None),
                    (e.to_string().as_str(), &None),
                ],
            )
            .unwrap();
        std::process::exit(1);
    };
}
/// Upload configuration; present only when uploads are enabled (`-u`).
struct Upload {
    // Random per-run token that the upload form must echo back (CSRF guard).
    csrf_token: String,
}
/// The core request handler; all fields mirror the CLI options parsed in main().
struct MainHandler {
    root: PathBuf,                  // served directory (canonicalized)
    index: bool,                    // auto-render index.html / index.htm
    upload: Option<Upload>,         // Some(_) when uploads are enabled
    cache: bool,                    // emit http caching headers
    range: bool,                    // honor Range / partial requests
    redirect_to: Option<iron::Url>, // if set, redirect every request here
    sort: bool,                     // enable directory-listing sort links
    compress: Option<Vec<String>>,  // file suffixes eligible for compression
    try_file_404: Option<PathBuf>,  // fallback file for missing paths (SPA)
    upload_size_limit: u64,         // max upload size in bytes
}
impl Handler for MainHandler {
    /// Route one request: decode and sandbox the URL path under `self.root`,
    /// then dispatch to upload handling, directory listing, or file serving.
    fn handle(&self, req: &mut Request) -> IronResult<Response> {
        let mut fs_path = self.root.clone();
        // --redirect: unconditionally redirect every request.
        if let Some(url) = &self.redirect_to {
            return Ok(Response::with((
                status::PermanentRedirect,
                Redirect(url.clone()),
            )));
        }
        // Percent-decode each URL segment; reject segments that are not
        // valid UTF-8 after decoding.
        let path_prefix = req
            .url
            .path()
            .into_iter()
            .filter(|s| !s.is_empty())
            .map(|s| {
                percent_decode(s.as_bytes())
                    .decode_utf8()
                    .map(|path| PathBuf::from(&*path))
                    .map_err(|_err| {
                        IronError::new(
                            StringError(format!("invalid path: {}", s)),
                            status::BadRequest,
                        )
                    })
            })
            .collect::<Result<Vec<PathBuf>, _>>()?
            .into_iter()
            .collect::<PathBuf>();
        fs_path.push(&path_prefix);
        // Resolve `.`/`..` components and refuse any path that escapes the
        // served root (directory-traversal guard).
        let fs_path = fs_path.parse_dot().unwrap();
        if !fs_path.starts_with(&self.root) {
            return Err(IronError::new(
                io::Error::new(io::ErrorKind::PermissionDenied, "Permission Denied"),
                status::Forbidden,
            ));
        }
        // POST with uploads enabled: save the files, then redirect back
        // (post/redirect/get).
        if self.upload.is_some() && req.method == method::Post {
            if let Err((s, msg)) = self.save_files(req, &fs_path) {
                return Ok(error_resp(s, &msg));
            } else {
                return Ok(Response::with((status::Found, Redirect(req.url.clone()))));
            }
        }
        let path_metadata = match fs::metadata(&fs_path) {
            Ok(value) => value,
            Err(err) => {
                let status = match err.kind() {
                    io::ErrorKind::PermissionDenied => status::Forbidden,
                    io::ErrorKind::NotFound => {
                        // --try-file: serve the fallback file for missing paths.
                        if let Some(ref p) = self.try_file_404 {
                            if Some(true) == fs::metadata(p).ok().map(|meta| meta.is_file()) {
                                return self.send_file(req, p);
                            }
                        }
                        status::NotFound
                    }
                    _ => status::InternalServerError,
                };
                return Err(IronError::new(err, status));
            }
        };
        if path_metadata.is_dir() {
            let path_prefix: Vec<String> = path_prefix
                .iter()
                .map(|s| s.to_string_lossy().to_string())
                .collect();
            self.list_directory(req, &fs_path, &path_prefix)
        } else {
            self.send_file(req, &fs_path)
        }
    }
}
impl MainHandler {
/// Save all files from a multipart upload POST into `path`.
///
/// Verifies the form's CSRF token against the one generated at startup,
/// then writes each `files` field to disk under its client-given filename.
/// Returns the HTTP status and message to show on failure.
fn save_files(
    &self,
    req: &mut Request,
    path: &PathBuf,
) -> Result<(), (status::Status, String)> {
    match Multipart::from_request(req) {
        Ok(mut multipart) => {
            // Fetching all data and processing it.
            // save().temp() reads the request fully, parsing all fields and saving all files
            // in a new temporary directory under the OS temporary directory.
            match multipart.save().size_limit(self.upload_size_limit).temp() {
                SaveResult::Full(entries) => {
                    // Pull out csrf field to check if token matches one generated
                    let csrf_field = match entries.fields.get("csrf") {
                        Some(fields) => match fields.first() {
                            Some(field) => field,
                            None => {
                                return Err((
                                    status::BadRequest,
                                    String::from("csrf token not provided"),
                                ))
                            }
                        },
                        None => {
                            return Err((
                                status::BadRequest,
                                String::from("csrf token not provided"),
                            ))
                        }
                    };
                    // Read token value from field
                    let mut token = String::new();
                    csrf_field
                        .data
                        .readable()
                        .unwrap()
                        .read_to_string(&mut token)
                        .unwrap();
                    // Check if they match
                    if self.upload.as_ref().unwrap().csrf_token != token {
                        return Err((
                            status::BadRequest,
                            String::from("csrf token does not match"),
                        ));
                    }
                    // Grab all the fields named files
                    let files_fields = match entries.fields.get("files") {
                        Some(fields) => fields,
                        None => {
                            return Err((status::BadRequest, String::from("no files provided")))
                        }
                    };
                    for field in files_fields {
                        let mut data = field.data.readable().unwrap();
                        let headers = &field.headers;
                        // NOTE(review): the filename comes from the client
                        // unchanged — confirm multipart sanitizes path
                        // separators before trusting it.
                        let mut target_path = path.clone();
                        target_path.push(headers.filename.clone().unwrap());
                        if let Err(errno) = std::fs::File::create(target_path)
                            .and_then(|mut file| io::copy(&mut data, &mut file))
                        {
                            return Err((
                                status::InternalServerError,
                                format!("Copy file failed: {}", errno),
                            ));
                        } else {
                            println!(" >> File saved: {}", headers.filename.clone().unwrap());
                        }
                    }
                    Ok(())
                }
                // Request exceeded the size limit or was cut short.
                SaveResult::Partial(_entries, reason) => {
                    Err((status::InternalServerError, reason.unwrap_err().to_string()))
                }
                SaveResult::Error(error) => {
                    Err((status::InternalServerError, error.to_string()))
                }
            }
        }
        Err(_) => Err((
            status::BadRequest,
            "The request is not multipart".to_owned(),
        )),
    }
}
/// Render an HTML listing of `fs_path`. `path_prefix` is the decoded URL
/// path segments leading to this directory (empty at the root).
///
/// Fixes:
/// - `encode_link_path(¤t_link)` was HTML-entity mojibake (`&curren;`)
///   for `encode_link_path(&current_link)` and did not compile.
/// - The deepest breadcrumb label was emitted unescaped while every other
///   label goes through `encode_minimal`; escape it too.
fn list_directory(
    &self,
    req: &mut Request,
    fs_path: &PathBuf,
    path_prefix: &[String],
) -> IronResult<Response> {
    // One row of the listing: name plus the metadata used for sort/size.
    struct Entry {
        filename: String,
        metadata: fs::Metadata,
    }

    let mut resp = Response::with(status::Ok);
    let mut fs_path = fs_path.clone();
    let mut rows = Vec::new();
    let read_dir = fs::read_dir(&fs_path).map_err(error_io2iron)?;
    let mut entries = Vec::new();
    for entry_result in read_dir {
        let entry = entry_result.map_err(error_io2iron)?;
        entries.push(Entry {
            filename: entry.file_name().into_string().unwrap(),
            metadata: entry.metadata().map_err(error_io2iron)?,
        });
    }

    // Breadcrumb navigation: every ancestor is a link, the current
    // directory is plain text. Built deepest-first, then reversed.
    let breadcrumb = if !path_prefix.is_empty() {
        let mut breadcrumb = path_prefix.to_owned();
        let mut bread_links: Vec<String> = Vec::new();
        // Escape the current-directory label like all the others.
        bread_links.push(encode_minimal(&breadcrumb.pop().unwrap()));
        while !breadcrumb.is_empty() {
            bread_links.push(format!(
                r#"<a href="/{link}/"><strong>{label}</strong></a>"#,
                link = encode_link_path(&breadcrumb),
                label = encode_minimal(&breadcrumb.pop().unwrap().to_owned()),
            ));
        }
        bread_links.push(ROOT_LINK.to_owned());
        bread_links.reverse();
        bread_links.join(" / ")
    } else {
        ROOT_LINK.to_owned()
    };

    // Sortable column headers (only when sorting is enabled).
    let sort_links = if self.sort {
        // Current sort field/order from the query string; defaults: name/desc.
        let mut sort_field = Some(String::from("name"));
        let mut order = None;
        for (k, v) in req.url.as_ref().query_pairs() {
            if k == "sort" {
                sort_field = Some(v.to_string());
            } else if k == "order" {
                order = Some(v.to_string());
            }
        }
        let order = order.unwrap_or_else(|| DEFAULT_ORDER.to_owned());
        // The active field's header link toggles to the opposite order.
        let mut order_labels = BTreeMap::new();
        for field in SORT_FIELDS.iter() {
            if sort_field == Some((*field).to_owned()) && order == ORDER_DESC {
                // reverse the order of the field
                order_labels.insert(field.to_owned(), ORDER_ASC);
            }
        }
        if let Some(field) = sort_field {
            if SORT_FIELDS
                .iter()
                .position(|s| *s == field.as_str())
                .is_none()
            {
                return Err(IronError::new(
                    StringError(format!("Unknown sort field: {}", field)),
                    status::BadRequest,
                ));
            }
            if vec![ORDER_ASC, ORDER_DESC]
                .iter()
                .position(|s| *s == order)
                .is_none()
            {
                return Err(IronError::new(
                    StringError(format!("Unknown sort order: {}", order)),
                    status::BadRequest,
                ));
            }
            let reverse = order == ORDER_DESC;
            entries.sort_by(|a, b| {
                let rv = match field.as_str() {
                    "name" => a.filename.cmp(&b.filename),
                    "modified" => {
                        let a = a.metadata.modified().unwrap();
                        let b = b.metadata.modified().unwrap();
                        a.cmp(&b)
                    }
                    "size" => {
                        // Directories sort before files regardless of size.
                        if a.metadata.is_dir() == b.metadata.is_dir()
                            || a.metadata.is_file() == b.metadata.is_file()
                        {
                            a.metadata.len().cmp(&b.metadata.len())
                        } else if a.metadata.is_dir() {
                            Ordering::Less
                        } else {
                            Ordering::Greater
                        }
                    }
                    _ => unreachable!(),
                };
                if reverse {
                    rv.reverse()
                } else {
                    rv
                }
            });
        }
        let mut current_link = path_prefix.to_owned();
        current_link.push("".to_owned()); // trailing slash for directory links
        format!(
            r#"
<tr>
  <th><a href="/{link}?sort=name&order={name_order}">Name</a></th>
  <th><a href="/{link}?sort=modified&order={modified_order}">Last modified</a></th>
  <th><a href="/{link}?sort=size&order={size_order}">Size</a></th>
</tr>
<tr><td style="border-top:1px dashed #BBB;" colspan="5"></td></tr>
"#,
            link = encode_link_path(&current_link),
            name_order = order_labels.get("name").unwrap_or(&DEFAULT_ORDER),
            modified_order = order_labels.get("modified").unwrap_or(&DEFAULT_ORDER),
            size_order = order_labels.get("size").unwrap_or(&DEFAULT_ORDER)
        )
    } else {
        "".to_owned()
    };

    // "[Up]" link to the parent directory.
    if !path_prefix.is_empty() {
        let mut link = path_prefix.to_owned();
        link.pop();
        if !link.is_empty() {
            link.push("".to_owned());
        }
        rows.push(format!(
            r#"
<tr>
  <td><a href="/{link}"><strong>[Up]</strong></a></td>
  <td></td>
  <td></td>
</tr>
"#,
            link = encode_link_path(&link)
        ));
    } else {
        rows.push(r#"<tr><td> </td></tr>"#.to_owned());
    }

    // One table row per directory entry.
    for Entry { filename, metadata } in entries {
        if self.index {
            for fname in &["index.html", "index.htm"] {
                if filename == *fname {
                    // Automatic render index page
                    fs_path.push(filename);
                    return self.send_file(req, &fs_path);
                }
            }
        }
        // * Entry.modified
        let file_modified = system_time_to_date_time(metadata.modified().unwrap())
            .format("%Y-%m-%d %H:%M:%S")
            .to_string();
        // * Entry.filesize
        let file_size = if metadata.is_dir() {
            "-".to_owned()
        } else {
            convert(metadata.len() as f64)
        };
        // * Entry.linkstyle
        let link_style = if metadata.is_dir() {
            "style=\"font-weight: bold;\"".to_owned()
        } else {
            "".to_owned()
        };
        // * Entry.link
        let mut link = path_prefix.to_owned();
        link.push(filename.clone());
        if metadata.is_dir() {
            link.push("".to_owned());
        }
        // * Entry.label
        let file_name_label = if metadata.is_dir() {
            format!("{}/", &filename)
        } else {
            filename.clone()
        };
        // Render one directory entry
        rows.push(format!(
            r#"
<tr>
  <td><a {linkstyle} href="/{link}">{label}</a></td>
  <td style="color:#888;">[{modified}]</td>
  <td><bold>{filesize}</bold></td>
</tr>
"#,
            linkstyle = link_style,
            link = encode_link_path(&link),
            label = encode_minimal(&file_name_label),
            modified = file_modified,
            filesize = file_size
        ));
    }

    // Optional upload form (carries the CSRF token checked by save_files).
    let upload_form = if self.upload.is_some() {
        format!(
            r#"
<form style="margin-top:1em; margin-bottom:1em;" action="/{path}" method="POST" enctype="multipart/form-data">
  <input type="file" name="files" accept="*" multiple />
  <input type="hidden" name="csrf" value="{csrf}"/>
  <input type="submit" value="Upload" />
</form>
"#,
            path = encode_link_path(path_prefix),
            csrf = self.upload.as_ref().unwrap().csrf_token
        )
    } else {
        "".to_owned()
    };

    // Put all parts together
    resp.set_mut(format!(
        r#"<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width,initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no"/>
    <style> a {{ text-decoration:none; }} </style>
  </head>
  <body>
    {upload_form}
    <div>{breadcrumb}</div>
    <hr />
    <table>
      {sort_links}
      {rows}
    </table>
  </body>
</html>
"#,
        upload_form = upload_form,
        breadcrumb = breadcrumb,
        sort_links = sort_links,
        rows = rows.join("\n")
    ));
    resp.headers.set(headers::ContentType::html());
    // Advertise gzip/deflate when compression is configured and accepted.
    if self.compress.is_some() {
        if let Some(&AcceptEncoding(ref encodings)) = req.headers.get::<AcceptEncoding>() {
            for &QualityItem { ref item, .. } in encodings {
                if *item == Encoding::Deflate || *item == Encoding::Gzip {
                    resp.headers.set(ContentEncoding(vec![item.clone()]));
                }
            }
        }
    }
    Ok(resp)
}
/// Streams the file at `path` back to the client.
///
/// Supports HEAD (headers only) and GET with an optional single byte range
/// (`Range`, gated by `If-Match` / `If-Range`), gzip/deflate content-encoding
/// negotiation for configured extensions, and HTTP caching via
/// `If-Modified-Since` / `Last-Modified` / `ETag` / `Cache-Control`.
/// Any other method yields 405 Method Not Allowed.
fn send_file<P: AsRef<Path>>(&self, req: &Request, path: P) -> IronResult<Response> {
    use filetime::FileTime;
    use iron::headers::{
        AcceptRanges, ByteRangeSpec, ContentLength, ContentRange, ContentRangeSpec,
        ContentType, ETag, EntityTag, IfMatch, IfRange, Range, RangeUnit,
    };
    use iron::headers::{
        CacheControl, CacheDirective, HttpDate, IfModifiedSince, LastModified,
    };
    use iron::method::Method;
    use iron::mime::{Mime, SubLevel, TopLevel};
    let path = path.as_ref();
    let metadata = fs::metadata(path).map_err(error_io2iron)?;
    // Weak ETag derived from file size and mtime: "<len>-<sec>.<nsec>" in hex.
    // nsec is always 0 here because only whole seconds are taken from FileTime.
    let time = FileTime::from_last_modification_time(&metadata);
    let modified = time::Timespec::new(time.seconds() as i64, 0);
    let etag = EntityTag::weak(format!(
        "{0:x}-{1:x}.{2:x}",
        metadata.len(),
        modified.sec,
        modified.nsec
    ));
    let mut resp = Response::with(status::Ok);
    if self.range {
        // Advertise byte-range support.
        resp.headers.set(AcceptRanges(vec![RangeUnit::Bytes]));
    }
    match req.method {
        Method::Head => {
            // HEAD: echo content type (defaulting to text/plain) and length,
            // but attach no body.
            let content_type = req
                .headers
                .get::<ContentType>()
                .cloned()
                .unwrap_or_else(|| ContentType(Mime(TopLevel::Text, SubLevel::Plain, vec![])));
            resp.headers.set(content_type);
            resp.headers.set(ContentLength(metadata.len()));
        }
        Method::Get => {
            // Set mime type
            let mime = mime_types::from_path(path).first_or_octet_stream();
            resp.headers
                .set_raw("content-type", vec![mime.to_string().into_bytes()]);
            if self.range {
                let mut range = req.headers.get::<Range>();
                if range.is_some() {
                    // [Reference]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Match
                    // Check header::If-Match
                    // A Range request with If-Match must strongly match our ETag.
                    if let Some(&IfMatch::Items(ref items)) = req.headers.get::<IfMatch>() {
                        if items
                            .iter()
                            .position(|item| item.strong_eq(&etag))
                            .is_none()
                        {
                            return Err(IronError::new(
                                StringError("Etag not matched".to_owned()),
                                status::RangeNotSatisfiable,
                            ));
                        }
                    };
                }
                // [Reference]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Range
                // An If-Range mismatch means the client's cached copy is stale:
                // drop the Range header and fall through to a full response.
                let matched_ifrange = match req.headers.get::<IfRange>() {
                    Some(&IfRange::EntityTag(ref etag_ifrange)) => etag.weak_eq(etag_ifrange),
                    Some(&IfRange::Date(HttpDate(ref date_ifrange))) => {
                        time::at(modified) <= *date_ifrange
                    }
                    None => true,
                };
                if !matched_ifrange {
                    range = None;
                }
                match range {
                    Some(&Range::Bytes(ref ranges)) => {
                        // Only the first range of a multi-range request is honoured.
                        if let Some(range) = ranges.get(0) {
                            let (offset, length) = match *range {
                                ByteRangeSpec::FromTo(x, mut y) => {
                                    // "x-y"
                                    if x >= metadata.len() || x > y {
                                        return Err(IronError::new(
                                            StringError(format!(
                                                "Invalid range(x={}, y={})",
                                                x, y
                                            )),
                                            status::RangeNotSatisfiable,
                                        ));
                                    }
                                    // Clamp the inclusive end to the last byte.
                                    if y >= metadata.len() {
                                        y = metadata.len() - 1;
                                    }
                                    (x, y - x + 1)
                                }
                                ByteRangeSpec::AllFrom(x) => {
                                    // "x-"
                                    if x >= metadata.len() {
                                        return Err(IronError::new(
                                            StringError(format!(
                                                "Range::AllFrom to large (x={}), Content-Length: {})",
                                                x, metadata.len())),
                                            status::RangeNotSatisfiable
                                        ));
                                    }
                                    (x, metadata.len() - x)
                                }
                                ByteRangeSpec::Last(mut x) => {
                                    // "-x": final x bytes; clamp to the file size.
                                    if x > metadata.len() {
                                        x = metadata.len();
                                    }
                                    (metadata.len() - x, x)
                                }
                            };
                            // Stream only the requested window: seek + take.
                            let mut file = fs::File::open(path).map_err(error_io2iron)?;
                            file.seek(SeekFrom::Start(offset)).map_err(error_io2iron)?;
                            let take = file.take(length);
                            resp.headers.set(ContentLength(length));
                            resp.headers.set(ContentRange(ContentRangeSpec::Bytes {
                                range: Some((offset, offset + length - 1)),
                                instance_length: Some(metadata.len()),
                            }));
                            resp.body = Some(Box::new(Box::new(take) as Box<dyn Read + Send>));
                            resp.set_mut(status::PartialContent);
                        } else {
                            return Err(IronError::new(
                                StringError("Empty range set".to_owned()),
                                status::RangeNotSatisfiable,
                            ));
                        }
                    }
                    Some(_) => {
                        // Non-byte range units are not supported.
                        return Err(IronError::new(
                            StringError("Invalid range type".to_owned()),
                            status::RangeNotSatisfiable,
                        ));
                    }
                    _ => {
                        // No (usable) Range header: send the whole file.
                        resp.headers.set(ContentLength(metadata.len()));
                        let file = fs::File::open(path).map_err(error_io2iron)?;
                        resp.body = Some(Box::new(file));
                    }
                }
            } else {
                // Range support disabled: always send the whole file.
                resp.headers.set(ContentLength(metadata.len()));
                let file = fs::File::open(path).map_err(error_io2iron)?;
                resp.body = Some(Box::new(file));
            }
        }
        _ => {
            return Ok(Response::with(status::MethodNotAllowed));
        }
    }
    // Negotiate gzip/deflate for configured extensions; partial responses are
    // never compressed (byte offsets must keep matching the on-disk file).
    if let Some(ref exts) = self.compress {
        let path_str = path.to_string_lossy();
        if resp.status != Some(status::PartialContent)
            && exts.iter().any(|ext| path_str.ends_with(ext))
        {
            if let Some(&AcceptEncoding(ref encodings)) = req.headers.get::<AcceptEncoding>() {
                for &QualityItem { ref item, .. } in encodings {
                    if *item == Encoding::Deflate || *item == Encoding::Gzip {
                        resp.headers.set(ContentEncoding(vec![item.clone()]));
                        break;
                    }
                }
            }
        }
    }
    if self.cache {
        static SECONDS: u32 = 7 * 24 * 3600; // max-age: 7.days()
        // Short-circuit with 304 when the client's copy is still fresh.
        if let Some(&IfModifiedSince(HttpDate(ref if_modified_since))) =
            req.headers.get::<IfModifiedSince>()
        {
            if modified <= if_modified_since.to_timespec() {
                return Ok(Response::with(status::NotModified));
            }
        };
        let cache = vec![CacheDirective::Public, CacheDirective::MaxAge(SECONDS)];
        resp.headers.set(CacheControl(cache));
        resp.headers.set(LastModified(HttpDate(time::at(modified))));
        resp.headers.set(ETag(etag));
    }
    Ok(resp)
}
}
Better error message, longer token
mod color;
mod middlewares;
mod util;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::env;
use std::fs;
use std::io::{self, Read, Seek, SeekFrom};
use std::net::IpAddr;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use clap::crate_version;
use htmlescape::encode_minimal;
use iron::headers;
use iron::headers::{AcceptEncoding, ContentEncoding, Encoding, QualityItem};
use iron::method;
use iron::modifiers::Redirect;
use iron::status;
use iron::{Chain, Handler, Iron, IronError, IronResult, Request, Response, Set};
use iron_cors::CorsMiddleware;
use lazy_static::lazy_static;
use mime_guess as mime_types;
use multipart::server::{Multipart, SaveResult};
use open;
use path_dedot::ParseDot;
use percent_encoding::percent_decode;
use pretty_bytes::converter::convert;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use termcolor::{Color, ColorSpec};
use color::{build_spec, Printer};
use util::{
enable_string, encode_link_path, error_io2iron, error_resp, now_string,
system_time_to_date_time, StringError, ROOT_LINK,
};
use middlewares::{AuthChecker, CompressionHandler, RequestLogger};
const ORDER_ASC: &str = "asc";
const ORDER_DESC: &str = "desc";
const DEFAULT_ORDER: &str = ORDER_DESC;
lazy_static! {
static ref SORT_FIELDS: Vec<&'static str> = vec!["name", "modified", "size"];
}
/// Entry point: parse the CLI, print the startup banner, assemble the iron
/// middleware chain (CORS / basic-auth / compression / request logging) and
/// run the HTTP — or, with the "tls" feature, HTTPS — server.
fn main() {
    // ---- CLI definition (clap v2 builder style) ----
    let matches = clap::App::new("Simple HTTP(s) Server")
        .setting(clap::AppSettings::ColoredHelp)
        .version(crate_version!())
        .arg(clap::Arg::with_name("root")
            .index(1)
            .validator(|s| {
                match fs::metadata(s) {
                    Ok(metadata) => {
                        if metadata.is_dir() { Ok(()) } else {
                            Err("Not directory".to_owned())
                        }
                    },
                    Err(e) => Err(e.to_string())
                }
            })
            .help("Root directory"))
        .arg(clap::Arg::with_name("index")
            .short("i")
            .long("index")
            .help("Enable automatic render index page [index.html, index.htm]"))
        .arg(clap::Arg::with_name("upload")
            .short("u")
            .long("upload")
            .help("Enable upload files. (multiple select) (CSRF token required)"))
        .arg(clap::Arg::with_name("redirect").long("redirect")
            .takes_value(true)
            .validator(|url_string| iron::Url::parse(url_string.as_str()).map(|_| ()))
            .help("takes a URL to redirect to using HTTP 301 Moved Permanently"))
        .arg(clap::Arg::with_name("nosort")
            .long("nosort")
            .help("Disable directory entries sort (by: name, modified, size)"))
        .arg(clap::Arg::with_name("nocache")
            .long("nocache")
            .help("Disable http cache"))
        .arg(clap::Arg::with_name("norange")
            .long("norange")
            .help("Disable header::Range support (partial request)"))
        .arg(clap::Arg::with_name("cert")
            .long("cert")
            .takes_value(true)
            .validator(|s| {
                match fs::metadata(s) {
                    Ok(metadata) => {
                        if metadata.is_file() { Ok(()) } else {
                            Err("Not a regular file".to_owned())
                        }
                    },
                    Err(e) => Err(e.to_string())
                }
            })
            .help("TLS/SSL certificate (pkcs#12 format)"))
        .arg(clap::Arg::with_name("cors")
            .long("cors")
            .help("Enable CORS via the \"Access-Control-Allow-Origin\" header"))
        .arg(clap::Arg::with_name("certpass").
            long("certpass")
            .takes_value(true)
            .help("TLS/SSL certificate password"))
        .arg(clap::Arg::with_name("upload_size_limit")
            .short("l")
            .long("upload-size-limit")
            .takes_value(true)
            .default_value("8000000")
            .value_name("NUM")
            .validator(|s| {
                match s.parse::<u64>() {
                    Ok(_) => Ok(()),
                    Err(e) => Err(e.to_string())
                }})
            .help("Upload file size limit [bytes]"))
        .arg(clap::Arg::with_name("ip")
            .long("ip")
            .takes_value(true)
            .default_value("0.0.0.0")
            .validator(|s| {
                match IpAddr::from_str(&s) {
                    Ok(_) => Ok(()),
                    Err(e) => Err(e.to_string())
                }
            })
            .help("IP address to bind"))
        .arg(clap::Arg::with_name("port")
            .short("p")
            .long("port")
            .takes_value(true)
            .default_value("8000")
            .validator(|s| {
                match s.parse::<u16>() {
                    Ok(_) => Ok(()),
                    Err(e) => Err(e.to_string())
                }
            })
            .help("Port number"))
        .arg(clap::Arg::with_name("auth")
            .short("a")
            .long("auth")
            .takes_value(true)
            .validator(|s| {
                // Expect "username:password" with both halves non-empty.
                let parts = s.splitn(2, ':').collect::<Vec<&str>>();
                if parts.len() < 2 || parts.len() >= 2 && parts[1].is_empty() {
                    Err("no password found".to_owned())
                } else if parts[0].is_empty() {
                    Err("no username found".to_owned())
                } else {
                    Ok(())
                }
            })
            .help("HTTP Basic Auth (username:password)"))
        .arg(clap::Arg::with_name("compress")
            .short("c")
            .long("compress")
            .multiple(true)
            .value_delimiter(",")
            .takes_value(true)
            .help("Enable file compression: gzip/deflate\n Example: -c=js,d.ts\n Note: disabled on partial request!"))
        .arg(clap::Arg::with_name("threads")
            .short("t")
            .long("threads")
            .takes_value(true)
            .default_value("3")
            .validator(|s| {
                match s.parse::<u8>() {
                    Ok(v) => {
                        if v > 0 { Ok(()) } else {
                            Err("Not positive number".to_owned())
                        }
                    }
                    Err(e) => Err(e.to_string())
                }
            })
            .help("How many worker threads"))
        .arg(clap::Arg::with_name("try-file-404")
            .long("try-file")
            .visible_alias("try-file-404")
            .takes_value(true)
            .value_name("PATH")
            .validator(|s| {
                match fs::metadata(s) {
                    Ok(metadata) => {
                        if metadata.is_file() { Ok(()) } else {
                            Err("Not a file".to_owned())
                        }
                    },
                    Err(e) => Err(e.to_string())
                }
            })
            .help("serve this file (server root relative) in place of missing files (useful for single page apps)"))
        .arg(clap::Arg::with_name("silent")
            .long("silent")
            .short("s")
            .takes_value(false)
            .help("Disable all outputs"))
        .arg(clap::Arg::with_name("open")
            .long("open")
            .short("o")
            .help("Open the page in the default browser"))
        .get_matches();
    // ---- Extract and post-process options ----
    // Root defaults to the current directory; the validator above already
    // proved it is an existing directory, so canonicalize cannot fail.
    let root = matches
        .value_of("root")
        .map(|s| PathBuf::from(s).canonicalize().unwrap())
        .unwrap_or_else(|| env::current_dir().unwrap());
    let index = matches.is_present("index");
    let upload_arg = matches.is_present("upload");
    let redirect_to = matches
        .value_of("redirect")
        .map(iron::Url::parse)
        .map(Result::unwrap);
    let sort = !matches.is_present("nosort");
    let cache = !matches.is_present("nocache");
    let range = !matches.is_present("norange");
    let cert = matches.value_of("cert");
    let certpass = matches.value_of("certpass");
    let cors = matches.is_present("cors");
    let ip = matches.value_of("ip").unwrap();
    let port = matches.value_of("port").unwrap().parse::<u16>().unwrap();
    let upload_size_limit = matches
        .value_of("upload_size_limit")
        .unwrap()
        .parse::<u64>()
        .unwrap();
    let auth = matches.value_of("auth");
    let compress = matches.values_of_lossy("compress");
    let threads = matches.value_of("threads").unwrap().parse::<u8>().unwrap();
    let try_file_404 = matches.value_of("try-file-404");
    let printer = Printer::new();
    let color_blue = Some(build_spec(Some(Color::Blue), false));
    let color_red = Some(build_spec(Some(Color::Red), false));
    let addr = format!("{}:{}", ip, port);
    // Banner shows the compression extensions as "*.<ext>" globs.
    let compression_exts = compress
        .clone()
        .unwrap_or_default()
        .iter()
        .map(|s| format!("*.{}", s))
        .collect::<Vec<String>>();
    let compression_string = if compression_exts.is_empty() {
        "disabled".to_owned()
    } else {
        format!("{:?}", compression_exts)
    };
    // --open: best-effort launch of the default browser; failure only warns.
    let open = matches.is_present("open");
    if open {
        let host = format!("http://{}", &addr);
        match open::that(&host) {
            Ok(_) => println!("Openning {} in default browser", &host),
            Err(err) => eprintln!("Unable to open in default browser {}", err.to_string()),
        }
    }
    let silent = matches.is_present("silent");
    // Random 10-char alphanumeric CSRF token, regenerated on every start.
    let upload: Option<Upload> = if upload_arg {
        let token: String = thread_rng()
            .sample_iter(&Alphanumeric)
            .take(10)
            .map(char::from)
            .collect();
        Some(Upload { csrf_token: token })
    } else {
        None
    };
    // ---- Startup banner (suppressed by --silent) ----
    if !silent {
        printer
            .println_out(
                r#" Index: {}, Cache: {}, Cors: {}, Range: {}, Sort: {}, Threads: {}
Upload: {}, CSRF Token: {}
Auth: {}, Compression: {}
https: {}, Cert: {}, Cert-Password: {}
Root: {},
TryFile404: {}
Address: {}
======== [{}] ========"#,
                &vec![
                    enable_string(index),
                    enable_string(cache),
                    enable_string(cors),
                    enable_string(range),
                    enable_string(sort),
                    threads.to_string(),
                    enable_string(upload_arg),
                    (if upload.is_some() {
                        upload.as_ref().unwrap().csrf_token.as_str()
                    } else {
                        ""
                    })
                    .to_string(),
                    auth.unwrap_or("disabled").to_string(),
                    compression_string,
                    (if cert.is_some() {
                        "enabled"
                    } else {
                        "disabled"
                    })
                    .to_string(),
                    cert.unwrap_or("").to_owned(),
                    certpass.unwrap_or("").to_owned(),
                    root.to_str().unwrap().to_owned(),
                    try_file_404.unwrap_or("").to_owned(),
                    format!(
                        "{}://{}",
                        if cert.is_some() { "https" } else { "http" },
                        addr
                    ),
                    now_string(),
                ]
                .iter()
                .map(|s| (s.as_str(), &color_blue))
                .collect::<Vec<(&str, &Option<ColorSpec>)>>(),
            )
            .unwrap();
    }
    // ---- Middleware chain around the main handler ----
    let mut chain = Chain::new(MainHandler {
        root,
        index,
        upload,
        cache,
        range,
        redirect_to,
        sort,
        // Stored as ".<ext>" suffixes for cheap ends_with checks later.
        compress: compress
            .clone()
            .map(|exts| exts.iter().map(|s| format!(".{}", s)).collect()),
        try_file_404: try_file_404.map(PathBuf::from),
        upload_size_limit,
    });
    if cors {
        chain.link_around(CorsMiddleware::with_allow_any());
    }
    if let Some(auth) = auth {
        match AuthChecker::new(auth) {
            Ok(auth_checker) => {
                chain.link_before(auth_checker);
            }
            Err(e) => {
                printer.print_err("{}", &[(&*e, &color_red)]).unwrap();
                return;
            }
        }
    }
    if let Some(ref exts) = compress {
        if !exts.is_empty() {
            chain.link_after(CompressionHandler);
        }
    }
    if !silent {
        chain.link_after(RequestLogger {
            printer: Printer::new(),
        });
    }
    let mut server = Iron::new(chain);
    server.threads = threads as usize;
    // ---- Serve; HTTPS is only available when built with the "tls" feature ----
    #[cfg(feature = "tls")]
    let rv = if let Some(cert) = cert {
        use hyper_native_tls::NativeTlsServer;
        let ssl = NativeTlsServer::new(cert, certpass.unwrap_or("")).unwrap();
        server.https(&addr, ssl)
    } else {
        server.http(&addr)
    };
    #[cfg(not(feature = "tls"))]
    let rv = if cert.is_some() {
        printer
            .println_err(
                "{}: TLS support is not enabled during compilation of simple-http-server",
                &[("ERROR", &Some(build_spec(Some(Color::Red), true)))],
            )
            .unwrap();
        std::process::exit(1)
    } else {
        server.http(&addr)
    };
    // Binding failure is fatal: report and exit non-zero.
    if let Err(e) = rv {
        printer
            .println_err(
                "{}: Can not bind on {}, {}",
                &[
                    ("ERROR", &Some(build_spec(Some(Color::Red), true))),
                    (addr.as_str(), &None),
                    (e.to_string().as_str(), &None),
                ],
            )
            .unwrap();
        std::process::exit(1);
    };
}
/// Upload configuration; present only when `--upload` is enabled.
struct Upload {
    // Random per-run token embedded in the upload form and verified on POST
    // to prevent cross-site request forgery.
    csrf_token: String,
}
/// Per-server configuration shared by every request (the iron `Handler`).
struct MainHandler {
    root: PathBuf,                  // canonicalized directory being served
    index: bool,                    // auto-render index.html / index.htm
    upload: Option<Upload>,         // Some(..) when uploads are enabled
    cache: bool,                    // emit Cache-Control / ETag / Last-Modified
    range: bool,                    // honour byte-range requests
    redirect_to: Option<iron::Url>, // --redirect target, answers every request
    sort: bool,                     // sortable directory listings
    compress: Option<Vec<String>>,  // ".ext" suffixes eligible for compression
    try_file_404: Option<PathBuf>,  // fallback file served instead of 404
    upload_size_limit: u64,         // maximum accepted upload size in bytes
}
impl Handler for MainHandler {
    /// Routes every request: optional global redirect, percent-decodes the
    /// URL into a filesystem path, rejects escapes from the served root,
    /// dispatches POST uploads, then serves either a directory listing or a
    /// file (with optional 404-fallback file).
    fn handle(&self, req: &mut Request) -> IronResult<Response> {
        let mut fs_path = self.root.clone();
        // --redirect: every request is answered with a permanent redirect.
        if let Some(url) = &self.redirect_to {
            return Ok(Response::with((
                status::PermanentRedirect,
                Redirect(url.clone()),
            )));
        }
        // Percent-decode each URL segment and join them into a relative
        // path; a segment that is not valid UTF-8 yields 400 Bad Request.
        let path_prefix = req
            .url
            .path()
            .into_iter()
            .filter(|s| !s.is_empty())
            .map(|s| {
                percent_decode(s.as_bytes())
                    .decode_utf8()
                    .map(|path| PathBuf::from(&*path))
                    .map_err(|_err| {
                        IronError::new(
                            StringError(format!("invalid path: {}", s)),
                            status::BadRequest,
                        )
                    })
            })
            .collect::<Result<Vec<PathBuf>, _>>()?
            .into_iter()
            .collect::<PathBuf>();
        fs_path.push(&path_prefix);
        // Normalise "." / ".." components, then refuse any path that would
        // escape the served root (directory-traversal guard).
        let fs_path = fs_path.parse_dot().unwrap();
        if !fs_path.starts_with(&self.root) {
            return Err(IronError::new(
                io::Error::new(io::ErrorKind::PermissionDenied, "Permission Denied"),
                status::Forbidden,
            ));
        }
        // POST with uploads enabled: persist the files, then redirect back
        // to the same URL so a refresh does not re-submit the form.
        if self.upload.is_some() && req.method == method::Post {
            if let Err((s, msg)) = self.save_files(req, &fs_path) {
                return Ok(error_resp(s, &msg));
            } else {
                return Ok(Response::with((status::Found, Redirect(req.url.clone()))));
            }
        }
        let path_metadata = match fs::metadata(&fs_path) {
            Ok(value) => value,
            Err(err) => {
                // Map I/O errors to HTTP statuses.
                let status = match err.kind() {
                    io::ErrorKind::PermissionDenied => status::Forbidden,
                    io::ErrorKind::NotFound => {
                        // --try-file: serve the fallback file instead of 404
                        // (useful for single-page applications).
                        if let Some(ref p) = self.try_file_404 {
                            if Some(true) == fs::metadata(p).ok().map(|meta| meta.is_file()) {
                                return self.send_file(req, p);
                            }
                        }
                        status::NotFound
                    }
                    _ => status::InternalServerError,
                };
                return Err(IronError::new(err, status));
            }
        };
        // Directories get an HTML listing, everything else is streamed.
        if path_metadata.is_dir() {
            let path_prefix: Vec<String> = path_prefix
                .iter()
                .map(|s| s.to_string_lossy().to_string())
                .collect();
            self.list_directory(req, &fs_path, &path_prefix)
        } else {
            self.send_file(req, &fs_path)
        }
    }
}
impl MainHandler {
/// Persists files from a `multipart/form-data` POST into the directory `path`.
///
/// Validates the hidden `csrf` field against the server's per-run token,
/// then writes every part named `files` under its client-supplied filename.
/// Returns `(status, message)` on failure for rendering an error page.
fn save_files(
    &self,
    req: &mut Request,
    path: &PathBuf,
) -> Result<(), (status::Status, String)> {
    match Multipart::from_request(req) {
        Ok(mut multipart) => {
            // Fetching all data and processing it.
            // save().temp() reads the request fully, parsing all fields and saving all files
            // in a new temporary directory under the OS temporary directory.
            match multipart.save().size_limit(self.upload_size_limit).temp() {
                SaveResult::Full(entries) => {
                    // Pull out csrf field to check if token matches one generated
                    let csrf_field = match entries.fields.get("csrf") {
                        Some(fields) => match fields.first() {
                            Some(field) => field,
                            None => {
                                return Err((
                                    status::BadRequest,
                                    String::from("csrf parameter not provided"),
                                ))
                            }
                        },
                        None => {
                            return Err((
                                status::BadRequest,
                                String::from("csrf parameter not provided"),
                            ))
                        }
                    };
                    // Read token value from field
                    let mut token = String::new();
                    csrf_field
                        .data
                        .readable()
                        .unwrap()
                        .read_to_string(&mut token)
                        .unwrap();
                    // Check if they match
                    if self.upload.as_ref().unwrap().csrf_token != token {
                        return Err((
                            status::BadRequest,
                            String::from("csrf token does not match"),
                        ));
                    }
                    // Grab all the fields named files
                    let files_fields = match entries.fields.get("files") {
                        Some(fields) => fields,
                        None => {
                            return Err((status::BadRequest, String::from("no files provided")))
                        }
                    };
                    for field in files_fields {
                        let mut data = field.data.readable().unwrap();
                        let headers = &field.headers;
                        // NOTE(review): the filename comes straight from the
                        // client and is pushed onto `path` unchanged — confirm
                        // the caller's traversal guard covers this.
                        let mut target_path = path.clone();
                        target_path.push(headers.filename.clone().unwrap());
                        if let Err(errno) = std::fs::File::create(target_path)
                            .and_then(|mut file| io::copy(&mut data, &mut file))
                        {
                            return Err((
                                status::InternalServerError,
                                format!("Copy file failed: {}", errno),
                            ));
                        } else {
                            println!(" >> File saved: {}", headers.filename.clone().unwrap());
                        }
                    }
                    Ok(())
                }
                SaveResult::Partial(_entries, reason) => {
                    Err((status::InternalServerError, reason.unwrap_err().to_string()))
                }
                SaveResult::Error(error) => {
                    Err((status::InternalServerError, error.to_string()))
                }
            }
        }
        Err(_) => Err((
            status::BadRequest,
            "The request is not multipart".to_owned(),
        )),
    }
}
/// Renders an HTML directory listing for `fs_path`.
///
/// The page contains, top to bottom: an optional upload form, a breadcrumb
/// trail, sortable column headers (when sorting is enabled), a "[Up]" link
/// and one row per directory entry. `path_prefix` is the decoded request
/// path split into segments, used to build all links.
///
/// Fixes in this revision:
/// - restored `&current_link` — the `&curren` had been collapsed into a
///   `¤` character by a bad encoding round-trip, which does not compile;
/// - `break` after the first acceptable Accept-Encoding so the first match
///   wins, matching `send_file`'s behaviour (previously the last matching
///   encoding silently overwrote earlier ones).
fn list_directory(
    &self,
    req: &mut Request,
    fs_path: &PathBuf,
    path_prefix: &[String],
) -> IronResult<Response> {
    // Snapshot of one directory entry: name + metadata, gathered up front
    // so entries can be sorted before rendering.
    struct Entry {
        filename: String,
        metadata: fs::Metadata,
    }
    let mut resp = Response::with(status::Ok);
    let mut fs_path = fs_path.clone();
    let mut rows = Vec::new();
    let read_dir = fs::read_dir(&fs_path).map_err(error_io2iron)?;
    let mut entries = Vec::new();
    for entry_result in read_dir {
        let entry = entry_result.map_err(error_io2iron)?;
        entries.push(Entry {
            filename: entry.file_name().into_string().unwrap(),
            metadata: entry.metadata().map_err(error_io2iron)?,
        });
    }
    // Breadcrumb navigation: ancestors are links, current directory is text.
    let breadcrumb = if !path_prefix.is_empty() {
        let mut breadcrumb = path_prefix.to_owned();
        let mut bread_links: Vec<String> = Vec::new();
        bread_links.push(breadcrumb.pop().unwrap());
        while !breadcrumb.is_empty() {
            // `link` is formatted before `label` pops, so each href still
            // contains the segment being labelled.
            bread_links.push(format!(
                r#"<a href="/{link}/"><strong>{label}</strong></a>"#,
                link = encode_link_path(&breadcrumb),
                label = encode_minimal(&breadcrumb.pop().unwrap().to_owned()),
            ));
        }
        bread_links.push(ROOT_LINK.to_owned());
        bread_links.reverse();
        bread_links.join(" / ")
    } else {
        ROOT_LINK.to_owned()
    };
    // Sortable column headers (only when sorting is enabled).
    let sort_links = if self.sort {
        // Current sort field/order from the query string; defaults: name, desc.
        let mut sort_field = Some(String::from("name"));
        let mut order = None;
        for (k, v) in req.url.as_ref().query_pairs() {
            if k == "sort" {
                sort_field = Some(v.to_string());
            } else if k == "order" {
                order = Some(v.to_string());
            }
        }
        let order = order.unwrap_or_else(|| DEFAULT_ORDER.to_owned());
        // A column currently sorted descending links to ascending; all other
        // columns link to the default order.
        let mut order_labels = BTreeMap::new();
        for field in SORT_FIELDS.iter() {
            if sort_field == Some((*field).to_owned()) && order == ORDER_DESC {
                // reverse the order of the field
                order_labels.insert(field.to_owned(), ORDER_ASC);
            }
        }
        if let Some(field) = sort_field {
            // Reject unknown fields/orders with 400 instead of panicking later.
            if SORT_FIELDS
                .iter()
                .position(|s| *s == field.as_str())
                .is_none()
            {
                return Err(IronError::new(
                    StringError(format!("Unknown sort field: {}", field)),
                    status::BadRequest,
                ));
            }
            if vec![ORDER_ASC, ORDER_DESC]
                .iter()
                .position(|s| *s == order)
                .is_none()
            {
                return Err(IronError::new(
                    StringError(format!("Unknown sort order: {}", order)),
                    status::BadRequest,
                ));
            }
            let reverse = order == ORDER_DESC;
            entries.sort_by(|a, b| {
                let rv = match field.as_str() {
                    "name" => a.filename.cmp(&b.filename),
                    "modified" => {
                        let a = a.metadata.modified().unwrap();
                        let b = b.metadata.modified().unwrap();
                        a.cmp(&b)
                    }
                    "size" => {
                        // Entries of the same kind compare by byte length;
                        // otherwise directories sort before files.
                        if a.metadata.is_dir() == b.metadata.is_dir()
                            || a.metadata.is_file() == b.metadata.is_file()
                        {
                            a.metadata.len().cmp(&b.metadata.len())
                        } else if a.metadata.is_dir() {
                            Ordering::Less
                        } else {
                            Ordering::Greater
                        }
                    }
                    _ => unreachable!(),
                };
                if reverse {
                    rv.reverse()
                } else {
                    rv
                }
            });
        }
        // Trailing empty segment gives the href a trailing '/'.
        let mut current_link = path_prefix.to_owned();
        current_link.push("".to_owned());
        format!(
            r#"
<tr>
<th><a href="/{link}?sort=name&order={name_order}">Name</a></th>
<th><a href="/{link}?sort=modified&order={modified_order}">Last modified</a></th>
<th><a href="/{link}?sort=size&order={size_order}">Size</a></th>
</tr>
<tr><td style="border-top:1px dashed #BBB;" colspan="5"></td></tr>
"#,
            link = encode_link_path(&current_link),
            name_order = order_labels.get("name").unwrap_or(&DEFAULT_ORDER),
            modified_order = order_labels.get("modified").unwrap_or(&DEFAULT_ORDER),
            size_order = order_labels.get("size").unwrap_or(&DEFAULT_ORDER)
        )
    } else {
        "".to_owned()
    };
    // Goto parent directory link
    if !path_prefix.is_empty() {
        let mut link = path_prefix.to_owned();
        link.pop();
        if !link.is_empty() {
            link.push("".to_owned());
        }
        rows.push(format!(
            r#"
<tr>
<td><a href="/{link}"><strong>[Up]</strong></a></td>
<td></td>
<td></td>
</tr>
"#,
            link = encode_link_path(&link)
        ));
    } else {
        rows.push(r#"<tr><td> </td></tr>"#.to_owned());
    }
    // Directory entries
    for Entry { filename, metadata } in entries {
        if self.index {
            // --index: serve index.html / index.htm directly instead of
            // rendering a listing for this directory.
            for fname in &["index.html", "index.htm"] {
                if filename == *fname {
                    // Automatic render index page
                    fs_path.push(filename);
                    return self.send_file(req, &fs_path);
                }
            }
        }
        // * Entry.modified
        let file_modified = system_time_to_date_time(metadata.modified().unwrap())
            .format("%Y-%m-%d %H:%M:%S")
            .to_string();
        // * Entry.filesize — directories show "-", files a human-readable size.
        let file_size = if metadata.is_dir() {
            "-".to_owned()
        } else {
            convert(metadata.len() as f64)
        };
        // * Entry.linkstyle — directories are rendered bold.
        let link_style = if metadata.is_dir() {
            "style=\"font-weight: bold;\"".to_owned()
        } else {
            "".to_owned()
        };
        // * Entry.link — trailing empty segment => trailing '/' for dirs.
        let mut link = path_prefix.to_owned();
        link.push(filename.clone());
        if metadata.is_dir() {
            link.push("".to_owned());
        }
        // * Entry.label
        let file_name_label = if metadata.is_dir() {
            format!("{}/", &filename)
        } else {
            filename.clone()
        };
        // Render one directory entry
        rows.push(format!(
            r#"
<tr>
<td><a {linkstyle} href="/{link}">{label}</a></td>
<td style="color:#888;">[{modified}]</td>
<td><bold>{filesize}</bold></td>
</tr>
"#,
            linkstyle = link_style,
            link = encode_link_path(&link),
            label = encode_minimal(&file_name_label),
            modified = file_modified,
            filesize = file_size
        ));
    }
    // Optional upload form
    let upload_form = if self.upload.is_some() {
        format!(
            r#"
<form style="margin-top:1em; margin-bottom:1em;" action="/{path}" method="POST" enctype="multipart/form-data">
<input type="file" name="files" accept="*" multiple />
<input type="hidden" name="csrf" value="{csrf}"/>
<input type="submit" value="Upload" />
</form>
"#,
            path = encode_link_path(path_prefix),
            csrf = self.upload.as_ref().unwrap().csrf_token
        )
    } else {
        "".to_owned()
    };
    // Put all parts together
    resp.set_mut(format!(
        r#"<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no"/>
<style> a {{ text-decoration:none; }} </style>
</head>
<body>
{upload_form}
<div>{breadcrumb}</div>
<hr />
<table>
{sort_links}
{rows}
</table>
</body>
</html>
"#,
        upload_form = upload_form,
        breadcrumb = breadcrumb,
        sort_links = sort_links,
        rows = rows.join("\n")
    ));
    resp.headers.set(headers::ContentType::html());
    // Advertise gzip/deflate for the HTML page itself when compression is
    // configured; the first encoding the client accepts wins.
    if self.compress.is_some() {
        if let Some(&AcceptEncoding(ref encodings)) = req.headers.get::<AcceptEncoding>() {
            for &QualityItem { ref item, .. } in encodings {
                if *item == Encoding::Deflate || *item == Encoding::Gzip {
                    resp.headers.set(ContentEncoding(vec![item.clone()]));
                    break;
                }
            }
        }
    }
    Ok(resp)
}
/// Streams the file at `path` back to the client.
///
/// Supports HEAD (headers only) and GET with an optional single byte range
/// (`Range`, gated by `If-Match` / `If-Range`), gzip/deflate content-encoding
/// negotiation for configured extensions, and HTTP caching via
/// `If-Modified-Since` / `Last-Modified` / `ETag` / `Cache-Control`.
/// Any other method yields 405 Method Not Allowed.
fn send_file<P: AsRef<Path>>(&self, req: &Request, path: P) -> IronResult<Response> {
    use filetime::FileTime;
    use iron::headers::{
        AcceptRanges, ByteRangeSpec, ContentLength, ContentRange, ContentRangeSpec,
        ContentType, ETag, EntityTag, IfMatch, IfRange, Range, RangeUnit,
    };
    use iron::headers::{
        CacheControl, CacheDirective, HttpDate, IfModifiedSince, LastModified,
    };
    use iron::method::Method;
    use iron::mime::{Mime, SubLevel, TopLevel};
    let path = path.as_ref();
    let metadata = fs::metadata(path).map_err(error_io2iron)?;
    // Weak ETag derived from file size and mtime: "<len>-<sec>.<nsec>" in hex.
    // nsec is always 0 here because only whole seconds are taken from FileTime.
    let time = FileTime::from_last_modification_time(&metadata);
    let modified = time::Timespec::new(time.seconds() as i64, 0);
    let etag = EntityTag::weak(format!(
        "{0:x}-{1:x}.{2:x}",
        metadata.len(),
        modified.sec,
        modified.nsec
    ));
    let mut resp = Response::with(status::Ok);
    if self.range {
        // Advertise byte-range support.
        resp.headers.set(AcceptRanges(vec![RangeUnit::Bytes]));
    }
    match req.method {
        Method::Head => {
            // HEAD: echo content type (defaulting to text/plain) and length,
            // but attach no body.
            let content_type = req
                .headers
                .get::<ContentType>()
                .cloned()
                .unwrap_or_else(|| ContentType(Mime(TopLevel::Text, SubLevel::Plain, vec![])));
            resp.headers.set(content_type);
            resp.headers.set(ContentLength(metadata.len()));
        }
        Method::Get => {
            // Set mime type
            let mime = mime_types::from_path(path).first_or_octet_stream();
            resp.headers
                .set_raw("content-type", vec![mime.to_string().into_bytes()]);
            if self.range {
                let mut range = req.headers.get::<Range>();
                if range.is_some() {
                    // [Reference]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Match
                    // Check header::If-Match
                    // A Range request with If-Match must strongly match our ETag.
                    if let Some(&IfMatch::Items(ref items)) = req.headers.get::<IfMatch>() {
                        if items
                            .iter()
                            .position(|item| item.strong_eq(&etag))
                            .is_none()
                        {
                            return Err(IronError::new(
                                StringError("Etag not matched".to_owned()),
                                status::RangeNotSatisfiable,
                            ));
                        }
                    };
                }
                // [Reference]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Range
                // An If-Range mismatch means the client's cached copy is stale:
                // drop the Range header and fall through to a full response.
                let matched_ifrange = match req.headers.get::<IfRange>() {
                    Some(&IfRange::EntityTag(ref etag_ifrange)) => etag.weak_eq(etag_ifrange),
                    Some(&IfRange::Date(HttpDate(ref date_ifrange))) => {
                        time::at(modified) <= *date_ifrange
                    }
                    None => true,
                };
                if !matched_ifrange {
                    range = None;
                }
                match range {
                    Some(&Range::Bytes(ref ranges)) => {
                        // Only the first range of a multi-range request is honoured.
                        if let Some(range) = ranges.get(0) {
                            let (offset, length) = match *range {
                                ByteRangeSpec::FromTo(x, mut y) => {
                                    // "x-y"
                                    if x >= metadata.len() || x > y {
                                        return Err(IronError::new(
                                            StringError(format!(
                                                "Invalid range(x={}, y={})",
                                                x, y
                                            )),
                                            status::RangeNotSatisfiable,
                                        ));
                                    }
                                    // Clamp the inclusive end to the last byte.
                                    if y >= metadata.len() {
                                        y = metadata.len() - 1;
                                    }
                                    (x, y - x + 1)
                                }
                                ByteRangeSpec::AllFrom(x) => {
                                    // "x-"
                                    if x >= metadata.len() {
                                        return Err(IronError::new(
                                            StringError(format!(
                                                "Range::AllFrom to large (x={}), Content-Length: {})",
                                                x, metadata.len())),
                                            status::RangeNotSatisfiable
                                        ));
                                    }
                                    (x, metadata.len() - x)
                                }
                                ByteRangeSpec::Last(mut x) => {
                                    // "-x": final x bytes; clamp to the file size.
                                    if x > metadata.len() {
                                        x = metadata.len();
                                    }
                                    (metadata.len() - x, x)
                                }
                            };
                            // Stream only the requested window: seek + take.
                            let mut file = fs::File::open(path).map_err(error_io2iron)?;
                            file.seek(SeekFrom::Start(offset)).map_err(error_io2iron)?;
                            let take = file.take(length);
                            resp.headers.set(ContentLength(length));
                            resp.headers.set(ContentRange(ContentRangeSpec::Bytes {
                                range: Some((offset, offset + length - 1)),
                                instance_length: Some(metadata.len()),
                            }));
                            resp.body = Some(Box::new(Box::new(take) as Box<dyn Read + Send>));
                            resp.set_mut(status::PartialContent);
                        } else {
                            return Err(IronError::new(
                                StringError("Empty range set".to_owned()),
                                status::RangeNotSatisfiable,
                            ));
                        }
                    }
                    Some(_) => {
                        // Non-byte range units are not supported.
                        return Err(IronError::new(
                            StringError("Invalid range type".to_owned()),
                            status::RangeNotSatisfiable,
                        ));
                    }
                    _ => {
                        // No (usable) Range header: send the whole file.
                        resp.headers.set(ContentLength(metadata.len()));
                        let file = fs::File::open(path).map_err(error_io2iron)?;
                        resp.body = Some(Box::new(file));
                    }
                }
            } else {
                // Range support disabled: always send the whole file.
                resp.headers.set(ContentLength(metadata.len()));
                let file = fs::File::open(path).map_err(error_io2iron)?;
                resp.body = Some(Box::new(file));
            }
        }
        _ => {
            return Ok(Response::with(status::MethodNotAllowed));
        }
    }
    // Negotiate gzip/deflate for configured extensions; partial responses are
    // never compressed (byte offsets must keep matching the on-disk file).
    if let Some(ref exts) = self.compress {
        let path_str = path.to_string_lossy();
        if resp.status != Some(status::PartialContent)
            && exts.iter().any(|ext| path_str.ends_with(ext))
        {
            if let Some(&AcceptEncoding(ref encodings)) = req.headers.get::<AcceptEncoding>() {
                for &QualityItem { ref item, .. } in encodings {
                    if *item == Encoding::Deflate || *item == Encoding::Gzip {
                        resp.headers.set(ContentEncoding(vec![item.clone()]));
                        break;
                    }
                }
            }
        }
    }
    if self.cache {
        static SECONDS: u32 = 7 * 24 * 3600; // max-age: 7.days()
        // Short-circuit with 304 when the client's copy is still fresh.
        if let Some(&IfModifiedSince(HttpDate(ref if_modified_since))) =
            req.headers.get::<IfModifiedSince>()
        {
            if modified <= if_modified_since.to_timespec() {
                return Ok(Response::with(status::NotModified));
            }
        };
        let cache = vec![CacheDirective::Public, CacheDirective::MaxAge(SECONDS)];
        resp.headers.set(CacheControl(cache));
        resp.headers.set(LastModified(HttpDate(time::at(modified))));
        resp.headers.set(ETag(etag));
    }
    Ok(resp)
}
}
|
extern crate feroxide;
use feroxide::{
Ion,
Molecule, MoleculeCompound,
Container, ContainerCompound,
ElemReaction, ReactionSide, ReactionCompound,
RedoxReaction,
Properties, Reaction
};
use feroxide::data_atoms::*;
use feroxide::data_molecules::*;
use feroxide::data_sep::*;
/// Feroxide tour: build a molecule by hand, inspect its derived properties,
/// react it inside a container, then run a redox reaction parsed from
/// string notation.
fn main() {
    // Hand-assemble CO2 from its atomic compounds.
    let co2 = Molecule {
        compounds: vec![
            MoleculeCompound { atom: CARBON, amount: 1 },
            MoleculeCompound { atom: OXYGEN, amount: 2 },
        ],
    };

    // Derived properties: name, symbol and molar mass.
    let _name = co2.name();
    let symbol = co2.symbol();
    let mass_per_mole = co2.mass();

    // Total weight of ten moles.
    let weight = mass_per_mole * 10.0;
    println!("10 moles of {} weigh {} gram(s).", symbol, weight);

    // Molecules can also be parsed straight from a formula string.
    let carbonic_acid = Molecule::from_string("H2CO3".to_owned()).unwrap();

    // A container holding water and CO2, plus some energy for reactions.
    let mut container = Container {
        contents: vec![
            ContainerCompound {
                element: co2.clone(),
                moles: 1000.0,
            },
            ContainerCompound {
                element: WATER.clone(),
                moles: 1000.0,
            },
        ],
        available_energy: 100_000f64, // in Joules
    };

    // The reaction to apply: H₂O + CO₂ ⇌ H₂CO₃
    let reaction = ElemReaction {
        lhs: ReactionSide {
            compounds: vec![
                ReactionCompound { element: WATER.clone(), amount: 1 },
                ReactionCompound { element: co2.clone(), amount: 1 },
            ],
        },
        rhs: ReactionSide {
            compounds: vec![
                ReactionCompound { element: carbonic_acid.clone(), amount: 1 },
            ],
        },
        is_equilibrium: true,
    };

    // Sanity-check the reaction before using it.
    assert!(reaction.equalise());
    assert!(reaction.is_valid());

    // Show it by name, then by symbol (the default representation).
    println!("{}", reaction.name());
    println!("{}", reaction.symbol());

    // Initial container contents.
    println!("Contents: {}", container);

    // Apply the reaction ten times, reporting what remains after each step.
    for step in 0..10 {
        container.react(&reaction);
        println!("[{:>2}] Contents: {}", step + 1, container.to_string());
    }

    // Redox reactions are easiest to write in string notation.
    let redox = RedoxReaction {
        oxidator: ElemReaction::<Ion>::ion_from_string("F2 + 2e <> 2F;1-".to_owned()).unwrap(),
        reductor: ElemReaction::<Ion>::ion_from_string("Fe <> Fe;3 + 3e".to_owned()).unwrap(),
    };

    // Validate, then print the reaction and its standard electrode potentials.
    assert!(redox.equalise());
    assert!(redox.is_valid());
    println!("{}", redox.symbol());
    println!("oxidator: {}", get_sep(&redox.oxidator).unwrap());
    println!("reductor: {}", get_sep(&redox.reductor).unwrap());
}
Add Ion to Container in example
#[macro_use(ion_from_molecule)]
extern crate feroxide;
use feroxide::{
Ion,
Molecule, MoleculeCompound,
Container, ContainerCompound,
ElemReaction, ReactionSide, ReactionCompound,
RedoxReaction,
Properties, Reaction
};
use feroxide::data_atoms::*;
use feroxide::data_molecules::*;
use feroxide::data_sep::*;
// Walk-through of the `feroxide` chemistry crate: build molecules, run an
// equilibrium reaction inside a container, then evaluate a redox couple.
// NOTE(review): all types and macros come from the external `feroxide`
// crate; the exact semantics of `equalise`/`react` are assumed from its
// API surface — confirm against the crate documentation.
fn main() {
    // You can create digital molecules with ease
    let carbondioxide = Molecule {
        compounds: vec! {
            MoleculeCompound { atom: CARBON, amount: 1 },
            MoleculeCompound { atom: OXYGEN, amount: 2 }
        }
    };
    // Of which you can generate the name
    let _name = carbondioxide.name();
    // ... or the symbol
    let symbol = carbondioxide.symbol();
    // You can calculate the mass per mole
    let mass_per_mole = carbondioxide.mass();
    // Multiply that with your amount of moles
    let weight = mass_per_mole * 10.0;
    // To get your data
    println!("10 moles of {} weigh {} gram(s).", symbol, weight);
    // If you don't want to type a lot, you could also use strings
    let carbonic_acid = Molecule::from_string("H2CO3".to_owned()).unwrap();
    // Throw a bunch of molecules together in a container with a bit of energy
    let mut container = Container {
        contents: vec! {
            ContainerCompound {
                element: ion_from_molecule!(carbondioxide.clone()),
                moles: 1000.0
            },
            ContainerCompound {
                element: ion_from_molecule!(WATER.clone()),
                moles: 1000.0
            },
            ContainerCompound {
                element: Ion::from_string("SO4;2-".to_owned()).unwrap(),
                moles: 100.0
            }
        },
        available_energy: 100_000f64 // in Joules
    };
    // Specify the reaction that will occur
    // H₂O + CO₂ ⇌ H₂CO₃
    let reaction = ElemReaction {
        lhs: ReactionSide {
            compounds: vec! {
                ReactionCompound { element: ion_from_molecule!(WATER.clone()), amount: 1 },
                ReactionCompound { element: ion_from_molecule!(carbondioxide.clone()), amount: 1 }
            }
        },
        rhs: ReactionSide {
            compounds: vec! {
                ReactionCompound { element: ion_from_molecule!(carbonic_acid.clone()), amount: 1 }
            }
        },
        is_equilibrium: true
    };
    // Make sure the reaction is valid
    assert!(reaction.equalise());
    assert!(reaction.is_valid());
    // Print the reaction in names
    println!("{}", reaction.name());
    // ... or in symbols (the default)
    println!("{}", reaction.symbol());
    // Print the contents of the container at the start
    println!("Contents: {}", container);
    // Run the reaction 10 times
    for i in 0..10 {
        // Run the reaction on the container
        container.react(&reaction);
        // Show what's left
        println!("[{:>2}] Contents: {}", i+1, container.to_string());
    }
    // Redox is possible, but to save you from a lot of typing, I recommend using strings here
    let redox = RedoxReaction {
        oxidator: ElemReaction::<Ion>::ion_from_string("F2 + 2e <> 2F;1-".to_owned()).unwrap(),
        reductor: ElemReaction::<Ion>::ion_from_string("Fe <> Fe;3 + 3e".to_owned()).unwrap()
    };
    // Make sure it's valid
    assert!(redox.equalise());
    assert!(redox.is_valid());
    // Print the symbol version
    println!("{}", redox.symbol());
    // Print the SEP values
    println!("oxidator: {}", get_sep(&redox.oxidator).unwrap());
    println!("reductor: {}", get_sep(&redox.reductor).unwrap());
}
|
use crate::exercise::{Exercise, ExerciseList};
use crate::run::run;
use crate::verify::verify;
use clap::{crate_version, App, Arg, SubCommand};
use notify::DebouncedEvent;
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::ffi::OsStr;
use std::fs;
use std::path::Path;
use std::sync::mpsc::channel;
use std::time::Duration;
mod exercise;
mod run;
mod verify;
/// CLI entry point: parses the rustlings subcommands (`verify`, `watch`,
/// `run`) and dispatches to the matching handler. Exits with status 1 when
/// not run from the rustlings directory or when an exercise fails.
fn main() {
    let matches = App::new("rustlings")
        .version(crate_version!())
        .author("Olivia Hugger, Carol Nichols")
        .about("Rustlings is a collection of small exercises to get you used to writing and reading Rust code")
        .subcommand(SubCommand::with_name("verify").alias("v").about("Verifies all exercises according to the recommended order"))
        .subcommand(SubCommand::with_name("watch").alias("w").about("Reruns `verify` when files were edited"))
        .subcommand(
            SubCommand::with_name("run")
                .alias("r")
                .about("Runs/Tests a single exercise")
                .arg(Arg::with_name("file").required(true).index(1))
                .arg(Arg::with_name("test").short("t").long("test").help("Run the file as a test")),
        )
        .get_matches();
    // Welcome banner when no subcommand was given.
    // (idiom fix: `.is_none()` instead of `None == ...`, matching the
    // identical check near the end of this function)
    if matches.subcommand_name().is_none() {
        println!();
        println!(r#" welcome to... "#);
        println!(r#" _ _ _ "#);
        println!(r#" _ __ _ _ ___| |_| (_)_ __ __ _ ___ "#);
        println!(r#" | '__| | | / __| __| | | '_ \ / _` / __| "#);
        println!(r#" | | | |_| \__ \ |_| | | | | | (_| \__ \ "#);
        println!(r#" |_| \__,_|___/\__|_|_|_| |_|\__, |___/ "#);
        println!(r#" |___/ "#);
        println!();
    }
    // Everything below uses paths relative to the rustlings repo root.
    if !Path::new("info.toml").exists() {
        println!(
            "{} must be run from the rustlings directory",
            std::env::current_exe().unwrap().to_str().unwrap()
        );
        println!("Try `cd rustlings/`!");
        std::process::exit(1);
    }
    let toml_str = &fs::read_to_string("info.toml").unwrap();
    let exercises = toml::from_str::<ExerciseList>(toml_str).unwrap().exercises;
    if let Some(ref matches) = matches.subcommand_matches("run") {
        let filename = matches.value_of("file").unwrap_or_else(|| {
            println!("Please supply a file name!");
            std::process::exit(1);
        });
        // Match the user-supplied path against each exercise's relative path.
        let matching_exercise = |e: &&Exercise| {
            Path::new(filename)
                .canonicalize()
                .map(|p| p.ends_with(&e.path))
                .unwrap_or(false)
        };
        let exercise = exercises.iter().find(matching_exercise).unwrap_or_else(|| {
            println!("No exercise found for your file name!");
            std::process::exit(1)
        });
        run(&exercise).unwrap_or_else(|_| std::process::exit(1));
    }
    if matches.subcommand_matches("verify").is_some() {
        verify(&exercises).unwrap_or_else(|_| std::process::exit(1));
    }
    if matches.subcommand_matches("watch").is_some() {
        watch(&exercises).unwrap();
    }
    // No subcommand: print the default welcome/instructions text.
    if matches.subcommand_name().is_none() {
        let text = fs::read_to_string("default_out.txt").unwrap();
        println!("{}", text);
    }
}
/// Watches `./exercises` recursively and re-runs `verify` whenever a `.rs`
/// file is created, chmodded, or written. Never returns except on watcher
/// setup errors (propagated via `?`).
fn watch(exercises: &[Exercise]) -> notify::Result<()> {
    let (tx, rx) = channel();
    // 2-second debounce: rapid consecutive editor writes collapse into one event.
    let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2))?;
    watcher.watch(Path::new("./exercises"), RecursiveMode::Recursive)?;
    // Run a full verify once up front; the result is deliberately ignored.
    let _ignored = verify(exercises.iter());
    loop {
        match rx.recv() {
            Ok(event) => match event {
                DebouncedEvent::Create(b) | DebouncedEvent::Chmod(b) | DebouncedEvent::Write(b) => {
                    // BUGFIX: the file can already be gone by the time the
                    // debounced event is handled (e.g. editor temp files);
                    // without the `exists()` check, `canonicalize().unwrap()`
                    // below panics on such events.
                    if b.extension() == Some(OsStr::new("rs")) && b.exists() {
                        println!("----------**********----------\n");
                        let filepath = b.as_path().canonicalize().unwrap();
                        // Re-verify from the edited exercise onwards.
                        let exercise = exercises
                            .iter()
                            .skip_while(|e| !filepath.ends_with(&e.path));
                        let _ignored = verify(exercise);
                    }
                }
                _ => {}
            },
            Err(e) => println!("watch error: {:?}", e),
        }
    }
}
Auto merge of #191 - MrFroop:master, r=komaeda
Fix(watch): Check if changed exercise file exists before calling verify.
Prevent a panic if the file triggering the watch event is gone.
use crate::exercise::{Exercise, ExerciseList};
use crate::run::run;
use crate::verify::verify;
use clap::{crate_version, App, Arg, SubCommand};
use notify::DebouncedEvent;
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::ffi::OsStr;
use std::fs;
use std::path::Path;
use std::sync::mpsc::channel;
use std::time::Duration;
mod exercise;
mod run;
mod verify;
/// CLI entry point: parses the rustlings subcommands (`verify`, `watch`,
/// `run`) and dispatches to the matching handler. Exits with status 1 when
/// not run from the rustlings directory or when an exercise fails.
fn main() {
    let matches = App::new("rustlings")
        .version(crate_version!())
        .author("Olivia Hugger, Carol Nichols")
        .about("Rustlings is a collection of small exercises to get you used to writing and reading Rust code")
        .subcommand(SubCommand::with_name("verify").alias("v").about("Verifies all exercises according to the recommended order"))
        .subcommand(SubCommand::with_name("watch").alias("w").about("Reruns `verify` when files were edited"))
        .subcommand(
            SubCommand::with_name("run")
                .alias("r")
                .about("Runs/Tests a single exercise")
                .arg(Arg::with_name("file").required(true).index(1))
                .arg(Arg::with_name("test").short("t").long("test").help("Run the file as a test")),
        )
        .get_matches();
    // Welcome banner when no subcommand was given.
    // (idiom fix: `.is_none()` instead of `None == ...`, matching the
    // identical check near the end of this function)
    if matches.subcommand_name().is_none() {
        println!();
        println!(r#" welcome to... "#);
        println!(r#" _ _ _ "#);
        println!(r#" _ __ _ _ ___| |_| (_)_ __ __ _ ___ "#);
        println!(r#" | '__| | | / __| __| | | '_ \ / _` / __| "#);
        println!(r#" | | | |_| \__ \ |_| | | | | | (_| \__ \ "#);
        println!(r#" |_| \__,_|___/\__|_|_|_| |_|\__, |___/ "#);
        println!(r#" |___/ "#);
        println!();
    }
    // Everything below uses paths relative to the rustlings repo root.
    if !Path::new("info.toml").exists() {
        println!(
            "{} must be run from the rustlings directory",
            std::env::current_exe().unwrap().to_str().unwrap()
        );
        println!("Try `cd rustlings/`!");
        std::process::exit(1);
    }
    let toml_str = &fs::read_to_string("info.toml").unwrap();
    let exercises = toml::from_str::<ExerciseList>(toml_str).unwrap().exercises;
    if let Some(ref matches) = matches.subcommand_matches("run") {
        let filename = matches.value_of("file").unwrap_or_else(|| {
            println!("Please supply a file name!");
            std::process::exit(1);
        });
        // Match the user-supplied path against each exercise's relative path.
        let matching_exercise = |e: &&Exercise| {
            Path::new(filename)
                .canonicalize()
                .map(|p| p.ends_with(&e.path))
                .unwrap_or(false)
        };
        let exercise = exercises.iter().find(matching_exercise).unwrap_or_else(|| {
            println!("No exercise found for your file name!");
            std::process::exit(1)
        });
        run(&exercise).unwrap_or_else(|_| std::process::exit(1));
    }
    if matches.subcommand_matches("verify").is_some() {
        verify(&exercises).unwrap_or_else(|_| std::process::exit(1));
    }
    if matches.subcommand_matches("watch").is_some() {
        watch(&exercises).unwrap();
    }
    // No subcommand: print the default welcome/instructions text.
    if matches.subcommand_name().is_none() {
        let text = fs::read_to_string("default_out.txt").unwrap();
        println!("{}", text);
    }
}
/// Watches `./exercises` recursively and re-runs `verify` whenever a `.rs`
/// file is created, chmodded, or written. Never returns except on watcher
/// setup errors (propagated via `?`).
fn watch(exercises: &[Exercise]) -> notify::Result<()> {
    let (tx, rx) = channel();
    // 2-second debounce: rapid consecutive editor writes collapse into one event.
    let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2))?;
    watcher.watch(Path::new("./exercises"), RecursiveMode::Recursive)?;
    // Run a full verify once up front; the result is deliberately ignored.
    let _ignored = verify(exercises.iter());
    loop {
        match rx.recv() {
            Ok(event) => match event {
                DebouncedEvent::Create(b) | DebouncedEvent::Chmod(b) | DebouncedEvent::Write(b) => {
                    // `b.exists()` guards against files deleted between the
                    // event being queued and handled; without it the
                    // `canonicalize().unwrap()` below would panic.
                    if b.extension() == Some(OsStr::new("rs")) && b.exists() {
                        println!("----------**********----------\n");
                        let filepath = b.as_path().canonicalize().unwrap();
                        // Re-verify from the edited exercise onwards.
                        let exercise = exercises
                            .iter()
                            .skip_while(|e| !filepath.ends_with(&e.path));
                        let _ignored = verify(exercise);
                    }
                }
                _ => {}
            },
            Err(e) => println!("watch error: {:?}", e),
        }
    }
}
|
#![deny(warnings)]
#![feature(phase, slicing_syntax)]
#[phase(plugin, link)] extern crate log;
extern crate criterion;
extern crate serialize;
use criterion::Criterion;
use std::io::{USER_DIR, Command, File, fs};
use std::str;
use executable::Executable;
use problem::Problem;
use solution::Solution;
mod compiler;
mod executable;
mod interpreter;
mod language;
mod problem;
mod solution;
/// Records the host CPU configuration for the benchmark run: captures
/// `lscpu` output, strips the volatile "CPU MHz" line (it varies with
/// frequency scaling), and writes the rest to a file named `lscpu`.
/// Panics if the command cannot be spawned, parsed, or written out.
fn lscpu() {
    match Command::new("lscpu").output() {
        Err(_) => panic!("Couldn't spawn `lscpu`"),
        // BUGFIX: `str::from_utf8` returns `Result`, not `Option`; match on
        // Err/Ok instead of None/Some.
        Ok(po) => match str::from_utf8(po.output[]) {
            Err(_) => panic!("Couldn't parse the output of `lscpu`"),
            Ok(output) => {
                let s = output.lines().filter(|line| {
                    !line.starts_with("CPU MHz:")
                }).collect::<Vec<_>>().connect("\n");
                match File::create(&Path::new("lscpu")).write_str(s.as_slice()) {
                    // (also fixes the "Couln't" typo in the panic message)
                    Err(_) => panic!("Couldn't write to the cpu file"),
                    Ok(_) => {},
                }
            }
        },
    }
}
/// Benchmarks every language's solution for every directory under
/// `problems/`; when at least one solution validated, summarizes with
/// Criterion and copies the violin plot into `plots/<problem-id>.svg`.
fn main() {
    let languages = language::all();
    // Record the host CPU configuration alongside the results.
    lscpu();
    for problem in fs::readdir(&Path::new("problems")).unwrap().into_iter().filter_map(|dir| {
        Problem::new(dir)
    }) {
        // Count of solutions that both built and validated for this problem.
        let mut processed = 0u;
        for solution in languages.iter().filter_map(|language| {
            Solution::new(language, &problem)
        }) {
            match Executable::new(&solution) {
                // Only benchmark solutions that produce the expected output.
                Some(executable) => if executable.validate() == Some(true) {
                    executable.bench();
                    processed += 1;
                },
                None => {},
            }
        }
        if processed > 0 {
            Criterion::default().summarize(problem.id());
            let pid = problem.id();
            let summary_plot =
                Path::new(format!(".criterion/{}/summary/new/violin_plot.svg", pid));
            let plots_dir = Path::new("plots");
            fs::mkdir_recursive(&plots_dir, USER_DIR).ok().
                expect("Couldn't create the plots directory");
            fs::copy(&summary_plot, &plots_dir.join(pid).with_extension("svg")).ok().
                expect("Couldn't copy summary plot to the plots directory");
        }
    }
}
fix(main): `from_utf8` now returns `Result` instead of `Option`
#![deny(warnings)]
#![feature(phase, slicing_syntax)]
#[phase(plugin, link)] extern crate log;
extern crate criterion;
extern crate serialize;
use criterion::Criterion;
use std::io::{USER_DIR, Command, File, fs};
use std::str;
use executable::Executable;
use problem::Problem;
use solution::Solution;
mod compiler;
mod executable;
mod interpreter;
mod language;
mod problem;
mod solution;
/// Records the host CPU configuration for the benchmark run: captures
/// `lscpu` output, strips the volatile "CPU MHz" line (it varies with
/// frequency scaling), and writes the rest to a file named `lscpu`.
/// Panics if the command cannot be spawned, parsed, or written out.
fn lscpu() {
    match Command::new("lscpu").output() {
        Err(_) => panic!("Couldn't spawn `lscpu`"),
        Ok(po) => match str::from_utf8(po.output[]) {
            Err(_) => panic!("Couldn't parse the output of `lscpu`"),
            Ok(output) => {
                let s = output.lines().filter(|line| {
                    !line.starts_with("CPU MHz:")
                }).collect::<Vec<_>>().connect("\n");
                match File::create(&Path::new("lscpu")).write_str(s.as_slice()) {
                    // NOTE(review): "Couln't" typo kept — message text is runtime output.
                    Err(_) => panic!("Couln't write to the cpu file"),
                    Ok(_) => {},
                }
            }
        },
    }
}
/// Benchmarks every language's solution for every directory under
/// `problems/`; when at least one solution validated, summarizes with
/// Criterion and copies the violin plot into `plots/<problem-id>.svg`.
fn main() {
    let languages = language::all();
    // Record the host CPU configuration alongside the results.
    lscpu();
    for problem in fs::readdir(&Path::new("problems")).unwrap().into_iter().filter_map(|dir| {
        Problem::new(dir)
    }) {
        // Count of solutions that both built and validated for this problem.
        let mut processed = 0u;
        for solution in languages.iter().filter_map(|language| {
            Solution::new(language, &problem)
        }) {
            match Executable::new(&solution) {
                // Only benchmark solutions that produce the expected output.
                Some(executable) => if executable.validate() == Some(true) {
                    executable.bench();
                    processed += 1;
                },
                None => {},
            }
        }
        if processed > 0 {
            Criterion::default().summarize(problem.id());
            let pid = problem.id();
            let summary_plot =
                Path::new(format!(".criterion/{}/summary/new/violin_plot.svg", pid));
            let plots_dir = Path::new("plots");
            fs::mkdir_recursive(&plots_dir, USER_DIR).ok().
                expect("Couldn't create the plots directory");
            fs::copy(&summary_plot, &plots_dir.join(pid).with_extension("svg")).ok().
                expect("Couldn't copy summary plot to the plots directory");
        }
    }
}
|
extern crate xml;
extern crate rustc;
use std::os;
use std::io::{Command, File, BufferedReader};
use std::io::process::ProcessExit;
use std::fmt;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread::Thread;
use rustc::middle::graph::{Graph, NodeIndex, Node, EdgeIndex, Edge};
use xml::reader::EventReader;
use xml::reader::events::XmlEvent;
/// Work item sent to a worker thread: which graph node to execute.
struct TaskMessage {
    index: NodeIndex,
    task: BuildTask
}
impl fmt::Show for TaskMessage {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "index={}, title={}", self.index, self.task.title)
    }
}
/// Outcome reported back by a worker for one graph node.
struct ResultMessage {
    index: NodeIndex,
    result: Result<BuildResult, String>
}
impl fmt::Show for ResultMessage {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "index={}, result={}", self.index, self.result)
    }
}
/// Raw outcome of a finished child process.
struct BuildResult {
    exit_code: ProcessExit,
}
impl fmt::Show for BuildResult {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "exit_code={}", self.exit_code)
    }
}
/// Entry point: spawns one worker thread per CPU, parses the XGConsole
/// build graph from XML and executes it.
fn main() {
    println!("XGConsole:");
    for arg in parse_command_line(os::args()).iter() {
        println!(" {}", arg);
    }
    // Results flow worker -> main; tasks flow main -> workers.
    let (tx_result, rx_result): (Sender<ResultMessage>, Receiver<ResultMessage>) = channel();
    let (tx_task, rx_task): (Sender<TaskMessage>, Receiver<TaskMessage>) = channel();
    // The single task receiver is shared between all workers behind a mutex.
    let mutex_rx_task = Arc::new(Mutex::new(rx_task));
    for cpu_id in range(0, std::os::num_cpus()) {
        let local_rx_task = mutex_rx_task.clone();
        let local_tx_result = tx_result.clone();
        Thread::spawn(move || {
            loop {
                let message: TaskMessage;
                // recv_opt() errors once all task senders are gone -> worker exits.
                match local_rx_task.lock().recv_opt() {
                    Ok(v) => {message = v;
                    }
                    Err(_) => {break;}
                }
                println!("{}: {}", cpu_id, message);
                local_tx_result.send(execute_task(message));
            }
        }).detach();
    }
    // Default to the bundled example graph when no path argument is given.
    let args = os::args();
    let mut path;
    if args.len() <= 1 {
        path = Path::new(&args[0]).dir_path();
        path.push("../tests/graph-parser.xml");
    } else {
        path = Path::new(&args[1]);
    }
    println!("Example path: {}", path.display());
    match xg_parse(&path) {
        Ok(graph) => {
            execute_graph(&graph, tx_task, rx_result);
        }
        Err(msg) => {panic!(msg);}
    }
    println!("done");
}
/// Checks the dependency graph for cycles with a worklist sweep: a node is
/// marked complete once all its dependencies are complete, and its
/// dependants are re-queued. Returns the graph unchanged if every node can
/// be completed, otherwise an error (a dependency cycle exists).
fn validate_graph(graph: Graph<BuildTask, ()>) -> Result<Graph<BuildTask, ()>, String> {
    let mut completed: Vec<bool> = vec![];
    let mut queue: Vec<NodeIndex> = vec![];
    // Seed the worklist with every node.
    graph.each_node(|index: NodeIndex, _: &Node<BuildTask>| -> bool {
        completed.push(false);
        queue.push(index);
        true
    });
    let mut count: uint = 0;
    let mut i: uint = 0;
    while i < queue.len() {
        let index = queue[i];
        if (!completed[index.node_id()]) && (is_ready(&graph, &completed, &index)) {
            completed[index.node_id()] = true;
            // Re-queue dependants (sources of incoming edges) for re-examination.
            graph.each_incoming_edge(index, |_: EdgeIndex, edge: &Edge<()>| -> bool {
                queue.push(edge.source());
                true
            });
            count += 1;
            // Every node completed -> no cycle.
            if count == completed.len() {
                return Ok(graph);
            }
        }
        i = i + 1;
    }
    return Err("Found cycles in build dependencies.".to_string());
}
/// Runs one build task as a child process in its working directory and
/// wraps the outcome (or the spawn failure) in a ResultMessage tagged with
/// the task's node index.
fn execute_task(message: TaskMessage) -> ResultMessage {
    println!("{}", message.task.title);
    match Command::new(message.task.exec)
        .args(message.task.args.as_slice())
        .cwd(&Path::new(&message.task.working_dir))
        .output() {
        Ok(output) => {
            ResultMessage {
                index: message.index,
                result: Ok(BuildResult {
                    exit_code: output.status
                })
            }
        }
        Err(e) => {
            // Spawn failure (e.g. missing executable) is reported as Err,
            // not as a non-zero exit code.
            ResultMessage {
                index: message.index,
                result: Err(format!("Failed to start process: {}", e))
            }
        }
    }
}
/// Drives the build: seeds all leaf tasks (nodes with no outgoing
/// dependency edges), then each time a task reports back, schedules any
/// dependants whose dependencies are now all complete. Returns once every
/// node has reported.
fn execute_graph(graph: &Graph<BuildTask, ()>, tx_task: Sender<TaskMessage>, rx_result: Receiver<ResultMessage>) {
    let mut completed: Vec<bool> = vec![];
    graph.each_node(|index: NodeIndex, node: &Node<BuildTask>| -> bool {
        // A node with no outgoing edges depends on nothing: start it now.
        let mut has_edges = false;
        graph.each_outgoing_edge(index, |_: EdgeIndex, _: &Edge<()>| -> bool {
            has_edges = true;
            false
        });
        if !has_edges {
            tx_task.send(TaskMessage {
                index: index,
                task: node.data.clone(),
            });
        }
        completed.push(false);
        true
    });
    let mut count: uint = 0;
    for message in rx_result.iter() {
        assert!(!completed[message.index.node_id()]);
        // NOTE(review): message.result is never inspected here, so a task
        // that failed or never started is still marked completed and its
        // dependants are scheduled. Fixed in a later revision of this file.
        completed[message.index.node_id()] = true;
        graph.each_incoming_edge(message.index, |_: EdgeIndex, edge: &Edge<()>| -> bool {
            let source = edge.source();
            if !completed[source.node_id()] {
                if is_ready(graph, &completed, &source) {
                    tx_task.send(TaskMessage {
                        index: source,
                        task: graph.node(source).data.clone(),
                    });
                }
            }
            true
        });
        println!("R: {}", message);
        count += 1;
        if count == completed.len() {
            break;
        }
    }
}
/// A node is ready to run when every outgoing edge (dependency) points at
/// an already-completed node. Returning `false` from the closure stops the
/// edge iteration at the first unmet dependency.
fn is_ready(graph: &Graph<BuildTask, ()>, completed: &Vec<bool>, source: &NodeIndex) -> bool {
    let mut ready = true;
    graph.each_outgoing_edge(*source, |_: EdgeIndex, deps: &Edge<()>| -> bool {
        if !completed[deps.target().node_id()] {
            ready = false;
            false
        } else {
            true
        }
    });
    ready
}
/// Returns all command-line arguments except the program name (`args[0]`).
/// Idiomatic iterator form of the old manual slice-and-push loop; consuming
/// `args` also avoids cloning every element.
fn parse_command_line(args: Vec<String>) -> Vec<String> {
    args.into_iter().skip(1).collect()
}
/// A single executable build step: the command to run and where to run it.
/// The hand-written `Clone` impl duplicated exactly what the compiler
/// derives, so it is replaced by `#[derive(Clone)]`.
#[derive(Clone)]
struct BuildTask {
    title: String,       // human-readable label shown in progress output
    exec: String,        // executable path
    args: Vec<String>,   // already-split command-line arguments
    working_dir: String, // directory the command runs in
}
/// A <Task> element from the XG file, as parsed (references its tool by id).
struct XgTask {
    id: Option<String>,      // @Name attribute
    title: Option<String>,   // @Caption attribute
    tool: String,            // @Tool attribute (required)
    working_dir: String,     // @WorkingDir attribute (required)
    depends_on: Vec<String>, // ids split from the ";"-separated @DependsOn
}
impl fmt::Show for XgTask {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "id={}, title={}, tool={}, working_dir={}, depends_on={}", self.id, self.title, self.tool, self.working_dir, self.depends_on)
    }
}
/// A <Tool> element from the XG file: the executable a task runs.
struct XgTool {
    id: String,             // @Name attribute (required)
    exec: String,           // @Path attribute (required)
    args: String,           // raw @Params string; split later by cmd_parse
    output: Option<String>, // @OutputPrefix attribute
}
impl fmt::Show for XgTool {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "id={}, exec={}", self.id, self.exec)
    }
}
/// Streams the XGConsole XML file, collecting <Task> and <Tool> elements,
/// then builds the dependency graph from them. The first malformed element
/// aborts parsing with its error message.
fn xg_parse(path: &Path) -> Result<Graph<BuildTask, ()>, String> {
    let file = File::open(path).unwrap();
    let reader = BufferedReader::new(file);
    let mut parser = EventReader::new(reader);
    let mut tasks: Vec<XgTask> = vec![];
    // Tools are looked up by id when the graph is built.
    let mut tools: HashMap<String, XgTool> = HashMap::new();
    for e in parser.events() {
        match e {
            XmlEvent::StartElement {name, attributes, ..} => {
                match name.local_name.as_slice() {
                    "Task" =>
                    {
                        match xg_parse_task(&attributes) {
                            Ok(task) =>
                            {
                                tasks.push(task);
                            }
                            Err(msg) =>
                            {
                                return Err(msg);
                            }
                        };
                    }
                    "Tool" =>
                    {
                        match xg_parse_tool(&attributes) {
                            Ok(tool) =>
                            {
                                tools.insert(tool.id.to_string(), tool);
                            }
                            Err(msg) =>
                            {
                                return Err(msg);
                            }
                        };
                    }
                    _ => {}
                }
            }
            // Element nesting carries no extra information here; only start
            // tags (and their attributes) matter.
            XmlEvent::EndElement{..} => {
            }
            _ => {
            }
        }
    }
    xg_parse_create_graph(&tasks, &tools)
}
/// Turns the parsed tasks and tools into a Graph: one node per task (with
/// its tool resolved), then one edge per @DependsOn reference. Finishes
/// with a cycle check via validate_graph.
fn xg_parse_create_graph(tasks: &Vec<XgTask>, tools: &HashMap<String, XgTool>) -> Result<Graph<BuildTask, ()>, String> {
    let mut graph: Graph<BuildTask, ()> = Graph::new();
    let mut nodes: Vec<NodeIndex> = vec![];
    // Maps a task's @Name id to its node, for resolving dependencies below.
    let mut task_refs: HashMap<&str, NodeIndex> = HashMap::new();
    for task in tasks.iter() {
        match tools.get(task.tool.as_slice()) {
            Some(tool) => {
                let node = graph.add_node(BuildTask {
                    // Title falls back to the tool's OutputPrefix, then "".
                    title: match task.title {
                        Some(ref v) => {v.clone()}
                        _ => {
                            match tool.output {
                                Some(ref v) => {v.clone()}
                                _ => "".to_string()
                            }
                        }
                    },
                    exec: tool.exec.clone(),
                    args: cmd_parse(tool.args.as_slice()),
                    working_dir: task.working_dir.clone(),
                });
                // Only named tasks can be referenced by @DependsOn.
                match task.id {
                    Some(ref v) => {
                        task_refs.insert(v.as_slice(), node);
                    }
                    _ => {}
                }
                nodes.push(node);
            }
            _ => {
                return Err(format!("Can't find tool with id: {}", task.tool));
            }
        }
    }
    // Second pass: add a dependency edge for every @DependsOn id.
    for idx in range(0, nodes.len()) {
        let ref task = tasks[idx];
        let ref node = nodes[idx];
        for id in task.depends_on.iter() {
            let dep_node = task_refs.get(id.as_slice());
            match dep_node {
                Some(v) => {
                    graph.add_edge(*node, *v, ());
                }
                _ => {
                    return Err(format!("Can't find task for dependency with id: {}", id));
                }
            }
        }
    }
    validate_graph(graph)
}
/// Flattens an XML attribute list into a local-name -> value map.
/// On duplicate names the last occurrence wins, as with repeated inserts.
fn map_attributes(attributes: &Vec<xml::attribute::OwnedAttribute>) -> HashMap<String, String> {
    attributes
        .iter()
        .map(|attr| (attr.name.local_name.clone(), attr.value.clone()))
        .collect()
}
/// Builds an XgTask from a <Task> element's attributes.
/// @Tool and @WorkingDir are required; @Name, @Caption and @DependsOn are optional.
fn xg_parse_task(attributes: &Vec<xml::attribute::OwnedAttribute>) -> Result<XgTask, String> {
    let mut attrs = map_attributes(attributes);
    // Tool
    let tool: String;
    match attrs.remove("Tool") {
        Some(v) => {tool = v;}
        _ => {return Err("Invalid task data: attribute @Tool not found.".to_string());}
    }
    // WorkingDir
    let working_dir: String;
    match attrs.remove("WorkingDir") {
        Some(v) => {working_dir = v;}
        _ => {return Err("Invalid task data: attribute @WorkingDir not found.".to_string());}
    }
    // DependsOn: ";"-separated list of task names.
    let mut depends_on: Vec<String> = vec![];
    match attrs.remove("DependsOn") {
        Some(v) =>
        {
            for item in v.split_str(";").collect::<Vec<&str>>().iter() {
                depends_on.push(item.to_string())
            }
        }
        _ =>
        {
        }
    };
    Ok(XgTask {
        id: attrs.remove("Name"),
        title: attrs.remove("Caption"),
        tool: tool,
        working_dir: working_dir,
        depends_on: depends_on,
    })
}
/// Builds an XgTool from a <Tool> element's attributes.
/// @Name and @Path are required; @Params and @OutputPrefix are optional.
fn xg_parse_tool(attributes: &Vec<xml::attribute::OwnedAttribute>) -> Result<XgTool, String> {
    let mut attrs = map_attributes(attributes);
    // Name
    let id: String;
    match attrs.remove("Name") {
        Some(v) => {id = v;}
        _ => {return Err("Invalid task data: attribute @Name not found.".to_string());}
    }
    // Path
    let exec: String;
    match attrs.remove("Path") {
        Some(v) => {exec = v;}
        // BUGFIX: this error previously reported "@Name not found" —
        // a copy-paste mistake; it is the @Path attribute that is missing.
        _ => {return Err("Invalid task data: attribute @Path not found.".to_string());}
    }
    Ok(XgTool {
        id: id,
        exec: exec,
        output: attrs.remove("OutputPrefix"),
        // Missing @Params means "no arguments".
        args: match attrs.remove("Params") {
            Some(v) => {v}
            _ => {"".to_string()}
        },
    })
}
/// Splits a command line into individual arguments, honouring double quotes
/// and Windows-style backslash escaping (`\\` -> `\`, `\"` -> literal `"`).
/// Empty quoted strings (`""`) are preserved as empty arguments.
fn cmd_parse(cmd: &str) -> Vec<String> {
    let mut result: Vec<String> = Vec::new();
    let mut current = String::new();
    // Parser state: a backslash seen but not yet resolved, whether we are
    // inside double quotes, and whether `current` holds any content (so
    // empty quoted arguments are still emitted).
    let mut pending_backslash = false;
    let mut in_quotes = false;
    let mut has_content = false;
    for ch in cmd.chars() {
        match ch {
            ' ' | '\t' => {
                if pending_backslash {
                    current.push('\\');
                    pending_backslash = false;
                }
                if in_quotes {
                    // Whitespace inside quotes is part of the argument.
                    current.push(ch);
                    has_content = true;
                } else if has_content {
                    // Argument boundary: flush the accumulated token.
                    result.push(current);
                    current = String::new();
                    has_content = false;
                }
            }
            '\\' => {
                // A pair of backslashes collapses into one literal backslash.
                if pending_backslash {
                    current.push(ch);
                }
                has_content = true;
                pending_backslash = !pending_backslash;
            }
            '"' => {
                if pending_backslash {
                    // Escaped quote: literal `"` character.
                    current.push(ch);
                    pending_backslash = false;
                } else {
                    in_quotes = !in_quotes;
                }
                has_content = true;
            }
            other => {
                // A lone backslash before an ordinary character stays literal.
                if pending_backslash {
                    current.push('\\');
                    pending_backslash = false;
                }
                current.push(other);
                has_content = true;
            }
        }
    }
    if has_content {
        result.push(current);
    }
    result
}
// Quoted argument plus two bare ones.
#[test]
fn test_cmd_parse_1() {
    assert_eq!(cmd_parse("\"abc\" d e"), ["abc", "d", "e"]);
}
// Leading/trailing whitespace is ignored.
#[test]
fn test_cmd_parse_2() {
    assert_eq!(cmd_parse(" \"abc\" d e "), ["abc", "d", "e"]);
}
// Empty quoted strings survive as empty arguments.
#[test]
fn test_cmd_parse_3() {
    assert_eq!(cmd_parse("\"\" \"abc\" d e \"\""), ["", "abc", "d", "e", ""]);
}
// Doubled backslashes collapse; quotes can open mid-token.
#[test]
fn test_cmd_parse_4() {
    assert_eq!(cmd_parse("a\\\\\\\\b d\"e f\"g h"), ["a\\\\b", "de fg", "h"]);
}
// Backslash-escaped quote becomes a literal quote character.
#[test]
fn test_cmd_parse_5() {
    assert_eq!(cmd_parse("a\\\\\\\"b c d"), ["a\\\"b", "c", "d"]);
}
// Escaped backslashes immediately followed by a real (unescaped) quote.
#[test]
fn test_cmd_parse_6() {
    assert_eq!(cmd_parse("a\\\\\\\\\"b c\" d e"), ["a\\\\b c", "d", "e"]);
}
// Plain Windows path: single backslashes pass through unchanged.
#[test]
fn test_cmd_parse_7() {
    assert_eq!(cmd_parse("C:\\Windows\\System32 d e"), ["C:\\Windows\\System32", "d", "e"]);
}
Add some error handling.
extern crate xml;
extern crate rustc;
use std::os;
use std::io::{Command, File, BufferedReader};
use std::io::process::ProcessExit;
use std::fmt;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread::Thread;
use rustc::middle::graph::{Graph, NodeIndex, Node, EdgeIndex, Edge};
use xml::reader::EventReader;
use xml::reader::events::XmlEvent;
/// Work item sent to a worker thread: which graph node to execute.
struct TaskMessage {
    index: NodeIndex,
    task: BuildTask
}
impl fmt::Show for TaskMessage {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "index={}, title={}", self.index, self.task.title)
    }
}
/// Outcome reported back by a worker for one graph node.
struct ResultMessage {
    index: NodeIndex,
    result: Result<BuildResult, String>
}
impl fmt::Show for ResultMessage {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "index={}, result={}", self.index, self.result)
    }
}
/// Raw outcome of a finished child process.
struct BuildResult {
    exit_code: ProcessExit,
}
impl fmt::Show for BuildResult {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "exit_code={}", self.exit_code)
    }
}
/// Entry point: spawns one worker thread per CPU, parses the XGConsole
/// build graph from XML and executes it.
fn main() {
    println!("XGConsole:");
    for arg in parse_command_line(os::args()).iter() {
        println!(" {}", arg);
    }
    // Results flow worker -> main; tasks flow main -> workers.
    let (tx_result, rx_result): (Sender<ResultMessage>, Receiver<ResultMessage>) = channel();
    let (tx_task, rx_task): (Sender<TaskMessage>, Receiver<TaskMessage>) = channel();
    // The single task receiver is shared between all workers behind a mutex.
    let mutex_rx_task = Arc::new(Mutex::new(rx_task));
    for cpu_id in range(0, std::os::num_cpus()) {
        let local_rx_task = mutex_rx_task.clone();
        let local_tx_result = tx_result.clone();
        Thread::spawn(move || {
            loop {
                let message: TaskMessage;
                // recv_opt() errors once all task senders are gone -> worker exits.
                match local_rx_task.lock().recv_opt() {
                    Ok(v) => {message = v;
                    }
                    Err(_) => {
                        break;
                    }
                }
                println!("{}: {}", cpu_id, message);
                local_tx_result.send(execute_task(message));
            }
        }).detach();
    }
    // Drop main's copy of the result sender: the worker clones become the
    // only senders, so rx_result's iterator can terminate when workers exit.
    free(tx_result);
    // Default to the bundled example graph when no path argument is given.
    let args = os::args();
    let mut path;
    if args.len() <= 1 {
        path = Path::new(&args[0]).dir_path();
        path.push("../tests/graph-parser.xml");
    } else {
        path = Path::new(&args[1]);
    }
    println!("Example path: {}", path.display());
    match xg_parse(&path) {
        Ok(graph) => {
            execute_graph(&graph, tx_task, rx_result);
        }
        Err(msg) => {panic!(msg);}
    }
    println!("done");
}
/// Checks the dependency graph for cycles with a worklist sweep: a node is
/// marked complete once all its dependencies are complete, and its
/// dependants are re-queued. Returns the graph unchanged if every node can
/// be completed, otherwise an error (a dependency cycle exists).
fn validate_graph(graph: Graph<BuildTask, ()>) -> Result<Graph<BuildTask, ()>, String> {
    let mut completed: Vec<bool> = vec![];
    let mut queue: Vec<NodeIndex> = vec![];
    // Seed the worklist with every node.
    graph.each_node(|index: NodeIndex, _: &Node<BuildTask>| -> bool {
        completed.push(false);
        queue.push(index);
        true
    });
    let mut count: uint = 0;
    let mut i: uint = 0;
    while i < queue.len() {
        let index = queue[i];
        if (!completed[index.node_id()]) && (is_ready(&graph, &completed, &index)) {
            completed[index.node_id()] = true;
            // Re-queue dependants (sources of incoming edges) for re-examination.
            graph.each_incoming_edge(index, |_: EdgeIndex, edge: &Edge<()>| -> bool {
                queue.push(edge.source());
                true
            });
            count += 1;
            // Every node completed -> no cycle.
            if count == completed.len() {
                return Ok(graph);
            }
        }
        i = i + 1;
    }
    return Err("Found cycles in build dependencies.".to_string());
}
/// Runs one build task as a child process in its working directory, echoes
/// the command line and the child's stdout/stderr, and wraps the outcome
/// (or the spawn failure) in a ResultMessage tagged with the node index.
fn execute_task(message: TaskMessage) -> ResultMessage {
    println!("{}", message.task.title);
    println!("{} {} {}", message.task.working_dir, message.task.exec, message.task.args);
    match Command::new(message.task.exec)
        .args(message.task.args.as_slice())
        .cwd(&Path::new(&message.task.working_dir))
        .output() {
        Ok(output) => {
            // Child output may not be valid UTF-8; lossy conversion keeps going.
            println!("stdout: {}", String::from_utf8_lossy(output.output.as_slice()));
            println!("stderr: {}", String::from_utf8_lossy(output.error.as_slice()));
            ResultMessage {
                index: message.index,
                result: Ok(BuildResult {
                    exit_code: output.status
                })
            }
        }
        Err(e) => {
            // Spawn failure (e.g. missing executable) is reported as Err,
            // not as a non-zero exit code.
            ResultMessage {
                index: message.index,
                result: Err(format!("Failed to start process: {}", e))
            }
        }
    }
}
/// Drives the build: seeds all leaf tasks (nodes with no outgoing
/// dependency edges), then each time a task reports back, schedules any
/// dependants whose dependencies are now all complete. Unlike the earlier
/// revision, each result is inspected: scheduling stops at the first task
/// that failed or could not be started.
fn execute_graph(graph: &Graph<BuildTask, ()>, tx_task: Sender<TaskMessage>, rx_result: Receiver<ResultMessage>) {
    let mut completed: Vec<bool> = vec![];
    graph.each_node(|index: NodeIndex, node: &Node<BuildTask>| -> bool {
        // A node with no outgoing edges depends on nothing: start it now.
        let mut has_edges = false;
        graph.each_outgoing_edge(index, |_: EdgeIndex, _: &Edge<()>| -> bool {
            has_edges = true;
            false
        });
        if !has_edges {
            tx_task.send(TaskMessage {
                index: index,
                task: node.data.clone(),
            });
        }
        completed.push(false);
        true
    });
    let mut count: uint = 0;
    for message in rx_result.iter() {
        assert!(!completed[message.index.node_id()]);
        println!("R: {}", message);
        match message.result {
            Ok(result) => {
                // Abort scheduling on a non-zero exit code.
                if !result.exit_code.success() {
                    break;
                }
                completed[message.index.node_id()] = true;
                graph.each_incoming_edge(message.index, |_: EdgeIndex, edge: &Edge<()>| -> bool {
                    let source = edge.source();
                    if !completed[source.node_id()] {
                        if is_ready(graph, &completed, &source) {
                            tx_task.send(TaskMessage {
                                index: source,
                                task: graph.node(source).data.clone(),
                            });
                        }
                    }
                    true
                });
            }
            Err(e) => {
                // Task could not even be started: report and stop.
                println!("{}", e);
                break;
            }
        }
        count += 1;
        if count == completed.len() {
            break;
        }
    }
    // Close the task channel so idle workers exit...
    free(tx_task);
    // ...then drain any in-flight results before returning.
    for message in rx_result.iter() {
    }
}
fn free<T>(_:T) {
}
/// A node is ready to run when every outgoing edge (dependency) points at
/// an already-completed node. Returning `false` from the closure stops the
/// edge iteration at the first unmet dependency.
fn is_ready(graph: &Graph<BuildTask, ()>, completed: &Vec<bool>, source: &NodeIndex) -> bool {
    let mut ready = true;
    graph.each_outgoing_edge(*source, |_: EdgeIndex, deps: &Edge<()>| -> bool {
        if !completed[deps.target().node_id()] {
            ready = false;
            false
        } else {
            true
        }
    });
    ready
}
/// Returns all command-line arguments except the program name (`args[0]`).
/// Idiomatic iterator form of the old manual slice-and-push loop; consuming
/// `args` also avoids cloning every element.
fn parse_command_line(args: Vec<String>) -> Vec<String> {
    args.into_iter().skip(1).collect()
}
/// A single executable build step: the command to run and where to run it.
/// The hand-written `Clone` impl duplicated exactly what the compiler
/// derives, so it is replaced by `#[derive(Clone)]`.
#[derive(Clone)]
struct BuildTask {
    title: String,       // human-readable label shown in progress output
    exec: String,        // executable path
    args: Vec<String>,   // already-split command-line arguments
    working_dir: String, // directory the command runs in
}
/// A <Task> element from the XG file, as parsed (references its tool by id).
struct XgTask {
    id: Option<String>,      // @Name attribute
    title: Option<String>,   // @Caption attribute
    tool: String,            // @Tool attribute (required)
    working_dir: String,     // @WorkingDir attribute (required)
    depends_on: Vec<String>, // ids split from the ";"-separated @DependsOn
}
impl fmt::Show for XgTask {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "id={}, title={}, tool={}, working_dir={}, depends_on={}", self.id, self.title, self.tool, self.working_dir, self.depends_on)
    }
}
/// A <Tool> element from the XG file: the executable a task runs.
struct XgTool {
    id: String,             // @Name attribute (required)
    exec: String,           // @Path attribute (required)
    args: String,           // raw @Params string; split later by cmd_parse
    output: Option<String>, // @OutputPrefix attribute
}
impl fmt::Show for XgTool {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "id={}, exec={}", self.id, self.exec)
    }
}
/// Streams the XGConsole XML file, collecting <Task> and <Tool> elements,
/// then builds the dependency graph from them. The first malformed element
/// aborts parsing with its error message.
fn xg_parse(path: &Path) -> Result<Graph<BuildTask, ()>, String> {
    let file = File::open(path).unwrap();
    let reader = BufferedReader::new(file);
    let mut parser = EventReader::new(reader);
    let mut tasks: Vec<XgTask> = vec![];
    // Tools are looked up by id when the graph is built.
    let mut tools: HashMap<String, XgTool> = HashMap::new();
    for e in parser.events() {
        match e {
            XmlEvent::StartElement {name, attributes, ..} => {
                match name.local_name.as_slice() {
                    "Task" =>
                    {
                        match xg_parse_task(&attributes) {
                            Ok(task) =>
                            {
                                tasks.push(task);
                            }
                            Err(msg) =>
                            {
                                return Err(msg);
                            }
                        };
                    }
                    "Tool" =>
                    {
                        match xg_parse_tool(&attributes) {
                            Ok(tool) =>
                            {
                                tools.insert(tool.id.to_string(), tool);
                            }
                            Err(msg) =>
                            {
                                return Err(msg);
                            }
                        };
                    }
                    _ => {}
                }
            }
            // Element nesting carries no extra information here; only start
            // tags (and their attributes) matter.
            XmlEvent::EndElement{..} => {
            }
            _ => {
            }
        }
    }
    xg_parse_create_graph(&tasks, &tools)
}
/// Turns the parsed tasks and tools into a Graph: one node per task (with
/// its tool resolved), then one edge per @DependsOn reference. Finishes
/// with a cycle check via validate_graph.
fn xg_parse_create_graph(tasks: &Vec<XgTask>, tools: &HashMap<String, XgTool>) -> Result<Graph<BuildTask, ()>, String> {
    let mut graph: Graph<BuildTask, ()> = Graph::new();
    let mut nodes: Vec<NodeIndex> = vec![];
    // Maps a task's @Name id to its node, for resolving dependencies below.
    let mut task_refs: HashMap<&str, NodeIndex> = HashMap::new();
    for task in tasks.iter() {
        match tools.get(task.tool.as_slice()) {
            Some(tool) => {
                let node = graph.add_node(BuildTask {
                    // Title falls back to the tool's OutputPrefix, then "".
                    title: match task.title {
                        Some(ref v) => {v.clone()}
                        _ => {
                            match tool.output {
                                Some(ref v) => {v.clone()}
                                _ => "".to_string()
                            }
                        }
                    },
                    exec: tool.exec.clone(),
                    args: cmd_parse(tool.args.as_slice()),
                    working_dir: task.working_dir.clone(),
                });
                // Only named tasks can be referenced by @DependsOn.
                match task.id {
                    Some(ref v) => {
                        task_refs.insert(v.as_slice(), node);
                    }
                    _ => {}
                }
                nodes.push(node);
            }
            _ => {
                return Err(format!("Can't find tool with id: {}", task.tool));
            }
        }
    }
    // Second pass: add a dependency edge for every @DependsOn id.
    for idx in range(0, nodes.len()) {
        let ref task = tasks[idx];
        let ref node = nodes[idx];
        for id in task.depends_on.iter() {
            let dep_node = task_refs.get(id.as_slice());
            match dep_node {
                Some(v) => {
                    graph.add_edge(*node, *v, ());
                }
                _ => {
                    return Err(format!("Can't find task for dependency with id: {}", id));
                }
            }
        }
    }
    validate_graph(graph)
}
/// Flattens an XML attribute list into a local-name -> value map.
/// On duplicate names the last occurrence wins, as with repeated inserts.
fn map_attributes(attributes: &Vec<xml::attribute::OwnedAttribute>) -> HashMap<String, String> {
    attributes
        .iter()
        .map(|attr| (attr.name.local_name.clone(), attr.value.clone()))
        .collect()
}
/// Builds an XgTask from a <Task> element's attributes.
/// @Tool and @WorkingDir are required; @Name, @Caption and @DependsOn are optional.
fn xg_parse_task(attributes: &Vec<xml::attribute::OwnedAttribute>) -> Result<XgTask, String> {
    let mut attrs = map_attributes(attributes);
    // Tool
    let tool: String;
    match attrs.remove("Tool") {
        Some(v) => {tool = v;}
        _ => {return Err("Invalid task data: attribute @Tool not found.".to_string());}
    }
    // WorkingDir
    let working_dir: String;
    match attrs.remove("WorkingDir") {
        Some(v) => {working_dir = v;}
        _ => {return Err("Invalid task data: attribute @WorkingDir not found.".to_string());}
    }
    // DependsOn: ";"-separated list of task names.
    let mut depends_on: Vec<String> = vec![];
    match attrs.remove("DependsOn") {
        Some(v) =>
        {
            for item in v.split_str(";").collect::<Vec<&str>>().iter() {
                depends_on.push(item.to_string())
            }
        }
        _ =>
        {
        }
    };
    Ok(XgTask {
        id: attrs.remove("Name"),
        title: attrs.remove("Caption"),
        tool: tool,
        working_dir: working_dir,
        depends_on: depends_on,
    })
}
// Builds an XgTool from the attributes of a <Tool> element.
// @Name and @Path are mandatory; @OutputPrefix and @Params are optional.
fn xg_parse_tool (attributes: &Vec<xml::attribute::OwnedAttribute>)->Result<XgTool, String> {
    let mut attrs = map_attributes(attributes);
    // Name
    let id: String;
    match attrs.remove("Name") {
        Some(v) => {id = v;}
        _ => {return Err("Invalid task data: attribute @Name not found.".to_string());}
    }
    // Path
    let exec: String;
    match attrs.remove("Path") {
        Some(v) => {exec = v;}
        // Fixed: this branch previously reported "@Name not found" when the
        // missing attribute was actually @Path (copy-paste error).
        _ => {return Err("Invalid task data: attribute @Path not found.".to_string());}
    }
    Ok(XgTool {
        id: id,
        exec: exec,
        output: attrs.remove("OutputPrefix"),
        // A missing @Params simply means "no extra arguments".
        args: match attrs.remove("Params") {
            Some(v) => {v}
            _ => {"".to_string()}
        },
    })
}
/// Splits a command line into arguments using Windows-style rules:
/// spaces/tabs separate arguments, double quotes group text (including
/// whitespace), `\\` yields a literal backslash and `\"` a literal quote.
/// Other backslashes pass through unchanged.
fn cmd_parse(cmd: &str) -> Vec<String> {
    let mut result: Vec<String> = Vec::new();
    let mut token = String::new();
    // True when the previous character was an unconsumed backslash.
    let mut pending_backslash = false;
    // True while inside an open double quote.
    let mut in_quotes = false;
    // True once `token` represents a real (possibly empty) argument.
    let mut has_token = false;
    for ch in cmd.chars() {
        match ch {
            ' ' | '\t' => {
                if pending_backslash {
                    token.push('\\');
                    pending_backslash = false;
                }
                if in_quotes {
                    // Quoted whitespace belongs to the argument.
                    token.push(ch);
                    has_token = true;
                } else if has_token {
                    result.push(token);
                    token = String::new();
                    has_token = false;
                }
            }
            '\\' => {
                // A pair of backslashes collapses into one literal backslash.
                if pending_backslash {
                    token.push(ch);
                }
                has_token = true;
                pending_backslash = !pending_backslash;
            }
            '"' => {
                if pending_backslash {
                    // Escaped quote: literal character, quoting state unchanged.
                    token.push(ch);
                    pending_backslash = false;
                } else {
                    in_quotes = !in_quotes;
                }
                has_token = true;
            }
            other => {
                // A lone backslash before an ordinary char stays literal.
                if pending_backslash {
                    token.push('\\');
                    pending_backslash = false;
                }
                token.push(other);
                has_token = true;
            }
        }
    }
    if has_token {
        result.push(token);
    }
    result
}
// cmd_parse unit tests: quoting, surrounding whitespace, empty quoted
// arguments, and backslash escaping.
#[test]
fn test_cmd_parse_1() {
    assert_eq!(cmd_parse("\"abc\" d e"), ["abc", "d", "e"]);
}
#[test]
fn test_cmd_parse_2() {
    // Leading/trailing whitespace must not create empty arguments.
    assert_eq!(cmd_parse(" \"abc\" d e "), ["abc", "d", "e"]);
}
#[test]
fn test_cmd_parse_3() {
    // Empty quoted strings are preserved as empty arguments.
    assert_eq!(cmd_parse("\"\" \"abc\" d e \"\""), ["", "abc", "d", "e", ""]);
}
#[test]
fn test_cmd_parse_4() {
    // \\ collapses to \; a quote may open in the middle of an argument.
    assert_eq!(cmd_parse("a\\\\\\\\b d\"e f\"g h"), ["a\\\\b", "de fg", "h"]);
}
#[test]
fn test_cmd_parse_5() {
    // A backslash-escaped quote becomes a literal quote character.
    assert_eq!(cmd_parse("a\\\\\\\"b c d"), ["a\\\"b", "c", "d"]);
}
#[test]
fn test_cmd_parse_6() {
    assert_eq!(cmd_parse("a\\\\\\\\\"b c\" d e"), ["a\\\\b c", "d", "e"]);
}
#[test]
fn test_cmd_parse_7() {
    // Lone backslashes (Windows paths) pass through unchanged.
    assert_eq!(cmd_parse("C:\\Windows\\System32 d e"), ["C:\\Windows\\System32", "d", "e"]);
}
|
// Copyright 2015 Corey Farwell
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use std::env;
use std::io::{Write, Read};
use std::process::{Command, Stdio};
use tiny_http::{Header, Response};
extern crate crates_index;
extern crate tiny_http;
static INDEX_LOCAL_PATH: &'static str = "crates.io-index";
/// Renders the dependency graph of `crate_name` as PNG bytes by piping a
/// Graphviz "digraph" description through the external `dot` program.
///
/// Walks `dep_map` from `crate_name` (stack order), emitting one `a -> b;`
/// edge per dependency; '-' is replaced by '_' to keep node names valid in
/// the dot language.
///
/// Panics if `dot` cannot be spawned or if a visited crate is missing from
/// `dep_map` (assumes the index's dependency map is complete — TODO confirm).
fn build_dot(crate_name: &str, dep_map: &HashMap<String, Vec<String>>) -> Vec<u8> {
    let mut crate_names = vec![crate_name];
    let mut dot = String::new();
    dot.push_str("digraph graphname {");
    // Which dependencies we've already seen
    let mut seen_set = HashSet::new();
    while let Some(crate_name) = crate_names.pop() {
        if seen_set.contains(crate_name as &str) {
            continue;
        }
        seen_set.insert(crate_name);
        for crate_dep in dep_map.get(crate_name).unwrap() {
            dot.push_str(&format!("{} -> {};", crate_name.replace("-", "_"), crate_dep.replace("-", "_")));
            if !seen_set.contains(crate_dep as &str) {
                crate_names.push(crate_dep);
            }
        }
    }
    dot.push_str("}");
    let mut child = Command::new("dot").arg("-Tpng").stdin(Stdio::piped()).stdout(Stdio::piped()).spawn().unwrap();
    // Write the graph source, then drop stdin so `dot` sees EOF and finishes.
    child.stdin.take().unwrap().write_all(dot.as_bytes()).unwrap();
    let mut ret = vec![];
    child.stdout.take().unwrap().read_to_end(&mut ret).unwrap();
    // Fixed: reap the child. The previous version never wait()ed, leaking one
    // zombie process per served request.
    child.wait().unwrap();
    ret
}
/// Entry point: clones the crates.io index if it is not present locally,
/// then serves `GET /<crate>` as a PNG dependency graph.
fn main() {
    let index = crates_index::CratesIndex::new(INDEX_LOCAL_PATH.into());
    if !index.exists() {
        println!("Cloning crates.io-index");
        index.clone_index();
    }
    let dep_map = index.dependency_map();
    // Honor $PORT (e.g. PaaS deployments); default to 8000 otherwise.
    let port = match env::var("PORT") {
        Ok(p) => p.parse::<u16>().unwrap(),
        Err(..) => 8000,
    };
    let server = tiny_http::ServerBuilder::new().with_port(port).build().unwrap();
    println!("Server listening on port {}", port);
    for req in server.incoming_requests() {
        let response = {
            let crate_name = req.get_url().trim_left_matches("/");
            if dep_map.get(crate_name).is_some() {
                let data = build_dot(crate_name, &dep_map);
                let content_type_header = "Content-Type: image/png".parse::<Header>().unwrap();
                Response::from_data(data).with_header(content_type_header)
            } else {
                // Fixed: report the failure in the status line too; previously
                // the error text was delivered with a 200 OK.
                Response::from_string("could not find crate").with_status_code(400)
            }
        };
        req.respond(response);
    }
}
Add status code to error response
// Copyright 2015 Corey Farwell
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use std::env;
use std::io::{Write, Read};
use std::process::{Command, Stdio};
use tiny_http::{Header, Response};
extern crate crates_index;
extern crate tiny_http;
static INDEX_LOCAL_PATH: &'static str = "crates.io-index";
/// Renders the dependency graph of `crate_name` as PNG bytes by piping a
/// Graphviz "digraph" description through the external `dot` program.
///
/// Walks `dep_map` from `crate_name` (stack order), emitting one `a -> b;`
/// edge per dependency; '-' is replaced by '_' to keep node names valid in
/// the dot language.
///
/// Panics if `dot` cannot be spawned or if a visited crate is missing from
/// `dep_map` (assumes the index's dependency map is complete — TODO confirm).
fn build_dot(crate_name: &str, dep_map: &HashMap<String, Vec<String>>) -> Vec<u8> {
    let mut crate_names = vec![crate_name];
    let mut dot = String::new();
    dot.push_str("digraph graphname {");
    // Which dependencies we've already seen
    let mut seen_set = HashSet::new();
    while let Some(crate_name) = crate_names.pop() {
        if seen_set.contains(crate_name as &str) {
            continue;
        }
        seen_set.insert(crate_name);
        for crate_dep in dep_map.get(crate_name).unwrap() {
            dot.push_str(&format!("{} -> {};", crate_name.replace("-", "_"), crate_dep.replace("-", "_")));
            if !seen_set.contains(crate_dep as &str) {
                crate_names.push(crate_dep);
            }
        }
    }
    dot.push_str("}");
    let mut child = Command::new("dot").arg("-Tpng").stdin(Stdio::piped()).stdout(Stdio::piped()).spawn().unwrap();
    // Write the graph source, then drop stdin so `dot` sees EOF and finishes.
    child.stdin.take().unwrap().write_all(dot.as_bytes()).unwrap();
    let mut ret = vec![];
    child.stdout.take().unwrap().read_to_end(&mut ret).unwrap();
    // Fixed: reap the child. The previous version never wait()ed, leaking one
    // zombie process per served request.
    child.wait().unwrap();
    ret
}
/// Entry point: clones the crates.io index if it is not present locally,
/// then serves `GET /<crate>` as a PNG dependency graph; unknown crate
/// names get a 400 response.
fn main() {
    let index = crates_index::CratesIndex::new(INDEX_LOCAL_PATH.into());
    if !index.exists() {
        println!("Cloning crates.io-index");
        index.clone_index();
    }
    let dep_map = index.dependency_map();
    // Honor $PORT (e.g. PaaS deployments); default to 8000 otherwise.
    let port = match env::var("PORT") {
        Ok(p) => p.parse::<u16>().unwrap(),
        Err(..) => 8000,
    };
    let server = tiny_http::ServerBuilder::new().with_port(port).build().unwrap();
    println!("Server listening on port {}", port);
    for req in server.incoming_requests() {
        let response = {
            // The request path (minus leading '/') is the crate name.
            let crate_name = req.get_url().trim_left_matches("/");
            if dep_map.get(crate_name).is_some() {
                let data = build_dot(crate_name, &dep_map);
                let content_type_header = "Content-Type: image/png".parse::<Header>().unwrap();
                Response::from_data(data).with_header(content_type_header)
            } else {
                Response::from_string("could not find crate").with_status_code(400)
            }
        };
        req.respond(response);
    }
}
|
#![feature(std_misc)]
#[macro_use]
#[macro_use]
extern crate clap;
extern crate yaml_rust;
extern crate time;
mod models;
mod parser;
mod mayberef;
use clap::{Arg, App};
use std::fs::{File, read_dir};
use std::io::{BufReader, BufRead, Read, stdin};
use std::path::{Path};
use std::collections::HashMap;
use models::*;
/// Reads one line from standard input, trailing newline included.
fn get_line() -> String {
    let mut line = String::new();
    stdin().read_line(&mut line).unwrap();
    line
}
fn find_most_common_words(corpus: &Path) -> Vec<String> {
let words = files(corpus).flat_map(|file| read_words(file));
let mut word_counts = HashMap::new();
for word in words {
*word_counts.entry(word).or_insert(0) += 1;
}
let mut counts: Vec<_> = word_counts.into_iter().collect();
counts.sort_by(|a, b| b.1.cmp(&a.1));
// let mut out = File::create(&Path::new(outfile)).unwrap();
// for &(ref word, count) in &counts {
// out.write_all(format!("{}, {}\n", &word, count).as_bytes()).unwrap();
// }
counts.into_iter().map(|x| x.0).collect()
}
/// Reads the first `num` lines of a "word, count" file and returns just the
/// words (the text before the first comma of each line).
fn load_most_common_words(filename: &str, num: usize) -> Vec<String> {
    let reader = BufReader::new(File::open(&Path::new(filename)).unwrap());
    reader.lines()
        .map(|line| line.unwrap().split(',').next().unwrap().to_string())
        .take(num)
        .collect()
}
/// Splits a byte stream on '.' and yields each sentence lower-cased.
/// Chunks that are not valid UTF-8 are silently skipped.
fn sentences<T: Read + 'static>(reader: BufReader<T>) -> Box<Iterator<Item = String>> {
    let pieces = reader.split(b'.').filter_map(|chunk| {
        String::from_utf8(chunk.unwrap()).ok().map(|text| text.to_lowercase())
    });
    Box::new(pieces)
}
fn read_words<R: BufRead + 'static>(reader: R) -> Box<Iterator<Item = String>> {
Box::new(reader.lines()
.filter_map(|line| line.ok())
.flat_map(|line| {
line.split(|c| match c {
'a'...'z' | 'A'...'Z' => false,
_ => true,
})
.filter(|word| !word.is_empty())
.map(|word| word.to_lowercase())
.collect::<Vec<_>>()
.into_iter()
}))
}
/// Opens every file directly under `path` whose name ends in ".txt",
/// wrapped for buffered reading.
fn files(path: &Path) -> Box<Iterator<Item = BufReader<File>>> {
    let readers = read_dir(path)
        .unwrap()
        .map(|entry| entry.unwrap().path())
        .filter(|p| p.to_str().unwrap().ends_with(".txt"))
        .map(|p| BufReader::new(File::open(&p).unwrap()));
    Box::new(readers)
}
// Streams every sentence of the corpus into a LanguageModelBuilder,
// logging progress every 10 million words. The `10` passed to the builder
// is presumably the co-occurrence window size — confirm in models.rs.
fn create_model(corpus: &Path, words: Vec<String>) -> LanguageModelBuilder {
    let start_time = time::get_time();
    let mut builder = LanguageModelBuilder::new(10, words);
    let mut num_words = 0;
    for sentence in files(corpus).flat_map(sentences) {
        let mut acc = builder.new_sentence();
        // Words are maximal runs of lowercase letters; `sentences` has
        // already lower-cased the text.
        for word in sentence.split(|c| match c {
            'a'...'z' => false,
            _ => true,
        }).filter(|w| !w.is_empty())
        {
            num_words += 1;
            acc.add_word(word);
            if num_words % 10_000_000 == 0 {
                println!("Loaded {} million words in {} seconds",
                         num_words / 1_000_000,
                         time::get_time().sec - start_time.sec);
            }
        }
    }
    let end_time = time::get_time();
    println!("Model built in {}s", end_time.sec - start_time.sec);
    builder
}
/// CLI entry point: either loads a saved language model (-l) or builds one
/// from a corpus directory (-c), optionally saving it (-s), then drops
/// into an interactive query loop (":q" quits).
fn main() {
    let matches = App::new("gauntlet")
        .version("0.0.1")
        .author("James Moughan <jamougha@gmail.com>")
        .about("Implementation of GloVe algorithm")
        .arg(Arg::with_name("CORPUS")
             .short("c")
             .long("corpus")
             .help("Sets a directory to search for the corpus")
             .takes_value(true))
        .arg(Arg::with_name("LOAD")
             .short("l")
             .long("load")
             .help("Loads a pre-saved language model")
             .takes_value(true))
        .arg(Arg::with_name("SAVE")
             .short("s")
             .long("save")
             .help("Generates a model from the corpus specified and saves it")
             .takes_value(true))
        .get_matches();
    let (load, save, corpus) = (matches.value_of("LOAD"), matches.value_of("SAVE"), matches.value_of("CORPUS"));
    // Either load-only, or corpus with an optional save target; any other
    // combination is rejected.
    let builder = match (load, save, corpus) {
        (Some(l), None, None) => LanguageModelBuilder::load(Path::new(&l)).expect("Couldn't load model"),
        (Some(_), _, _) => { println!("You must specify either a model to load or a corpus directory location"); return; }
        (_, save, Some(corpus)) => {
            let corpus = Path::new(corpus);
            // Word ranking must be computed before the model itself.
            let words = find_most_common_words(corpus);
            let builder = create_model(&corpus, words);
            if let Some(save) = save {
                // Saving is best-effort; a failure is reported, not fatal.
                if let Err(e) = builder.save(Path::new(save)) {
                    println!("Couldn't save model: {}", e);
                }
            }
            builder
        }
        _ => panic!("what you want?")
    };
    let start_time = time::get_time();
    let model = builder.build();
    println!("Model built in {}s", time::get_time().sec - start_time.sec);
    // Interactive REPL: parse a word-vector expression and print the 20
    // nearest words.
    loop {
        println!("");
        let input = get_line();
        if input.starts_with(":q") {
            break;
        }
        match parser::parse(input.trim_matches(|c: char| c.is_whitespace()), &model) {
            Ok(word_vec) => {
                let nearest = model.nearest_words(&word_vec);
                println!(" = {:?}", word_vec);
                println!("-------------");
                for word in nearest.iter().take(20) {
                    println!("{:?}, {}", word, word_vec.distance(word));
                }
            }
            Err(e) => println!("{:?}", e)
        }
    }
}
Remove unused pragma
#[macro_use]
#[macro_use]
extern crate clap;
extern crate yaml_rust;
extern crate time;
mod models;
mod parser;
mod mayberef;
use clap::{Arg, App};
use std::fs::{File, read_dir};
use std::io::{BufReader, BufRead, Read, stdin};
use std::path::{Path};
use std::collections::HashMap;
use models::*;
/// Reads one line from standard input, trailing newline included.
fn get_line() -> String {
    let mut line = String::new();
    stdin().read_line(&mut line).unwrap();
    line
}
fn find_most_common_words(corpus: &Path) -> Vec<String> {
let words = files(corpus).flat_map(|file| read_words(file));
let mut word_counts = HashMap::new();
for word in words {
*word_counts.entry(word).or_insert(0) += 1;
}
let mut counts: Vec<_> = word_counts.into_iter().collect();
counts.sort_by(|a, b| b.1.cmp(&a.1));
// let mut out = File::create(&Path::new(outfile)).unwrap();
// for &(ref word, count) in &counts {
// out.write_all(format!("{}, {}\n", &word, count).as_bytes()).unwrap();
// }
counts.into_iter().map(|x| x.0).collect()
}
/// Reads the first `num` lines of a "word, count" file and returns just the
/// words (the text before the first comma of each line).
fn load_most_common_words(filename: &str, num: usize) -> Vec<String> {
    let reader = BufReader::new(File::open(&Path::new(filename)).unwrap());
    reader.lines()
        .map(|line| line.unwrap().split(',').next().unwrap().to_string())
        .take(num)
        .collect()
}
/// Splits a byte stream on '.' and yields each sentence lower-cased.
/// Chunks that are not valid UTF-8 are silently skipped.
fn sentences<T: Read + 'static>(reader: BufReader<T>) -> Box<Iterator<Item = String>> {
    let pieces = reader.split(b'.').filter_map(|chunk| {
        String::from_utf8(chunk.unwrap()).ok().map(|text| text.to_lowercase())
    });
    Box::new(pieces)
}
fn read_words<R: BufRead + 'static>(reader: R) -> Box<Iterator<Item = String>> {
Box::new(reader.lines()
.filter_map(|line| line.ok())
.flat_map(|line| {
line.split(|c| match c {
'a'...'z' | 'A'...'Z' => false,
_ => true,
})
.filter(|word| !word.is_empty())
.map(|word| word.to_lowercase())
.collect::<Vec<_>>()
.into_iter()
}))
}
/// Opens every file directly under `path` whose name ends in ".txt",
/// wrapped for buffered reading.
fn files(path: &Path) -> Box<Iterator<Item = BufReader<File>>> {
    let readers = read_dir(path)
        .unwrap()
        .map(|entry| entry.unwrap().path())
        .filter(|p| p.to_str().unwrap().ends_with(".txt"))
        .map(|p| BufReader::new(File::open(&p).unwrap()));
    Box::new(readers)
}
// Streams every sentence of the corpus into a LanguageModelBuilder,
// logging progress every 10 million words. The `10` passed to the builder
// is presumably the co-occurrence window size — confirm in models.rs.
fn create_model(corpus: &Path, words: Vec<String>) -> LanguageModelBuilder {
    let start_time = time::get_time();
    let mut builder = LanguageModelBuilder::new(10, words);
    let mut num_words = 0;
    for sentence in files(corpus).flat_map(sentences) {
        let mut acc = builder.new_sentence();
        // Words are maximal runs of lowercase letters; `sentences` has
        // already lower-cased the text.
        for word in sentence.split(|c| match c {
            'a'...'z' => false,
            _ => true,
        }).filter(|w| !w.is_empty())
        {
            num_words += 1;
            acc.add_word(word);
            if num_words % 10_000_000 == 0 {
                println!("Loaded {} million words in {} seconds",
                         num_words / 1_000_000,
                         time::get_time().sec - start_time.sec);
            }
        }
    }
    let end_time = time::get_time();
    println!("Model built in {}s", end_time.sec - start_time.sec);
    builder
}
/// CLI entry point: either loads a saved language model (-l) or builds one
/// from a corpus directory (-c), optionally saving it (-s), then drops
/// into an interactive query loop (":q" quits).
fn main() {
    let matches = App::new("gauntlet")
        .version("0.0.1")
        .author("James Moughan <jamougha@gmail.com>")
        .about("Implementation of GloVe algorithm")
        .arg(Arg::with_name("CORPUS")
             .short("c")
             .long("corpus")
             .help("Sets a directory to search for the corpus")
             .takes_value(true))
        .arg(Arg::with_name("LOAD")
             .short("l")
             .long("load")
             .help("Loads a pre-saved language model")
             .takes_value(true))
        .arg(Arg::with_name("SAVE")
             .short("s")
             .long("save")
             .help("Generates a model from the corpus specified and saves it")
             .takes_value(true))
        .get_matches();
    let (load, save, corpus) = (matches.value_of("LOAD"), matches.value_of("SAVE"), matches.value_of("CORPUS"));
    // Either load-only, or corpus with an optional save target; any other
    // combination is rejected.
    let builder = match (load, save, corpus) {
        (Some(l), None, None) => LanguageModelBuilder::load(Path::new(&l)).expect("Couldn't load model"),
        (Some(_), _, _) => { println!("You must specify either a model to load or a corpus directory location"); return; }
        (_, save, Some(corpus)) => {
            let corpus = Path::new(corpus);
            // Word ranking must be computed before the model itself.
            let words = find_most_common_words(corpus);
            let builder = create_model(&corpus, words);
            if let Some(save) = save {
                // Saving is best-effort; a failure is reported, not fatal.
                if let Err(e) = builder.save(Path::new(save)) {
                    println!("Couldn't save model: {}", e);
                }
            }
            builder
        }
        _ => panic!("what you want?")
    };
    let start_time = time::get_time();
    let model = builder.build();
    println!("Model built in {}s", time::get_time().sec - start_time.sec);
    // Interactive REPL: parse a word-vector expression and print the 20
    // nearest words.
    loop {
        println!("");
        let input = get_line();
        if input.starts_with(":q") {
            break;
        }
        match parser::parse(input.trim_matches(|c: char| c.is_whitespace()), &model) {
            Ok(word_vec) => {
                let nearest = model.nearest_words(&word_vec);
                println!(" = {:?}", word_vec);
                println!("-------------");
                for word in nearest.iter().take(20) {
                    println!("{:?}, {}", word, word_vec.distance(word));
                }
            }
            Err(e) => println!("{:?}", e)
        }
    }
}
|
extern crate hkg;
extern crate rustbox;
use std::default::Default;
use rustbox::{Color, RustBox};
use rustbox::Key;
// Demo entry point: draws a header, then annotates two hard-coded strings
// with their code points to experiment with CJK display widths; waits for
// 'q' to quit.
fn main() {
    let rustbox = match RustBox::init(Default::default()) {
        Result::Ok(v) => v,
        Result::Err(e) => panic!("{}", e),
    };
    let w = rustbox.width();
    let h = rustbox.height();
    let title = String::from("高登");
    print_header(&rustbox, w, h, title);
    // print_body(&rustbox, w, h, 2, 2);
    // rustbox.print(1, 23, rustbox::RB_BOLD, Color::White, Color::Black, "Press 'q' to quit.");
    let mut offset_y = 2;
    let s1 = String::from("紅魔英超睇敢帥 十分之高招");
    // Dump each char of s1 with its code point, one per row from row 2.
    for (b, c) in s1.chars().enumerate() {
        rustbox.print(1,
                      b + 2,
                      rustbox::RB_BOLD,
                      Color::White,
                      Color::Black,
                      &format!("{} 0x{:X}", c, c as u32));
    }
    offset_y = 16;
    let s2 = String::from("<<100%成功率>>如何成為成功?香港Youtuber");
    // Width estimate: CJK chars count as 2 columns, everything else as 1.
    let mut s2count = 0;
    for (d, c) in s2.chars().enumerate() {
        if contains(c) {
            rustbox.print(1,
                          d + offset_y,
                          rustbox::RB_BOLD,
                          Color::White,
                          Color::Black,
                          &format!("[{:<2}] {:>2} 0x{:X} {}", d + offset_y, c, c as u32, &"YES"));
            s2count = s2count + 2;
        } else {
            rustbox.print(1,
                          d + offset_y,
                          rustbox::RB_BOLD,
                          Color::White,
                          Color::Black,
                          &format!("[{:<2}] {:>2} 0x{:X} {}", d + offset_y, c, c as u32, &"NO"));
            s2count = s2count + 1;
        }
    }
    // Same width estimate computed functionally; printed beside s2count so
    // the two can be compared on screen.
    let sum = s2.chars().map(|x| if contains(x) { 2 } else { 1 } ).collect::<Vec<u32>>().iter().fold(0, |acc, &x| acc + x);
    rustbox.print(1,
                  45,
                  rustbox::RB_BOLD,
                  Color::White,
                  Color::Black,
                  &format!("{} {}", sum, s2count));
    // Block until 'q' is pressed, redrawing each iteration.
    loop {
        rustbox.present();
        match rustbox.poll_event(false) {
            Ok(rustbox::Event::KeyEvent(key)) => {
                match key {
                    Key::Char('q') => {
                        break;
                    }
                    _ => {}
                }
            }
            Err(e) => panic!("{}", e),
            _ => {}
        }
    }
}
/// Returns true when `c` falls inside one of the CJK code-point ranges
/// listed below (half-open: start inclusive, end exclusive).
fn contains(c: char) -> bool {
    let code = c as u32;
    let cjk_ranges = [(0x4E00, 0xA000),
                      (0x3400, 0x4DC0),
                      (0x20000, 0x2A6E0),
                      (0x2A700, 0x2B740),
                      (0x2B740, 0x2B820),
                      (0xF900, 0xFB00),
                      (0x2F800, 0x2FA20),
                      (0x9FA6, 0x9FCC)];
    cjk_ranges.iter().any(|&(start, end)| start <= code && code < end)
}
/// Draws the centered title on row 0 and a horizontal rule on row 1.
/// `height` is unused but kept for signature compatibility with callers.
fn print_header(rustbox: &rustbox::RustBox, width: usize, height: usize, text: String) {
    // Fixed: center by character count instead of UTF-8 byte length (each
    // CJK char is 3 bytes), and use saturating_sub to avoid a usize
    // underflow panic when the text is wider than the terminal.
    // NOTE(review): char count still undercounts double-width CJK cells.
    let padding = width.saturating_sub(text.chars().count()) / 2;
    let header_bottom = "─".repeat(width);
    rustbox.print(padding,
                  0,
                  rustbox::RB_BOLD,
                  Color::White,
                  Color::Black,
                  &text);
    rustbox.print(0,
                  1,
                  rustbox::RB_BOLD,
                  Color::Yellow,
                  Color::Black,
                  &header_bottom);
}
// Renders `rows` sample topic lines from hard-coded titles/authors.
// NOTE(review): indexes two 2-element vecs, so rows > 2 panics; `width`
// and `height` are unused, and `offset_x` is added to the row (second,
// i.e. y) coordinate despite its name.
fn print_body(rustbox: &rustbox::RustBox,
              width: usize,
              height: usize,
              offset_x: usize,
              rows: usize) {
    let titles = vec!["紅魔英超睇敢帥 十分之高招",
                      "發覺好多後生仔女搭火車地鐵 有位都唔坐"];
    let authors = vec!["電超", "程詠樂"];
    for i in (0..rows) {
        rustbox.print(0,
                      i + offset_x,
                      rustbox::RB_BOLD,
                      Color::White,
                      Color::Black,
                      &format!("{no:>2}|{title:<50}|{author}",
                               no = i + 1,
                               title = &titles[i],
                               author = &authors[i]));
    }
}
extract print cjk count function
extern crate hkg;
extern crate rustbox;
use std::default::Default;
use rustbox::{Color, RustBox};
use rustbox::Key;
// Demo entry point: draws the header and the CJK width experiment, then
// waits for 'q' to quit.
fn main() {
    let rustbox = match RustBox::init(Default::default()) {
        Result::Ok(v) => v,
        Result::Err(e) => panic!("{}", e),
    };
    let w = rustbox.width();
    let h = rustbox.height();
    let title = String::from("高登");
    print_header(&rustbox, w, h, title);
    // print_body(&rustbox, w, h, 2, 2);
    // rustbox.print(1, 23, rustbox::RB_BOLD, Color::White, Color::Black, "Press 'q' to quit.");
    print_cjk_count(&rustbox);
    // Block until 'q' is pressed, redrawing each iteration.
    loop {
        rustbox.present();
        match rustbox.poll_event(false) {
            Ok(rustbox::Event::KeyEvent(key)) => {
                match key {
                    Key::Char('q') => {
                        break;
                    }
                    _ => {}
                }
            }
            Err(e) => panic!("{}", e),
            _ => {}
        }
    }
}
/// Returns true when `c` falls inside one of the CJK code-point ranges
/// listed below (half-open: start inclusive, end exclusive).
fn contains(c: char) -> bool {
    let code = c as u32;
    let cjk_ranges = [(0x4E00, 0xA000),
                      (0x3400, 0x4DC0),
                      (0x20000, 0x2A6E0),
                      (0x2A700, 0x2B740),
                      (0x2B740, 0x2B820),
                      (0xF900, 0xFB00),
                      (0x2F800, 0x2FA20),
                      (0x9FA6, 0x9FCC)];
    cjk_ranges.iter().any(|&(start, end)| start <= code && code < end)
}
/// Draws the centered title on row 0 and a horizontal rule on row 1.
/// `height` is unused but kept for signature compatibility with callers.
fn print_header(rustbox: &rustbox::RustBox, width: usize, height: usize, text: String) {
    // Fixed: center by character count instead of UTF-8 byte length (each
    // CJK char is 3 bytes), and use saturating_sub to avoid a usize
    // underflow panic when the text is wider than the terminal.
    // NOTE(review): char count still undercounts double-width CJK cells.
    let padding = width.saturating_sub(text.chars().count()) / 2;
    let header_bottom = "─".repeat(width);
    rustbox.print(padding,
                  0,
                  rustbox::RB_BOLD,
                  Color::White,
                  Color::Black,
                  &text);
    rustbox.print(0,
                  1,
                  rustbox::RB_BOLD,
                  Color::Yellow,
                  Color::Black,
                  &header_bottom);
}
// Renders `rows` sample topic lines from hard-coded titles/authors.
// NOTE(review): indexes two 2-element vecs, so rows > 2 panics; `width`
// and `height` are unused, and `offset_x` is added to the row (second,
// i.e. y) coordinate despite its name.
fn print_body(rustbox: &rustbox::RustBox,
              width: usize,
              height: usize,
              offset_x: usize,
              rows: usize) {
    let titles = vec!["紅魔英超睇敢帥 十分之高招",
                      "發覺好多後生仔女搭火車地鐵 有位都唔坐"];
    let authors = vec!["電超", "程詠樂"];
    for i in (0..rows) {
        rustbox.print(0,
                      i + offset_x,
                      rustbox::RB_BOLD,
                      Color::White,
                      Color::Black,
                      &format!("{no:>2}|{title:<50}|{author}",
                               no = i + 1,
                               title = &titles[i],
                               author = &authors[i]));
    }
}
// CJK width experiment: dumps the chars of two sample strings with their
// code points, and compares two ways of estimating display width (CJK
// chars counted as 2 columns, others as 1).
fn print_cjk_count(rustbox: &rustbox::RustBox){
    let mut offset_y = 2;
    let s1 = String::from("紅魔英超睇敢帥 十分之高招");
    // Dump each char of s1 with its code point, one per row from row 2.
    for (b, c) in s1.chars().enumerate() {
        rustbox.print(1,
                      b + 2,
                      rustbox::RB_BOLD,
                      Color::White,
                      Color::Black,
                      &format!("{} 0x{:X}", c, c as u32));
    }
    offset_y = 16;
    let s2 = String::from("<<100%成功率>>如何成為成功?香港Youtuber");
    // Imperative width tally for s2.
    let mut s2count = 0;
    for (d, c) in s2.chars().enumerate() {
        if contains(c) {
            rustbox.print(1,
                          d + offset_y,
                          rustbox::RB_BOLD,
                          Color::White,
                          Color::Black,
                          &format!("[{:<2}] {:>2} 0x{:X} {}", d + offset_y, c, c as u32, &"YES"));
            s2count = s2count + 2;
        } else {
            rustbox.print(1,
                          d + offset_y,
                          rustbox::RB_BOLD,
                          Color::White,
                          Color::Black,
                          &format!("[{:<2}] {:>2} 0x{:X} {}", d + offset_y, c, c as u32, &"NO"));
            s2count = s2count + 1;
        }
    }
    // Same tally computed functionally; both values are printed so they can
    // be compared on screen.
    let sum = s2.chars().map(|x| if contains(x) { 2 } else { 1 } ).collect::<Vec<u32>>().iter().fold(0, |acc, &x| acc + x);
    rustbox.print(1,
                  45,
                  rustbox::RB_BOLD,
                  Color::White,
                  Color::Black,
                  &format!("{} {}", sum, s2count));
}
|
extern crate mio;
extern crate http_muncher;
extern crate sha1;
extern crate rustc_serialize;
use std::collections::HashMap;
use std::cell::RefCell;
use std::rc::Rc;
use std::fmt;
use mio::*;
use mio::tcp::*;
use http_muncher::{Parser, ParserHandler};
use rustc_serialize::base64::{ToBase64, STANDARD};
/// Computes the Sec-WebSocket-Accept handshake value: SHA-1 of the client
/// key concatenated with the WebSocket GUID, base64-encoded.
fn gen_key(key: &String) -> String {
    let mut hasher = sha1::Sha1::new();
    let mut digest = [0u8; 20];
    hasher.update(key.as_bytes());
    hasher.update("258EAFA5-E914-47DA-95CA-C5AB0DC85B11".as_bytes());
    hasher.output(&mut digest);
    digest.to_base64(STANDARD)
}
// Callback handler for the incremental HTTP parser: collects request
// headers into a map shared (via Rc<RefCell>) with the owning client.
struct HttpParser {
    // Header name currently being parsed; set by on_header_field and
    // consumed by the following on_header_value call.
    current_key: Option<String>,
    headers: Rc<RefCell<HashMap<String, String>>>
}
impl ParserHandler for HttpParser {
    fn on_header_field(&mut self, s: &[u8]) -> bool {
        self.current_key = Some(std::str::from_utf8(s).unwrap().to_string());
        true
    }
    fn on_header_value(&mut self, s: &[u8]) -> bool {
        self.headers.borrow_mut()
            .insert(self.current_key.clone().unwrap(),
                    std::str::from_utf8(s).unwrap().to_string());
        true
    }
    // Returning false stops parsing after the headers; the handshake
    // request body is not needed.
    fn on_headers_complete(&mut self) -> bool {
        false
    }
}
// Handshake state machine for one client connection.
#[derive(PartialEq)]
enum ClientState {
    AwaitingHandshake,   // waiting to read the HTTP upgrade request
    HandshakeResponse,   // ready to write the 101 response
    Connected            // WebSocket established
}
struct WebSocketClient {
    socket: TcpStream,
    // Shared with the embedded HttpParser, which fills it during read().
    headers: Rc<RefCell<HashMap<String, String>>>,
    http_parser: Parser<HttpParser>,
    // Event set (readable/writable) to (re)register with the event loop.
    interest: EventSet,
    state: ClientState
}
impl WebSocketClient {
    /// Wraps an accepted TCP stream; starts in AwaitingHandshake with read
    /// interest so the HTTP upgrade request can be parsed.
    fn new(socket: TcpStream) -> WebSocketClient {
        let headers = Rc::new(RefCell::new(HashMap::new()));
        WebSocketClient {
            socket: socket,
            headers: headers.clone(),
            // The parser holds the second Rc handle and fills the map as
            // the request is parsed.
            http_parser: Parser::request(HttpParser {
                current_key: None,
                headers: headers.clone()
            }),
            interest: EventSet::readable(),
            state: ClientState::AwaitingHandshake
        }
    }
    /// Sends the "101 Switching Protocols" handshake response, then moves
    /// to Connected and switches interest back to readable.
    fn write(&mut self) {
        let headers = self.headers.borrow();
        let response_key = gen_key(&headers.get("Sec-WebSocket-Key").unwrap());
        let response = fmt::format(format_args!("HTTP/1.1 101 Switching Protocols\r\n\
                                                 Connection: Upgrade\r\n\
                                                 Sec-WebSocket-Accept: {}\r\n\
                                                 Upgrade: websocket\r\n\r\n", response_key));
        self.socket.try_write(response.as_bytes()).unwrap();
        // Change the state
        self.state = ClientState::Connected;
        self.interest.remove(EventSet::writable());
        self.interest.insert(EventSet::readable());
    }
    /// Reads until the socket would block, feeding bytes to the HTTP
    /// parser; once an upgrade request is recognized, switches to write
    /// interest so the handshake response goes out on the next writable.
    fn read(&mut self) {
        loop {
            let mut buf = [0; 2048];
            match self.socket.try_read(&mut buf) {
                Err(e) => {
                    println!("Error while reading socket: {:?}", e);
                    return
                },
                Ok(None) =>
                    // Socket buffer has got no more bytes.
                    break,
                Ok(Some(len)) => {
                    // NOTE(review): feeds the entire 2048-byte buffer to
                    // the parser rather than &buf[..len] — verify upstream.
                    self.http_parser.parse(&buf);
                    if self.http_parser.is_upgrade() {
                        // Change the current state
                        self.state = ClientState::HandshakeResponse;
                        // Change current interest to `Writable`
                        self.interest.remove(EventSet::readable());
                        self.interest.insert(EventSet::writable());
                        break;
                    }
                }
            }
        }
    }
}
// Event-loop handler state: the listening socket plus one client per token.
struct WebSocketServer {
    socket: TcpListener,
    clients: HashMap<Token, WebSocketClient>,
    // Next token to hand out; client tokens start at 1 (0 is the server).
    token_counter: usize
}
// Reserved token identifying the listening socket in ready().
const SERVER_TOKEN: Token = Token(0);
impl Handler for WebSocketServer {
    type Timeout = usize;
    type Message = ();
    /// Event-loop callback: accepts new connections on SERVER_TOKEN and
    /// drives per-client read/write state. Oneshot registration means each
    /// client must be re-registered after every event.
    fn ready(&mut self, event_loop: &mut EventLoop<WebSocketServer>, token: Token, events: EventSet) {
        if events.is_readable() {
            match token {
                SERVER_TOKEN => {
                    let client_socket = match self.socket.accept() {
                        Ok(Some(sock)) => sock,
                        Ok(None) => unreachable!(),
                        Err(e) => {
                            println!("Accept error: {}", e);
                            return;
                        }
                    };
                    let new_token = Token(self.token_counter);
                    self.clients.insert(new_token, WebSocketClient::new(client_socket));
                    self.token_counter += 1;
                    event_loop.register_opt(&self.clients[&new_token].socket, new_token, EventSet::readable(),
                                            PollOpt::edge() | PollOpt::oneshot()).unwrap();
                },
                token => {
                    let mut client = self.clients.get_mut(&token).unwrap();
                    client.read();
                    event_loop.reregister(&client.socket, token, client.interest,
                                          PollOpt::edge() | PollOpt::oneshot()).unwrap();
                }
            }
        }
        if events.is_writable() {
            // Simplified: the previous `match token { token => ... }` here
            // had a single catch-all arm, so the match was redundant.
            let mut client = self.clients.get_mut(&token).unwrap();
            client.write();
            event_loop.reregister(&client.socket, token, client.interest,
                                  PollOpt::edge() | PollOpt::oneshot()).unwrap();
        }
    }
}
use std::str::FromStr;
/// Entry point: binds 0.0.0.0:10000, registers the listener under
/// SERVER_TOKEN (client tokens start at 1), and runs the event loop.
fn main() {
    let server_socket = TcpSocket::v4().unwrap();
    let address = FromStr::from_str("0.0.0.0:10000").unwrap();
    server_socket.bind(&address).unwrap();
    // 256 is the listen backlog.
    let server_socket = server_socket.listen(256).unwrap();
    let mut event_loop = EventLoop::new().unwrap();
    let mut server = WebSocketServer {
        token_counter: 1,
        clients: HashMap::new(),
        socket: server_socket
    };
    event_loop.register_opt(&server.socket,
                            SERVER_TOKEN,
                            EventSet::readable(),
                            PollOpt::edge()).unwrap();
    event_loop.run(&mut server).unwrap();
}
Simplified code
extern crate mio;
extern crate http_muncher;
extern crate sha1;
extern crate rustc_serialize;
use std::collections::HashMap;
use std::cell::RefCell;
use std::rc::Rc;
use std::fmt;
use mio::*;
use mio::tcp::*;
use http_muncher::{Parser, ParserHandler};
use rustc_serialize::base64::{ToBase64, STANDARD};
/// Computes the Sec-WebSocket-Accept handshake value: SHA-1 of the client
/// key concatenated with the WebSocket GUID, base64-encoded.
fn gen_key(key: &String) -> String {
    let mut hasher = sha1::Sha1::new();
    let mut digest = [0u8; 20];
    hasher.update(key.as_bytes());
    hasher.update("258EAFA5-E914-47DA-95CA-C5AB0DC85B11".as_bytes());
    hasher.output(&mut digest);
    digest.to_base64(STANDARD)
}
// Callback handler for the incremental HTTP parser: collects request
// headers into a map shared (via Rc<RefCell>) with the owning client.
struct HttpParser {
    // Header name currently being parsed; set by on_header_field and
    // consumed by the following on_header_value call.
    current_key: Option<String>,
    headers: Rc<RefCell<HashMap<String, String>>>
}
impl ParserHandler for HttpParser {
    fn on_header_field(&mut self, s: &[u8]) -> bool {
        self.current_key = Some(std::str::from_utf8(s).unwrap().to_string());
        true
    }
    fn on_header_value(&mut self, s: &[u8]) -> bool {
        self.headers.borrow_mut()
            .insert(self.current_key.clone().unwrap(),
                    std::str::from_utf8(s).unwrap().to_string());
        true
    }
    // Returning false stops parsing after the headers; the handshake
    // request body is not needed.
    fn on_headers_complete(&mut self) -> bool {
        false
    }
}
// Handshake state machine for one client connection.
#[derive(PartialEq)]
enum ClientState {
    AwaitingHandshake,   // waiting to read the HTTP upgrade request
    HandshakeResponse,   // ready to write the 101 response
    Connected            // WebSocket established
}
struct WebSocketClient {
    socket: TcpStream,
    // Shared with the embedded HttpParser, which fills it during read().
    headers: Rc<RefCell<HashMap<String, String>>>,
    http_parser: Parser<HttpParser>,
    // Event set (readable/writable) to (re)register with the event loop.
    interest: EventSet,
    state: ClientState
}
impl WebSocketClient {
    /// Wraps an accepted TCP stream; starts in AwaitingHandshake with read
    /// interest so the HTTP upgrade request can be parsed.
    fn new(socket: TcpStream) -> WebSocketClient {
        let headers = Rc::new(RefCell::new(HashMap::new()));
        WebSocketClient {
            socket: socket,
            headers: headers.clone(),
            // The parser holds the second Rc handle and fills the map as
            // the request is parsed.
            http_parser: Parser::request(HttpParser {
                current_key: None,
                headers: headers.clone()
            }),
            interest: EventSet::readable(),
            state: ClientState::AwaitingHandshake
        }
    }
    /// Sends the "101 Switching Protocols" handshake response, then moves
    /// to Connected and switches interest back to readable.
    fn write(&mut self) {
        let headers = self.headers.borrow();
        let response_key = gen_key(&headers.get("Sec-WebSocket-Key").unwrap());
        let response = fmt::format(format_args!("HTTP/1.1 101 Switching Protocols\r\n\
                                                 Connection: Upgrade\r\n\
                                                 Sec-WebSocket-Accept: {}\r\n\
                                                 Upgrade: websocket\r\n\r\n", response_key));
        self.socket.try_write(response.as_bytes()).unwrap();
        // Change the state
        self.state = ClientState::Connected;
        self.interest.remove(EventSet::writable());
        self.interest.insert(EventSet::readable());
    }
    /// Reads until the socket would block, feeding bytes to the HTTP
    /// parser; once an upgrade request is recognized, switches to write
    /// interest so the handshake response goes out on the next writable.
    fn read(&mut self) {
        loop {
            let mut buf = [0; 2048];
            match self.socket.try_read(&mut buf) {
                Err(e) => {
                    println!("Error while reading socket: {:?}", e);
                    return
                },
                Ok(None) =>
                    // Socket buffer has got no more bytes.
                    break,
                Ok(Some(len)) => {
                    // NOTE(review): feeds the entire 2048-byte buffer to
                    // the parser rather than &buf[..len] — verify upstream.
                    self.http_parser.parse(&buf);
                    if self.http_parser.is_upgrade() {
                        // Change the current state
                        self.state = ClientState::HandshakeResponse;
                        // Change current interest to `Writable`
                        self.interest.remove(EventSet::readable());
                        self.interest.insert(EventSet::writable());
                        break;
                    }
                }
            }
        }
    }
}
// Event-loop handler state: the listening socket plus one client per token.
struct WebSocketServer {
    socket: TcpListener,
    clients: HashMap<Token, WebSocketClient>,
    // Next token to hand out; client tokens start at 1 (0 is the server).
    token_counter: usize
}
// Reserved token identifying the listening socket in ready().
const SERVER_TOKEN: Token = Token(0);
impl Handler for WebSocketServer {
    type Timeout = usize;
    type Message = ();
    /// Event-loop callback: accepts new connections on SERVER_TOKEN and
    /// drives per-client read/write state. Oneshot registration means each
    /// client must be re-registered after every event.
    fn ready(&mut self, event_loop: &mut EventLoop<WebSocketServer>, token: Token, events: EventSet) {
        if events.is_readable() {
            match token {
                SERVER_TOKEN => {
                    let client_socket = match self.socket.accept() {
                        Ok(Some(sock)) => sock,
                        // Spurious wakeups are not expected here.
                        Ok(None) => unreachable!(),
                        Err(e) => {
                            println!("Accept error: {}", e);
                            return;
                        }
                    };
                    let new_token = Token(self.token_counter);
                    self.clients.insert(new_token, WebSocketClient::new(client_socket));
                    self.token_counter += 1;
                    event_loop.register_opt(&self.clients[&new_token].socket, new_token, EventSet::readable(),
                                            PollOpt::edge() | PollOpt::oneshot()).unwrap();
                },
                token => {
                    let mut client = self.clients.get_mut(&token).unwrap();
                    client.read();
                    event_loop.reregister(&client.socket, token, client.interest,
                                          PollOpt::edge() | PollOpt::oneshot()).unwrap();
                }
            }
        }
        if events.is_writable() {
            // Writable events only ever target client tokens.
            let mut client = self.clients.get_mut(&token).unwrap();
            client.write();
            event_loop.reregister(&client.socket, token, client.interest,
                                  PollOpt::edge() | PollOpt::oneshot()).unwrap();
        }
    }
}
use std::str::FromStr;
/// Entry point: binds 0.0.0.0:10000, registers the listener under
/// SERVER_TOKEN (client tokens start at 1), and runs the event loop.
fn main() {
    let server_socket = TcpSocket::v4().unwrap();
    let address = FromStr::from_str("0.0.0.0:10000").unwrap();
    server_socket.bind(&address).unwrap();
    // 256 is the listen backlog.
    let server_socket = server_socket.listen(256).unwrap();
    let mut event_loop = EventLoop::new().unwrap();
    let mut server = WebSocketServer {
        token_counter: 1,
        clients: HashMap::new(),
        socket: server_socket
    };
    event_loop.register_opt(&server.socket,
                            SERVER_TOKEN,
                            EventSet::readable(),
                            PollOpt::edge()).unwrap();
    event_loop.run(&mut server).unwrap();
}
|
extern crate diff;
extern crate regex;
use std::fs::File;
use std::process::{Command, Stdio};
use std::collections::BinaryHeap;
use std::fs;
use regex::Regex;
use std::io::{self, Read, Write};
// One input/output file pair discovered in the working directory.
#[derive(Debug)]
struct Testcase {
    input: String,
    output: String,
}
// Errors from testcase discovery. The TooMany* variants carry both name
// heaps so the caller can report which files are unmatched.
enum Error {
    Io(io::Error),
    TooManyInputs((BinaryHeap<String>, BinaryHeap<String>)),
    TooManyOutputs((BinaryHeap<String>, BinaryHeap<String>)),
}
// Lets `?` convert I/O failures into this module's Error type.
impl std::convert::From<io::Error> for Error {
    fn from(e: io::Error) -> Self {
        Error::Io(e)
    }
}
/// Scans the current directory for files matching `input.*` / `output.*`
/// and pairs them into Testcases. Returns an error when the counts differ.
///
/// Both name sets are popped from BinaryHeaps in the same (max-first)
/// order, so pairing assumes correspondingly-named files sort together
/// (input1/output1, ...) — TODO confirm the naming convention.
fn get_testcases() -> Result<Vec<Testcase>, Error> {
    let input_re = Regex::new(r"input.*").unwrap();
    let output_re = Regex::new(r"output.*").unwrap();
    let mut testcases = vec![];
    let mut inputs = BinaryHeap::new();
    let mut outputs = BinaryHeap::new();
    for entry in fs::read_dir(".")? {
        let path = entry?.path().into_os_string();
        // The capture keeps everything from "input"/"output" onward, which
        // strips the "./" path prefix.
        if let Some(input) = input_re.captures(path.to_str().unwrap()) {
            inputs.push(input[0].to_string());
        }
        if let Some(output) = output_re.captures(path.to_str().unwrap()) {
            outputs.push(output[0].to_string());
        }
    }
    if inputs.len() < outputs.len() {
        return Err(Error::TooManyOutputs((inputs, outputs)));
    } else if inputs.len() > outputs.len() {
        return Err(Error::TooManyInputs((inputs, outputs)));
    }
    while !inputs.is_empty() {
        testcases.push(Testcase {
            input: inputs.pop().unwrap(),
            output: outputs.pop().unwrap(),
        });
    }
    Ok(testcases)
}
/// Runs `binary` on one testcase: feeds `testcase.input` on stdin and
/// compares stdout against the contents of `testcase.output`.
///
/// Returns `Ok(())` on a match; otherwise the error's message is a
/// diff (`-` actual, `+` expected).
///
/// NOTE(review): all input is written before any output is read, so a
/// program that fills the stdout pipe buffer before draining stdin can
/// deadlock; acceptable for small testcases, but worth confirming.
fn run_testcase(binary: &str, testcase: &Testcase) -> Result<(), io::Error> {
    let mut process = Command::new(binary).stdin(Stdio::piped())
                                          .stdout(Stdio::piped())
                                          .stderr(Stdio::null())
                                          .spawn()?;
    let mut f = File::open(&testcase.input)?;
    let mut inp = String::new();
    f.read_to_string(&mut inp)?;
    // take() moves the pipe out while leaving `process` usable; dropping
    // the temporary at the end of the statement closes the child's stdin.
    process.stdin.take().unwrap().write_all(inp.as_bytes())?;
    let mut out = String::new();
    process.stdout.take().unwrap().read_to_string(&mut out)?;
    // Reap the child so it doesn't linger as a zombie. The exit status is
    // deliberately ignored — only the produced output is checked.
    process.wait()?;
    let mut outf = File::open(&testcase.output)?;
    let mut out_expected = String::new();
    outf.read_to_string(&mut out_expected)?;
    if out == out_expected {
        Ok(())
    } else {
        let mut s = String::new();
        for diff in diff::lines(&out, &out_expected) {
            s += &match diff {
                diff::Result::Left(l) => format!("-{}\n", l),
                diff::Result::Both(l, _) => format!(" {}\n", l),
                diff::Result::Right(r) => format!("+{}\n", r),
            };
        }
        Err(io::Error::new(io::ErrorKind::Other, s))
    }
}
/// Entry point: discovers testcases in the current directory, runs each one
/// against the target binary, and reports results ('.' pass / 'E' fail).
fn main() {
    // NOTE(review): nth(2) is the SECOND command-line argument (nth(0) is
    // the program name, nth(1) the first arg) — presumably matches how the
    // tool is invoked, but looks like it may have meant nth(1); confirm.
    let binary = if let Some(name) = std::env::args().nth(2) {
        name
    } else {
        String::from("./a.out")
    };
    let mut failed_testcases = vec![];
    match get_testcases() {
        Ok(testcases) => {
            println!("{} testcases found", testcases.len());
            for testcase in testcases {
                match run_testcase(&binary, &testcase) {
                    Ok(_) => print!("."),
                    Err(e) => {
                        // Keep the testcase and its diff for the summary below.
                        failed_testcases.push((testcase, e));
                        print!("E");
                    }
                }
            }
            println!();
        }
        Err(Error::Io(e)) => println!("I/O error: {}", e),
        Err(Error::TooManyInputs((i, o))) => {
            println!("There are more inputs than outputs.
Inputs : {:?}
Outputs: {:?}",
                     i,
                     o);
        }
        Err(Error::TooManyOutputs((i, o))) => {
            println!("There are more outputs than inputs.
Inputs : {:?}
Outputs: {:?}",
                     i,
                     o);
        }
    }
    // Print each failure with its diff; exit non-zero if anything failed.
    if !failed_testcases.is_empty() {
        println!("Failed testcases:");
        for (testcase, e) in failed_testcases {
            println!("{:?}:\n{}\n", testcase, e);
        }
        std::process::exit(1);
    } else {
        println!("OK");
    }
}
Exit immediately when binary does not exist
extern crate diff;
extern crate regex;
use std::fs::File;
use std::process::{Command, Stdio};
use std::collections::BinaryHeap;
use std::fs;
use regex::Regex;
use std::io::{self, Read, Write};
#[derive(Debug)]
struct Testcase {
    // Path of the file fed to the program under test on stdin.
    input: String,
    // Path of the file holding the expected stdout.
    output: String,
}
// Errors that can occur while collecting testcases from the directory.
enum Error {
    // Underlying filesystem or process error.
    Io(io::Error),
    // More `input*` files than `output*` files; both heaps carried for reporting.
    TooManyInputs((BinaryHeap<String>, BinaryHeap<String>)),
    // More `output*` files than `input*` files; both heaps carried for reporting.
    TooManyOutputs((BinaryHeap<String>, BinaryHeap<String>)),
}
// Lets `?` convert raw I/O errors into our Error type.
impl std::convert::From<io::Error> for Error {
    fn from(e: io::Error) -> Self {
        Error::Io(e)
    }
}
/// Scans the current directory for `input*` / `output*` files and pairs
/// them into testcases.
///
/// Both name sets go into max-heaps, so inputs and outputs are paired in
/// the same (descending lexicographic) order. Errs if the counts differ.
fn get_testcases() -> Result<Vec<Testcase>, Error> {
    let input_re = Regex::new(r"input.*").unwrap();
    let output_re = Regex::new(r"output.*").unwrap();
    let mut inputs = BinaryHeap::new();
    let mut outputs = BinaryHeap::new();
    for entry in fs::read_dir(".")? {
        let path = entry?.path().into_os_string();
        let name = path.to_str().unwrap();
        if let Some(m) = input_re.captures(name) {
            inputs.push(m[0].to_string());
        }
        if let Some(m) = output_re.captures(name) {
            outputs.push(m[0].to_string());
        }
    }
    // The two sets must match one-to-one.
    if inputs.len() > outputs.len() {
        return Err(Error::TooManyInputs((inputs, outputs)));
    }
    if inputs.len() < outputs.len() {
        return Err(Error::TooManyOutputs((inputs, outputs)));
    }
    let mut testcases = Vec::with_capacity(inputs.len());
    while let (Some(input), Some(output)) = (inputs.pop(), outputs.pop()) {
        testcases.push(Testcase {
            input: input,
            output: output,
        });
    }
    Ok(testcases)
}
/// Runs `binary` on one testcase: feeds `testcase.input` on stdin and
/// compares stdout against the contents of `testcase.output`.
///
/// Returns `Ok(())` on a match; otherwise the error's message is a
/// diff (`-` actual, `+` expected).
///
/// NOTE(review): all input is written before any output is read, so a
/// program that fills the stdout pipe buffer before draining stdin can
/// deadlock; acceptable for small testcases, but worth confirming.
fn run_testcase(binary: &str, testcase: &Testcase) -> Result<(), io::Error> {
    let mut process = Command::new(binary).stdin(Stdio::piped())
                                          .stdout(Stdio::piped())
                                          .stderr(Stdio::null())
                                          .spawn()?;
    let mut f = File::open(&testcase.input)?;
    let mut inp = String::new();
    f.read_to_string(&mut inp)?;
    // take() moves the pipe out while leaving `process` usable; dropping
    // the temporary at the end of the statement closes the child's stdin.
    process.stdin.take().unwrap().write_all(inp.as_bytes())?;
    let mut out = String::new();
    process.stdout.take().unwrap().read_to_string(&mut out)?;
    // Reap the child so it doesn't linger as a zombie. The exit status is
    // deliberately ignored — only the produced output is checked.
    process.wait()?;
    let mut outf = File::open(&testcase.output)?;
    let mut out_expected = String::new();
    outf.read_to_string(&mut out_expected)?;
    if out == out_expected {
        Ok(())
    } else {
        let mut s = String::new();
        for diff in diff::lines(&out, &out_expected) {
            s += &match diff {
                diff::Result::Left(l) => format!("-{}\n", l),
                diff::Result::Both(l, _) => format!(" {}\n", l),
                diff::Result::Right(r) => format!("+{}\n", r),
            };
        }
        Err(io::Error::new(io::ErrorKind::Other, s))
    }
}
/// Entry point: discovers testcases in the current directory, runs each one
/// against the target binary, and reports results ('.' pass / 'E' fail).
fn main() {
    // NOTE(review): nth(2) is the SECOND command-line argument (nth(0) is
    // the program name, nth(1) the first arg) — presumably matches how the
    // tool is invoked, but looks like it may have meant nth(1); confirm.
    let binary = if let Some(name) = std::env::args().nth(2) {
        name
    } else {
        String::from("./a.out")
    };
    // Fail fast with a clear message instead of an 'E' on every testcase
    // when the target binary is missing.
    if !std::path::Path::new(&binary).exists() {
        println!("Binary {} does not exist", binary);
        std::process::exit(1);
    }
    let mut failed_testcases = vec![];
    match get_testcases() {
        Ok(testcases) => {
            println!("{} testcases found", testcases.len());
            for testcase in testcases {
                match run_testcase(&binary, &testcase) {
                    Ok(_) => print!("."),
                    Err(e) => {
                        // Keep the testcase and its diff for the summary below.
                        failed_testcases.push((testcase, e));
                        print!("E");
                    }
                }
            }
            println!();
        }
        Err(Error::Io(e)) => println!("I/O error: {}", e),
        Err(Error::TooManyInputs((i, o))) => {
            println!("There are more inputs than outputs.
Inputs : {:?}
Outputs: {:?}",
                     i,
                     o);
        }
        Err(Error::TooManyOutputs((i, o))) => {
            println!("There are more outputs than inputs.
Inputs : {:?}
Outputs: {:?}",
                     i,
                     o);
        }
    }
    // Print each failure with its diff; exit non-zero if anything failed.
    if !failed_testcases.is_empty() {
        println!("Failed testcases:");
        for (testcase, e) in failed_testcases {
            println!("{:?}:\n{}\n", testcase, e);
        }
        std::process::exit(1);
    } else {
        println!("OK");
    }
}
|
use std::env;
use std::fs::File;
use std::io::Read;
extern crate syn;
use syn::*;
/// True when `attrs` contains an outer attribute equal to `target`.
fn has_attribute(target: MetaItem, attrs: &Vec<Attribute>) -> bool {
    for attr in attrs.iter() {
        if attr.style == AttrStyle::Outer && attr.value == target {
            return true;
        }
    }
    false
}
/// True when `attrs` contains `#[no_mangle]`.
fn has_no_mangle(attrs: &Vec<Attribute>) -> bool {
    let no_mangle = MetaItem::Word(Ident::new("no_mangle"));
    has_attribute(no_mangle, attrs)
}
/// Maps a Rust type path to its C spelling.
///
/// Only the first path segment is considered, so fully-qualified paths
/// (e.g. `std::os::raw::c_char`) are not resolved — acceptable for the
/// simple signatures this tool targets.
fn map_path(p: &Path) -> String {
    let l = p.segments[0].ident.to_string();
    match l.as_ref() {
        // Sized integers map onto the <stdint.h> typedefs; usize/isize are
        // pointer-sized. Previously only usize/u8/u32/f32 were mapped, so
        // e.g. `i32` leaked into the generated C verbatim.
        "usize" => "size_t".to_string(),
        "isize" => "ssize_t".to_string(),
        "u8" => "uint8_t".to_string(),
        "u16" => "uint16_t".to_string(),
        "u32" => "uint32_t".to_string(),
        "u64" => "uint64_t".to_string(),
        "i8" => "int8_t".to_string(),
        "i16" => "int16_t".to_string(),
        "i32" => "int32_t".to_string(),
        "i64" => "int64_t".to_string(),
        "f32" => "float".to_string(),
        "f64" => "double".to_string(),
        // Anything else (user structs, etc.) is assumed to share its name
        // with the C side.
        _ => l
    }
}
/// Maps the pointee type of a reference; mutability is not reflected in
/// the generated C (no `const` qualifier is emitted).
fn map_mut_ty(mut_ty: &MutTy) -> String {
    let pointee = &mut_ty.ty;
    map_ty(pointee)
}
/// Renders a syn type as C source text.
fn map_ty(ty: &Ty) -> String {
    match *ty {
        // Plain named type.
        Ty::Path(_, ref p) => map_path(p),
        // Raw pointers and references both become C pointers.
        Ty::Ptr(ref p) => format!("{}*", map_ty(&p.ty)),
        Ty::Rptr(_, ref mut_ty) => format!("{}*", map_mut_ty(mut_ty)),
        ref other => format!("unknown {:?}", other),
    }
}
/// C return type of a function; `void` when none is declared.
fn map_return_type(ret: &FunctionRetTy) -> String {
    match *ret {
        FunctionRetTy::Default => "void".to_string(),
        FunctionRetTy::Ty(ref ty) => map_ty(ty),
    }
}
/// Extracts the argument name from a pattern; only plain identifier
/// patterns are supported.
fn map_pat(pat: &Pat) -> String {
    if let Pat::Ident(_, ref ident, _) = *pat {
        ident.to_string()
    } else {
        format!("unknown {:?}", pat)
    }
}
/// Renders one function argument as `<c-type> <name>`.
fn map_arg(f: &FnArg) -> String {
    if let FnArg::Captured(ref pat, ref ty) = *f {
        format!("{} {}", map_ty(ty), map_pat(pat))
    } else {
        "unknown".to_string()
    }
}
/// Reads the Rust source file named on the command line and prints a C
/// prototype for every `#[no_mangle]` function in it.
fn main() {
    // expect() instead of bare unwrap(): these all depend on user input
    // (arguments, the file, its contents), so failures deserve a message.
    let p = env::args().nth(1).expect("usage: <program> <file.rs>");
    let mut s = String::new();
    let mut f = File::open(p).expect("couldn't open input file");
    f.read_to_string(&mut s).expect("couldn't read input file");
    let krate = syn::parse_crate(&s).expect("couldn't parse input as Rust");
    for item in krate.items {
        match item.node {
            ItemKind::Fn(decl, ..) => {
                // Only exported (unmangled) functions get a prototype.
                if has_no_mangle(&item.attrs) {
                    println!("{} {}({})", map_return_type(&decl.output), item.ident, decl.inputs.iter().map(map_arg).collect::<Vec<_>>().join(", "));
                }
            }
            _ => {}
        }
    }
}
Add WR_INLINE and WR_FUNC decorations
use std::env;
use std::fs::File;
use std::io::Read;
extern crate syn;
use syn::*;
/// True when `attrs` contains an outer attribute equal to `target`.
fn has_attribute(target: MetaItem, attrs: &Vec<Attribute>) -> bool {
    for attr in attrs.iter() {
        if attr.style == AttrStyle::Outer && attr.value == target {
            return true;
        }
    }
    false
}
/// True when `attrs` contains `#[no_mangle]`.
fn has_no_mangle(attrs: &Vec<Attribute>) -> bool {
    let no_mangle = MetaItem::Word(Ident::new("no_mangle"));
    has_attribute(no_mangle, attrs)
}
/// Maps a Rust type path to its C spelling.
///
/// Only the first path segment is considered, so fully-qualified paths
/// (e.g. `std::os::raw::c_char`) are not resolved — acceptable for the
/// simple signatures this tool targets.
fn map_path(p: &Path) -> String {
    let l = p.segments[0].ident.to_string();
    match l.as_ref() {
        // Sized integers map onto the <stdint.h> typedefs; usize/isize are
        // pointer-sized. Previously only usize/u8/u32/f32 were mapped, so
        // e.g. `i32` leaked into the generated C verbatim.
        "usize" => "size_t".to_string(),
        "isize" => "ssize_t".to_string(),
        "u8" => "uint8_t".to_string(),
        "u16" => "uint16_t".to_string(),
        "u32" => "uint32_t".to_string(),
        "u64" => "uint64_t".to_string(),
        "i8" => "int8_t".to_string(),
        "i16" => "int16_t".to_string(),
        "i32" => "int32_t".to_string(),
        "i64" => "int64_t".to_string(),
        "f32" => "float".to_string(),
        "f64" => "double".to_string(),
        // Anything else (user structs, etc.) is assumed to share its name
        // with the C side.
        _ => l
    }
}
/// Maps the pointee type of a reference; mutability is not reflected in
/// the generated C (no `const` qualifier is emitted).
fn map_mut_ty(mut_ty: &MutTy) -> String {
    let pointee = &mut_ty.ty;
    map_ty(pointee)
}
/// Renders a syn type as C source text.
fn map_ty(ty: &Ty) -> String {
    match *ty {
        // Plain named type.
        Ty::Path(_, ref p) => map_path(p),
        // Raw pointers and references both become C pointers.
        Ty::Ptr(ref p) => format!("{}*", map_ty(&p.ty)),
        Ty::Rptr(_, ref mut_ty) => format!("{}*", map_mut_ty(mut_ty)),
        ref other => format!("unknown {:?}", other),
    }
}
/// C return type of a function; `void` when none is declared.
fn map_return_type(ret: &FunctionRetTy) -> String {
    match *ret {
        FunctionRetTy::Default => "void".to_string(),
        FunctionRetTy::Ty(ref ty) => map_ty(ty),
    }
}
/// Extracts the argument name from a pattern; only plain identifier
/// patterns are supported.
fn map_pat(pat: &Pat) -> String {
    if let Pat::Ident(_, ref ident, _) = *pat {
        ident.to_string()
    } else {
        format!("unknown {:?}", pat)
    }
}
/// Renders one function argument as `<c-type> <name>`.
fn map_arg(f: &FnArg) -> String {
    if let FnArg::Captured(ref pat, ref ty) = *f {
        format!("{} {}", map_ty(ty), map_pat(pat))
    } else {
        "unknown".to_string()
    }
}
/// Reads the Rust source file named on the command line and prints a
/// WR_INLINE/WR_FUNC-decorated C prototype for every `#[no_mangle]`
/// function in it.
fn main() {
    // expect() instead of bare unwrap(): these all depend on user input
    // (arguments, the file, its contents), so failures deserve a message.
    let p = env::args().nth(1).expect("usage: <program> <file.rs>");
    let mut s = String::new();
    let mut f = File::open(p).expect("couldn't open input file");
    f.read_to_string(&mut s).expect("couldn't read input file");
    let krate = syn::parse_crate(&s).expect("couldn't parse input as Rust");
    for item in krate.items {
        match item.node {
            ItemKind::Fn(decl, ..) => {
                // Only exported (unmangled) functions get a prototype.
                if has_no_mangle(&item.attrs) {
                    println!("WR_INLINE {}\n{}({})\nWR_FUNC;\n", map_return_type(&decl.output), item.ident, decl.inputs.iter().map(map_arg).collect::<Vec<_>>().join(", "));
                }
            }
            _ => {}
        }
    }
}
|
use std::io::Read;
use std::collections::HashMap;
use std::str::Split;
// One registered CLI command: its textual name plus an execution callback
// and an optional completion callback, both boxed closures living for 'a.
pub struct Command<'a> {
    command: &'a str,
    exec: Box<FnMut(Vec<&str>) + 'a>,
    complete: Option<Box<FnMut(Vec<&str>) + 'a>>
}
impl<'a> Command<'a> {
    /// Builds a command from its name, an exec handler, and an optional
    /// completion handler.
    fn new<T: FnMut(Vec<&str>) + 'a, U: FnMut(Vec<&str>) + 'a>(cmd: &'a str, exec_handler: T, complete_handler: Option<U>) -> Command<'a> {
        let mut built = Command {
            command: cmd,
            exec: Box::new(exec_handler),
            complete: None
        };
        match complete_handler {
            Some(cb) => built.complete = Some(Box::new(cb)),
            None => {}
        }
        built
    }
}
// The CLI registry: maps each command string to its Command entry.
pub struct Cli<'a> {
    commands: HashMap<&'a str, Command<'a>>
}
impl<'a> Cli<'a> {
    /// Creates an empty registry.
    fn new() -> Cli<'a> {
        Cli { commands: HashMap::new() }
    }
    /// Registers `exec` under `cmd`. On a name clash the previously
    /// registered Command is handed back in the Err variant.
    fn register<T: FnMut(Vec<&str>) + 'a>(&mut self, cmd: &'a str, exec: T) -> Result<(), Command> {
        let entry = Command {
            command: cmd,
            exec: Box::new(exec),
            complete: None
        };
        if let Some(previous) = self.commands.insert(cmd, entry) {
            Err(previous)
        } else {
            Ok(())
        }
    }
    /// Completion is not implemented in this version; always empty.
    fn complete(&self, argv: &str) -> Vec<&str> {
        Vec::new()
    }
    /// Looks up `cmd` and invokes its exec handler with a dummy argv.
    fn exec(&mut self, cmd: &str) {
        if let Some(command) = self.commands.get_mut(cmd) {
            // Borrow the boxed closure mutably before calling it; calling
            // through the field expression directly trips the borrow checker.
            let ref mut handler = command.exec;
            handler(vec!["blah"]);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Registering a command and exec'ing it must invoke the handler.
    /// (Renamed from camelCase `testRegister` to idiomatic snake_case.)
    #[test]
    fn test_register() {
        let mut called = false;
        {
            let mut cli = Cli::new();
            // Registration can't clash here, so the Result is ignored.
            let _ = cli.register("my first command", |_args| { called = true });
            cli.exec("my first command");
        }
        assert!(called)
    }
}
//fn new(callback: Option<Box<Fn()>>) -> Cli<'a> {
//Cli {
//commands: HashMap::new(),
//exec: callback,
//complete_cb: None,
//}
//}
//fn register<T: Fn() + 'static>(&mut self, command: Vec<&'a str>, callback: T) {
//let mut it = command.iter();
//self._register(it, callback);
//}
//fn _register<T: Fn() + 'static>(&mut self, mut it: std::slice::Iter<&'a str>, callback: T) {
//if let Some(portion) = it.next() {
//if !self.commands.contains_key(portion) {
//self.commands.insert(portion, Cli::new(Some(Box::new(callback))));
//}
//self.commands.get_mut(portion).unwrap()._register(it, callback);
//}
//}
//fn suggest(&mut self, command: &str) -> Vec<&str> {
//let mut portions = command.trim().split(" ");
//let mut suggestions = self._suggest(&mut portions);
//if let Some(ref cb) = self.complete_cb {
//cb();
//println!("got callback");
//}
//suggestions
//}
//fn _suggest(&self, portions: &mut std::str::Split<&str>) -> Vec<&str> {
//let mut ret = Vec::with_capacity(self.commands.len());
//if let Some(portion) = portions.next() {
//if !portion.is_empty() {
//if let Some(cmd) = self.commands.get(portion) {
//ret = cmd._suggest(portions);
//}
//} else {
//for key in self.commands.keys() {
//ret.push(*key);
//}
//}
//} else {
//for key in self.commands.keys() {
//ret.push(*key);
//}
//}
//ret
//}
//}
//fn foo() {
//}
// Entry point; the interactive demo below is commented-out scaffolding
// kept while the CLI API was still in flux.
fn main() {
    //let mut cli = Cli::new(Some(Box::new(foo)));
    //cli.register(vec!["show", "stuff"], foo);
    //cli.register(vec!["show", "other"], foo);
    //cli.register(vec!["list", "other", "cool"], foo);
    //cli.register(vec!["list", "other", "uncool"], foo);
    //loop {
    //let mut line = String::new();
    //std::io::stdin().read_line(&mut line).unwrap();
    //println!("got: {:?}", cli.suggest(&line));
    //}
}
registering functions and completing done, still not quite right though
use std::io::Read;
use std::collections::HashMap;
use std::str::Split;
// One registered CLI command: its textual name plus an execution callback
// and an optional completion callback, both boxed closures living for 'a.
pub struct Command<'a> {
    command: &'a str,
    exec: Box<FnMut(Vec<&str>) + 'a>,
    complete: Option<Box<FnMut(Vec<&str>) + 'a>>
}
impl<'a> Command<'a> {
    /// Builds a command from its name, an exec handler, and an optional
    /// completion handler.
    fn new<T: FnMut(Vec<&str>) + 'a, U: FnMut(Vec<&str>) + 'a>(cmd: &'a str, exec_handler: T, complete_handler: Option<U>) -> Command<'a> {
        let mut built = Command {
            command: cmd,
            exec: Box::new(exec_handler),
            complete: None
        };
        match complete_handler {
            Some(cb) => built.complete = Some(Box::new(cb)),
            None => {}
        }
        built
    }
}
// The CLI registry: maps each command string to its Command entry.
pub struct Cli<'a> {
    commands: HashMap<&'a str, Command<'a>>
}
impl<'a> Cli<'a> {
    /// Creates an empty registry.
    fn new() -> Cli<'a> {
        Cli { commands: HashMap::new() }
    }
    /// Registers `exec` under `cmd`. On a name clash the previously
    /// registered Command is handed back in the Err variant.
    fn register<T: FnMut(Vec<&str>) + 'a>(&mut self, cmd: &'a str, exec: T) -> Result<(), Command> {
        let entry = Command {
            command: cmd,
            exec: Box::new(exec),
            complete: None
        };
        if let Some(previous) = self.commands.insert(cmd, entry) {
            Err(previous)
        } else {
            Ok(())
        }
    }
    /// Returns every registered command that starts with the trimmed
    /// partial input. Order follows HashMap iteration and is therefore
    /// unspecified.
    fn complete(&self, argv: &str) -> Vec<&str> {
        println!("complete for '{}'", argv.trim());
        let needle = argv.trim();
        let mut matches = Vec::new();
        for cmd in self.commands.keys() {
            if cmd.starts_with(needle) {
                matches.push(*cmd);
            }
        }
        matches
    }
    /// Looks up `cmd` and invokes its exec handler with a dummy argv.
    fn exec(&mut self, cmd: &str) {
        if let Some(command) = self.commands.get_mut(cmd) {
            // Borrow the boxed closure mutably before calling it; calling
            // through the field expression directly trips the borrow checker.
            let ref mut handler = command.exec;
            handler(vec!["blah"]);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Sorts a completion result so assertions don't depend on HashMap
    /// iteration order (which is unspecified — the original empty-string
    /// test was flaky for exactly that reason).
    fn sorted(mut v: Vec<&str>) -> Vec<&str> {
        v.sort();
        v
    }
    /// Registering a command and exec'ing it must invoke the handler.
    #[test]
    fn test_register_and_execute() {
        let mut called = false;
        {
            let mut cli = Cli::new();
            let _ = cli.register("my first command", |_args| { called = true });
            cli.exec("my first command");
        }
        assert!(called)
    }
    /// Empty input offers every registered command.
    #[test]
    fn test_complete_empty_str() {
        let mut cli = Cli::new();
        let _ = cli.register("foo", |_args| {});
        let _ = cli.register("bar", |_args| {});
        assert!(vec!["bar", "foo"] == sorted(cli.complete("")))
    }
    /// Any prefix of a single command completes to it.
    #[test]
    fn test_complete_partial() {
        let mut cli = Cli::new();
        let _ = cli.register("foo", |_args| {});
        assert!(vec!["foo"] == cli.complete("f"));
        assert!(vec!["foo"] == cli.complete("fo"));
        assert!(vec!["foo"] == cli.complete("foo"));
    }
    /// Completion matches on the whole registered string, so a multi-word
    /// command completes from any prefix of it.
    #[test]
    fn test_complete_composite() {
        let mut cli = Cli::new();
        let _ = cli.register("foo bar", |_args| {});
        assert!(vec!["foo bar"] == cli.complete("f"));
        assert!(vec!["foo bar"] == cli.complete("foo"));
        assert!(vec!["foo bar"] == cli.complete("foo "));
        assert!(vec!["foo bar"] == cli.complete("foo b"));
    }
}
//fn new(callback: Option<Box<Fn()>>) -> Cli<'a> {
//Cli {
//commands: HashMap::new(),
//exec: callback,
//complete_cb: None,
//}
//}
//fn register<T: Fn() + 'static>(&mut self, command: Vec<&'a str>, callback: T) {
//let mut it = command.iter();
//self._register(it, callback);
//}
//fn _register<T: Fn() + 'static>(&mut self, mut it: std::slice::Iter<&'a str>, callback: T) {
//if let Some(portion) = it.next() {
//if !self.commands.contains_key(portion) {
//self.commands.insert(portion, Cli::new(Some(Box::new(callback))));
//}
//self.commands.get_mut(portion).unwrap()._register(it, callback);
//}
//}
//fn suggest(&mut self, command: &str) -> Vec<&str> {
//let mut portions = command.trim().split(" ");
//let mut suggestions = self._suggest(&mut portions);
//if let Some(ref cb) = self.complete_cb {
//cb();
//println!("got callback");
//}
//suggestions
//}
//fn _suggest(&self, portions: &mut std::str::Split<&str>) -> Vec<&str> {
//let mut ret = Vec::with_capacity(self.commands.len());
//if let Some(portion) = portions.next() {
//if !portion.is_empty() {
//if let Some(cmd) = self.commands.get(portion) {
//ret = cmd._suggest(portions);
//}
//} else {
//for key in self.commands.keys() {
//ret.push(*key);
//}
//}
//} else {
//for key in self.commands.keys() {
//ret.push(*key);
//}
//}
//ret
//}
//}
// Placeholder exec handler used by the demo in main(); deliberately
// ignores its argv (underscore silences the unused-parameter warning).
fn foo(_argv: Vec<&str>) {
}
/// Demo driver: registers a few commands, then loops forever reading
/// lines from stdin and printing the completions for each.
fn main() {
    let mut cli = Cli::new();
    // The names are unique, so duplicate-registration Results can't be
    // Err here; `let _ =` silences the unused #[must_use] warnings.
    let _ = cli.register("show stuff", foo);
    let _ = cli.register("show other", foo);
    let _ = cli.register("list other cool", foo);
    let _ = cli.register("list other uncool", foo);
    loop {
        let mut line = String::new();
        std::io::stdin().read_line(&mut line).unwrap();
        println!("got: {:?}", cli.complete(&line));
    }
}
|
extern crate clap;
extern crate git2;
use git2::{Repository, Commit, Oid, Revwalk, Index};
use std::collections::HashMap;
#[macro_use]
mod macros;
// Process exit codes returned by real_main().
const E_SUCCESS: i32 = 0;
const E_NO_GIT_REPO: i32 = 1;
/// Thin wrapper: run the real program and turn its status into the
/// process exit code.
fn main() {
    std::process::exit(real_main());
}
/// Drives the whole merge: rewrites the submodule's history under its
/// directory, rewrites the main repo's history on top of it, then
/// force-checks-out the result. Returns a process exit code.
fn real_main() -> i32 {
    let submodule_dir = parse_cli_arguments();
    println!("Merging {}...", submodule_dir);
    let repo = match Repository::open(".") {
        Ok(repo) => repo,
        Err(e) => {
            eprintln!("Couldn't find Git repo in the current directory: {}",
                      e.message());
            return E_NO_GIT_REPO;
        }
    };
    let mut submodule_revwalk = get_submodule_revwalk(&repo, &submodule_dir);
    // Maps every rewritten object id to its replacement; filled by the
    // submodule pass and consulted by the repo pass.
    let mut old_id_to_new = HashMap::new();
    rewrite_submodule_history(&repo,
                              &mut submodule_revwalk,
                              &mut old_id_to_new,
                              &submodule_dir);
    let mut repo_revwalk = get_repo_revwalk(&repo);
    rewrite_repo_history(&repo, &mut repo_revwalk, &mut old_id_to_new, &submodule_dir);
    checkout_rewritten_history(&repo, &old_id_to_new);
    E_SUCCESS
}
/// Parses the command line and returns the submodule directory to merge.
fn parse_cli_arguments() -> String {
    let submodule_arg = clap::Arg::with_name("SUBMODULE_DIR")
        .help("The submodule to merge")
        .required(true)
        .index(1);
    let options = clap::App::new("git-submerge")
        .version("0.1")
        .author("Alexander Batischev <eual.jp@gmail.com>")
        .about("Merge Git submodule into the main repo as if they've never been separate at all")
        .arg(submodule_arg)
        .get_matches();
    // unwrap() is safe: the argument is required, so Clap has already
    // verified its presence.
    String::from(options.value_of("SUBMODULE_DIR").unwrap())
}
/// Fetches the submodule's objects into this repository and builds a
/// RevWalk over its history, oldest-first, starting from its HEAD.
fn get_submodule_revwalk<'repo>(repo: &'repo Repository, submodule_dir: &str) -> Revwalk<'repo> {
    // The submodule checkout doubles as a local remote, letting us fetch
    // its objects straight into this repository's object database.
    let submodule_url = String::from("./") + submodule_dir;
    let mut remote = repo.remote_anonymous(&submodule_url)
        .expect("Couldn't create an anonymous remote");
    remote.fetch(&[], None, None).expect("Couldn't fetch submodule's history");
    let submodule = repo.find_submodule(submodule_dir)
        .expect("Couldn't find the submodule with expected path");
    let submodule_head = submodule.head_id()
        .expect("Couldn't obtain submodule's HEAD");
    let mut revwalk = repo.revwalk().expect("Couldn't obtain RevWalk object for the repo");
    // "Topological" and reverse means "parents are always visited before their children".
    // We need that in order to be sure that our old-to-new-ids map always contains everything we
    // need it to contain.
    revwalk.set_sorting(git2::SORT_REVERSE | git2::SORT_TOPOLOGICAL);
    // TODO (#6): push all branches and tags, not just HEAD
    revwalk.push(submodule_head).expect("Couldn't add submodule's HEAD to RevWalk list");
    revwalk
}
/// First rewriting pass: replays the submodule's entire history so that
/// every file lives under `submodule_dir/` instead of the repo root.
///
/// Relies on the revwalk yielding parents before children (reverse
/// topological) so that `old_id_to_new` already holds the rewritten
/// parents when a commit is processed. Fills the map with
/// old-id -> new-id entries for both trees and commits.
fn rewrite_submodule_history(repo: &Repository,
                             revwalk: &mut Revwalk,
                             old_id_to_new: &mut HashMap<Oid, Oid>,
                             submodule_dir: &str) {
    for maybe_oid in revwalk {
        match maybe_oid {
            Ok(oid) => {
                let commit = repo.find_commit(oid)
                    .expect(&format!("Couldn't get a commit with ID {}", oid));
                let tree = commit.tree()
                    .expect(&format!("Couldn't obtain the tree of a commit with ID {}", oid));
                let mut old_index = Index::new()
                    .expect("Couldn't create an in-memory index for commit");
                let mut new_index = Index::new().expect("Couldn't create an in-memory index");
                old_index.read_tree(&tree)
                    .expect(&format!("Couldn't read the commit {} into index", oid));
                // Obtain the new tree, where everything from the old one is moved under
                // a directory named after the submodule
                for entry in old_index.iter() {
                    let mut new_entry = entry;
                    let mut new_path = String::from(submodule_dir);
                    new_path += "/";
                    new_path += &String::from_utf8(new_entry.path)
                        .expect("Failed to convert a path to str");
                    new_entry.path = new_path.into_bytes();
                    new_index.add(&new_entry).expect("Couldn't add an entry to the index");
                }
                let tree_id = new_index.write_tree_to(&repo)
                    .expect("Couldn't write the index into a tree");
                // NOTE(review): tree ids are recorded alongside commit ids;
                // the repo pass appears to look up commit ids only.
                old_id_to_new.insert(tree.id(), tree_id);
                let tree = repo.find_tree(tree_id)
                    .expect("Couldn't retrieve the tree we just created");
                // Re-parent the commit onto its already-rewritten parents.
                let parents = {
                    let mut p: Vec<Commit> = Vec::new();
                    for parent_id in commit.parent_ids() {
                        let new_parent_id = old_id_to_new[&parent_id];
                        let parent = repo.find_commit(new_parent_id)
                            .expect("Couldn't find parent commit by its id");
                        p.push(parent);
                    }
                    p
                };
                // repo.commit() wants &[&Commit], so collect references.
                let mut parents_refs: Vec<&Commit> = Vec::new();
                for i in 0..parents.len() {
                    parents_refs.push(&parents[i]);
                }
                // update_ref is None: the rewritten history isn't attached
                // to any reference yet.
                let new_commit_id = repo.commit(None,
                                               &commit.author(),
                                               &commit.committer(),
                                               &commit.message().expect("Couldn't retrieve commit's message"),
                                               &tree,
                                               &parents_refs[..])
                    .expect("Failed to commit");
                old_id_to_new.insert(oid, new_commit_id);
            }
            Err(e) => eprintln!("Error walking the submodule's history: {:?}", e),
        }
    }
}
/// Builds a RevWalk over the main repo's history, oldest-first (reverse
/// topological), starting from HEAD.
fn get_repo_revwalk<'repo>(repo: &'repo Repository) -> Revwalk<'repo> {
    let head = repo.head().expect("Couldn't obtain repo's HEAD");
    let head_id = head.target().expect("Couldn't resolve repo's HEAD to a commit ID");
    let mut revwalk = repo.revwalk().expect("Couldn't obtain RevWalk object for the repo");
    revwalk.set_sorting(git2::SORT_REVERSE | git2::SORT_TOPOLOGICAL);
    // TODO (#6): push all branches and tags, not just HEAD
    revwalk.push(head_id).expect("Couldn't add repo's HEAD to RevWalk list");
    revwalk
}
/// Second rewriting pass: walks the main repo's history and, in every
/// commit that contains the submodule, replaces the gitlink entry with
/// the corresponding rewritten subtree; commits that updated the
/// submodule additionally gain the rewritten submodule commit as a
/// parent, stitching the two histories together.
fn rewrite_repo_history(repo: &Repository,
                        revwalk: &mut Revwalk,
                        old_id_to_new: &mut HashMap<Oid, Oid>,
                        submodule_dir: &str) {
    let submodule_path = std::path::Path::new(submodule_dir);
    for maybe_oid in revwalk {
        match maybe_oid {
            Ok(oid) => {
                let commit = repo.find_commit(oid)
                    .expect(&format!("Couldn't get a commit with ID {}", oid));
                let tree = commit.tree()
                    .expect(&format!("Couldn't obtain the tree of a commit with ID {}", oid));
                let submodule_subdir = match tree.get_path(submodule_path) {
                    Ok(tree) => tree,
                    Err(e) => {
                        if e.code() == git2::ErrorCode::NotFound &&
                           e.class() == git2::ErrorClass::Tree {
                            // It's okay. The tree lacks the subtree corresponding to the
                            // submodule. In other words, the commit doesn't include the submodule.
                            // That's totally fine. Let's map it into itself and move on.
                            old_id_to_new.insert(oid, oid);
                            continue;
                        } else {
                            // Unexpected error; let's report it and abort the program
                            panic!("Error getting submodule's subdir from the tree: {:?}", e);
                        };
                    }
                };
                // **INVARIANT**: if we got this far, current commit contains a submodule and
                // should be rewritten
                let submodule_commit_id = submodule_subdir.id();
                // Indexing panics on an unknown id, i.e. on a submodule
                // commit that the first pass never saw.
                let new_submodule_commit_id = old_id_to_new[&submodule_commit_id];
                let submodule_commit = repo.find_commit(new_submodule_commit_id)
                    .expect("Couldn't obtain submodule's commit");
                let subtree_id = submodule_commit.tree()
                    .and_then(|t| t.get_path(submodule_path))
                    .and_then(|te| Ok(te.id()))
                    .expect("Couldn't obtain submodule's subtree ID");
                // Swap the gitlink entry for a real subdirectory tree
                // (0o040000 is the tree/directory filemode).
                let mut treebuilder = repo.treebuilder(Some(&tree))
                    .expect("Couldn't create TreeBuilder");
                treebuilder.remove(submodule_path)
                    .expect("Couldn't remove submodule path from TreeBuilder");
                treebuilder.insert(submodule_path, subtree_id, 0o040000)
                    .expect("Couldn't add submodule as a subdir to TreeBuilder");
                let new_tree_id = treebuilder.write()
                    .expect("Couldn't write TreeBuilder into a Tree");
                let new_tree = repo.find_tree(new_tree_id)
                    .expect("Couldn't read back the Tree we just wrote");
                // In commits that used to update the submodule, add a parent pointing to
                // appropriate commit in new submodule history
                let mut parent_subtree_ids = std::collections::HashSet::new();
                for parent in commit.parents() {
                    let parent_tree = parent.tree().expect("Couldn't obtain parent's tree");
                    let parent_subdir_tree_id = parent_tree.get_path(submodule_path)
                        .and_then(|x| Ok(x.id()));
                    match parent_subdir_tree_id {
                        Ok(id) => {
                            parent_subtree_ids.insert(id);
                            ()
                        }
                        Err(e) => {
                            if e.code() == git2::ErrorCode::NotFound &&
                               e.class() == git2::ErrorClass::Tree {
                                continue;
                            } else {
                                panic!("Error getting submodule's subdir from the tree: {:?}", e);
                            };
                        }
                    }
                }
                // Here's a few pictures to help you understand how we figure out if current commit
                // updated the submodule. If we draw a DAG and name submodule states, the following
                // situations will mean that the submodule wasn't updated:
                //
                // o--o--o--A--
                // `,-A
                // o--o--o--B-
                //
                // or
                //
                // o--o--o--A--
                // `,-B
                // o--o--o--B-
                //
                // And in the following graphs the submodule was updated:
                //
                // o--o--o--A--
                // `,-C
                // o--o--o--B-
                //
                // or
                //
                // o--o--o--o--A--B
                //
                // Put into words, the rule will be "the submodule state in current commit is
                // different from states in all its parents". Or, more formally, the current state
                // doesn't belong to the set of states in parents.
                let submodule_updated: bool = !parent_subtree_ids.contains(&submodule_commit_id);
                // Rewrite the parents if the submodule was updated
                let parents = {
                    let mut p: Vec<Commit> = Vec::new();
                    for parent_id in commit.parent_ids() {
                        let actual_parent_id = old_id_to_new[&parent_id];
                        let parent = repo.find_commit(actual_parent_id)
                            .expect("Couldn't find parent commit by its id");
                        p.push(parent);
                    }
                    if submodule_updated {
                        p.push(submodule_commit);
                    }
                    p
                };
                // repo.commit() wants &[&Commit], so collect references.
                let mut parents_refs: Vec<&Commit> = Vec::new();
                for i in 0..parents.len() {
                    parents_refs.push(&parents[i]);
                }
                let new_commit_id = repo.commit(None,
                                               &commit.author(),
                                               &commit.committer(),
                                               &commit.message().expect("Couldn't retrieve commit's message"),
                                               &new_tree,
                                               &parents_refs[..])
                    .expect("Failed to commit");
                old_id_to_new.insert(oid, new_commit_id);
            }
            Err(e) => eprintln!("Error walking the repo's history: {:?}", e),
        }
    }
}
/// Force-resets HEAD and the work tree to the rewritten commit that
/// replaces the old HEAD.
fn checkout_rewritten_history(repo: &Repository, old_id_to_new: &HashMap<Oid, Oid>) {
    let head = repo.head().expect("Couldn't obtain repo's HEAD");
    let head_id = head.target().expect("Couldn't resolve repo's HEAD to a commit ID");
    let updated_id = old_id_to_new[&head_id];
    let object = repo.find_object(updated_id, None)
        .expect("Couldn't look up an object at which HEAD points");
    // A forced checkout overwrites local modifications in the work tree.
    let mut checkoutbuilder = git2::build::CheckoutBuilder::new();
    checkoutbuilder.force();
    repo.reset(&object, git2::ResetType::Hard, Some(&mut checkoutbuilder))
        .expect("Couldn't run force-reset");
}
Detect dangling references to submodule (#16)
extern crate clap;
extern crate git2;
use git2::{Repository, Commit, Oid, Revwalk, Index};
use std::collections::HashMap;
#[macro_use]
mod macros;
// Process exit codes returned by real_main().
const E_SUCCESS: i32 = 0;
const E_NO_GIT_REPO: i32 = 1;
const E_FOUND_DANGLING_REFERENCES: i32 = 2;
/// Thin wrapper: run the real program and turn its status into the
/// process exit code.
fn main() {
    std::process::exit(real_main());
}
/// Drives the whole merge: rewrites the submodule's history, refuses to
/// proceed if the repo references submodule commits missing from that
/// history, then rewrites the repo's history and checks out the result.
/// Returns a process exit code.
fn real_main() -> i32 {
    let submodule_dir = parse_cli_arguments();
    println!("Merging {}...", submodule_dir);
    let repo = match Repository::open(".") {
        Ok(repo) => repo,
        Err(e) => {
            eprintln!("Couldn't find Git repo in the current directory: {}",
                      e.message());
            return E_NO_GIT_REPO;
        }
    };
    let mut submodule_revwalk = get_submodule_revwalk(&repo, &submodule_dir);
    // Maps every rewritten object id to its replacement; filled by the
    // submodule pass and consulted by the repo pass.
    let mut old_id_to_new = HashMap::new();
    rewrite_submodule_history(&repo,
                              &mut submodule_revwalk,
                              &mut old_id_to_new,
                              &submodule_dir);
    // Abort early: the rewrite below indexes old_id_to_new and would
    // panic on a gitlink pointing at an unknown submodule commit.
    match find_dangling_references_to_submodule(&repo, &submodule_dir, &old_id_to_new) {
        Some(_) => return E_FOUND_DANGLING_REFERENCES,
        None => {}
    }
    let mut repo_revwalk = get_repo_revwalk(&repo);
    rewrite_repo_history(&repo, &mut repo_revwalk, &mut old_id_to_new, &submodule_dir);
    checkout_rewritten_history(&repo, &old_id_to_new);
    E_SUCCESS
}
/// Parses the command line and returns the submodule directory to merge.
fn parse_cli_arguments() -> String {
    let submodule_arg = clap::Arg::with_name("SUBMODULE_DIR")
        .help("The submodule to merge")
        .required(true)
        .index(1);
    let options = clap::App::new("git-submerge")
        .version("0.1")
        .author("Alexander Batischev <eual.jp@gmail.com>")
        .about("Merge Git submodule into the main repo as if they've never been separate at all")
        .arg(submodule_arg)
        .get_matches();
    // unwrap() is safe: the argument is required, so Clap has already
    // verified its presence.
    String::from(options.value_of("SUBMODULE_DIR").unwrap())
}
/// Fetches the submodule's objects into this repository and builds a
/// RevWalk over its history, oldest-first, starting from its HEAD.
fn get_submodule_revwalk<'repo>(repo: &'repo Repository, submodule_dir: &str) -> Revwalk<'repo> {
    // The submodule checkout doubles as a local remote, letting us fetch
    // its objects straight into this repository's object database.
    let submodule_url = String::from("./") + submodule_dir;
    let mut remote = repo.remote_anonymous(&submodule_url)
        .expect("Couldn't create an anonymous remote");
    remote.fetch(&[], None, None).expect("Couldn't fetch submodule's history");
    let submodule = repo.find_submodule(submodule_dir)
        .expect("Couldn't find the submodule with expected path");
    let submodule_head = submodule.head_id()
        .expect("Couldn't obtain submodule's HEAD");
    let mut revwalk = repo.revwalk().expect("Couldn't obtain RevWalk object for the repo");
    // "Topological" and reverse means "parents are always visited before their children".
    // We need that in order to be sure that our old-to-new-ids map always contains everything we
    // need it to contain.
    revwalk.set_sorting(git2::SORT_REVERSE | git2::SORT_TOPOLOGICAL);
    // TODO (#6): push all branches and tags, not just HEAD
    revwalk.push(submodule_head).expect("Couldn't add submodule's HEAD to RevWalk list");
    revwalk
}
/// First rewriting pass: replays the submodule's entire history so that
/// every file lives under `submodule_dir/` instead of the repo root.
///
/// Relies on the revwalk yielding parents before children (reverse
/// topological) so that `old_id_to_new` already holds the rewritten
/// parents when a commit is processed. Fills the map with
/// old-id -> new-id entries for both trees and commits.
fn rewrite_submodule_history(repo: &Repository,
                             revwalk: &mut Revwalk,
                             old_id_to_new: &mut HashMap<Oid, Oid>,
                             submodule_dir: &str) {
    for maybe_oid in revwalk {
        match maybe_oid {
            Ok(oid) => {
                let commit = repo.find_commit(oid)
                    .expect(&format!("Couldn't get a commit with ID {}", oid));
                let tree = commit.tree()
                    .expect(&format!("Couldn't obtain the tree of a commit with ID {}", oid));
                let mut old_index = Index::new()
                    .expect("Couldn't create an in-memory index for commit");
                let mut new_index = Index::new().expect("Couldn't create an in-memory index");
                old_index.read_tree(&tree)
                    .expect(&format!("Couldn't read the commit {} into index", oid));
                // Obtain the new tree, where everything from the old one is moved under
                // a directory named after the submodule
                for entry in old_index.iter() {
                    let mut new_entry = entry;
                    let mut new_path = String::from(submodule_dir);
                    new_path += "/";
                    new_path += &String::from_utf8(new_entry.path)
                        .expect("Failed to convert a path to str");
                    new_entry.path = new_path.into_bytes();
                    new_index.add(&new_entry).expect("Couldn't add an entry to the index");
                }
                let tree_id = new_index.write_tree_to(&repo)
                    .expect("Couldn't write the index into a tree");
                // NOTE(review): tree ids are recorded alongside commit ids;
                // the repo pass appears to look up commit ids only.
                old_id_to_new.insert(tree.id(), tree_id);
                let tree = repo.find_tree(tree_id)
                    .expect("Couldn't retrieve the tree we just created");
                // Re-parent the commit onto its already-rewritten parents.
                let parents = {
                    let mut p: Vec<Commit> = Vec::new();
                    for parent_id in commit.parent_ids() {
                        let new_parent_id = old_id_to_new[&parent_id];
                        let parent = repo.find_commit(new_parent_id)
                            .expect("Couldn't find parent commit by its id");
                        p.push(parent);
                    }
                    p
                };
                // repo.commit() wants &[&Commit], so collect references.
                let mut parents_refs: Vec<&Commit> = Vec::new();
                for i in 0..parents.len() {
                    parents_refs.push(&parents[i]);
                }
                // update_ref is None: the rewritten history isn't attached
                // to any reference yet.
                let new_commit_id = repo.commit(None,
                                               &commit.author(),
                                               &commit.committer(),
                                               &commit.message().expect("Couldn't retrieve commit's message"),
                                               &tree,
                                               &parents_refs[..])
                    .expect("Failed to commit");
                old_id_to_new.insert(oid, new_commit_id);
            }
            Err(e) => eprintln!("Error walking the submodule's history: {:?}", e),
        }
    }
}
/// Scans the repo's history for gitlink entries that point at submodule
/// commits absent from `old_id_to_new` (i.e. not reachable from the
/// submodule's HEAD). Reports any such ids to stderr.
///
/// Returns Some(true) when dangling references were found, None otherwise.
/// NOTE(review): the Option<bool> never carries `false` — a plain bool
/// would express this more directly; kept for the existing caller.
fn find_dangling_references_to_submodule<'repo>(repo: &'repo Repository,
                                                submodule_dir: &str,
                                                old_id_to_new: &HashMap<Oid, Oid>)
                                                -> Option<bool> {
    let submodule_path = std::path::Path::new(submodule_dir);
    // Every key of the map is a commit (or tree) id the first pass visited.
    let known_submodule_commits: std::collections::HashSet<&Oid> = old_id_to_new.keys().collect();
    let mut dangling_references = std::collections::HashSet::new();
    let revwalk = get_repo_revwalk(&repo);
    for maybe_oid in revwalk {
        match maybe_oid {
            Ok(oid) => {
                let commit = repo.find_commit(oid)
                    .expect(&format!("Couldn't get a commit with ID {}", oid));
                let tree = commit.tree()
                    .expect(&format!("Couldn't obtain the tree of a commit with ID {}", oid));
                let submodule_subdir = match tree.get_path(submodule_path) {
                    Ok(tree) => tree,
                    Err(e) => {
                        if e.code() == git2::ErrorCode::NotFound &&
                           e.class() == git2::ErrorClass::Tree {
                            // It's okay. The tree lacks the subtree corresponding to the
                            // submodule. In other words, the commit doesn't include the submodule.
                            // That's totally fine. Let's move on.
                            continue;
                        } else {
                            // Unexpected error; let's report it and abort the program
                            panic!("Error getting submodule's subdir from the tree: {:?}", e);
                        };
                    }
                };
                // **INVARIANT**: if we got this far, current commit contains a submodule and
                // should be rewritten
                let submodule_commit_id = submodule_subdir.id();
                if !known_submodule_commits.contains(&submodule_commit_id) {
                    dangling_references.insert(submodule_commit_id);
                }
            }
            Err(e) => eprintln!("Error walking the submodule's history: {:?}", e),
        }
    }
    if dangling_references.is_empty() {
        None
    } else {
        // TODO (#16): provide hints as to what options user has to resolve this
        eprintln!("The repository references the following submodule commits, but they couldn't \
                   be found in the submodule's history:");
        for id in dangling_references {
            eprintln!("{}", id);
        }
        Some(true)
    }
}
/// Builds a RevWalk over the repository, oldest-first (reverse topological
/// order), seeded with the commit HEAD currently points at.
fn get_repo_revwalk<'repo>(repo: &'repo Repository) -> Revwalk<'repo> {
    let mut walker = repo.revwalk().expect("Couldn't obtain RevWalk object for the repo");
    walker.set_sorting(git2::SORT_REVERSE | git2::SORT_TOPOLOGICAL);
    // TODO (#6): push all branches and tags, not just HEAD
    let head_id = repo.head()
        .expect("Couldn't obtain repo's HEAD")
        .target()
        .expect("Couldn't resolve repo's HEAD to a commit ID");
    walker.push(head_id).expect("Couldn't add repo's HEAD to RevWalk list");
    walker
}
/// Rewrites the main repository's history (walked by `revwalk`, oldest first)
/// so that each commit referencing the submodule as a gitlink under
/// `submodule_dir` instead contains the submodule's tree inlined as a plain
/// subdirectory. Commits that updated the submodule also gain an extra parent
/// pointing into the rewritten submodule history.
///
/// `old_id_to_new` must already map old submodule commit IDs to their
/// rewritten counterparts; rewritten main-repo commits are added to it as we
/// go (commits without the submodule map to themselves).
fn rewrite_repo_history(repo: &Repository,
                        revwalk: &mut Revwalk,
                        old_id_to_new: &mut HashMap<Oid, Oid>,
                        submodule_dir: &str) {
    let submodule_path = std::path::Path::new(submodule_dir);
    for maybe_oid in revwalk {
        match maybe_oid {
            Ok(oid) => {
                let commit = repo.find_commit(oid)
                    .expect(&format!("Couldn't get a commit with ID {}", oid));
                let tree = commit.tree()
                    .expect(&format!("Couldn't obtain the tree of a commit with ID {}", oid));
                let submodule_subdir = match tree.get_path(submodule_path) {
                    Ok(tree) => tree,
                    Err(e) => {
                        if e.code() == git2::ErrorCode::NotFound &&
                           e.class() == git2::ErrorClass::Tree {
                            // It's okay. The tree lacks the subtree corresponding to the
                            // submodule. In other words, the commit doesn't include the submodule.
                            // That's totally fine. Let's map it into itself and move on.
                            old_id_to_new.insert(oid, oid);
                            continue;
                        } else {
                            // Unexpected error; let's report it and abort the program
                            panic!("Error getting submodule's subdir from the tree: {:?}", e);
                        };
                    }
                };
                // **INVARIANT**: if we got this far, current commit contains a submodule and
                // should be rewritten
                let submodule_commit_id = submodule_subdir.id();
                // Look up the rewritten submodule commit and grab the subtree (at the
                // same path) that will replace the gitlink entry.
                let new_submodule_commit_id = old_id_to_new[&submodule_commit_id];
                let submodule_commit = repo.find_commit(new_submodule_commit_id)
                    .expect("Couldn't obtain submodule's commit");
                let subtree_id = submodule_commit.tree()
                    .and_then(|t| t.get_path(submodule_path))
                    .and_then(|te| Ok(te.id()))
                    .expect("Couldn't obtain submodule's subtree ID");
                // Copy the commit's tree, swapping the gitlink for a real subdirectory.
                let mut treebuilder = repo.treebuilder(Some(&tree))
                    .expect("Couldn't create TreeBuilder");
                treebuilder.remove(submodule_path)
                    .expect("Couldn't remove submodule path from TreeBuilder");
                // 0o040000 is git's filemode for a tree (directory) entry.
                treebuilder.insert(submodule_path, subtree_id, 0o040000)
                    .expect("Couldn't add submodule as a subdir to TreeBuilder");
                let new_tree_id = treebuilder.write()
                    .expect("Couldn't write TreeBuilder into a Tree");
                let new_tree = repo.find_tree(new_tree_id)
                    .expect("Couldn't read back the Tree we just wrote");
                // In commits that used to update the submodule, add a parent pointing to
                // appropriate commit in new submodule history
                let mut parent_subtree_ids = std::collections::HashSet::new();
                for parent in commit.parents() {
                    let parent_tree = parent.tree().expect("Couldn't obtain parent's tree");
                    let parent_subdir_tree_id = parent_tree.get_path(submodule_path)
                        .and_then(|x| Ok(x.id()));
                    match parent_subdir_tree_id {
                        Ok(id) => {
                            parent_subtree_ids.insert(id);
                            ()
                        }
                        Err(e) => {
                            if e.code() == git2::ErrorCode::NotFound &&
                               e.class() == git2::ErrorClass::Tree {
                                // Parent doesn't contain the submodule at all; nothing to record.
                                continue;
                            } else {
                                panic!("Error getting submodule's subdir from the tree: {:?}", e);
                            };
                        }
                    }
                }
                // Here's a few pictures to help you understand how we figure out if current commit
                // updated the submodule. If we draw a DAG and name submodule states, the following
                // situations will mean that the submodule wasn't updated:
                //
                // o--o--o--A--
                //          `,-A
                // o--o--o--B-
                //
                // or
                //
                // o--o--o--A--
                //         `,-B
                // o--o--o--B-
                //
                // And in the following graphs the submodule was updated:
                //
                // o--o--o--A--
                //          `,-C
                // o--o--o--B-
                //
                // or
                //
                // o--o--o--o--A--B
                //
                // Put into words, the rule will be "the submodule state in current commit is
                // different from states in all its parents". Or, more formally, the current state
                // doesn't belong to the set of states in parents.
                let submodule_updated: bool = !parent_subtree_ids.contains(&submodule_commit_id);
                // Rewrite the parents if the submodule was updated
                let parents = {
                    let mut p: Vec<Commit> = Vec::new();
                    for parent_id in commit.parent_ids() {
                        // Parents were walked earlier (topological order), so they're mapped.
                        let actual_parent_id = old_id_to_new[&parent_id];
                        let parent = repo.find_commit(actual_parent_id)
                            .expect("Couldn't find parent commit by its id");
                        p.push(parent);
                    }
                    if submodule_updated {
                        p.push(submodule_commit);
                    }
                    p
                };
                // `Repository::commit` wants `&[&Commit]`, so build a parallel Vec of borrows.
                let mut parents_refs: Vec<&Commit> = Vec::new();
                for i in 0..parents.len() {
                    parents_refs.push(&parents[i]);
                }
                // `None` ref name: don't move any branch; the caller resets HEAD afterwards.
                let new_commit_id = repo.commit(None,
                                                &commit.author(),
                                                &commit.committer(),
                                                &commit.message().expect("Couldn't retrieve commit's message"),
                                                &new_tree,
                                                &parents_refs[..])
                    .expect("Failed to commit");
                old_id_to_new.insert(oid, new_commit_id);
            }
            Err(e) => eprintln!("Error walking the submodule's history: {:?}", e),
        }
    }
}
/// Force-resets the work tree and HEAD to the rewritten counterpart of the
/// commit HEAD pointed at before the rewrite.
fn checkout_rewritten_history(repo: &Repository, old_id_to_new: &HashMap<Oid, Oid>) {
    let head_id = repo.head()
        .expect("Couldn't obtain repo's HEAD")
        .target()
        .expect("Couldn't resolve repo's HEAD to a commit ID");
    let object = repo.find_object(old_id_to_new[&head_id], None)
        .expect("Couldn't look up an object at which HEAD points");
    let mut builder = git2::build::CheckoutBuilder::new();
    builder.force();
    repo.reset(&object, git2::ResetType::Hard, Some(&mut builder))
        .expect("Couldn't run force-reset");
}
|
//! Implementation of a simple uTP client and server.
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate getopts;
extern crate daemonize;
extern crate udt;
extern crate crypto;
extern crate byteorder;
extern crate rand;
extern crate sodiumoxide;
extern crate rustc_serialize;
use daemonize::{Daemonize};
use std::process;
use std::process::Command;
use std::thread;
use std::net::{SocketAddr, SocketAddrV4, Ipv4Addr};
use std::net;
use std::str;
use std::env;
use std::fs::File;
use std::str::FromStr;
use std::path::Path;
use std::ffi::OsStr;
use std::io;
use std::io::{Cursor, Error, Seek, SeekFrom, ErrorKind, stdin, stdout, stderr, Read, Write};
use getopts::Options;
use crypto::{blake2b};
use udt::*;
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian};
use sodiumoxide::crypto::secretbox;
use sodiumoxide::crypto::secretbox::xsalsa20poly1305::Key;
use rustc_serialize::hex::{FromHex, ToHex};
/// Prints the command-line help text for the program.
fn print_usage(program: &str, opts: Options) {
    let summary = format!("Usage: {} [options] REMOTE-LOCATION", program);
    print!("{}", opts.usage(&summary));
}
/// Streams `filename` over `stream`: first a little-endian u64 filesize
/// header, then the contents in <=1300-byte messages, then a zero-length
/// message as EOF marker. Closes the socket before returning.
fn send_file(stream: UdtSocket, filename: &str) -> Result<(), Error> {
    let mut f = File::open(filename).unwrap();
    // Count the file's bytes to build the size header; we rewind afterwards.
    let mut filesize = 0u64;
    let mut buf = vec![0; 1024 * 1024];
    loop {
        match try!(f.read(&mut buf)) {
            0 => { break },
            read => { filesize += read as u64 },
        }
    }
    let mut wtr = vec![];
    wtr.write_u64::<LittleEndian>(filesize).unwrap();
    match stream.sendmsg(&wtr[..]) {
        Ok(0) => {
            return Err(Error::new(ErrorKind::WriteZero, "failed to write filesize header before timeout"))
        },
        Err(e) => {
            return Err(Error::new(ErrorKind::Other, format!("{:?}", e)))
        }
        _ => {}
    }
    let mut payload = vec![0; 1300];
    // Rewind: the size-counting pass above consumed the file. This Result
    // used to be dropped silently.
    f.seek(SeekFrom::Start(0)).unwrap();
    loop {
        match f.read(&mut payload) {
            Ok(0) => {
                // A zero-length message signals EOF to the receiver.
                stream.sendmsg(&[0u8; 0]).unwrap();
                break;
            }
            Ok(read) => {
                if let Err(e) = stream.sendmsg(&payload[0..read]) {
                    // Previously this arm closed the stream and *kept looping*
                    // against the closed socket; fail loudly instead.
                    stream.close().expect("Error closing stream");
                    panic!("{:?}", e);
                }
            },
            Err(e) => {
                stream.close().expect("Error closing stream");
                panic!("{:?}", e);
            }
        }
    }
    stream.close().expect("Error closing stream.");
    Ok(())
}
/// Receives `filesize` bytes over `sock` in <=1300-byte messages and writes
/// them to `filename`, printing a progress line. Closes the socket before
/// returning.
fn recv_file(sock: UdtSocket, filesize: u64, filename: &str) -> Result<(), Error> {
    let mut f = File::create(filename).unwrap();
    let mut total = 0u64;
    loop {
        let buf = try!(sock.recvmsg(1300).map_err(|e| Error::new(ErrorKind::Other, format!("{:?}", e))));
        total += buf.len() as u64;
        // write_all's Result was previously dropped, which could lose data silently.
        f.write_all(&buf[..]).unwrap();
        print!("\rreceived {}kb / {}kb ({:.1}%)", total/1024, filesize/1024, (total as f64/1024f64) / (filesize as f64/1024f64) * 100f64);
        if total >= filesize {
            println!("\nEOF");
            break;
        }
    }
    // Best-effort close; the transfer is already complete here.
    // NOTE(review): the sender's trailing empty EOF message is never read —
    // confirm this is intentional before relying on graceful shutdown.
    let _ = sock.close();
    Ok(())
}
/// Entry point: parses CLI flags and runs either the server (send) or the
/// client (receive) side of the transfer.
fn main() {
    // This example may run in either server or client mode.
    // Using an enum tends to make the code cleaner and easier to read.
    enum Mode {Server, Client}
    // Start logging
    env_logger::init().expect("Error starting logger");
    // Fetch arguments
    let args: Vec<String> = env::args().collect();
    let program = args[0].clone();
    let mut opts = Options::new();
    // opts.optopt("o", "output", "set output file name", "NAME");
    opts.optflag("s", "server", "server mode");
    opts.optflag("p", "port-range", "server listening port range");
    opts.optflag("h", "help", "print this help menu");
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => { m }
        // `panic!` with a non-literal first argument is rejected by modern rustc.
        Err(f) => { panic!("{}", f) }
    };
    if matches.opt_present("h") {
        print_usage(&program, opts);
        return;
    }
    let input = if !matches.free.is_empty() {
        matches.free[0].clone()
    } else {
        print_usage(&program, opts);
        return;
    };
    // Parse the mode argument
    let mode: Mode = if matches.opt_present("s") { Mode::Server } else { Mode::Client };
    match mode {
        Mode::Server => {
            // Print the session key to stdout so the client (which invoked us
            // over ssh) can read it; everything after this goes to stderr.
            let Key(keybytes) = secretbox::gen_key();
            println!("{}", keybytes.to_hex());
            let daemonize = Daemonize::new();
            match daemonize.start() {
                Ok(_) => { let _ = writeln!(&mut stderr(), "daemonized"); }
                Err(_) => { let _ = writeln!(&mut stderr(), "RWRWARWARARRR"); }
            }
            udt::init();
            let sock = UdtSocket::new(SocketFamily::AFInet, SocketType::Datagram).unwrap();
            // Results of setsockopt were previously ignored.
            sock.setsockopt(UdtOpts::UDP_RCVBUF, 5590000i32).unwrap();
            sock.setsockopt(UdtOpts::UDP_SNDBUF, 5590000i32).unwrap();
            sock.bind(SocketAddr::V4(SocketAddrV4::from_str("0.0.0.0:55000").unwrap())).unwrap();
            sock.listen(1).unwrap();
            let (stream, _) = sock.accept().unwrap();
            // One version byte opens the conversation.
            if let Ok(version) = stream.recvmsg(1) {
                if version[0] == 0x00 {
                    send_file(stream, &input).unwrap();
                } else {
                    panic!("Unrecognized version.");
                }
            } else {
                panic!("Failed to receive version byte from client.");
            }
        }
        Mode::Client => {
            // Ask the remote host (over ssh) to start a server for `path`;
            // the server prints the session key on its stdout.
            let sections: Vec<&str> = input.split(":").collect();
            let addr: String = sections[0].to_owned();
            let path: String = sections[1].to_owned();
            let cmd = format!("~/bin/shoop -s {}", path);
            println!("addr: {}, path: {}, cmd: {}", addr, path, cmd);
            let output = Command::new("ssh")
                .arg(addr.to_owned())
                .arg(cmd)
                .output()
                .unwrap_or_else(|e| {
                    panic!("failed to execute process: {}", e);
                });
            let key = String::from_utf8_lossy(&output.stdout).trim().to_owned();
            println!("got key {}", key);
            // TODO: connect to the server and receive the file — the UDT
            // client path is not implemented yet in this version.
        }
    }
}
Warnings are like mosquitoes — kill them all (fix every compiler warning).
//! Implementation of a simple uTP client and server.
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate getopts;
extern crate daemonize;
extern crate udt;
extern crate crypto;
extern crate byteorder;
extern crate rand;
extern crate sodiumoxide;
extern crate rustc_serialize;
use daemonize::{Daemonize};
use std::process::Command;
use std::net::{UdpSocket, SocketAddr, SocketAddrV4, Ipv4Addr};
use std::str;
use std::env;
use std::fs::File;
use std::str::FromStr;
use std::path::Path;
use std::ffi::OsStr;
use std::io::{Cursor, Error, Seek, SeekFrom, ErrorKind, stderr, Read, Write};
use getopts::Options;
use udt::*;
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian};
use sodiumoxide::crypto::secretbox;
use sodiumoxide::crypto::secretbox::xsalsa20poly1305::Key;
use rustc_serialize::hex::{FromHex, ToHex};
/// Prints the command-line help text for the program.
fn print_usage(program: &str, opts: Options) {
    let summary = format!("Usage: {} [options] REMOTE-LOCATION", program);
    print!("{}", opts.usage(&summary));
}
/// Streams `filename` over `stream`: first a little-endian u64 filesize
/// header, then the contents in <=1300-byte messages, then a zero-length
/// message as an EOF marker. Closes the socket before returning.
fn send_file(stream: UdtSocket, filename: &str) -> Result<(), Error> {
    let mut f = File::open(filename).unwrap();
    // Ask the OS for the size instead of reading the whole file once just to
    // count its bytes (the old approach read every file twice).
    let filesize = try!(f.metadata()).len();
    let mut wtr = vec![];
    wtr.write_u64::<LittleEndian>(filesize).unwrap();
    match stream.sendmsg(&wtr[..]) {
        Ok(0) => {
            return Err(Error::new(ErrorKind::WriteZero, "failed to write filesize header before timeout"))
        },
        Err(e) => {
            return Err(Error::new(ErrorKind::Other, format!("{:?}", e)))
        }
        _ => {}
    }
    let mut payload = vec![0; 1300];
    loop {
        match f.read(&mut payload) {
            Ok(0) => {
                // A zero-length message tells the receiver we're done; no heap
                // allocation needed for an empty buffer.
                stream.sendmsg(&[0u8; 0]).unwrap();
                break;
            }
            Ok(read) => {
                if let Err(e) = stream.sendmsg(&payload[0..read]) {
                    stream.close().expect("Error closing stream");
                    panic!("{:?}", e);
                }
            },
            Err(e) => {
                stream.close().expect("Error closing stream");
                panic!("{:?}", e);
            }
        }
    }
    stream.close().expect("Error closing stream.");
    Ok(())
}
/// Finds the first UDP port in `[start, end]` that can be bound on all
/// interfaces, or `Err(())` if every port in the range is taken.
fn get_open_port(start: u16, end: u16) -> Result<u16, ()> {
    assert!(end >= start);
    for port in start..=end {
        // The probe socket is dropped (and the port released) immediately.
        if UdpSocket::bind(&format!("0.0.0.0:{}", port)[..]).is_ok() {
            return Ok(port);
        }
    }
    Err(())
}
fn recv_file(sock: UdtSocket, filesize: u64, filename: &str) -> Result<(), Error> {
let mut f = File::create(filename).unwrap();
let mut total = 0u64;
loop {
let buf = try!(sock.recvmsg(1300).map_err(|e| Error::new(ErrorKind::Other, format!("{:?}", e))));
total += buf.len() as u64;
f.write_all(&buf[..]).unwrap();
print!("\rreceived {}kb / {}kb ({:.1}%)", total/1024, filesize/1024, (total as f64/1024f64) / (filesize as f64/1024f64) * 100f64);
if total >= filesize {
println!("\nEOF");
break;
}
}
let _ = sock.close();
Ok(())
}
fn main() {
// This example may run in either server or client mode.
// Using an enum tends to make the code cleaner and easier to read.
enum Mode {Server, Client}
// Start logging
env_logger::init().expect("Error starting logger");
// Fetch arguments
let args: Vec<String> = env::args().collect();
let program = args[0].clone();
let mut opts = Options::new();
// opts.optopt("o", "output", "set output file name", "NAME");
opts.optflag("s", "server", "server mode");
opts.optflag("p", "port-range", "server listening port range");
opts.optflag("h", "help", "print this help menu");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!(f.to_string()) }
};
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let input = if !matches.free.is_empty() {
matches.free[0].clone()
} else {
print_usage(&program, opts);
return;
};
// Parse the mode argument
let mode: Mode = match matches.opt_present("s") {
true => Mode::Server,
false => Mode::Client
};
match mode {
Mode::Server => {
let ip = env::var("SSH_CONNECTION").unwrap_or(String::from("0.0.0.0"));
let port = get_open_port(55000, 55100).unwrap();
let key = secretbox::gen_key();
match key {
Key(keybytes) => { println!("shoop 0 {} {} {}", ip, port, keybytes.to_hex()) }
}
let daemonize = Daemonize::new();
match daemonize.start() {
Ok(_) => { let _ = writeln!(&mut stderr(), "daemonized"); }
Err(_) => { let _ = writeln!(&mut stderr(), "RWRWARWARARRR"); }
}
udt::init();
let sock = UdtSocket::new(SocketFamily::AFInet, SocketType::Datagram).unwrap();
sock.setsockopt(UdtOpts::UDP_RCVBUF, 5590000i32).unwrap();
sock.setsockopt(UdtOpts::UDP_SNDBUF, 5590000i32).unwrap();
sock.bind(SocketAddr::V4(SocketAddrV4::from_str(&format!("{}:{}", ip, port)[..]).unwrap())).unwrap();
sock.listen(1).unwrap();
let (stream, _) = sock.accept().unwrap();
// dbg(format!("Received new connection from peer {:?}", peer));
if let Ok(version) = stream.recvmsg(1) {
// dbg(format!("frand using protocol version {}.", version[0]));
if version[0] == 0x00 {
send_file(stream, &input).unwrap();
} else {
panic!("Unrecognized version.");
}
} else {
panic!("Failed to receive version byte from client.");
}
}
Mode::Client => {
let sections: Vec<&str> = input.split(":").collect();
let addr: String = sections[0].to_owned();
let path: String = sections[1].to_owned();
let cmd = format!("~/bin/shoop -s {}", path);
println!("addr: {}, path: {}, cmd: {}", addr, path, cmd);
let output = Command::new("ssh")
.arg(addr.to_owned())
.arg(cmd)
.output()
.unwrap_or_else(|e| {
panic!("failed to execute process: {}", e);
});
let keyhex = String::from_utf8_lossy(&output.stdout).trim().to_owned();
let mut keybytes = [0u8; 32];
keybytes.copy_from_slice(&keyhex.from_hex().unwrap()[..]);
let key = Key(keybytes);
println!("got key {}", keyhex);
udt::init();
let sock = UdtSocket::new(SocketFamily::AFInet, SocketType::Datagram).unwrap();
sock.setsockopt(UdtOpts::UDP_RCVBUF, 5590000i32).unwrap();
sock.setsockopt(UdtOpts::UDP_SNDBUF, 5590000i32).unwrap();
let addr: SocketAddr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::from_str("144.76.81.4").unwrap(), 55000));
// let addr: SocketAddr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::from_str(&addr).unwrap(), 55000));
match sock.connect(addr) {
Ok(()) => {
println!("connected!");
},
Err(e) => {
panic!("errrrrrrr {:?}", e);
}
}
sock.sendmsg(&[0u8; 1]).unwrap();
println!("checking if server is frand");
match sock.recvmsg(8) {
Ok(msg) => {
if msg.len() == 0 {
panic!("failed to get filesize from server, probable timeout.");
}
let mut rdr = Cursor::new(msg);
let filesize = rdr.read_u64::<LittleEndian>().unwrap();
println!("got reported filesize of {}", filesize);
let filename = Path::new(&path).file_name().unwrap_or(OsStr::new("outfile")).to_str().unwrap_or("outfile");
println!("writing to {}", filename);
recv_file(sock, filesize, filename).unwrap();
}
Err(e) => {
panic!("{:?}", e);
}
}
}
}
}
|
#![feature(core)] //this should be removed before out of beta
extern crate regex;
use regex::Regex;
/// A single Scheme atom: the value types the reader can produce.
// The historical non-camel-case name is kept for compatibility with the
// rest of the file; silence the lint instead of renaming.
#[allow(non_camel_case_types)]
#[derive(Debug, Clone, PartialEq)]
enum schmAtom {
    Int(i32),
    Float(f32),
    String(String),
    Symb(String)
}
fn main() {
    // Intentionally empty: this file currently only exposes the tokenizer and
    // atom reader, which are exercised by the unit tests below.
}
/// Splits the input string into parentheses and whitespace-separated tokens.
///
/// Pads every "(" / ")" with spaces and then splits on single spaces, so the
/// result may contain empty strings for adjacent parens (matching the
/// original regex-based behaviour). The patterns are literals, so plain
/// `str::replace` does the job without a regex engine.
fn tokenize(input: String) -> Vec<String> {
    let padded = input.replace("(", "( ").replace(")", " )");
    // `to_string` forces owned copies out of the borrowing split iterator.
    padded.split(" ").map(|tok| tok.to_string()).collect()
}
fn read_from_tokens(tokens : Vec<schmAtom>){
    // Reads expressions from tokens.
    // NOTE(review): unimplemented stub — the signature takes already-converted
    // atoms; presumably it will build a nested expression tree. TODO confirm.
}
/// Classifies a single token: integer, float, quoted string, or symbol.
///
/// An empty token is classified as a symbol instead of panicking on an
/// out-of-bounds byte access (the previous `as_bytes()[0]` behaviour).
fn atom(token: String) -> schmAtom {
    // Try numeric interpretations first, narrowest type first; parsing is
    // attempted lazily instead of eagerly computing both results.
    if let Ok(n) = token.parse::<i32>() {
        return schmAtom::Int(n);
    }
    if let Ok(f) = token.parse::<f32>() {
        return schmAtom::Float(f);
    }
    // Anything starting with a double quote is a string literal; the rest
    // are symbols.
    if token.as_bytes().first() == Some(&b'"') {
        schmAtom::String(token)
    } else {
        schmAtom::Symb(token)
    }
}
// Checks that `atom` classifies string/float/int/symbol tokens correctly.
#[cfg(test)]
#[test]
fn test_atom(){
    let test_string = "\"ab\"".to_string();
    let test_float = "1.0".to_string();
    let test_int = "1".to_string();
    let test_symb = "hi".to_string();
    let string_atom = atom(test_string);
    let float_atom = atom(test_float);
    let int_atom = atom(test_int);
    let symb_atom = atom(test_symb);
    let res = match (string_atom, float_atom, int_atom, symb_atom) {
        (schmAtom::String(x), schmAtom::Float(y), schmAtom::Int(z),
         schmAtom::Symb(l),)
            => { (x,y,z,l) == ("\"ab\"".to_string(),1.0,1,"hi".to_string())},
        _ => false
    };
    // The old `assert!(res, true)` passed `true` as the panic *message*;
    // modern rustc rejects non-literal panic messages. Plain assert! is the intent.
    assert!(res)
}
// Checks that `tokenize` pads parentheses and splits on spaces.
#[test]
fn test_tokenize() {
    let input = String::from("(a b c)");
    let expected = vec!["(", "a", "b", "c", ")"];
    assert_eq!(tokenize(input), expected);
}
Initial layout of read_from_tokens; it does not work yet.
#![feature(core)] //this should be removed before out of beta
extern crate regex;
use regex::Regex;
/// A single Scheme datum: atomic values plus `Vec` for nested lists.
// The historical non-camel-case name is kept for compatibility with the
// rest of the file; silence the lint instead of renaming.
#[allow(non_camel_case_types)]
#[derive(Debug, Clone, PartialEq)]
enum schmAtom {
    Int(i32),
    Float(f32),
    String(String),
    Symb(String),
    Vec(Vec<schmAtom>)
}
fn main() {
    // Intentionally empty: this file currently only exposes the reader
    // functions, which are exercised by the unit tests below.
}
/// Splits the input string into parentheses and whitespace-separated tokens.
///
/// Pads every "(" / ")" with spaces and then splits on single spaces, so the
/// result may contain empty strings for adjacent parens (matching the
/// original regex-based behaviour). The patterns are literals, so plain
/// `str::replace` does the job without a regex engine.
fn tokenize(input: String) -> Vec<String> {
    let padded = input.replace("(", "( ").replace(")", " )");
    // `to_string` forces owned copies out of the borrowing split iterator.
    padded.split(" ").map(|tok| tok.to_string()).collect()
}
/// Reads one expression from `tokens` (as produced by `tokenize`).
///
/// The original version moved `tokens` into the recursive call inside a loop
/// (which cannot compile), popped tokens from the back while peeking at the
/// front, and used the long-removed `as_slice` API. This version consumes
/// tokens front-to-back via a private helper that borrows a reversed stack.
fn read_from_tokens(mut tokens: Vec<String>) -> schmAtom {
    // Reverse once so "next token" is an O(1) pop from the back.
    tokens.reverse();
    read_expr(&mut tokens)
}

/// Pops and parses the next expression off the reversed token stack.
fn read_expr(tokens: &mut Vec<String>) -> schmAtom {
    let token = tokens.pop().expect("unexpected EOF while reading");
    if token == "(" {
        let mut list = vec!();
        loop {
            // Peek at the upcoming token; ')' closes the current list.
            let at_close = match tokens.last() {
                Some(t) => t == ")",
                None => panic!("unexpected EOF: missing ')'"),
            };
            if at_close {
                break;
            }
            let child = read_expr(tokens);
            list.push(child);
        }
        tokens.pop(); // discard the ')'
        schmAtom::Vec(list)
    } else {
        atom(token)
    }
}
/// Classifies a single token: integer, float, quoted string, or symbol.
///
/// An empty token is classified as a symbol instead of panicking on an
/// out-of-bounds byte access (the previous `as_bytes()[0]` behaviour).
fn atom(token: String) -> schmAtom {
    // Try numeric interpretations first, narrowest type first; parsing is
    // attempted lazily instead of eagerly computing both results.
    if let Ok(n) = token.parse::<i32>() {
        return schmAtom::Int(n);
    }
    if let Ok(f) = token.parse::<f32>() {
        return schmAtom::Float(f);
    }
    // Anything starting with a double quote is a string literal; the rest
    // are symbols.
    if token.as_bytes().first() == Some(&b'"') {
        schmAtom::String(token)
    } else {
        schmAtom::Symb(token)
    }
}
// Checks that `atom` classifies string/float/int/symbol tokens correctly.
#[cfg(test)]
#[test]
fn test_atom(){
    let test_string = "\"ab\"".to_string();
    let test_float = "1.0".to_string();
    let test_int = "1".to_string();
    let test_symb = "hi".to_string();
    let string_atom = atom(test_string);
    let float_atom = atom(test_float);
    let int_atom = atom(test_int);
    let symb_atom = atom(test_symb);
    let res = match (string_atom, float_atom, int_atom, symb_atom) {
        (schmAtom::String(x), schmAtom::Float(y), schmAtom::Int(z),
         schmAtom::Symb(l),)
            => { (x,y,z,l) == ("\"ab\"".to_string(),1.0,1,"hi".to_string())},
        _ => false
    };
    // The old `assert!(res, true)` passed `true` as the panic *message*;
    // modern rustc rejects non-literal panic messages. Plain assert! is the intent.
    assert!(res)
}
// Checks that `tokenize` pads parentheses and splits on spaces.
#[test]
fn test_tokenize() {
    let input = String::from("(a b c)");
    let expected = vec!["(", "a", "b", "c", ")"];
    assert_eq!(tokenize(input), expected);
}
|
extern crate ansi_term;
extern crate argparse;
extern crate hyper;
extern crate libsnatch;
use ansi_term::Colour::{Green, Yellow, Red};
use argparse::{ArgumentParser, Store, StoreTrue};
use hyper::client::Client;
use libsnatch::{Bytes, Chunks};
use libsnatch::client::GetResponse;
use libsnatch::contentlength::GetContentLength;
use libsnatch::download::download_chunks;
use libsnatch::http_version::ValidateHttpVersion;
use libsnatch::write::write_file;
use std::fs::File;
use std::io;
use std::sync::{Arc, Mutex};
use std::path::Path;
use std::process::exit;
/// Entry point for the Snatch CLI: parses arguments, probes the remote
/// server, downloads the content in `threads` parallel chunks, and writes
/// them to disk.
fn main() {
    // Fallback output filename when the URL yields no usable last segment.
    const DEFAULT_FILENAME: &'static str = "index.html";
    let mut file = String::from("");
    let mut threads: usize = 4;
    let mut url = String::from("");
    let mut verbose = false;
    {
        let mut argparse = ArgumentParser::new();
        argparse.set_description("Snatch, a simple, fast and interruptable download accelerator, \
                                  written in Rust.");
        argparse.refer(&mut file)
            .add_option(&["-f", "--file"],
                        Store,
                        "The local file to save the remote content file");
        argparse.refer(&mut threads)
            .add_option(&["-t", "--threads"],
                        Store,
                        "Number of threads available to download");
        argparse.refer(&mut url)
            .add_option(&["-u", "--url"], Store, "Remote content URL to download")
            .required();
        argparse.refer(&mut verbose)
            .add_option(&["-v", "--verbose"], StoreTrue, "Verbose mode");
        argparse.parse_args_or_exit();
    }
    let hyper_client = Client::new();
    // Get the first response from the server
    let client_response = hyper_client.get_head_response(&url).unwrap();
    print!("# Waiting a response from the remote server... ");
    if !client_response.version.greater_than_http_11() {
        println!("{}",
                 Yellow.bold()
                     .paint("OK (HTTP version <= 1.0 detected)"));
    } else {
        println!("{}", Green.bold().paint("OK !"));
    }
    // If no filename has been given, infer it from the last URL path segment.
    if file.is_empty() {
        file = match url.split('/').last() {
            Some(filename) => String::from(filename),
            None => String::from(DEFAULT_FILENAME),
        }
    }
    let local_path = Path::new(&file);
    if local_path.exists() {
        if local_path.is_dir() {
            // `panic!` with a non-literal first argument is rejected by modern
            // compilers; format the ANSI string explicitly.
            panic!("{}",
                   Red.bold()
                       .paint("[ERROR] The local path to store the remote content is already exists, \
                               and is a directory!"));
        }
        println!("{}",
                 Red.bold()
                     .paint("[WARNING] The path to store the file already exists! Do you want \
                             to override it? [y/N]"));
        {
            let mut user_input = String::new();
            // `.ok().expect(...)` discarded the error detail; expect on the
            // Result keeps it in the panic message.
            io::stdin()
                .read_line(&mut user_input)
                .expect("[ERROR] Couldn't read line!");
            user_input = String::from(user_input.trim());
            if !(user_input == "y" || user_input == "Y") {
                exit(0);
            }
        }
    }
    let remote_content_length = match client_response.headers.get_content_length() {
        Some(remote_content_length) => remote_content_length,
        None => {
            println!("{}",
                     Red.bold()
                         .paint("[ERROR] Cannot get the content length of the remote content, \
                                 from the server."));
            exit(1);
        }
    };
    println!("# Remote content length: {:?} MB",
             (remote_content_length / 1000000) as Bytes);
    // One chunk buffer per download thread.
    let mut core_chunks = Chunks::with_capacity(threads);
    for _ in 0..threads {
        core_chunks.push(Vec::new());
    }
    let mut shared_chunks = Arc::new(Mutex::new(core_chunks));
    download_chunks(remote_content_length,
                    &mut shared_chunks,
                    threads as u64,
                    &url);
    let mut local_file = File::create(local_path).expect("[ERROR] Cannot create a file !");
    match write_file(&mut local_file, &shared_chunks) {
        Ok(()) => println!("{}", Green.bold().paint("Chunks have been successfuly saved!")),
        Err(error) => println!("[ERROR] {}", error),
    }
}
Use a static str constant for the default output filename.
extern crate ansi_term;
extern crate argparse;
extern crate hyper;
extern crate libsnatch;
use ansi_term::Colour::{Green, Yellow, Red};
use argparse::{ArgumentParser, Store, StoreTrue};
use hyper::client::Client;
use libsnatch::{Bytes, Chunks};
use libsnatch::client::GetResponse;
use libsnatch::contentlength::GetContentLength;
use libsnatch::download::download_chunks;
use libsnatch::http_version::ValidateHttpVersion;
use libsnatch::write::write_file;
use std::fs::File;
use std::io;
use std::sync::{Arc, Mutex};
use std::path::Path;
use std::process::exit;
/// Fallback output filename when the URL yields no usable last path segment.
// `const` is preferred over `static` for a simple immutable value.
const DEFAULT_FILENAME: &'static str = "index.html";
/// Entry point for the Snatch CLI: parses arguments, probes the remote
/// server, downloads the content in `threads` parallel chunks, and writes
/// them to disk.
fn main() {
    let mut file = String::from("");
    let mut threads: usize = 4;
    let mut url = String::from("");
    let mut verbose = false;
    {
        let mut argparse = ArgumentParser::new();
        argparse.set_description("Snatch, a simple, fast and interruptable download accelerator, \
                                  written in Rust.");
        argparse.refer(&mut file)
            .add_option(&["-f", "--file"],
                        Store,
                        "The local file to save the remote content file");
        argparse.refer(&mut threads)
            .add_option(&["-t", "--threads"],
                        Store,
                        "Number of threads available to download");
        argparse.refer(&mut url)
            .add_option(&["-u", "--url"], Store, "Remote content URL to download")
            .required();
        argparse.refer(&mut verbose)
            .add_option(&["-v", "--verbose"], StoreTrue, "Verbose mode");
        argparse.parse_args_or_exit();
    }
    let hyper_client = Client::new();
    // Get the first response from the server
    let client_response = hyper_client.get_head_response(&url).unwrap();
    print!("# Waiting a response from the remote server... ");
    if !client_response.version.greater_than_http_11() {
        println!("{}",
                 Yellow.bold()
                     .paint("OK (HTTP version <= 1.0 detected)"));
    } else {
        println!("{}", Green.bold().paint("OK !"));
    }
    // If no filename has been given, infer it from the last URL path segment.
    if file.is_empty() {
        file = match url.split('/').last() {
            Some(filename) => String::from(filename),
            None => String::from(DEFAULT_FILENAME),
        }
    }
    let local_path = Path::new(&file);
    if local_path.exists() {
        if local_path.is_dir() {
            // `panic!` with a non-literal first argument is rejected by modern
            // compilers; format the ANSI string explicitly.
            panic!("{}",
                   Red.bold()
                       .paint("[ERROR] The local path to store the remote content is already exists, \
                               and is a directory!"));
        }
        println!("{}",
                 Red.bold()
                     .paint("[WARNING] The path to store the file already exists! Do you want \
                             to override it? [y/N]"));
        {
            let mut user_input = String::new();
            // `.ok().expect(...)` discarded the error detail; expect on the
            // Result keeps it in the panic message.
            io::stdin()
                .read_line(&mut user_input)
                .expect("[ERROR] Couldn't read line!");
            user_input = String::from(user_input.trim());
            if !(user_input == "y" || user_input == "Y") {
                exit(0);
            }
        }
    }
    let remote_content_length = match client_response.headers.get_content_length() {
        Some(remote_content_length) => remote_content_length,
        None => {
            println!("{}",
                     Red.bold()
                         .paint("[ERROR] Cannot get the content length of the remote content, \
                                 from the server."));
            exit(1);
        }
    };
    println!("# Remote content length: {:?} MB",
             (remote_content_length / 1000000) as Bytes);
    // One chunk buffer per download thread.
    let mut core_chunks = Chunks::with_capacity(threads);
    for _ in 0..threads {
        core_chunks.push(Vec::new());
    }
    let mut shared_chunks = Arc::new(Mutex::new(core_chunks));
    download_chunks(remote_content_length,
                    &mut shared_chunks,
                    threads as u64,
                    &url);
    let mut local_file = File::create(local_path).expect("[ERROR] Cannot create a file !");
    match write_file(&mut local_file, &shared_chunks) {
        Ok(()) => println!("{}", Green.bold().paint("Chunks have been successfuly saved!")),
        Err(error) => println!("[ERROR] {}", error),
    }
}
|
#![feature(box_patterns)]
use std::error::Error;
/// The data types supported by this database; more (including complex types)
/// will be added later.
#[derive(Debug, Clone, PartialEq)]
enum DataType {
    UnsignedLong,
    String
}
/// Definition of a single column in a relation: its name, declared type,
/// and whether NULL values are permitted.
#[derive(Debug)]
struct ColumnType {
    name: String,
    data_type: DataType,
    nullable: bool
}
/// Definition of a relation (row shape): an ordered list of column types.
#[derive(Debug)]
struct TupleType {
    columns: Vec<ColumnType>
}
/// Value holder for all supported data types; `Boolean` exists so comparison
/// expressions have a result type.
#[derive(Debug,Clone,PartialEq)]
enum Value {
    UnsignedLong(u64),
    String(String),
    Boolean(bool)
}
/// The tuple trait provides type-safe access to individual values within the tuple
trait Tuple {
    /// Returns the value stored at `index`, or an error if it cannot be retrieved.
    fn get_value(&self, index: usize) -> Result<Value, Box<Error>>;
}
/// Basic in-memory tuple implementation: a flat vector of values stored in
/// column order (matching the corresponding TupleType).
#[derive(Debug)]
struct SimpleTuple {
    values: Vec<Value>
}
impl Tuple for SimpleTuple {
    /// Returns a clone of the value at `index`, or an `Err` when the index is
    /// out of bounds (the old direct indexing panicked despite the Result
    /// return type).
    fn get_value(&self, index: usize) -> Result<Value, Box<Error>> {
        match self.values.get(index) {
            Some(v) => Ok(v.clone()),
            None => Err(From::from(format!("invalid column index {}", index))),
        }
    }
}
/// Binary comparison operators usable in expressions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Operator {
    Eq,
    NotEq,
    Lt,
    LtEq,
    Gt,
    GtEq,
}
/// Expression tree evaluated per tuple (see `evaluate`).
enum Expr {
    /// index into a value within the tuple
    TupleValue(usize),
    /// literal value
    Literal(Value),
    /// binary expression e.g. "age > 21"
    BinaryExpr { left: Box<Expr>, op: Operator, right: Box<Expr> },
}
/// Nodes of a (future) query plan; only `Filter` exists so far, the rest are
/// placeholders for upcoming operators.
enum PlanNode {
    // TableScan,
    // IndexScan,
    Filter(Expr),
    // Sort,
    // Project,
    // Join
}
/// Recursively evaluates `expr` against one tuple, producing a single `Value`.
///
/// `tt` (the tuple's schema) is currently unused but is threaded through for
/// future type-aware evaluation.
///
/// Fixes: sub-expression failures are propagated with `?` instead of
/// `unwrap()` (no more panics on evaluation errors), and the nightly-only
/// `box ref` patterns are gone — plain `ref` bindings plus deref coercion on
/// the recursive call do the same job on stable Rust.
fn evaluate(tuple: &Box<Tuple>, tt: &TupleType, expr: &Expr) -> Result<Value, Box<Error>> {
    match expr {
        // Binary expressions evaluate both sides first, then apply the operator.
        &Expr::BinaryExpr { ref left, ref op, ref right } => {
            let left_value = evaluate(tuple, tt, left)?;
            let right_value = evaluate(tuple, tt, right)?;
            match op {
                &Operator::Eq => {
                    match (left_value, right_value) {
                        (Value::UnsignedLong(l), Value::UnsignedLong(r)) => Ok(Value::Boolean(l == r)),
                        // Mismatched or unsupported operand types.
                        _ => Err(From::from("oops"))
                    }
                },
                // Remaining operators are not implemented yet.
                _ => Err(From::from("oops"))
            }
        },
        // A tuple-value reference delegates to the tuple itself.
        &Expr::TupleValue(index) => tuple.get_value(index),
        // Literals evaluate to a clone of themselves.
        &Expr::Literal(ref value) => Ok(value.clone()),
    }
}
/// Demonstrates building a tuple type, some sample tuples, and evaluating the
/// filter expression "id = 2" against each tuple.
///
/// Fixes: `print!` → `println!` so each result lands on its own line, and the
/// filter variable is renamed to snake_case (silences the non_snake_case
/// warning).
fn main() {
    // Describe the schema: an unsigned-long id and a string name.
    let tt = TupleType {
        columns: vec![
            ColumnType { name: String::from("id"), data_type: DataType::UnsignedLong, nullable: false },
            ColumnType { name: String::from("name"), data_type: DataType::String, nullable: false }
        ]
    };
    println!("Tuple type: {:?}", tt);
    // Two sample rows.
    let data : Vec<Box<Tuple>> = vec![
        Box::new(SimpleTuple { values: vec![ Value::UnsignedLong(1), Value::String(String::from("Alice")) ] }),
        Box::new(SimpleTuple { values: vec![ Value::UnsignedLong(2), Value::String(String::from("Bob")) ] }),
    ];
    // create simple filter expression: id = 2
    let filter_expr = Expr::BinaryExpr {
        left: Box::new(Expr::TupleValue(0)),
        op: Operator::Eq,
        right: Box::new(Expr::Literal(Value::UnsignedLong(2)))
    };
    //let plan = PlanNode::Filter(filter_expr);
    // iterate over tuples and evaluate the plan
    for tuple in &data {
        let x = evaluate(tuple, &tt, &filter_expr).unwrap();
        println!("filter expr evaluates to {:?}", x);
    }
}
add comments
#![feature(box_patterns)]
use std::error::Error;
/// The data types supported by this database. Currently just u64 and string but others
/// will be added later, including complex types
#[derive(Debug,Clone)]
enum DataType {
    UnsignedLong,
    String
}
/// Definition of a column in a relation (data set).
#[derive(Debug,Clone)]
struct ColumnMeta {
    name: String,
    data_type: DataType,
    nullable: bool
}
/// Definition of a relation (data set) consisting of one or more columns.
#[derive(Debug,Clone)]
struct TupleType {
    columns: Vec<ColumnMeta>
}
/// Value holder for all supported data types
#[derive(Debug,Clone)]
enum Value {
    UnsignedLong(u64),
    String(String),
    // Produced by evaluating comparison expressions; not a column type.
    Boolean(bool)
}
/// A tuple represents one row within a relation and is implemented as a trait to allow for
/// specific implementations for different data sources
trait Tuple {
    // Returns the value stored at `index` within this row.
    fn get_value(&self, index: usize) -> Result<Value, Box<Error>>;
}
/// A simple tuple implementation for testing and initial prototyping
#[derive(Debug)]
struct SimpleTuple {
    values: Vec<Value>
}
impl Tuple for SimpleTuple {
    fn get_value(&self, index: usize) -> Result<Value, Box<Error>> {
        // NOTE(review): indexing panics (rather than returning Err) when
        // `index` is out of bounds — confirm callers always pass valid indexes.
        Ok(self.values[index].clone())
    }
}
/// Comparison operators usable in a binary expression.
#[derive(Debug)]
enum Operator {
    Eq,
    NotEq,
    Lt,
    LtEq,
    Gt,
    GtEq,
}
/// An expression tree, evaluated against one tuple at a time.
#[derive(Debug)]
enum Expr {
    /// index into a value within the tuple
    TupleValue(usize),
    /// literal value
    Literal(Value),
    /// binary expression e.g. "age > 21"
    BinaryExpr { left: Box<Expr>, op: Operator, right: Box<Expr> },
}
/// A node in a (future) query plan; only filtering exists so far.
#[derive(Debug)]
enum PlanNode {
    // TableScan,
    // IndexScan,
    Filter(Expr),
    // Sort,
    // Project,
    // Join
}
/// Recursively evaluates `expr` against one tuple, producing a single `Value`.
///
/// `tt` (the tuple's schema) is currently unused but is threaded through for
/// future type-aware evaluation.
///
/// Fixes: sub-expression failures are propagated with `?` instead of
/// `unwrap()` (no more panics on evaluation errors), and the nightly-only
/// `box ref` patterns are gone — plain `ref` bindings plus deref coercion on
/// the recursive call do the same job on stable Rust.
fn evaluate(tuple: &Box<Tuple>, tt: &TupleType, expr: &Expr) -> Result<Value, Box<Error>> {
    match expr {
        // Binary expressions evaluate both sides first, then apply the operator.
        &Expr::BinaryExpr { ref left, ref op, ref right } => {
            let left_value = evaluate(tuple, tt, left)?;
            let right_value = evaluate(tuple, tt, right)?;
            match op {
                &Operator::Eq => {
                    match (left_value, right_value) {
                        (Value::UnsignedLong(l), Value::UnsignedLong(r)) => Ok(Value::Boolean(l == r)),
                        // Mismatched or unsupported operand types.
                        _ => Err(From::from("oops"))
                    }
                },
                // Remaining operators are not implemented yet.
                _ => Err(From::from("oops"))
            }
        },
        // A tuple-value reference delegates to the tuple itself.
        &Expr::TupleValue(index) => tuple.get_value(index),
        // Literals evaluate to a clone of themselves.
        &Expr::Literal(ref value) => Ok(value.clone()),
    }
}
/// Builds a sample schema and data set, then evaluates the filter "id = 2"
/// against every tuple.
///
/// Fix: the filter variable is renamed to snake_case, silencing the
/// non_snake_case compiler warning.
fn main() {
    // Describe the schema: an unsigned-long id and a string name.
    let tt = TupleType {
        columns: vec![
            ColumnMeta { name: String::from("id"), data_type: DataType::UnsignedLong, nullable: false },
            ColumnMeta { name: String::from("name"), data_type: DataType::String, nullable: false }
        ]
    };
    println!("Tuple type: {:?}", tt);
    // Two sample rows.
    let data : Vec<Box<Tuple>> = vec![
        Box::new(SimpleTuple { values: vec![ Value::UnsignedLong(1), Value::String(String::from("Alice")) ] }),
        Box::new(SimpleTuple { values: vec![ Value::UnsignedLong(2), Value::String(String::from("Bob")) ] }),
    ];
    // create simple filter expression for "id = 2"
    let filter_expr = Expr::BinaryExpr {
        left: Box::new(Expr::TupleValue(0)),
        op: Operator::Eq,
        right: Box::new(Expr::Literal(Value::UnsignedLong(2)))
    };
    println!("Expression: {:?}", filter_expr);
    // iterate over tuples and evaluate the expression
    for tuple in &data {
        let x = evaluate(tuple, &tt, &filter_expr).unwrap();
        println!("filter expr evaluates to {:?}", x);
    }
}
|
extern crate warehouse;
use warehouse::{ Auth, Crates };
/// Queries crates.io for the `url` crate, both by exact name and by search,
/// printing each result.
fn main() {
    let mut crates = Crates::new(
        "https://crates.io".to_string(), None);
    let get = crates.named("url");
    println!("result {:?}", get);
    let search = crates.search(
        "url"
    );
    // `.expect` directly on the Result keeps the error value in the panic
    // message instead of discarding it via `.ok()` (clippy: ok_expect).
    println!("result {:?}", search.expect("crates"));
}
unused import
extern crate warehouse;
use warehouse::Crates;
/// Queries crates.io for the `url` crate, both by exact name and by search,
/// printing each result.
fn main() {
    let mut crates = Crates::new(
        "https://crates.io".to_string(), None);
    let get = crates.named("url");
    println!("result {:?}", get);
    let search = crates.search(
        "url"
    );
    // `.expect` directly on the Result keeps the error value in the panic
    // message instead of discarding it via `.ok()` (clippy: ok_expect).
    println!("result {:?}", search.expect("crates"));
}
|
extern crate sdl;
extern crate rand;
use rand::distributions::{IndependentSample, Range};
use sdl::video::{SurfaceFlag, VideoFlag, Color};
use sdl::event::{Event, Key, Mouse};
/// One cell of the minefield.
#[derive(Copy, Clone)]
struct Spot {
    // Not yet revealed.
    hidden: bool,
    // Contains a mine.
    mine: bool,
    // Marked with a flag by the player.
    flag: bool,
    // Number of mines in the 8 surrounding cells.
    n: u8,
}
/// The whole minefield, indexed as `field[y][x]`.
struct Field {
    width: usize,
    height: usize,
    // Row-major grid: `height` rows of `width` spots.
    field: Vec<Vec<Spot>>,
}
impl Field {
    /// Builds a `w` x `h` field; each spot is independently a mine with
    /// probability `r`. Neighbour counts are pre-computed once at the end.
    fn new(r: f32, w: usize, h: usize) -> Field {
        let bt = Range::new(0.,1.);
        let mut rng = rand::thread_rng();
        let mut field = Vec::with_capacity(h);
        for i in 0..h {
            field.push(Vec::with_capacity(w));
            for _ in 0..w {
                field[i].push(
                    Spot{hidden: true,
                    mine: bt.ind_sample(&mut rng) < r,
                    flag: false,
                    n: 0}
                );
            }
        }
        let mut f = Field {width: w, height: h, field: field};
        f.count_field();
        f
    }
    /// Toggles the mine state of the spot at (x, y). (Currently unused.)
    fn swap_mine(&mut self, x: usize, y: usize) {
        let f = &mut self.field[y][x];
        f.mine = !f.mine;
    }
    /// Reveals the spot at (x, y); if its neighbour count equals the number of
    /// adjacent flags, neighbours are revealed recursively ("chording").
    /// Out-of-range coordinates and flagged spots are ignored.
    fn show_spot(&mut self, x: usize, y: usize) {
        if x >= self.width {return;}
        if y >= self.height {return;}
        {
            let f = &mut self.field[y][x];
            if f.flag {return;}
            if f.hidden {
                f.hidden = false;
            }
        }
        if self.field[y][x].n != self.count_flags(x, y) {return;}
        // Offsets 0..3 shifted by -1 cover the 3x3 neighbourhood; the guards
        // keep x+i-1 / y+j-1 in bounds without usize underflow.
        for i in 0..3 {
            if x+i == 0 || x+i > self.width {continue;}
            for j in 0..3 {
                if y+j == 0 || y+j > self.height {continue;}
                if i ==1 && j == 1 {continue;}
                let f = self.field[y+j-1][x+i-1];
                if f.hidden {
                    self.show_spot(x+i-1, y+j-1);
                }
            }
        }
    }
    /// Toggles a flag on a still-hidden spot; revealed spots cannot be flagged.
    fn flag_spot(&mut self, x: usize, y: usize) {
        let f = &mut self.field[y][x];
        if f.hidden {
            f.flag = !f.flag;
        }
    }
    /// Recomputes the neighbour-mine count of every spot.
    fn count_field(&mut self) {
        for i in 0..self.width {
            for j in 0..self.height {
                self.field[j][i].n = self.count_neighbors(i, j);
            }
        }
    }
    /// Counts mines in the 3x3 neighbourhood of (x, y), excluding the centre.
    fn count_neighbors(&self, x: usize, y: usize) -> u8 {
        let mut n = 0;
        for i in 0..3 {
            if x+i == 0 || x+i > self.width {continue;}
            for j in 0..3 {
                if y+j == 0 || y+j > self.height {continue;}
                if i == 1 && j == 1 {continue;}
                if self.field[y+j-1][x+i-1].mine {n += 1}
            }
        }
        n
    }
    /// Counts flags in the 3x3 neighbourhood of (x, y), excluding the centre.
    fn count_flags(&self, x: usize, y: usize) -> u8 {
        let mut n = 0;
        for i in 0..3 {
            if x+i == 0 || x+i > self.width {continue;}
            for j in 0..3 {
                if y+j == 0 || y+j > self.height {continue;}
                if i == 1 && j == 1 {continue;}
                if self.field[y+j-1][x+i-1].flag {n += 1}
            }
        }
        n
    }
}
/// SDL window wrapper plus cell-size metadata for drawing.
struct Screen {
    width: isize,
    height: isize,
    // Pixel size of one board cell.
    spot_length: u16,
    surface: sdl::video::Surface,
}
impl Screen {
    /// Initialises SDL video, sets the window caption, and opens a `w` x `h`
    /// 32-bit double-buffered window.
    fn new(w: isize, h: isize, l: u16) -> Screen {
        sdl::init(&[sdl::InitFlag::Video]);
        sdl::wm::set_caption("mines", "mines");
        let s = match sdl::video::set_video_mode(w, h, 32,
                &[SurfaceFlag::HWSurface],
                &[VideoFlag::DoubleBuf]) {
            Ok(s) => s,
            Err(err) => panic!("failed to set video mode: {}", err)
        };
        Screen {width: w, height: h, spot_length: l, surface: s}
    }
    /// Fills a `w` x `w` square whose top-left corner is (x, y) with the given
    /// RGB colour.
    fn draw_square(&self, x: u16, y: u16, w: u16, (r,g,b): (u8, u8, u8)) {
        self.surface.fill_rect(
            Some(sdl::Rect {x: x as i16, y: y as i16, w: w, h: w}),
            Color::RGB(r, g, b)
        );
    }
    /// Draws the neighbour count `n` (0-8) as a dice-style dot pattern on a
    /// white cell at (x, y). Each iteration draws the dots that distinguish
    /// `n` from a smaller count, then falls through to that smaller count
    /// (e.g. 5 = centre dot + the four dots of 4).
    fn draw_num(&self, n: u8, x: u16, y: u16) {
        self.draw_square(x, y, self.spot_length-1,
            (255, 255, 255)
        );
        let mut n = n;
        // The cell is split into a 3x3 grid of dot slots; `pack` centres it.
        let sub = (self.spot_length-1)/3;
        let pack = (self.spot_length - sub*3)/2;
        // One colour per count, loosely following minesweeper conventions.
        let color = if n == 1 {
            (100,100,255)
        }else if n == 2 {
            (0,255,0)
        }else if n == 3 {
            (255,0,0)
        }else if n == 4 {
            (0,0,255)
        }else if n == 5 {
            (200,50,0)
        }else if n == 6 {
            (0,255,255)
        }else if n == 7 {
            (0,0,0)
        }else {
            (150,150,150)
        };
        loop {
            if n == 0 {break;}
            else if n == 1{
                self.draw_square(sub+x+pack, sub+y+pack, sub-1, color);
                break;
            }else if n == 2{
                self.draw_square(x+pack, y+pack, sub-1, color);
                self.draw_square(sub*2+x+pack, sub*2+y+pack, sub-1, color);
                break;
            }else if n == 3{
                self.draw_square(sub+x+pack, sub+y+pack, sub-1, color);
                n = 2;
            }else if n == 4{
                self.draw_square(x+pack, sub*2+y+pack, sub-1, color);
                self.draw_square(sub*2+x+pack, y+pack, sub-1, color);
                n = 2;
            }else if n == 5{
                self.draw_square(sub+x+pack, sub+y+pack, sub-1, color);
                n = 4;
            }else if n == 6{
                self.draw_square(x+pack, sub+y+pack, sub-1, color);
                self.draw_square(sub*2+x+pack, sub+y+pack, sub-1, color);
                n = 4;
            }else if n == 7{
                self.draw_square(sub+x+pack, sub+y+pack, sub-1, color);
                n = 6;
            }else if n == 8{
                self.draw_square(sub+x+pack, y+pack, sub-1, color);
                self.draw_square(sub+x+pack, sub*2+y+pack, sub-1, color);
                n = 6;
            }
        }
    }
    /// Renders the whole field: hidden cells grey (with a red square when
    /// flagged), revealed mines black, revealed safe cells as their number.
    fn draw_field(&self, ref field: &Field) {
        let length = self.spot_length;
        let mut n = 0;
        for ref i in field.field.iter() {
            let mut m = 0;
            for ref sq in i.iter() {
                if sq.hidden {
                    self.draw_square(m*length+1,
                        n*length+1,
                        length-1,
                        (180, 180, 180)
                    );
                    if sq.flag {
                        self.draw_square(m*length+4,
                            n*length+4,
                            length-7,
                            (255, 0, 0)
                        );
                    }
                }else{
                    if sq.mine {
                        self.draw_square(m*length+1,
                            n*length+1,
                            length-1,
                            (0, 0, 0)
                        );
                    }else{
                        self.draw_num(
                            sq.n,
                            m*length+1,
                            n*length+1
                        );
                    }
                }
                m += 1;
            }
            n += 1;
        }
    }
}
/// Event loop: left-click reveals, right-click flags, R restarts, Escape or
/// window close quits.
fn main() {
    const WIDTH: usize = 30;
    const HEIGHT: usize = 20;
    // Pixel size of one cell.
    const SIZE: usize = 35;
    // Mine probability per cell.
    const R: f32 = 0.185;
    let mut field = Field::new(R, WIDTH, HEIGHT);
    let screen = Screen::new((SIZE*WIDTH) as isize + 1,
        (SIZE*HEIGHT) as isize + 1, SIZE as u16);
    loop {
        match sdl::event::wait_event() {
            Event::Quit => break,
            Event::MouseButton(b, down, mx, my) => {
                if down {
                    // Pixel coordinates are converted to board coordinates.
                    if b == Mouse::Left {
                        field.show_spot(mx as usize/SIZE, my as usize/SIZE);
                    }
                    else if b == Mouse::Right {
                        field.flag_spot(mx as usize/SIZE, my as usize/SIZE);
                    }
                }
            },
            Event::Key(k, down, _, _) => {
                if down {
                    if k == Key::Escape {
                        break;
                    }
                    if k == Key::R {
                        field = Field::new(R, WIDTH, HEIGHT);
                    }
                }
            },
            _ => {}
        }
        screen.draw_field(&field);
        screen.surface.flip();
    }
    sdl::quit();
}
First click is never a mine
extern crate sdl;
extern crate rand;
use rand::distributions::{IndependentSample, Range};
use sdl::video::{SurfaceFlag, VideoFlag, Color};
use sdl::event::{Event, Key, Mouse};
/// One cell of the minefield.
#[derive(Copy, Clone)]
struct Spot {
    // Not yet revealed.
    hidden: bool,
    // Contains a mine.
    mine: bool,
    // Marked with a flag by the player.
    flag: bool,
    // Number of mines in the 8 surrounding cells.
    n: u8,
}
/// The whole minefield, indexed as `field[y][x]`.
struct Field {
    // True for the placeholder board shown before the first click; the real
    // board is generated on the first left-click.
    empty: bool,
    width: usize,
    height: usize,
    field: Vec<Vec<Spot>>,
}
impl Field {
    /// Builds a `w` x `h` field where each spot is a mine with probability
    /// `r`, regenerating the board until the first-clicked spot (x, y) is
    /// neither a mine nor adjacent to one, so the first click always opens an
    /// empty area.
    ///
    /// The retry is a loop rather than recursion (as before) so an unlucky
    /// dense board cannot overflow the stack.
    fn new(r: f32, w: usize, h: usize, x: usize, y: usize) -> Field {
        let bt = Range::new(0.,1.);
        let mut rng = rand::thread_rng();
        loop {
            let mut field = Vec::with_capacity(h);
            for i in 0..h {
                field.push(Vec::with_capacity(w));
                for _ in 0..w {
                    field[i].push(
                        Spot{hidden: true,
                        mine: bt.ind_sample(&mut rng) < r,
                        flag: false,
                        n: 0}
                    );
                }
            }
            let mut f = Field {empty: false, width: w, height: h, field: field};
            f.count_field();
            if f.field[y][x].n == 0 && !f.field[y][x].mine {
                return f;
            }
        }
    }
    /// Placeholder board shown before the first click.
    ///
    /// Bug fix: the grid is indexed `field[y][x]`, so it must be `h` rows of
    /// `w` columns. The previous `vec![vec![..; h]; w]` transposed the
    /// dimensions, which panicked (index out of bounds) on non-square boards
    /// when a spot with x >= h was flagged before the first reveal.
    fn empty(w: usize, h: usize) -> Field {
        Field {empty: true, width: w, height: h,
            field: vec![vec![Spot {hidden: true, mine: false, flag: false, n: 0}; w]; h]
        }
    }
    /// Toggles the mine state of the spot at (x, y). (Currently unused.)
    fn swap_mine(&mut self, x: usize, y: usize) {
        let f = &mut self.field[y][x];
        f.mine = !f.mine;
    }
    /// Reveals the spot at (x, y); if its neighbour count equals the number of
    /// adjacent flags, neighbours are revealed recursively ("chording").
    /// Out-of-range coordinates and flagged spots are ignored.
    fn show_spot(&mut self, x: usize, y: usize) {
        if x >= self.width {return;}
        if y >= self.height {return;}
        {
            let f = &mut self.field[y][x];
            if f.flag {return;}
            if f.hidden {
                f.hidden = false;
            }
        }
        if self.field[y][x].n != self.count_flags(x, y) {return;}
        // Offsets 0..3 shifted by -1 cover the 3x3 neighbourhood; the guards
        // keep x+i-1 / y+j-1 in bounds without usize underflow.
        for i in 0..3 {
            if x+i == 0 || x+i > self.width {continue;}
            for j in 0..3 {
                if y+j == 0 || y+j > self.height {continue;}
                if i ==1 && j == 1 {continue;}
                let f = self.field[y+j-1][x+i-1];
                if f.hidden {
                    self.show_spot(x+i-1, y+j-1);
                }
            }
        }
    }
    /// Toggles a flag on a still-hidden spot; revealed spots cannot be flagged.
    fn flag_spot(&mut self, x: usize, y: usize) {
        let f = &mut self.field[y][x];
        if f.hidden {
            f.flag = !f.flag;
        }
    }
    /// Recomputes the neighbour-mine count of every spot.
    fn count_field(&mut self) {
        for i in 0..self.width {
            for j in 0..self.height {
                self.field[j][i].n = self.count_neighbors(i, j);
            }
        }
    }
    /// Counts mines in the 3x3 neighbourhood of (x, y), excluding the centre.
    fn count_neighbors(&self, x: usize, y: usize) -> u8 {
        let mut n = 0;
        for i in 0..3 {
            if x+i == 0 || x+i > self.width {continue;}
            for j in 0..3 {
                if y+j == 0 || y+j > self.height {continue;}
                if i == 1 && j == 1 {continue;}
                if self.field[y+j-1][x+i-1].mine {n += 1}
            }
        }
        n
    }
    /// Counts flags in the 3x3 neighbourhood of (x, y), excluding the centre.
    fn count_flags(&self, x: usize, y: usize) -> u8 {
        let mut n = 0;
        for i in 0..3 {
            if x+i == 0 || x+i > self.width {continue;}
            for j in 0..3 {
                if y+j == 0 || y+j > self.height {continue;}
                if i == 1 && j == 1 {continue;}
                if self.field[y+j-1][x+i-1].flag {n += 1}
            }
        }
        n
    }
}
/// SDL window wrapper plus cell-size metadata for drawing.
struct Screen {
    width: isize,
    height: isize,
    // Pixel size of one board cell.
    spot_length: u16,
    surface: sdl::video::Surface,
}
impl Screen {
    /// Initialises SDL video, sets the window caption, and opens a `w` x `h`
    /// 32-bit double-buffered window.
    fn new(w: isize, h: isize, l: u16) -> Screen {
        sdl::init(&[sdl::InitFlag::Video]);
        sdl::wm::set_caption("mines", "mines");
        let s = match sdl::video::set_video_mode(w, h, 32,
                &[SurfaceFlag::HWSurface],
                &[VideoFlag::DoubleBuf]) {
            Ok(s) => s,
            Err(err) => panic!("failed to set video mode: {}", err)
        };
        Screen {width: w, height: h, spot_length: l, surface: s}
    }
    /// Fills a `w` x `w` square whose top-left corner is (x, y) with the given
    /// RGB colour.
    fn draw_square(&self, x: u16, y: u16, w: u16, (r,g,b): (u8, u8, u8)) {
        self.surface.fill_rect(
            Some(sdl::Rect {x: x as i16, y: y as i16, w: w, h: w}),
            Color::RGB(r, g, b)
        );
    }
    /// Draws the neighbour count `n` (0-8) as a dice-style dot pattern on a
    /// white cell at (x, y). Each iteration draws the dots that distinguish
    /// `n` from a smaller count, then falls through to that smaller count
    /// (e.g. 5 = centre dot + the four dots of 4).
    fn draw_num(&self, n: u8, x: u16, y: u16) {
        self.draw_square(x, y, self.spot_length-1,
            (255, 255, 255)
        );
        let mut n = n;
        // The cell is split into a 3x3 grid of dot slots; `pack` centres it.
        let sub = (self.spot_length-1)/3;
        let pack = (self.spot_length - sub*3)/2;
        // One colour per count, loosely following minesweeper conventions.
        let color = if n == 1 {
            (100,100,255)
        }else if n == 2 {
            (0,255,0)
        }else if n == 3 {
            (255,0,0)
        }else if n == 4 {
            (0,0,255)
        }else if n == 5 {
            (200,50,0)
        }else if n == 6 {
            (0,255,255)
        }else if n == 7 {
            (0,0,0)
        }else {
            (150,150,150)
        };
        loop {
            if n == 0 {break;}
            else if n == 1{
                self.draw_square(sub+x+pack, sub+y+pack, sub-1, color);
                break;
            }else if n == 2{
                self.draw_square(x+pack, y+pack, sub-1, color);
                self.draw_square(sub*2+x+pack, sub*2+y+pack, sub-1, color);
                break;
            }else if n == 3{
                self.draw_square(sub+x+pack, sub+y+pack, sub-1, color);
                n = 2;
            }else if n == 4{
                self.draw_square(x+pack, sub*2+y+pack, sub-1, color);
                self.draw_square(sub*2+x+pack, y+pack, sub-1, color);
                n = 2;
            }else if n == 5{
                self.draw_square(sub+x+pack, sub+y+pack, sub-1, color);
                n = 4;
            }else if n == 6{
                self.draw_square(x+pack, sub+y+pack, sub-1, color);
                self.draw_square(sub*2+x+pack, sub+y+pack, sub-1, color);
                n = 4;
            }else if n == 7{
                self.draw_square(sub+x+pack, sub+y+pack, sub-1, color);
                n = 6;
            }else if n == 8{
                self.draw_square(sub+x+pack, y+pack, sub-1, color);
                self.draw_square(sub+x+pack, sub*2+y+pack, sub-1, color);
                n = 6;
            }
        }
    }
    /// Renders the whole field: hidden cells grey (with a red square when
    /// flagged), revealed mines black, revealed safe cells as their number.
    fn draw_field(&self, ref field: &Field) {
        let length = self.spot_length;
        let mut n = 0;
        for ref i in field.field.iter() {
            let mut m = 0;
            for ref sq in i.iter() {
                if sq.hidden {
                    self.draw_square(m*length+1,
                        n*length+1,
                        length-1,
                        (180, 180, 180)
                    );
                    if sq.flag {
                        self.draw_square(m*length+4,
                            n*length+4,
                            length-7,
                            (255, 0, 0)
                        );
                    }
                }else{
                    if sq.mine {
                        self.draw_square(m*length+1,
                            n*length+1,
                            length-1,
                            (0, 0, 0)
                        );
                    }else{
                        self.draw_num(
                            sq.n,
                            m*length+1,
                            n*length+1
                        );
                    }
                }
                m += 1;
            }
            n += 1;
        }
    }
}
/// Event loop: left-click reveals (generating the real board on the first
/// click so it is never a mine), right-click flags, R restarts, Escape or
/// window close quits.
fn main() {
    const WIDTH: usize = 30;
    const HEIGHT: usize = 20;
    // Pixel size of one cell.
    const SIZE: usize = 35;
    // Mine probability per cell.
    const R: f32 = 0.2;
    // Start with a placeholder board; the real one is created on first click.
    let mut field = Field::empty(WIDTH, HEIGHT);
    let screen = Screen::new((SIZE*WIDTH) as isize + 1,
        (SIZE*HEIGHT) as isize + 1, SIZE as u16);
    loop {
        match sdl::event::wait_event() {
            Event::Quit => break,
            Event::MouseButton(b, down, mx, my) => {
                if down {
                    // Convert pixel coordinates to board coordinates.
                    let x = mx as usize/SIZE;
                    let y = my as usize/SIZE;
                    if b == Mouse::Left {
                        if field.empty {
                            field = Field::new(R, WIDTH, HEIGHT, x, y);
                        }
                        field.show_spot(x ,y);
                    }
                    else if b == Mouse::Right {
                        field.flag_spot(x, y);
                    }
                }
            },
            Event::Key(k, down, _, _) => {
                if down {
                    if k == Key::Escape {
                        break;
                    }
                    if k == Key::R {
                        field = Field::empty(WIDTH, HEIGHT);
                    }
                }
            },
            _ => {}
        }
        screen.draw_field(&field);
        screen.surface.flip();
    }
    sdl::quit();
}
|
#![feature(mpsc_select, box_syntax, fnbox, never_type, tool_lints)]
// areyoufuckingkiddingme.jpg
#![allow(proc_macro_derive_resolution_fallback, clippy::unreadable_literal)]
#![recursion_limit = "1024"]
extern crate ansi_term;
#[macro_use]
extern crate bot_command_derive;
extern crate byteorder;
extern crate chrono;
extern crate ctrlc;
#[macro_use]
extern crate diesel;
extern crate dotenv;
extern crate envy;
#[macro_use]
extern crate error_chain;
extern crate failure;
extern crate fern;
extern crate fflogs;
extern crate ffxiv_types as ffxiv;
extern crate itertools;
extern crate lalafell;
#[macro_use]
extern crate lazy_static;
extern crate lodestone_api_client;
#[macro_use]
extern crate log;
extern crate rand;
extern crate reqwest;
extern crate scraper;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate serenity;
#[macro_use]
extern crate structopt;
extern crate typemap;
extern crate unicase;
extern crate url;
extern crate uuid;
// TODO: Efficiency. Every time a command is called, it creates a new App and calls the methods on
// it. Storing just one App per command would be ideal.
// Coerces an expression to the given type via `Into`, e.g. `into!(String, x)`.
macro_rules! into {
    ($t:ty, $e:expr) => {{
        let x: $t = $e.into();
        x
    }}
}
// Unwraps an `Option`, evaluating `$o` (typically `continue`, `return`, or a
// default expression) when it is `None`.
macro_rules! some_or {
    ($e: expr, $o: expr) => {{
        #[allow(unused_variables)]
        match $e {
            Some(x) => x,
            None => $o
        }
    }}
}
mod bot;
mod commands;
mod config;
mod database;
mod error;
mod filters;
mod listeners;
mod lodestone;
mod logging;
mod tasks;
mod util;
use error::*;
use bot::LalafellBot;
use std::sync::{Arc, Mutex};
fn main() {
if let Err(e) = inner() {
for err in e.iter() {
error!("{}", err);
}
}
}
/// Sets up logging, loads configuration from the environment, creates the bot,
/// installs a Ctrl-C handler, and runs the bot until shutdown.
fn inner() -> Result<()> {
    // The logger isn't up yet, so failures go to stdout; returning Ok keeps
    // main from double-reporting.
    if let Err(e) = logging::init_logger() {
        println!("Could not set up logger.");
        for err in e.iter() {
            println!("{}", err);
        }
        return Ok(());
    }
    info!("Loading .env");
    dotenv::dotenv().ok();
    info!("Reading environment variables");
    let environment: Environment = envy::prefixed("LB_").from_env().expect("Invalid or missing environment variables");
    let bot = match bot::create_bot(environment) {
        Ok(b) => b,
        Err(e) => bail!("could not create bot: {}", e)
    };
    // Ctrl-C shuts down all shards, which ends start_autosharded below.
    let shard_manager = Arc::clone(&bot.discord.shard_manager);
    ctrlc::set_handler(move || {
        info!("Stopping main loop");
        shard_manager.lock().shutdown_all();
    }).expect("could not set interrupt handler");
    let bot = Arc::new(Mutex::new(bot));
    info!("Spinning up bot");
    // The bot runs on its own thread; join() blocks until it stops.
    std::thread::spawn(move || {
        let mut bot = bot.lock().unwrap();
        if let Err(e) = bot.discord.start_autosharded() {
            error!("could not start bot: {}", e);
        }
    }).join().unwrap();
    info!("Exiting");
    Ok(())
}
/// Runtime configuration deserialized from `LB_`-prefixed environment
/// variables (see `inner`).
#[derive(Debug, Deserialize)]
pub struct Environment {
    // NOTE(review): field semantics inferred from names only — confirm
    // against their usage in bot::create_bot.
    pub config: String,
    pub database_location: String,
    pub discord_bot_token: String,
    pub fflogs_api_key: String,
}
refactor: remove stabilised feature
#![feature(mpsc_select, box_syntax, fnbox, never_type)]
// areyoufuckingkiddingme.jpg
#![allow(proc_macro_derive_resolution_fallback, clippy::unreadable_literal)]
#![recursion_limit = "1024"]
extern crate ansi_term;
#[macro_use]
extern crate bot_command_derive;
extern crate byteorder;
extern crate chrono;
extern crate ctrlc;
#[macro_use]
extern crate diesel;
extern crate dotenv;
extern crate envy;
#[macro_use]
extern crate error_chain;
extern crate failure;
extern crate fern;
extern crate fflogs;
extern crate ffxiv_types as ffxiv;
extern crate itertools;
extern crate lalafell;
#[macro_use]
extern crate lazy_static;
extern crate lodestone_api_client;
#[macro_use]
extern crate log;
extern crate rand;
extern crate reqwest;
extern crate scraper;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate serenity;
#[macro_use]
extern crate structopt;
extern crate typemap;
extern crate unicase;
extern crate url;
extern crate uuid;
// TODO: Efficiency. Every time a command is called, it creates a new App and calls the methods on
// it. Storing just one App per command would be ideal.
// Coerces an expression to the given type via `Into`, e.g. `into!(String, x)`.
macro_rules! into {
    ($t:ty, $e:expr) => {{
        let x: $t = $e.into();
        x
    }}
}
// Unwraps an `Option`, evaluating `$o` (typically `continue`, `return`, or a
// default expression) when it is `None`.
macro_rules! some_or {
    ($e: expr, $o: expr) => {{
        #[allow(unused_variables)]
        match $e {
            Some(x) => x,
            None => $o
        }
    }}
}
mod bot;
mod commands;
mod config;
mod database;
mod error;
mod filters;
mod listeners;
mod lodestone;
mod logging;
mod tasks;
mod util;
use error::*;
use bot::LalafellBot;
use std::sync::{Arc, Mutex};
fn main() {
if let Err(e) = inner() {
for err in e.iter() {
error!("{}", err);
}
}
}
/// Sets up logging, loads configuration from the environment, creates the bot,
/// installs a Ctrl-C handler, and runs the bot until shutdown.
fn inner() -> Result<()> {
    // The logger isn't up yet, so failures go to stdout; returning Ok keeps
    // main from double-reporting.
    if let Err(e) = logging::init_logger() {
        println!("Could not set up logger.");
        for err in e.iter() {
            println!("{}", err);
        }
        return Ok(());
    }
    info!("Loading .env");
    dotenv::dotenv().ok();
    info!("Reading environment variables");
    let environment: Environment = envy::prefixed("LB_").from_env().expect("Invalid or missing environment variables");
    let bot = match bot::create_bot(environment) {
        Ok(b) => b,
        Err(e) => bail!("could not create bot: {}", e)
    };
    // Ctrl-C shuts down all shards, which ends start_autosharded below.
    let shard_manager = Arc::clone(&bot.discord.shard_manager);
    ctrlc::set_handler(move || {
        info!("Stopping main loop");
        shard_manager.lock().shutdown_all();
    }).expect("could not set interrupt handler");
    let bot = Arc::new(Mutex::new(bot));
    info!("Spinning up bot");
    // The bot runs on its own thread; join() blocks until it stops.
    std::thread::spawn(move || {
        let mut bot = bot.lock().unwrap();
        if let Err(e) = bot.discord.start_autosharded() {
            error!("could not start bot: {}", e);
        }
    }).join().unwrap();
    info!("Exiting");
    Ok(())
}
/// Runtime configuration deserialized from `LB_`-prefixed environment
/// variables (see `inner`).
#[derive(Debug, Deserialize)]
pub struct Environment {
    // NOTE(review): field semantics inferred from names only — confirm
    // against their usage in bot::create_bot.
    pub config: String,
    pub database_location: String,
    pub discord_bot_token: String,
    pub fflogs_api_key: String,
}
|
use std::io;
mod table;
mod player;
/// ROX - Tic Tac Toe. Alternates players, reads board positions (1-9) from
/// stdin, and ends when a player wins or the board is full.
///
/// Fixes: parsing with `match` instead of `.expect` so empty or non-numeric
/// input no longer crashes the program; the duplicated validation code is
/// collapsed into a single re-prompt loop; the unused `field` and `valid`
/// variables are gone.
fn main() {
    // create the table with 9 empty fields
    let mut table: [char; 9] = [' '; 9];
    // set x as first player ('o' will be changed to 'x')
    let mut player: char = 'o';
    let stdin = io::stdin();
    let mut input = String::new();
    let mut game_over: bool = false;
    println!("ROX - Tic Tac Toe in Rust");
    println!("Author: Nikola S. (panther99)");
    while !game_over {
        // change player
        player::change_player(&mut player);
        table::print_table(table);
        println!("Current player: {}", player);
        // Re-prompt until the player picks a valid, empty field.
        loop {
            input.clear();
            println!("Choose field (1-9): ");
            stdin.read_line(&mut input)
                .expect("Failed to read line");
            match input.trim().parse::<usize>() {
                Ok(field) if field > 0 && field < 10 => {
                    if table[field-1] == ' ' {
                        // Valid move: claim the field and leave the loop.
                        table[field-1] = player;
                        break;
                    }
                    println!("That field isn't empty!");
                }
                Ok(_) => println!("That field doesn't exist!"),
                Err(e) => println!("Error: {:?}", e),
            }
            table::print_table(table);
        }
        // check if current player won the game
        game_over = table::check_table(table, player);
    }
    // check if table is full or last player won the game
    if table::full_table(table) {
        println!("It's a tie!");
    } else {
        println!("Player {} won the game!", player);
    }
}
Fixed a bug where empty input would break the program
use std::io;
mod table;
mod player;
/// ROX - Tic Tac Toe. Alternates players, reads board positions (1-9) from
/// stdin, and ends when a player wins or the board is full.
///
/// Fixes: the two byte-identical input-validation blocks are collapsed into a
/// single re-prompt loop, which also removes the double-`change_player`
/// workaround (the Err branch no longer needs to un-flip the player because
/// the player only changes once per turn); the unused `field` and `valid`
/// variables are gone.
fn main() {
    // create the table with 9 empty fields
    let mut table: [char; 9] = [' '; 9];
    // set x as first player ('o' will be changed to 'x')
    let mut player: char = 'o';
    let stdin = io::stdin();
    let mut input = String::new();
    let mut game_over: bool = false;
    println!("ROX - Tic Tac Toe in Rust");
    println!("Author: Nikola S. (panther99)");
    while !game_over {
        // change player
        player::change_player(&mut player);
        table::print_table(table);
        println!("Current player: {}", player);
        // Re-prompt until the player picks a valid, empty field. Parsing with
        // `match` keeps empty or non-numeric input from panicking.
        loop {
            input.clear();
            println!("Choose field (1-9): ");
            stdin.read_line(&mut input)
                .expect("Failed to read line");
            match input.trim().parse::<usize>() {
                Ok(field) if field > 0 && field < 10 => {
                    if table[field-1] == ' ' {
                        // Valid move: claim the field and leave the loop.
                        table[field-1] = player;
                        break;
                    }
                    println!("That field isn't empty!");
                }
                Ok(_) => println!("That field doesn't exist!"),
                Err(e) => println!("Error: {:?}", e),
            }
            table::print_table(table);
        }
        // check if current player won the game
        game_over = table::check_table(table, player);
    }
    // check if table is full or last player won the game
    if table::full_table(table) {
        println!("It's a tie!");
    } else {
        println!("Player {} won the game!", player);
    }
}
|
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Runs a virtual machine under KVM
pub mod panic_hook;
use std::fmt;
use std::fs::{File, OpenOptions};
use std::num::ParseIntError;
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::string::String;
use std::thread::sleep;
use std::time::Duration;
use crosvm::{
argument::{self, print_help, set_arguments, Argument},
linux, BindMount, Config, DiskOption, Executable, GidMap, SharedDir, TouchDeviceOption,
};
#[cfg(feature = "gpu")]
use devices::virtio::gpu::{GpuParameters, DEFAULT_GPU_PARAMS};
use devices::{SerialParameters, SerialType};
use msg_socket::{MsgReceiver, MsgSender, MsgSocket};
use qcow::QcowFile;
use sys_util::{
debug, error, getpid, info, kill_process_group, net::UnixSeqpacket, reap_child, syslog,
validate_raw_fd, warn,
};
use vm_control::{
BalloonControlCommand, DiskControlCommand, MaybeOwnedFd, UsbControlCommand, UsbControlResult,
VmControlRequestSocket, VmRequest, VmResponse, USB_CONTROL_MAX_PORTS,
};
/// Returns true when the configured executable is a plugin.
fn executable_is_plugin(executable: &Option<Executable>) -> bool {
    matches!(executable, Some(Executable::Plugin(_)))
}
// Wait for all children to exit. Return true if they have all exited, false
// otherwise.
fn wait_all_children() -> bool {
    // Poll up to CHILD_WAIT_MAX_ITER times, sleeping CHILD_WAIT_MS between
    // polls — i.e. wait at most about one second overall.
    const CHILD_WAIT_MAX_ITER: isize = 100;
    const CHILD_WAIT_MS: u64 = 10;
    for _ in 0..CHILD_WAIT_MAX_ITER {
        // Inner loop: reap every already-exited child in one pass.
        loop {
            match reap_child() {
                Ok(0) => break,
                // We expect ECHILD which indicates that there were no children left.
                Err(e) if e.errno() == libc::ECHILD => return true,
                Err(e) => {
                    warn!("error while waiting for children: {}", e);
                    return false;
                }
                // We reaped one child, so continue reaping.
                _ => {}
            }
        }
        // There's no timeout option for waitpid which reap_child calls internally, so our only
        // recourse is to sleep while waiting for the children to exit.
        sleep(Duration::from_millis(CHILD_WAIT_MS));
    }
    // If we've made it to this point, not all of the children have exited.
    false
}
/// Parse a comma-separated list of CPU numbers and ranges and convert it to a Vec of CPU numbers.
///
/// Accepts entries like "0" or "2-4"; ranges are inclusive and must run low to
/// high. Fixes: the `len() == 0` branch was dead (`split` always yields at
/// least one item) and is now the idiomatic `is_empty()` kept purely as a
/// defensive check; the duplicated endpoint parsing is hoisted into a closure;
/// the per-CPU push loop becomes `extend` over the range.
fn parse_cpu_set(s: &str) -> argument::Result<Vec<usize>> {
    let mut cpuset = Vec::new();
    for part in s.split(',') {
        let range: Vec<&str> = part.split('-').collect();
        if range.is_empty() || range.len() > 2 {
            return Err(argument::Error::InvalidValue {
                value: part.to_owned(),
                expected: "invalid list syntax",
            });
        }
        // Shared parser for both endpoints of a range.
        let parse_cpu = |cpu_str: &str| {
            cpu_str
                .parse::<usize>()
                .map_err(|_| argument::Error::InvalidValue {
                    value: part.to_owned(),
                    expected: "CPU index must be a non-negative integer",
                })
        };
        let first_cpu = parse_cpu(range[0])?;
        // A single number is a degenerate range: first == last.
        let last_cpu = if range.len() == 2 {
            parse_cpu(range[1])?
        } else {
            first_cpu
        };
        if last_cpu < first_cpu {
            return Err(argument::Error::InvalidValue {
                value: part.to_owned(),
                expected: "CPU ranges must be from low to high",
            });
        }
        cpuset.extend(first_cpu..=last_cpu);
    }
    Ok(cpuset)
}
#[cfg(feature = "gpu")]
/// Parses a comma-separated `key[=value]` list of gpu options (egl, gles, glx,
/// surfaceless, width, height) on top of DEFAULT_GPU_PARAMS.
///
/// Fix: the four byte-identical boolean-flag match arms are deduplicated into
/// one helper; behavior (including every error message) is unchanged.
fn parse_gpu_options(s: Option<&str>) -> argument::Result<GpuParameters> {
    // Parses one boolean-valued flag; an empty value (bare key) means true.
    fn parse_bool_flag(v: &str, expected: &'static str) -> argument::Result<bool> {
        match v {
            "true" | "" => Ok(true),
            "false" => Ok(false),
            _ => Err(argument::Error::InvalidValue {
                value: v.to_string(),
                expected,
            }),
        }
    }
    let mut gpu_params = DEFAULT_GPU_PARAMS;
    if let Some(s) = s {
        // Split "k1=v1,k2=v2" into (key, value) pairs; a missing '=' yields "".
        let opts = s
            .split(",")
            .map(|frag| frag.split("="))
            .map(|mut kv| (kv.next().unwrap_or(""), kv.next().unwrap_or("")));
        for (k, v) in opts {
            match k {
                "egl" => {
                    gpu_params.renderer_use_egl =
                        parse_bool_flag(v, "gpu parameter 'egl' should be a boolean")?;
                }
                "gles" => {
                    gpu_params.renderer_use_gles =
                        parse_bool_flag(v, "gpu parameter 'gles' should be a boolean")?;
                }
                "glx" => {
                    gpu_params.renderer_use_glx =
                        parse_bool_flag(v, "gpu parameter 'glx' should be a boolean")?;
                }
                "surfaceless" => {
                    gpu_params.renderer_use_surfaceless =
                        parse_bool_flag(v, "gpu parameter 'surfaceless' should be a boolean")?;
                }
                "width" => {
                    gpu_params.display_width =
                        v.parse::<u32>()
                            .map_err(|_| argument::Error::InvalidValue {
                                value: v.to_string(),
                                expected: "gpu parameter 'width' must be a valid integer",
                            })?;
                }
                "height" => {
                    gpu_params.display_height =
                        v.parse::<u32>()
                            .map_err(|_| argument::Error::InvalidValue {
                                value: v.to_string(),
                                expected: "gpu parameter 'height' must be a valid integer",
                            })?;
                }
                "" => {}
                _ => {
                    return Err(argument::Error::UnknownArgument(format!(
                        "gpu parameter {}",
                        k
                    )));
                }
            }
        }
    }
    Ok(gpu_params)
}
/// Parses a comma-separated `key=value` list (type, num, console, stdin, path)
/// into SerialParameters. Defaults: sink device, port 1, no console, no stdin.
fn parse_serial_options(s: &str) -> argument::Result<SerialParameters> {
    let mut serial_setting = SerialParameters {
        type_: SerialType::Sink,
        path: None,
        num: 1,
        console: false,
        stdin: false,
    };
    // Split "k1=v1,k2=v2" into (key, value) pairs; a missing '=' yields "".
    let opts = s
        .split(",")
        .map(|frag| frag.split("="))
        .map(|mut kv| (kv.next().unwrap_or(""), kv.next().unwrap_or("")));
    for (k, v) in opts {
        match k {
            "type" => {
                serial_setting.type_ = v
                    .parse::<SerialType>()
                    .map_err(|e| argument::Error::UnknownArgument(format!("{}", e)))?
            }
            "num" => {
                // Serial ports are numbered 1-4.
                let num = v.parse::<u8>().map_err(|e| {
                    argument::Error::Syntax(format!("serial device number is not parsable: {}", e))
                })?;
                if num < 1 || num > 4 {
                    return Err(argument::Error::InvalidValue {
                        value: num.to_string(),
                        expected: "Serial port num must be between 1 - 4",
                    });
                }
                serial_setting.num = num;
            }
            "console" => {
                serial_setting.console = v.parse::<bool>().map_err(|e| {
                    argument::Error::Syntax(format!(
                        "serial device console is not parseable: {}",
                        e
                    ))
                })?
            }
            "stdin" => {
                serial_setting.stdin = v.parse::<bool>().map_err(|e| {
                    argument::Error::Syntax(format!("serial device stdin is not parseable: {}", e))
                })?
            }
            "path" => serial_setting.path = Some(PathBuf::from(v)),
            _ => {
                return Err(argument::Error::UnknownArgument(format!(
                    "serial parameter {}",
                    k
                )));
            }
        }
    }
    Ok(serial_setting)
}
/// Parses a `plugin-mount` argument of the form `<src>:<dst>:<writable>` into
/// a `BindMount`.
///
/// Both paths must be absolute, the source must exist on the host, and
/// `<writable>` must parse as a bool.
fn parse_plugin_mount_option(value: &str) -> argument::Result<BindMount> {
    let components: Vec<&str> = value.split(':').collect();
    if components.len() != 3 {
        return Err(argument::Error::InvalidValue {
            value: value.to_owned(),
            expected: "`plugin-mount` must have exactly 3 components: <src>:<dst>:<writable>",
        });
    }

    let src = PathBuf::from(components[0]);
    if src.is_relative() {
        return Err(argument::Error::InvalidValue {
            value: components[0].to_owned(),
            expected: "the source path for `plugin-mount` must be absolute",
        });
    }
    if !src.exists() {
        return Err(argument::Error::InvalidValue {
            value: components[0].to_owned(),
            expected: "the source path for `plugin-mount` does not exist",
        });
    }

    // The destination only needs to be absolute; it is created inside the
    // plugin's root, so it need not exist on the host.
    let dst = PathBuf::from(components[1]);
    if dst.is_relative() {
        return Err(argument::Error::InvalidValue {
            value: components[1].to_owned(),
            expected: "the destination path for `plugin-mount` must be absolute",
        });
    }

    let writable: bool = components[2]
        .parse()
        .map_err(|_| argument::Error::InvalidValue {
            value: components[2].to_owned(),
            expected: "the <writable> component for `plugin-mount` is not valid bool",
        })?;

    Ok(BindMount { src, dst, writable })
}
/// Parses a `plugin-gid-map` argument of the form `<inner>:<outer>:<count>`
/// into a `GidMap`.
fn parse_plugin_gid_map_option(value: &str) -> argument::Result<GidMap> {
    // Destructure into exactly three fields; a fourth `Some` or a missing
    // field means the argument was malformed.
    let mut fields = value.split(':');
    let (inner_str, outer_str, count_str) =
        match (fields.next(), fields.next(), fields.next(), fields.next()) {
            (Some(inner), Some(outer), Some(count), None) => (inner, outer, count),
            _ => {
                return Err(argument::Error::InvalidValue {
                    value: value.to_owned(),
                    expected: "`plugin-gid-map` must have exactly 3 components: <inner>:<outer>:<count>",
                });
            }
        };

    let inner: libc::gid_t = inner_str
        .parse()
        .map_err(|_| argument::Error::InvalidValue {
            value: inner_str.to_owned(),
            expected: "the <inner> component for `plugin-gid-map` is not valid gid",
        })?;

    let outer: libc::gid_t = outer_str
        .parse()
        .map_err(|_| argument::Error::InvalidValue {
            value: outer_str.to_owned(),
            expected: "the <outer> component for `plugin-gid-map` is not valid gid",
        })?;

    let count: u32 = count_str
        .parse()
        .map_err(|_| argument::Error::InvalidValue {
            value: count_str.to_owned(),
            expected: "the <count> component for `plugin-gid-map` is not valid number",
        })?;

    Ok(GidMap {
        inner,
        outer,
        count,
    })
}
/// Applies a single parsed command line argument to `cfg`.
///
/// `name` is the long option name ("" for the positional KERNEL argument) and
/// `value` is its value, if any. Options that were already given, values that
/// fail to parse, and nonexistent paths are reported as `argument::Error`s.
/// `value.unwrap()` is safe in each arm because the argument table declares
/// which options take values.
fn set_argument(cfg: &mut Config, name: &str, value: Option<&str>) -> argument::Result<()> {
    match name {
        // The empty name is the positional KERNEL image argument.
        "" => {
            if cfg.executable_path.is_some() {
                return Err(argument::Error::TooManyArguments(format!(
                    "A VM executable was already specified: {:?}",
                    cfg.executable_path
                )));
            }
            let kernel_path = PathBuf::from(value.unwrap());
            if !kernel_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_owned(),
                    expected: "this kernel path does not exist",
                });
            }
            cfg.executable_path = Some(Executable::Kernel(kernel_path));
        }
        "android-fstab" => {
            if cfg.android_fstab.is_some()
                && !cfg.android_fstab.as_ref().unwrap().as_os_str().is_empty()
            {
                return Err(argument::Error::TooManyArguments(
                    "expected exactly one android fstab path".to_owned(),
                ));
            } else {
                let android_fstab = PathBuf::from(value.unwrap());
                if !android_fstab.exists() {
                    return Err(argument::Error::InvalidValue {
                        value: value.unwrap().to_owned(),
                        expected: "this android fstab path does not exist",
                    });
                }
                cfg.android_fstab = Some(android_fstab);
            }
        }
        "params" => {
            cfg.params.push(value.unwrap().to_owned());
        }
        "cpus" => {
            if cfg.vcpu_count.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`cpus` already given".to_owned(),
                ));
            }
            cfg.vcpu_count =
                Some(
                    value
                        .unwrap()
                        .parse()
                        .map_err(|_| argument::Error::InvalidValue {
                            value: value.unwrap().to_owned(),
                            expected: "this value for `cpus` needs to be integer",
                        })?,
                )
        }
        "cpu-affinity" => {
            if !cfg.vcpu_affinity.is_empty() {
                return Err(argument::Error::TooManyArguments(
                    "`cpu-affinity` already given".to_owned(),
                ));
            }
            cfg.vcpu_affinity = parse_cpu_set(value.unwrap())?;
        }
        "mem" => {
            if cfg.memory.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`mem` already given".to_owned(),
                ));
            }
            cfg.memory =
                Some(
                    value
                        .unwrap()
                        .parse()
                        .map_err(|_| argument::Error::InvalidValue {
                            value: value.unwrap().to_owned(),
                            expected: "this value for `mem` needs to be integer",
                        })?,
                )
        }
        "cras-audio" => {
            cfg.cras_audio = true;
        }
        "cras-capture" => {
            cfg.cras_capture = true;
        }
        "null-audio" => {
            cfg.null_audio = true;
        }
        "serial" => {
            let serial_params = parse_serial_options(value.unwrap())?;
            let num = serial_params.num;
            if cfg.serial_parameters.contains_key(&num) {
                return Err(argument::Error::TooManyArguments(format!(
                    "serial num {}",
                    num
                )));
            }
            // At most one serial device may be the console.
            if serial_params.console {
                for params in cfg.serial_parameters.values() {
                    if params.console {
                        return Err(argument::Error::TooManyArguments(format!(
                            "serial device {} already set as console",
                            params.num
                        )));
                    }
                }
            }
            // At most one serial device may be wired to stdin.
            if serial_params.stdin {
                if let Some(previous_stdin) = cfg.serial_parameters.values().find(|sp| sp.stdin) {
                    return Err(argument::Error::TooManyArguments(format!(
                        "serial device {} already connected to standard input",
                        previous_stdin.num
                    )));
                }
            }
            cfg.serial_parameters.insert(num, serial_params);
        }
        "syslog-tag" => {
            if cfg.syslog_tag.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`syslog-tag` already given".to_owned(),
                ));
            }
            syslog::set_proc_name(value.unwrap());
            cfg.syslog_tag = Some(value.unwrap().to_owned());
        }
        "root" | "rwroot" | "disk" | "rwdisk" | "qcow" | "rwqcow" => {
            let param = value.unwrap();
            let mut components = param.split(',');
            let read_only = !name.starts_with("rw");
            let disk_path =
                PathBuf::from(
                    components
                        .next()
                        .ok_or_else(|| argument::Error::InvalidValue {
                            value: param.to_owned(),
                            expected: "missing disk path",
                        })?,
                );
            if !disk_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: param.to_owned(),
                    expected: "this disk path does not exist",
                });
            }
            if name.ends_with("root") {
                // Root disks get a kernel cmdline entry naming their /dev/vdX
                // node, so only 26 disks (a-z) can precede one.
                if cfg.disks.len() >= 26 {
                    return Err(argument::Error::TooManyArguments(
                        "ran out of letters to assign to root disk".to_owned(),
                    ));
                }
                cfg.params.push(format!(
                    "root=/dev/vd{} {}",
                    char::from(b'a' + cfg.disks.len() as u8),
                    if read_only { "ro" } else { "rw" }
                ));
            }
            let mut disk = DiskOption {
                path: disk_path,
                read_only,
                sparse: true,
                block_size: 512,
            };
            for opt in components {
                let mut o = opt.splitn(2, '=');
                let kind = o.next().ok_or_else(|| argument::Error::InvalidValue {
                    value: opt.to_owned(),
                    expected: "disk options must not be empty",
                })?;
                let value = o.next().ok_or_else(|| argument::Error::InvalidValue {
                    value: opt.to_owned(),
                    expected: "disk options must be of the form `kind=value`",
                })?;
                match kind {
                    "sparse" => {
                        let sparse = value.parse().map_err(|_| argument::Error::InvalidValue {
                            value: value.to_owned(),
                            expected: "`sparse` must be a boolean",
                        })?;
                        disk.sparse = sparse;
                    }
                    "block_size" => {
                        let block_size =
                            value.parse().map_err(|_| argument::Error::InvalidValue {
                                value: value.to_owned(),
                                expected: "`block_size` must be an integer",
                            })?;
                        disk.block_size = block_size;
                    }
                    _ => {
                        return Err(argument::Error::InvalidValue {
                            value: kind.to_owned(),
                            expected: "unrecognized disk option",
                        });
                    }
                }
            }
            cfg.disks.push(disk);
        }
        "pmem-device" | "rw-pmem-device" => {
            let disk_path = PathBuf::from(value.unwrap());
            if !disk_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_owned(),
                    expected: "this disk path does not exist",
                });
            }
            cfg.pmem_devices.push(DiskOption {
                path: disk_path,
                read_only: !name.starts_with("rw"),
                sparse: false,
                block_size: sys_util::pagesize() as u32,
            });
        }
        "host_ip" => {
            if cfg.host_ip.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`host_ip` already given".to_owned(),
                ));
            }
            cfg.host_ip =
                Some(
                    value
                        .unwrap()
                        .parse()
                        .map_err(|_| argument::Error::InvalidValue {
                            value: value.unwrap().to_owned(),
                            expected: "`host_ip` needs to be in the form \"x.x.x.x\"",
                        })?,
                )
        }
        "netmask" => {
            if cfg.netmask.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`netmask` already given".to_owned(),
                ));
            }
            cfg.netmask =
                Some(
                    value
                        .unwrap()
                        .parse()
                        .map_err(|_| argument::Error::InvalidValue {
                            value: value.unwrap().to_owned(),
                            expected: "`netmask` needs to be in the form \"x.x.x.x\"",
                        })?,
                )
        }
        "mac" => {
            if cfg.mac_address.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`mac` already given".to_owned(),
                ));
            }
            cfg.mac_address =
                Some(
                    value
                        .unwrap()
                        .parse()
                        .map_err(|_| argument::Error::InvalidValue {
                            value: value.unwrap().to_owned(),
                            expected: "`mac` needs to be in the form \"XX:XX:XX:XX:XX:XX\"",
                        })?,
                )
        }
        "wayland-sock" => {
            if cfg.wayland_socket_path.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`wayland-sock` already given".to_owned(),
                ));
            }
            let wayland_socket_path = PathBuf::from(value.unwrap());
            if !wayland_socket_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_string(),
                    expected: "Wayland socket does not exist",
                });
            }
            cfg.wayland_socket_path = Some(wayland_socket_path);
        }
        #[cfg(feature = "wl-dmabuf")]
        "wayland-dmabuf" => cfg.wayland_dmabuf = true,
        "x-display" => {
            if cfg.x_display.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`x-display` already given".to_owned(),
                ));
            }
            cfg.x_display = Some(value.unwrap().to_owned());
        }
        "display-window-keyboard" => {
            cfg.display_window_keyboard = true;
        }
        "display-window-mouse" => {
            cfg.display_window_mouse = true;
        }
        "socket" => {
            if cfg.socket_path.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`socket` already given".to_owned(),
                ));
            }
            let mut socket_path = PathBuf::from(value.unwrap());
            // A directory gets a generated, pid-unique socket name inside it.
            if socket_path.is_dir() {
                socket_path.push(format!("crosvm-{}.sock", getpid()));
            }
            if socket_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: socket_path.to_string_lossy().into_owned(),
                    expected: "this socket path already exists",
                });
            }
            cfg.socket_path = Some(socket_path);
        }
        "disable-sandbox" => {
            cfg.sandbox = false;
        }
        "cid" => {
            if cfg.cid.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`cid` already given".to_owned(),
                ));
            }
            cfg.cid = Some(
                value
                    .unwrap()
                    .parse()
                    .map_err(|_| argument::Error::InvalidValue {
                        value: value.unwrap().to_owned(),
                        expected: "this value for `cid` must be an unsigned integer",
                    })?,
            );
        }
        "shared-dir" => {
            // This is formatted as multiple fields, each separated by ":". The first 2 fields are
            // fixed (src:tag). The rest may appear in any order:
            //
            // * type=TYPE - must be one of "p9" or "fs" (default: p9)
            // * uidmap=UIDMAP - a uid map in the format "inner outer count[,inner outer count]"
            //   (default: "0 <current euid> 1")
            // * gidmap=GIDMAP - a gid map in the same format as uidmap
            //   (default: "0 <current egid> 1")
            // * timeout=TIMEOUT - a timeout value in seconds, which indicates how long attributes
            //   and directory contents should be considered valid (default: 5)
            // * cache=CACHE - one of "never", "always", or "auto" (default: auto)
            // * writeback=BOOL - indicates whether writeback caching should be enabled (default: false)
            let param = value.unwrap();
            let mut components = param.split(':');
            let src =
                PathBuf::from(
                    components
                        .next()
                        .ok_or_else(|| argument::Error::InvalidValue {
                            value: param.to_owned(),
                            expected: "missing source path for `shared-dir`",
                        })?,
                );
            let tag = components
                .next()
                .ok_or_else(|| argument::Error::InvalidValue {
                    value: param.to_owned(),
                    expected: "missing tag for `shared-dir`",
                })?
                .to_owned();

            if !src.is_dir() {
                return Err(argument::Error::InvalidValue {
                    value: param.to_owned(),
                    expected: "source path for `shared-dir` must be a directory",
                });
            }

            let mut shared_dir = SharedDir {
                src,
                tag,
                ..Default::default()
            };
            for opt in components {
                let mut o = opt.splitn(2, '=');
                let kind = o.next().ok_or_else(|| argument::Error::InvalidValue {
                    value: opt.to_owned(),
                    expected: "`shared-dir` options must not be empty",
                })?;
                let value = o.next().ok_or_else(|| argument::Error::InvalidValue {
                    value: opt.to_owned(),
                    expected: "`shared-dir` options must be of the form `kind=value`",
                })?;
                match kind {
                    "type" => {
                        shared_dir.kind =
                            value.parse().map_err(|_| argument::Error::InvalidValue {
                                value: value.to_owned(),
                                expected: "`type` must be one of `fs` or `9p`",
                            })?
                    }
                    "uidmap" => shared_dir.uid_map = value.into(),
                    "gidmap" => shared_dir.gid_map = value.into(),
                    "timeout" => {
                        let seconds = value.parse().map_err(|_| argument::Error::InvalidValue {
                            value: value.to_owned(),
                            expected: "`timeout` must be an integer",
                        })?;

                        // One timeout covers both attribute and entry caching.
                        // Duration is Copy, so no clone is needed.
                        let dur = Duration::from_secs(seconds);
                        shared_dir.cfg.entry_timeout = dur;
                        shared_dir.cfg.attr_timeout = dur;
                    }
                    "cache" => {
                        let policy = value.parse().map_err(|_| argument::Error::InvalidValue {
                            value: value.to_owned(),
                            expected: "`cache` must be one of `never`, `always`, or `auto`",
                        })?;
                        shared_dir.cfg.cache_policy = policy;
                    }
                    "writeback" => {
                        let writeback =
                            value.parse().map_err(|_| argument::Error::InvalidValue {
                                value: value.to_owned(),
                                expected: "`writeback` must be a boolean",
                            })?;
                        shared_dir.cfg.writeback = writeback;
                    }
                    _ => {
                        return Err(argument::Error::InvalidValue {
                            value: kind.to_owned(),
                            expected: "unrecognized option for `shared-dir`",
                        })
                    }
                }
            }
            cfg.shared_dirs.push(shared_dir);
        }
        "seccomp-policy-dir" => {
            // `value` is Some because we are in this match so it's safe to unwrap.
            cfg.seccomp_policy_dir = PathBuf::from(value.unwrap());
        }
        "seccomp-log-failures" => {
            cfg.seccomp_log_failures = true;
        }
        "plugin" => {
            if cfg.executable_path.is_some() {
                return Err(argument::Error::TooManyArguments(format!(
                    "A VM executable was already specified: {:?}",
                    cfg.executable_path
                )));
            }
            let plugin = PathBuf::from(value.unwrap().to_owned());
            if plugin.is_relative() {
                return Err(argument::Error::InvalidValue {
                    value: plugin.to_string_lossy().into_owned(),
                    expected: "the plugin path must be an absolute path",
                });
            }
            cfg.executable_path = Some(Executable::Plugin(plugin));
        }
        "plugin-root" => {
            cfg.plugin_root = Some(PathBuf::from(value.unwrap().to_owned()));
        }
        "plugin-mount" => {
            let mount = parse_plugin_mount_option(value.unwrap())?;
            cfg.plugin_mounts.push(mount);
        }
        "plugin-gid-map" => {
            let map = parse_plugin_gid_map_option(value.unwrap())?;
            cfg.plugin_gid_maps.push(map);
        }
        "vhost-net" => cfg.vhost_net = true,
        "tap-fd" => {
            cfg.tap_fd.push(
                value
                    .unwrap()
                    .parse()
                    .map_err(|_| argument::Error::InvalidValue {
                        value: value.unwrap().to_owned(),
                        expected: "this value for `tap-fd` must be an unsigned integer",
                    })?,
            );
        }
        #[cfg(feature = "gpu")]
        "gpu" => {
            let params = parse_gpu_options(value)?;
            cfg.gpu_parameters = Some(params);
        }
        "software-tpm" => {
            cfg.software_tpm = true;
        }
        "single-touch" => {
            if cfg.virtio_single_touch.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`single-touch` already given".to_owned(),
                ));
            }
            let mut it = value.unwrap().split(':');

            // NOTE(review): the width/height parses below panic on bad input;
            // consider returning argument::Error::InvalidValue instead.
            let mut single_touch_spec =
                TouchDeviceOption::new(PathBuf::from(it.next().unwrap().to_owned()));
            if let Some(width) = it.next() {
                single_touch_spec.width = width.trim().parse().unwrap();
            }
            if let Some(height) = it.next() {
                single_touch_spec.height = height.trim().parse().unwrap();
            }
            cfg.virtio_single_touch = Some(single_touch_spec);
        }
        "trackpad" => {
            if cfg.virtio_trackpad.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`trackpad` already given".to_owned(),
                ));
            }
            let mut it = value.unwrap().split(':');

            // NOTE(review): same panic-on-bad-input caveat as `single-touch`.
            let mut trackpad_spec =
                TouchDeviceOption::new(PathBuf::from(it.next().unwrap().to_owned()));
            if let Some(width) = it.next() {
                trackpad_spec.width = width.trim().parse().unwrap();
            }
            if let Some(height) = it.next() {
                trackpad_spec.height = height.trim().parse().unwrap();
            }
            cfg.virtio_trackpad = Some(trackpad_spec);
        }
        "mouse" => {
            if cfg.virtio_mouse.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`mouse` already given".to_owned(),
                ));
            }
            cfg.virtio_mouse = Some(PathBuf::from(value.unwrap().to_owned()));
        }
        "keyboard" => {
            if cfg.virtio_keyboard.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`keyboard` already given".to_owned(),
                ));
            }
            cfg.virtio_keyboard = Some(PathBuf::from(value.unwrap().to_owned()));
        }
        "evdev" => {
            let dev_path = PathBuf::from(value.unwrap());
            if !dev_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_owned(),
                    expected: "this input device path does not exist",
                });
            }
            cfg.virtio_input_evdevs.push(dev_path);
        }
        "split-irqchip" => {
            cfg.split_irqchip = true;
        }
        "initrd" => {
            cfg.initrd_path = Some(PathBuf::from(value.unwrap().to_owned()));
        }
        "bios" => {
            if cfg.executable_path.is_some() {
                return Err(argument::Error::TooManyArguments(format!(
                    "A VM executable was already specified: {:?}",
                    cfg.executable_path
                )));
            }
            cfg.executable_path = Some(Executable::Bios(PathBuf::from(value.unwrap().to_owned())));
        }
        "vfio" => {
            let vfio_path = PathBuf::from(value.unwrap());
            if !vfio_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_owned(),
                    expected: "the vfio path does not exist",
                });
            }
            if !vfio_path.is_dir() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_owned(),
                    expected: "the vfio path should be directory",
                });
            }

            cfg.vfio = Some(vfio_path);
        }
        "help" => return Err(argument::Error::PrintHelp),
        // The argument table is the single source of accepted names, so any
        // other name here is a programming error.
        _ => unreachable!(),
    }
    Ok(())
}
/// Parses the `crosvm run` argument list, validates the resulting `Config`,
/// and launches the VM (plugin or regular), returning `Err(())` on failure.
fn run_vm(args: std::env::Args) -> std::result::Result<(), ()> {
    let arguments =
        &[Argument::positional("KERNEL", "bzImage of kernel to run"),
          Argument::value("android-fstab", "PATH", "Path to Android fstab"),
          Argument::short_value('i', "initrd", "PATH", "Initial ramdisk to load."),
          Argument::short_value('p',
                                "params",
                                "PARAMS",
                                "Extra kernel or plugin command line arguments. Can be given more than once."),
          Argument::short_value('c', "cpus", "N", "Number of VCPUs. (default: 1)"),
          Argument::value("cpu-affinity", "CPUSET", "Comma-separated list of CPUs or CPU ranges to run VCPUs on. (e.g. 0,1-3,5) (default: no mask)"),
          Argument::short_value('m',
                                "mem",
                                "N",
                                "Amount of guest memory in MiB. (default: 256)"),
          Argument::short_value('r',
                                "root",
                                "PATH[,key=value[,key=value[,...]]",
                                "Path to a root disk image followed by optional comma-separated options.
                              Like `--disk` but adds appropriate kernel command line option.
                              See --disk for valid options."),
          Argument::value("rwroot", "PATH[,key=value[,key=value[,...]]", "Path to a writable root disk image followed by optional comma-separated options.
                          See --disk for valid options."),
          Argument::short_value('d', "disk", "PATH[,key=value[,key=value[,...]]", "Path to a disk image followed by optional comma-separated options.
                              Valid keys:
                              sparse=BOOL - Indicates whether the disk should support the discard operation (default: true)
                              block_size=BYTES - Set the reported block size of the disk (default: 512)"),
          Argument::value("qcow", "PATH", "Path to a qcow2 disk image. (Deprecated; use --disk instead.)"),
          Argument::value("rwdisk", "PATH[,key=value[,key=value[,...]]", "Path to a writable disk image followed by optional comma-separated options.
                          See --disk for valid options."),
          Argument::value("rwqcow", "PATH", "Path to a writable qcow2 disk image. (Deprecated; use --rwdisk instead.)"),
          Argument::value("rw-pmem-device", "PATH", "Path to a writable disk image."),
          Argument::value("pmem-device", "PATH", "Path to a disk image."),
          Argument::value("host_ip",
                          "IP",
                          "IP address to assign to host tap interface."),
          Argument::value("netmask", "NETMASK", "Netmask for VM subnet."),
          Argument::value("mac", "MAC", "MAC address for VM."),
          Argument::flag("cras-audio", "Add an audio device to the VM that plays samples through CRAS server"),
          Argument::flag("cras-capture", "Enable capturing audio from CRAS server to the cras-audio device"),
          Argument::flag("null-audio", "Add an audio device to the VM that plays samples to /dev/null"),
          Argument::value("serial",
                          "type=TYPE,[num=NUM,path=PATH,console,stdin]",
                          "Comma separated key=value pairs for setting up serial devices. Can be given more than once.
                          Possible key values:
                          type=(stdout,syslog,sink,file) - Where to route the serial device
                          num=(1,2,3,4) - Serial Device Number. If not provided, num will default to 1.
                          path=PATH - The path to the file to write to when type=file
                          console - Use this serial device as the guest console. Can only be given once. Will default to first serial port if not provided.
                          stdin - Direct standard input to this serial device. Can only be given once. Will default to first serial port if not provided.
                          "),
          Argument::value("syslog-tag", "TAG", "When logging to syslog, use the provided tag."),
          Argument::value("x-display", "DISPLAY", "X11 display name to use."),
          Argument::flag("display-window-keyboard", "Capture keyboard input from the display window."),
          // Fixed copy-paste error: this flag captures mouse input, not keyboard.
          Argument::flag("display-window-mouse", "Capture mouse input from the display window."),
          Argument::value("wayland-sock", "PATH", "Path to the Wayland socket to use."),
          #[cfg(feature = "wl-dmabuf")]
          Argument::flag("wayland-dmabuf", "Enable support for DMABufs in Wayland device."),
          Argument::short_value('s',
                                "socket",
                                "PATH",
                                "Path to put the control socket. If PATH is a directory, a name will be generated."),
          Argument::flag("disable-sandbox", "Run all devices in one, non-sandboxed process."),
          Argument::value("cid", "CID", "Context ID for virtual sockets."),
          Argument::value("shared-dir", "PATH:TAG[:type=TYPE:writeback=BOOL:timeout=SECONDS:uidmap=UIDMAP:gidmap=GIDMAP:cache=CACHE]",
                          "Colon-separated options for configuring a directory to be shared with the VM.
The first field is the directory to be shared and the second field is the tag that the VM can use to identify the device.
The remaining fields are key=value pairs that may appear in any order.  Valid keys are:
type=(p9, fs) - Indicates whether the directory should be shared via virtio-9p or virtio-fs (default: p9).
uidmap=UIDMAP - The uid map to use for the device's jail in the format \"inner outer count[,inner outer count]\" (default: 0 <current euid> 1).
gidmap=GIDMAP - The gid map to use for the device's jail in the format \"inner outer count[,inner outer count]\" (default: 0 <current egid> 1).
cache=(never, auto, always) - Indicates whether the VM can cache the contents of the shared directory (default: auto).  When set to \"auto\" and the type is \"fs\", the VM will use close-to-open consistency for file contents.
timeout=SECONDS - How long the VM should consider file attributes and directory entries to be valid (default: 5).  If the VM has exclusive access to the directory, then this should be a large value.  If the directory can be modified by other processes, then this should be 0.
writeback=BOOL - Indicates whether the VM can use writeback caching (default: false).  This is only safe to do when the VM has exclusive access to the files in a directory.  Additionally, the server should have read permission for all files as the VM may issue read requests even for files that are opened write-only.
"),
          Argument::value("seccomp-policy-dir", "PATH", "Path to seccomp .policy files."),
          Argument::flag("seccomp-log-failures", "Instead of seccomp filter failures being fatal, they will be logged instead."),
          #[cfg(feature = "plugin")]
          Argument::value("plugin", "PATH", "Absolute path to plugin process to run under crosvm."),
          #[cfg(feature = "plugin")]
          Argument::value("plugin-root", "PATH", "Absolute path to a directory that will become root filesystem for the plugin process."),
          #[cfg(feature = "plugin")]
          Argument::value("plugin-mount", "PATH:PATH:BOOL", "Path to be mounted into the plugin's root filesystem.  Can be given more than once."),
          #[cfg(feature = "plugin")]
          Argument::value("plugin-gid-map", "GID:GID:INT", "Supplemental GIDs that should be mapped in plugin jail.  Can be given more than once."),
          Argument::flag("vhost-net", "Use vhost for networking."),
          Argument::value("tap-fd",
                          "fd",
                          "File descriptor for configured tap device. A different virtual network card will be added each time this argument is given."),
          #[cfg(feature = "gpu")]
          Argument::flag_or_value("gpu",
                                  "[width=INT,height=INT]",
                                  "(EXPERIMENTAL) Comma separated key=value pairs for setting up a virtio-gpu device
                                  Possible key values:
                                  width=INT - The width of the virtual display connected to the virtio-gpu.
                                  height=INT - The height of the virtual display connected to the virtio-gpu.
                                  egl[=true|=false] - If the virtio-gpu backend should use a EGL context for rendering.
                                  glx[=true|=false] - If the virtio-gpu backend should use a GLX context for rendering.
                                  surfaceless[=true|=false] - If the virtio-gpu backend should use a surfaceless context for rendering.
                                  "),
          #[cfg(feature = "tpm")]
          Argument::flag("software-tpm", "enable a software emulated trusted platform module device"),
          Argument::value("evdev", "PATH", "Path to an event device node. The device will be grabbed (unusable from the host) and made available to the guest with the same configuration it shows on the host"),
          Argument::value("single-touch", "PATH:WIDTH:HEIGHT", "Path to a socket from where to read single touch input events (such as those from a touchscreen) and write status updates to, optionally followed by width and height (defaults to 800x1280)."),
          Argument::value("trackpad", "PATH:WIDTH:HEIGHT", "Path to a socket from where to read trackpad input events and write status updates to, optionally followed by screen width and height (defaults to 800x1280)."),
          Argument::value("mouse", "PATH", "Path to a socket from where to read mouse input events and write status updates to."),
          Argument::value("keyboard", "PATH", "Path to a socket from where to read keyboard input events and write status updates to."),
          #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
          Argument::flag("split-irqchip", "(EXPERIMENTAL) enable split-irqchip support"),
          Argument::value("bios", "PATH", "Path to BIOS/firmware ROM"),
          Argument::value("vfio", "PATH", "Path to sysfs of pass through or mdev device"),
          Argument::short_flag('h', "help", "Print help message.")];

    let mut cfg = Config::default();
    let match_res = set_arguments(args, &arguments[..], |name, value| {
        set_argument(&mut cfg, name, value)
    })
    .and_then(|_| {
        // Cross-argument validation that can only run once everything is parsed.
        if cfg.executable_path.is_none() {
            return Err(argument::Error::ExpectedArgument("`KERNEL`".to_owned()));
        }
        // Network options are all-or-nothing.
        if cfg.host_ip.is_some() || cfg.netmask.is_some() || cfg.mac_address.is_some() {
            if cfg.host_ip.is_none() {
                return Err(argument::Error::ExpectedArgument(
                    "`host_ip` missing from network config".to_owned(),
                ));
            }
            if cfg.netmask.is_none() {
                return Err(argument::Error::ExpectedArgument(
                    "`netmask` missing from network config".to_owned(),
                ));
            }
            if cfg.mac_address.is_none() {
                return Err(argument::Error::ExpectedArgument(
                    "`mac` missing from network config".to_owned(),
                ));
            }
        }
        if cfg.plugin_root.is_some() && !executable_is_plugin(&cfg.executable_path) {
            return Err(argument::Error::ExpectedArgument(
                "`plugin-root` requires `plugin`".to_owned(),
            ));
        }
        Ok(())
    });

    match match_res {
        #[cfg(feature = "plugin")]
        Ok(()) if executable_is_plugin(&cfg.executable_path) => {
            match crosvm::plugin::run_config(cfg) {
                Ok(_) => {
                    info!("crosvm and plugin have exited normally");
                    Ok(())
                }
                Err(e) => {
                    error!("{}", e);
                    Err(())
                }
            }
        }
        Ok(()) => match linux::run_config(cfg) {
            Ok(_) => {
                info!("crosvm has exited normally");
                Ok(())
            }
            Err(e) => {
                error!("{}", e);
                Err(())
            }
        },
        Err(argument::Error::PrintHelp) => {
            print_help("crosvm run", "KERNEL", &arguments[..]);
            Ok(())
        }
        Err(e) => {
            println!("{}", e);
            Err(())
        }
    }
}
/// Sends `request` to every control socket path in `args` and returns the last
/// response received.
///
/// Per-socket failures (connect, send, or recv) are logged and skipped; the
/// result is `Err(())` only if no socket produced a response after the last
/// failure.
fn handle_request(
    request: &VmRequest,
    args: std::env::Args,
) -> std::result::Result<VmResponse, ()> {
    let mut return_result = Err(());
    for socket_path in args {
        match UnixSeqpacket::connect(&socket_path) {
            Ok(s) => {
                let socket: VmControlRequestSocket = MsgSocket::new(s);
                if let Err(e) = socket.send(request) {
                    error!(
                        "failed to send request to socket at '{}': {}",
                        socket_path, e
                    );
                    return_result = Err(());
                    continue;
                }
                match socket.recv() {
                    Ok(response) => return_result = Ok(response),
                    Err(e) => {
                        // This arm is a recv failure, not a send failure;
                        // report it as such (the old message said "send ... at2").
                        error!(
                            "failed to recv response from socket at '{}': {}",
                            socket_path, e
                        );
                        return_result = Err(());
                        continue;
                    }
                }
            }
            Err(e) => {
                error!("failed to connect to socket at '{}': {}", socket_path, e);
                return_result = Err(());
            }
        }
    }

    return_result
}
/// Sends `request` to the sockets named in `args` and logs the response.
fn vms_request(request: &VmRequest, args: std::env::Args) -> std::result::Result<(), ()> {
    handle_request(request, args).map(|response| {
        info!("request response was {}", response);
    })
}
/// `crosvm stop`: asks each listed VM socket to exit.
fn stop_vms(args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() > 0 {
        vms_request(&VmRequest::Exit, args)
    } else {
        // No sockets given: show usage and fail.
        print_help("crosvm stop", "VM_SOCKET...", &[]);
        println!("Stops the crosvm instance listening on each `VM_SOCKET` given.");
        Err(())
    }
}
/// `crosvm suspend`: asks each listed VM socket to suspend its vcpus.
fn suspend_vms(args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() > 0 {
        vms_request(&VmRequest::Suspend, args)
    } else {
        // No sockets given: show usage and fail.
        print_help("crosvm suspend", "VM_SOCKET...", &[]);
        println!("Suspends the crosvm instance listening on each `VM_SOCKET` given.");
        Err(())
    }
}
/// `crosvm resume`: asks each listed VM socket to resume its vcpus.
fn resume_vms(args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() > 0 {
        vms_request(&VmRequest::Resume, args)
    } else {
        // No sockets given: show usage and fail.
        print_help("crosvm resume", "VM_SOCKET...", &[]);
        println!("Resumes the crosvm instance listening on each `VM_SOCKET` given.");
        Err(())
    }
}
/// `crosvm balloon SIZE VM_SOCKET...`: adjusts the balloon of each listed VM
/// to `SIZE` bytes.
fn balloon_vms(mut args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() < 2 {
        print_help("crosvm balloon", "SIZE VM_SOCKET...", &[]);
        // Typo fix: "ballon" -> "balloon".
        println!("Set the balloon size of the crosvm instance to `SIZE` bytes.");
        return Err(());
    }
    // `next()` is the idiomatic (and equivalent) form of `nth(0)`.
    let num_bytes = match args.next().unwrap().parse::<u64>() {
        Ok(n) => n,
        Err(_) => {
            error!("Failed to parse number of bytes");
            return Err(());
        }
    };

    let command = BalloonControlCommand::Adjust { num_bytes };
    // The remaining args are the socket paths.
    vms_request(&VmRequest::BalloonCommand(command), args)
}
/// `crosvm create_qcow2 PATH SIZE`: creates an empty qcow2 image of `SIZE`
/// bytes at `PATH`.
fn create_qcow2(mut args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() != 2 {
        print_help("crosvm create_qcow2", "PATH SIZE", &[]);
        println!("Create a new QCOW2 image at `PATH` of the specified `SIZE` in bytes.");
        return Err(());
    }
    // Consume the two positional arguments in order; `next()` replaces the
    // less idiomatic `nth(0)` (which also consumes one element per call).
    let file_path = args.next().unwrap();
    let size: u64 = match args.next().unwrap().parse::<u64>() {
        Ok(n) => n,
        Err(_) => {
            error!("Failed to parse size of the disk.");
            return Err(());
        }
    };

    let file = OpenOptions::new()
        .create(true)
        .read(true)
        .write(true)
        .open(&file_path)
        .map_err(|e| {
            error!("Failed opening qcow file at '{}': {}", file_path, e);
        })?;

    QcowFile::new(file, size).map_err(|e| {
        error!("Failed to create qcow file at '{}': {}", file_path, e);
    })?;
    Ok(())
}
/// `crosvm disk SUBCOMMAND ... VM_SOCKET...`: manages attached virtual disks.
/// Currently only `resize DISK_INDEX NEW_SIZE` is supported.
fn disk_cmd(mut args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() < 2 {
        print_help("crosvm disk", "SUBCOMMAND VM_SOCKET...", &[]);
        println!("Manage attached virtual disk devices.");
        println!("Subcommands:");
        println!("  resize DISK_INDEX NEW_SIZE VM_SOCKET");
        return Err(());
    }
    // `next()` replaces the less idiomatic `nth(0)`; each call consumes one
    // positional argument.
    let subcommand: &str = &args.next().unwrap();

    let request = match subcommand {
        "resize" => {
            let disk_index = match args.next().unwrap().parse::<usize>() {
                Ok(n) => n,
                Err(_) => {
                    error!("Failed to parse disk index");
                    return Err(());
                }
            };

            let new_size = match args.next().unwrap().parse::<u64>() {
                Ok(n) => n,
                Err(_) => {
                    error!("Failed to parse disk size");
                    return Err(());
                }
            };

            VmRequest::DiskCommand {
                disk_index,
                command: DiskControlCommand::Resize { new_size },
            }
        }
        _ => {
            error!("Unknown disk subcommand '{}'", subcommand);
            return Err(());
        }
    };

    // The remaining args are the socket paths.
    vms_request(&request, args)
}
/// Errors that can occur while handling a `crosvm usb` subcommand.
enum ModifyUsbError {
    /// A required positional argument (named by the payload) was not given.
    ArgMissing(&'static str),
    /// An argument's value could not be parsed; holds (arg name, raw value).
    ArgParse(&'static str, String),
    /// An integer argument failed to parse; holds (arg name, raw value, cause).
    ArgParseInt(&'static str, String, ParseIntError),
    /// A raw fd passed via /proc/self/fd failed validation.
    FailedFdValidate(sys_util::Error),
    /// The given device path does not exist.
    PathDoesNotExist(PathBuf),
    /// Communication over the control socket failed.
    SocketFailed,
    /// The VM replied with something other than a USB result.
    UnexpectedResponse(VmResponse),
    /// Unrecognized `crosvm usb` subcommand.
    UnknownCommand(String),
    /// The VM's USB control op completed but reported an error result.
    UsbControl(UsbControlResult),
}
impl fmt::Display for ModifyUsbError {
    // Human-readable rendering of each error variant, used when printing
    // `crosvm usb` failures to the user. The exact wording is user-visible.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::ModifyUsbError::*;

        match self {
            ArgMissing(a) => write!(f, "argument missing: {}", a),
            ArgParse(name, value) => {
                write!(f, "failed to parse argument {} value `{}`", name, value)
            }
            ArgParseInt(name, value, e) => write!(
                f,
                "failed to parse integer argument {} value `{}`: {}",
                name, value, e
            ),
            FailedFdValidate(e) => write!(f, "failed to validate file descriptor: {}", e),
            PathDoesNotExist(p) => write!(f, "path `{}` does not exist", p.display()),
            SocketFailed => write!(f, "socket failed"),
            // UnexpectedResponse/UsbControl delegate to the payload's Display.
            UnexpectedResponse(r) => write!(f, "unexpected response: {}", r),
            UnknownCommand(c) => write!(f, "unknown command: `{}`", c),
            UsbControl(e) => write!(f, "{}", e),
        }
    }
}
/// Result alias used by the `crosvm usb` helpers below.
type ModifyUsbResult<T> = std::result::Result<T, ModifyUsbError>;
/// Parses a `BUS_ID:ADDR:VID:PID` string into its four components.
///
/// `bus_id` and `addr` are decimal `u8`s; `vid` and `pid` are hexadecimal
/// `u16`s. Returns `ModifyUsbError::ArgParse`/`ArgParseInt` on malformed input.
fn parse_bus_id_addr(v: &str) -> ModifyUsbResult<(u8, u8, u16, u16)> {
    debug!("parse_bus_id_addr: {}", v);
    let mut ids = v.split(':');
    match (ids.next(), ids.next(), ids.next(), ids.next()) {
        (Some(bus_id), Some(addr), Some(vid), Some(pid)) => {
            let bus_id = bus_id
                .parse::<u8>()
                .map_err(|e| ModifyUsbError::ArgParseInt("bus_id", bus_id.to_owned(), e))?;
            let addr = addr
                .parse::<u8>()
                .map_err(|e| ModifyUsbError::ArgParseInt("addr", addr.to_owned(), e))?;
            // Vendor/product ids are conventionally written in hex.
            let vid = u16::from_str_radix(vid, 16)
                .map_err(|e| ModifyUsbError::ArgParseInt("vid", vid.to_owned(), e))?;
            let pid = u16::from_str_radix(pid, 16)
                .map_err(|e| ModifyUsbError::ArgParseInt("pid", pid.to_owned(), e))?;
            Ok((bus_id, addr, vid, pid))
        }
        _ => Err(ModifyUsbError::ArgParse(
            "BUS_ID_ADDR_BUS_NUM_DEV_NUM",
            v.to_owned(),
        )),
    }
}
/// Extracts a raw fd number from the final component of `path` and validates it.
///
/// The file name component must parse as a C int, and the resulting fd must
/// pass `validate_raw_fd` before it is returned.
fn raw_fd_from_path(path: &Path) -> ModifyUsbResult<RawFd> {
    if !path.exists() {
        return Err(ModifyUsbError::PathDoesNotExist(path.to_owned()));
    }
    // The numeric fd is the final path component, e.g. "7" in "/proc/self/fd/7".
    let fd_str = path
        .file_name()
        .and_then(|name| name.to_str())
        .ok_or_else(|| {
            ModifyUsbError::ArgParse("USB_DEVICE_PATH", path.to_string_lossy().into_owned())
        })?;
    let raw_fd = fd_str
        .parse::<libc::c_int>()
        .map_err(|e| ModifyUsbError::ArgParseInt("USB_DEVICE_PATH", fd_str.to_owned(), e))?;
    validate_raw_fd(raw_fd).map_err(ModifyUsbError::FailedFdValidate)
}
/// Handles `crosvm usb attach`: parses the device spec and asks the VM to
/// attach the USB device, returning the USB control result.
fn usb_attach(mut args: std::env::Args) -> ModifyUsbResult<UsbControlResult> {
    // First positional arg: "BUS_ID:ADDR:VID:PID".
    let val = args
        .next()
        .ok_or(ModifyUsbError::ArgMissing("BUS_ID_ADDR_BUS_NUM_DEV_NUM"))?;
    let (bus, addr, vid, pid) = parse_bus_id_addr(&val)?;
    // Second positional arg: path to the USB device node, or "-" for no file.
    let dev_path = PathBuf::from(
        args.next()
            .ok_or(ModifyUsbError::ArgMissing("usb device path"))?,
    );
    let usb_file: Option<File> = if dev_path == Path::new("-") {
        None
    } else if dev_path.parent() == Some(Path::new("/proc/self/fd")) {
        // Special case '/proc/self/fd/*' paths. The FD is already open, just use it.
        // Safe because we will validate |raw_fd|.
        Some(unsafe { File::from_raw_fd(raw_fd_from_path(&dev_path)?) })
    } else {
        Some(
            OpenOptions::new()
                .read(true)
                .write(true)
                .open(&dev_path)
                .map_err(|_| ModifyUsbError::UsbControl(UsbControlResult::FailedToOpenDevice))?,
        )
    };
    let request = VmRequest::UsbCommand(UsbControlCommand::AttachDevice {
        bus,
        addr,
        vid,
        pid,
        fd: usb_file.map(MaybeOwnedFd::Owned),
    });
    // Remaining args (the VM_SOCKET list per the usb help text) go to handle_request.
    let response = handle_request(&request, args).map_err(|_| ModifyUsbError::SocketFailed)?;
    match response {
        VmResponse::UsbResponse(usb_resp) => Ok(usb_resp),
        r => Err(ModifyUsbError::UnexpectedResponse(r)),
    }
}
/// Handles `crosvm usb detach`: detaches the device on the given port number.
fn usb_detach(mut args: std::env::Args) -> ModifyUsbResult<UsbControlResult> {
    let port_arg = args.next().ok_or(ModifyUsbError::ArgMissing("PORT"))?;
    let port: u8 = port_arg
        .parse()
        .map_err(|e| ModifyUsbError::ArgParseInt("PORT", port_arg.to_owned(), e))?;
    let request = VmRequest::UsbCommand(UsbControlCommand::DetachDevice { port });
    match handle_request(&request, args) {
        Ok(VmResponse::UsbResponse(usb_resp)) => Ok(usb_resp),
        Ok(r) => Err(ModifyUsbError::UnexpectedResponse(r)),
        Err(_) => Err(ModifyUsbError::SocketFailed),
    }
}
/// Handles `crosvm usb list`: queries the state of every USB port at once.
fn usb_list(args: std::env::Args) -> ModifyUsbResult<UsbControlResult> {
    // Ask about every port index: ports = [0, 1, 2, ...].
    let mut ports: [u8; USB_CONTROL_MAX_PORTS] = Default::default();
    ports
        .iter_mut()
        .enumerate()
        .for_each(|(index, port)| *port = index as u8);
    let request = VmRequest::UsbCommand(UsbControlCommand::ListDevice { ports });
    match handle_request(&request, args) {
        Ok(VmResponse::UsbResponse(usb_resp)) => Ok(usb_resp),
        Ok(r) => Err(ModifyUsbError::UnexpectedResponse(r)),
        Err(_) => Err(ModifyUsbError::SocketFailed),
    }
}
/// Entry point for `crosvm usb`: dispatches to attach/detach/list and prints
/// either the successful response or an error message.
fn modify_usb(mut args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() < 2 {
        print_help("crosvm usb",
                   "[attach BUS_ID:ADDR:VENDOR_ID:PRODUCT_ID [USB_DEVICE_PATH|-] | detach PORT | list] VM_SOCKET...", &[]);
        return Err(());
    }
    // This unwrap will not panic because of the above length check.
    let command = args.next().unwrap();
    let result = match command.as_ref() {
        "attach" => usb_attach(args),
        "detach" => usb_detach(args),
        "list" => usb_list(args),
        other => Err(ModifyUsbError::UnknownCommand(other.to_owned())),
    };
    result
        .map(|response| println!("{}", response))
        .map_err(|e| println!("error {}", e))
}
/// Prints the top-level usage message.
///
/// Fix: the previous help listed only a subset of subcommands and advertised
/// `[stop|run]`, while `crosvm_main` also accepts `suspend`, `resume`, and
/// `balloon`; the listing below mirrors the dispatch in `crosvm_main`.
fn print_usage() {
    print_help("crosvm", "[command]", &[]);
    println!("Commands:");
    println!("    stop - Stops crosvm instances via their control sockets.");
    println!("    suspend - Suspends a running crosvm instance.");
    println!("    resume - Resumes a suspended crosvm instance.");
    println!("    run  - Start a new crosvm instance.");
    println!("    balloon - Adjusts the balloon of a running crosvm instance.");
    println!("    create_qcow2  - Create a new qcow2 disk image file.");
    println!("    disk - Manage attached virtual disk devices.");
    println!("    usb - Manage attached virtual USB devices.");
}
/// Top-level command dispatch. Returns `Ok(())`/`Err(())` which `main` maps to
/// the process exit code, after reaping any child device processes.
fn crosvm_main() -> std::result::Result<(), ()> {
    if let Err(e) = syslog::init() {
        println!("failed to initialize syslog: {}", e);
        return Err(());
    }
    panic_hook::set_panic_hook();
    let mut args = std::env::args();
    // args[0] is the executable name; it is only consumed, never used.
    if args.next().is_none() {
        error!("expected executable name");
        return Err(());
    }
    // Past this point, usage of exit is in danger of leaking zombie processes.
    let ret = match args.next().as_ref().map(|a| a.as_ref()) {
        None => {
            print_usage();
            Ok(())
        }
        Some("stop") => stop_vms(args),
        Some("suspend") => suspend_vms(args),
        Some("resume") => resume_vms(args),
        Some("run") => run_vm(args),
        Some("balloon") => balloon_vms(args),
        Some("create_qcow2") => create_qcow2(args),
        Some("disk") => disk_cmd(args),
        Some("usb") => modify_usb(args),
        Some(c) => {
            println!("invalid subcommand: {:?}", c);
            print_usage();
            Err(())
        }
    };
    // Reap exit status from any child device processes. At this point, all devices should have been
    // dropped in the main process and told to shutdown. Try over a period of 100ms, since it may
    // take some time for the processes to shut down.
    if !wait_all_children() {
        // We gave them a chance, and it's too late.
        warn!("not all child processes have exited; sending SIGKILL");
        if let Err(e) = kill_process_group() {
            // We're now at the mercy of the OS to clean up after us.
            warn!("unable to kill all child processes: {}", e);
        }
    }
    // WARNING: Any code added after this point is not guaranteed to run
    // since we may forcibly kill this process (and its children) above.
    ret
}
/// Process entry point: exit 0 on success, 1 on any failure from `crosvm_main`.
fn main() {
    let exit_code = match crosvm_main() {
        Ok(()) => 0,
        Err(()) => 1,
    };
    std::process::exit(exit_code);
}
#[cfg(test)]
mod tests {
    // Unit tests for the command-line parsing helpers above.
    // Fixes: test name typo `parse_serial_vaild` -> `parse_serial_valid`, and
    // expect-message typo "succeded" -> "succeeded".
    use super::*;
    #[test]
    fn parse_cpu_set_single() {
        assert_eq!(parse_cpu_set("123").expect("parse failed"), vec![123]);
    }
    #[test]
    fn parse_cpu_set_list() {
        assert_eq!(
            parse_cpu_set("0,1,2,3").expect("parse failed"),
            vec![0, 1, 2, 3]
        );
    }
    #[test]
    fn parse_cpu_set_range() {
        assert_eq!(
            parse_cpu_set("0-3").expect("parse failed"),
            vec![0, 1, 2, 3]
        );
    }
    #[test]
    fn parse_cpu_set_list_of_ranges() {
        assert_eq!(
            parse_cpu_set("3-4,7-9,18").expect("parse failed"),
            vec![3, 4, 7, 8, 9, 18]
        );
    }
    #[test]
    fn parse_cpu_set_repeated() {
        // For now, allow duplicates - they will be handled gracefully by the vec to cpu_set_t conversion.
        assert_eq!(parse_cpu_set("1,1,1").expect("parse failed"), vec![1, 1, 1]);
    }
    #[test]
    fn parse_cpu_set_negative() {
        // Negative CPU numbers are not allowed.
        parse_cpu_set("-3").expect_err("parse should have failed");
    }
    #[test]
    fn parse_cpu_set_reverse_range() {
        // Ranges must be from low to high.
        parse_cpu_set("5-2").expect_err("parse should have failed");
    }
    #[test]
    fn parse_cpu_set_open_range() {
        parse_cpu_set("3-").expect_err("parse should have failed");
    }
    #[test]
    fn parse_cpu_set_extra_comma() {
        parse_cpu_set("0,1,2,").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_valid() {
        parse_serial_options("type=syslog,num=1,console=true,stdin=true")
            .expect("parse should have succeeded");
    }
    #[test]
    fn parse_serial_valid_no_num() {
        parse_serial_options("type=syslog").expect("parse should have succeeded");
    }
    #[test]
    fn parse_serial_invalid_type() {
        parse_serial_options("type=wormhole,num=1").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_invalid_num_upper() {
        parse_serial_options("type=syslog,num=5").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_invalid_num_lower() {
        parse_serial_options("type=syslog,num=0").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_invalid_num_string() {
        parse_serial_options("type=syslog,num=number3").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_invalid_option() {
        parse_serial_options("type=syslog,speed=lightspeed").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_invalid_two_stdin() {
        let mut config = Config::default();
        set_argument(&mut config, "serial", Some("num=1,type=stdout,stdin=true"))
            .expect("should parse the first serial argument");
        set_argument(&mut config, "serial", Some("num=2,type=stdout,stdin=true"))
            .expect_err("should fail to parse a second serial port connected to stdin");
    }
}
crosvm: plugin-mount-file and plugin-gid-map-file options
Lists of bind-mounts and gid maps can be quite long, so let's allow
listing them in text files when convenient.
BUG=b:144454617
TEST=Run Plugin VM
Change-Id: I1218dab5a7e87b9f1ba44de6828da890fddb99fe
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/1967785
Reviewed-by: Zach Reizner <3374927928a53d04457674c546bd4de57c2044ec@chromium.org>
Tested-by: kokoro <2ac7b1f3fa578934c95181d4272be0d3bca00121@google.com>
Commit-Queue: Dmitry Torokhov <10a8c465cefc9bdd6c925e26964d23c90f1141cc@chromium.org>
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Runs a virtual machine under KVM
pub mod panic_hook;
use std::fmt;
use std::fs::{File, OpenOptions};
use std::io::{BufRead, BufReader};
use std::num::ParseIntError;
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::string::String;
use std::thread::sleep;
use std::time::Duration;
use crosvm::{
argument::{self, print_help, set_arguments, Argument},
linux, BindMount, Config, DiskOption, Executable, GidMap, SharedDir, TouchDeviceOption,
};
#[cfg(feature = "gpu")]
use devices::virtio::gpu::{GpuParameters, DEFAULT_GPU_PARAMS};
use devices::{SerialParameters, SerialType};
use msg_socket::{MsgReceiver, MsgSender, MsgSocket};
use qcow::QcowFile;
use sys_util::{
debug, error, getpid, info, kill_process_group, net::UnixSeqpacket, reap_child, syslog,
validate_raw_fd, warn,
};
use vm_control::{
BalloonControlCommand, DiskControlCommand, MaybeOwnedFd, UsbControlCommand, UsbControlResult,
VmControlRequestSocket, VmRequest, VmResponse, USB_CONTROL_MAX_PORTS,
};
/// Returns true when the configured VM executable is a plugin binary.
fn executable_is_plugin(executable: &Option<Executable>) -> bool {
    if let Some(Executable::Plugin(_)) = executable {
        true
    } else {
        false
    }
}
// Wait for all children to exit. Return true if they have all exited, false
// otherwise.
fn wait_all_children() -> bool {
    // Up to 100 polling rounds with a 10ms sleep between rounds.
    const CHILD_WAIT_MAX_ITER: isize = 100;
    const CHILD_WAIT_MS: u64 = 10;
    for _ in 0..CHILD_WAIT_MAX_ITER {
        // Drain every immediately-reapable child before deciding to sleep.
        loop {
            match reap_child() {
                // NOTE(review): Ok(0) is treated as "nothing to reap right now";
                // confirm against reap_child's contract.
                Ok(0) => break,
                // We expect ECHILD which indicates that there were no children left.
                Err(e) if e.errno() == libc::ECHILD => return true,
                Err(e) => {
                    warn!("error while waiting for children: {}", e);
                    return false;
                }
                // We reaped one child, so continue reaping.
                _ => {}
            }
        }
        // There's no timeout option for waitpid which reap_child calls internally, so our only
        // recourse is to sleep while waiting for the children to exit.
        sleep(Duration::from_millis(CHILD_WAIT_MS));
    }
    // If we've made it to this point, not all of the children have exited.
    false
}
/// Parse a comma-separated list of CPU numbers and ranges and convert it to a Vec of CPU numbers.
///
/// Accepts entries like `"3"` or `"1-4"`; ranges are inclusive and must run
/// from low to high. Returns `argument::Error::InvalidValue` for empty
/// entries, non-numeric indices, or reversed ranges.
fn parse_cpu_set(s: &str) -> argument::Result<Vec<usize>> {
    let mut cpuset = Vec::new();
    for part in s.split(',') {
        let range: Vec<&str> = part.split('-').collect();
        // `split` always yields at least one element, so in practice only the
        // "too many dashes" case can trigger here. (`is_empty` replaces the
        // non-idiomatic `len() == 0`.)
        if range.is_empty() || range.len() > 2 {
            return Err(argument::Error::InvalidValue {
                value: part.to_owned(),
                expected: "invalid list syntax",
            });
        }
        let first_cpu: usize = range[0]
            .parse()
            .map_err(|_| argument::Error::InvalidValue {
                value: part.to_owned(),
                expected: "CPU index must be a non-negative integer",
            })?;
        // A bare number is treated as the degenerate range `n-n`.
        let last_cpu: usize = if range.len() == 2 {
            range[1]
                .parse()
                .map_err(|_| argument::Error::InvalidValue {
                    value: part.to_owned(),
                    expected: "CPU index must be a non-negative integer",
                })?
        } else {
            first_cpu
        };
        if last_cpu < first_cpu {
            return Err(argument::Error::InvalidValue {
                value: part.to_owned(),
                expected: "CPU ranges must be from low to high",
            });
        }
        for cpu in first_cpu..=last_cpu {
            cpuset.push(cpu);
        }
    }
    Ok(cpuset)
}
#[cfg(feature = "gpu")]
/// Parses the comma-separated `key=value` list given to `--gpu` into
/// `GpuParameters`, starting from `DEFAULT_GPU_PARAMS`.
///
/// Boolean keys (`egl`, `gles`, `glx`, `surfaceless`) accept `true`, `false`,
/// or an empty value (a bare key counts as `true`); `width`/`height` must be
/// `u32`s. A `None` input returns the defaults untouched.
fn parse_gpu_options(s: Option<&str>) -> argument::Result<GpuParameters> {
    let mut gpu_params = DEFAULT_GPU_PARAMS;
    if let Some(s) = s {
        // A fragment without '=' yields an empty value ("")—the boolean arms
        // below treat that the same as "true".
        let opts = s
            .split(",")
            .map(|frag| frag.split("="))
            .map(|mut kv| (kv.next().unwrap_or(""), kv.next().unwrap_or("")));
        for (k, v) in opts {
            match k {
                "egl" => match v {
                    "true" | "" => {
                        gpu_params.renderer_use_egl = true;
                    }
                    "false" => {
                        gpu_params.renderer_use_egl = false;
                    }
                    _ => {
                        return Err(argument::Error::InvalidValue {
                            value: v.to_string(),
                            expected: "gpu parameter 'egl' should be a boolean",
                        });
                    }
                },
                "gles" => match v {
                    "true" | "" => {
                        gpu_params.renderer_use_gles = true;
                    }
                    "false" => {
                        gpu_params.renderer_use_gles = false;
                    }
                    _ => {
                        return Err(argument::Error::InvalidValue {
                            value: v.to_string(),
                            expected: "gpu parameter 'gles' should be a boolean",
                        });
                    }
                },
                "glx" => match v {
                    "true" | "" => {
                        gpu_params.renderer_use_glx = true;
                    }
                    "false" => {
                        gpu_params.renderer_use_glx = false;
                    }
                    _ => {
                        return Err(argument::Error::InvalidValue {
                            value: v.to_string(),
                            expected: "gpu parameter 'glx' should be a boolean",
                        });
                    }
                },
                "surfaceless" => match v {
                    "true" | "" => {
                        gpu_params.renderer_use_surfaceless = true;
                    }
                    "false" => {
                        gpu_params.renderer_use_surfaceless = false;
                    }
                    _ => {
                        return Err(argument::Error::InvalidValue {
                            value: v.to_string(),
                            expected: "gpu parameter 'surfaceless' should be a boolean",
                        });
                    }
                },
                "width" => {
                    gpu_params.display_width =
                        v.parse::<u32>()
                            .map_err(|_| argument::Error::InvalidValue {
                                value: v.to_string(),
                                expected: "gpu parameter 'width' must be a valid integer",
                            })?;
                }
                "height" => {
                    gpu_params.display_height =
                        v.parse::<u32>()
                            .map_err(|_| argument::Error::InvalidValue {
                                value: v.to_string(),
                                expected: "gpu parameter 'height' must be a valid integer",
                            })?;
                }
                // Empty fragments (e.g. a trailing comma) are silently ignored.
                "" => {}
                _ => {
                    return Err(argument::Error::UnknownArgument(format!(
                        "gpu parameter {}",
                        k
                    )));
                }
            }
        }
    }
    Ok(gpu_params)
}
/// Parses the comma-separated `key=value` list given to `--serial` into
/// `SerialParameters`.
///
/// Defaults: sink type, port num 1, no path, not the console, stdin detached.
/// `num` must be in 1..=4; `console` and `stdin` must parse as booleans.
fn parse_serial_options(s: &str) -> argument::Result<SerialParameters> {
    let mut serial_setting = SerialParameters {
        type_: SerialType::Sink,
        path: None,
        num: 1,
        console: false,
        stdin: false,
    };
    // A fragment without '=' yields an empty value "".
    let opts = s
        .split(",")
        .map(|frag| frag.split("="))
        .map(|mut kv| (kv.next().unwrap_or(""), kv.next().unwrap_or("")));
    for (k, v) in opts {
        match k {
            "type" => {
                serial_setting.type_ = v
                    .parse::<SerialType>()
                    .map_err(|e| argument::Error::UnknownArgument(format!("{}", e)))?
            }
            "num" => {
                let num = v.parse::<u8>().map_err(|e| {
                    argument::Error::Syntax(format!("serial device number is not parsable: {}", e))
                })?;
                // Only four serial ports are supported.
                if num < 1 || num > 4 {
                    return Err(argument::Error::InvalidValue {
                        value: num.to_string(),
                        expected: "Serial port num must be between 1 - 4",
                    });
                }
                serial_setting.num = num;
            }
            "console" => {
                serial_setting.console = v.parse::<bool>().map_err(|e| {
                    argument::Error::Syntax(format!(
                        "serial device console is not parseable: {}",
                        e
                    ))
                })?
            }
            "stdin" => {
                serial_setting.stdin = v.parse::<bool>().map_err(|e| {
                    argument::Error::Syntax(format!("serial device stdin is not parseable: {}", e))
                })?
            }
            "path" => serial_setting.path = Some(PathBuf::from(v)),
            _ => {
                return Err(argument::Error::UnknownArgument(format!(
                    "serial parameter {}",
                    k
                )));
            }
        }
    }
    Ok(serial_setting)
}
/// Parses a `--plugin-mount` value of the form `<src>:<dst>:<writable>` into a
/// `BindMount`. `src` must be an absolute, existing path; `dst` must be
/// absolute; `writable` must parse as a bool.
fn parse_plugin_mount_option(value: &str) -> argument::Result<BindMount> {
    let components: Vec<&str> = value.split(':').collect();
    if components.len() != 3 {
        return Err(argument::Error::InvalidValue {
            value: value.to_owned(),
            expected: "`plugin-mount` must have exactly 3 components: <src>:<dst>:<writable>",
        });
    }
    let (src_str, dst_str, writable_str) = (components[0], components[1], components[2]);

    // Validate the source: absolute and present on the host.
    let src = PathBuf::from(src_str);
    if src.is_relative() {
        return Err(argument::Error::InvalidValue {
            value: src_str.to_owned(),
            expected: "the source path for `plugin-mount` must be absolute",
        });
    }
    if !src.exists() {
        return Err(argument::Error::InvalidValue {
            value: src_str.to_owned(),
            expected: "the source path for `plugin-mount` does not exist",
        });
    }

    // The destination only needs to be absolute.
    let dst = PathBuf::from(dst_str);
    if dst.is_relative() {
        return Err(argument::Error::InvalidValue {
            value: dst_str.to_owned(),
            expected: "the destination path for `plugin-mount` must be absolute",
        });
    }

    let writable: bool = writable_str
        .parse()
        .map_err(|_| argument::Error::InvalidValue {
            value: writable_str.to_owned(),
            expected: "the <writable> component for `plugin-mount` is not valid bool",
        })?;

    Ok(BindMount { src, dst, writable })
}
fn parse_plugin_gid_map_option(value: &str) -> argument::Result<GidMap> {
let components: Vec<&str> = value.split(":").collect();
if components.len() != 3 {
return Err(argument::Error::InvalidValue {
value: value.to_owned(),
expected: "`plugin-gid-map` must have exactly 3 components: <inner>:<outer>:<count>",
});
}
let inner: libc::gid_t = components[0]
.parse()
.map_err(|_| argument::Error::InvalidValue {
value: components[0].to_owned(),
expected: "the <inner> component for `plugin-gid-map` is not valid gid",
})?;
let outer: libc::gid_t = components[1]
.parse()
.map_err(|_| argument::Error::InvalidValue {
value: components[1].to_owned(),
expected: "the <outer> component for `plugin-gid-map` is not valid gid",
})?;
let count: u32 = components[2]
.parse()
.map_err(|_| argument::Error::InvalidValue {
value: components[2].to_owned(),
expected: "the <count> component for `plugin-gid-map` is not valid number",
})?;
Ok(GidMap {
inner,
outer,
count,
})
}
/// Applies a single parsed command-line argument (`name`, optional `value`) to
/// the `Config` being built for `run_vm`.
///
/// Returns an `argument::Error` when the value is missing, malformed, or
/// conflicts with an argument that was already given. The `value.unwrap()`
/// calls are safe for arguments the parser guarantees carry a value.
///
/// Fixes: error-message typos ("alread" -> "already", "for to assign" ->
/// "to assign"), redundant `.clone()` on `Copy` `Duration`, and
/// `len() != 0` -> `!is_empty()`.
fn set_argument(cfg: &mut Config, name: &str, value: Option<&str>) -> argument::Result<()> {
    match name {
        // Positional argument: the kernel image to boot.
        "" => {
            if cfg.executable_path.is_some() {
                return Err(argument::Error::TooManyArguments(format!(
                    "A VM executable was already specified: {:?}",
                    cfg.executable_path
                )));
            }
            let kernel_path = PathBuf::from(value.unwrap());
            if !kernel_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_owned(),
                    expected: "this kernel path does not exist",
                });
            }
            cfg.executable_path = Some(Executable::Kernel(kernel_path));
        }
        "android-fstab" => {
            if cfg.android_fstab.is_some()
                && !cfg.android_fstab.as_ref().unwrap().as_os_str().is_empty()
            {
                return Err(argument::Error::TooManyArguments(
                    "expected exactly one android fstab path".to_owned(),
                ));
            } else {
                let android_fstab = PathBuf::from(value.unwrap());
                if !android_fstab.exists() {
                    return Err(argument::Error::InvalidValue {
                        value: value.unwrap().to_owned(),
                        expected: "this android fstab path does not exist",
                    });
                }
                cfg.android_fstab = Some(android_fstab);
            }
        }
        "params" => {
            cfg.params.push(value.unwrap().to_owned());
        }
        "cpus" => {
            if cfg.vcpu_count.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`cpus` already given".to_owned(),
                ));
            }
            cfg.vcpu_count =
                Some(
                    value
                        .unwrap()
                        .parse()
                        .map_err(|_| argument::Error::InvalidValue {
                            value: value.unwrap().to_owned(),
                            expected: "this value for `cpus` needs to be integer",
                        })?,
                )
        }
        "cpu-affinity" => {
            if !cfg.vcpu_affinity.is_empty() {
                return Err(argument::Error::TooManyArguments(
                    "`cpu-affinity` already given".to_owned(),
                ));
            }
            cfg.vcpu_affinity = parse_cpu_set(value.unwrap())?;
        }
        "mem" => {
            if cfg.memory.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`mem` already given".to_owned(),
                ));
            }
            cfg.memory =
                Some(
                    value
                        .unwrap()
                        .parse()
                        .map_err(|_| argument::Error::InvalidValue {
                            value: value.unwrap().to_owned(),
                            expected: "this value for `mem` needs to be integer",
                        })?,
                )
        }
        "cras-audio" => {
            cfg.cras_audio = true;
        }
        "cras-capture" => {
            cfg.cras_capture = true;
        }
        "null-audio" => {
            cfg.null_audio = true;
        }
        "serial" => {
            let serial_params = parse_serial_options(value.unwrap())?;
            let num = serial_params.num;
            if cfg.serial_parameters.contains_key(&num) {
                return Err(argument::Error::TooManyArguments(format!(
                    "serial num {}",
                    num
                )));
            }
            // At most one serial device may be the console...
            if serial_params.console {
                for params in cfg.serial_parameters.values() {
                    if params.console {
                        return Err(argument::Error::TooManyArguments(format!(
                            "serial device {} already set as console",
                            params.num
                        )));
                    }
                }
            }
            // ...and at most one may be connected to standard input.
            if serial_params.stdin {
                if let Some(previous_stdin) = cfg.serial_parameters.values().find(|sp| sp.stdin) {
                    return Err(argument::Error::TooManyArguments(format!(
                        "serial device {} already connected to standard input",
                        previous_stdin.num
                    )));
                }
            }
            cfg.serial_parameters.insert(num, serial_params);
        }
        "syslog-tag" => {
            if cfg.syslog_tag.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`syslog-tag` already given".to_owned(),
                ));
            }
            syslog::set_proc_name(value.unwrap());
            cfg.syslog_tag = Some(value.unwrap().to_owned());
        }
        "root" | "rwroot" | "disk" | "rwdisk" | "qcow" | "rwqcow" => {
            // Format: PATH[,key=value,...]; the "rw" prefix selects writability.
            let param = value.unwrap();
            let mut components = param.split(',');
            let read_only = !name.starts_with("rw");
            let disk_path =
                PathBuf::from(
                    components
                        .next()
                        .ok_or_else(|| argument::Error::InvalidValue {
                            value: param.to_owned(),
                            expected: "missing disk path",
                        })?,
                );
            if !disk_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: param.to_owned(),
                    expected: "this disk path does not exist",
                });
            }
            if name.ends_with("root") {
                // Root disks get a kernel root= parameter named after their
                // position: /dev/vda, /dev/vdb, ... (26 letters available).
                if cfg.disks.len() >= 26 {
                    return Err(argument::Error::TooManyArguments(
                        "ran out of letters to assign to root disk".to_owned(),
                    ));
                }
                cfg.params.push(format!(
                    "root=/dev/vd{} {}",
                    char::from(b'a' + cfg.disks.len() as u8),
                    if read_only { "ro" } else { "rw" }
                ));
            }
            let mut disk = DiskOption {
                path: disk_path,
                read_only,
                sparse: true,
                block_size: 512,
            };
            for opt in components {
                let mut o = opt.splitn(2, '=');
                let kind = o.next().ok_or_else(|| argument::Error::InvalidValue {
                    value: opt.to_owned(),
                    expected: "disk options must not be empty",
                })?;
                let value = o.next().ok_or_else(|| argument::Error::InvalidValue {
                    value: opt.to_owned(),
                    expected: "disk options must be of the form `kind=value`",
                })?;
                match kind {
                    "sparse" => {
                        let sparse = value.parse().map_err(|_| argument::Error::InvalidValue {
                            value: value.to_owned(),
                            expected: "`sparse` must be a boolean",
                        })?;
                        disk.sparse = sparse;
                    }
                    "block_size" => {
                        let block_size =
                            value.parse().map_err(|_| argument::Error::InvalidValue {
                                value: value.to_owned(),
                                expected: "`block_size` must be an integer",
                            })?;
                        disk.block_size = block_size;
                    }
                    _ => {
                        return Err(argument::Error::InvalidValue {
                            value: kind.to_owned(),
                            expected: "unrecognized disk option",
                        });
                    }
                }
            }
            cfg.disks.push(disk);
        }
        "pmem-device" | "rw-pmem-device" => {
            let disk_path = PathBuf::from(value.unwrap());
            if !disk_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_owned(),
                    expected: "this disk path does not exist",
                });
            }
            cfg.pmem_devices.push(DiskOption {
                path: disk_path,
                read_only: !name.starts_with("rw"),
                sparse: false,
                block_size: sys_util::pagesize() as u32,
            });
        }
        "host_ip" => {
            if cfg.host_ip.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`host_ip` already given".to_owned(),
                ));
            }
            cfg.host_ip =
                Some(
                    value
                        .unwrap()
                        .parse()
                        .map_err(|_| argument::Error::InvalidValue {
                            value: value.unwrap().to_owned(),
                            expected: "`host_ip` needs to be in the form \"x.x.x.x\"",
                        })?,
                )
        }
        "netmask" => {
            if cfg.netmask.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`netmask` already given".to_owned(),
                ));
            }
            cfg.netmask =
                Some(
                    value
                        .unwrap()
                        .parse()
                        .map_err(|_| argument::Error::InvalidValue {
                            value: value.unwrap().to_owned(),
                            expected: "`netmask` needs to be in the form \"x.x.x.x\"",
                        })?,
                )
        }
        "mac" => {
            if cfg.mac_address.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`mac` already given".to_owned(),
                ));
            }
            cfg.mac_address =
                Some(
                    value
                        .unwrap()
                        .parse()
                        .map_err(|_| argument::Error::InvalidValue {
                            value: value.unwrap().to_owned(),
                            expected: "`mac` needs to be in the form \"XX:XX:XX:XX:XX:XX\"",
                        })?,
                )
        }
        "wayland-sock" => {
            if cfg.wayland_socket_path.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`wayland-sock` already given".to_owned(),
                ));
            }
            let wayland_socket_path = PathBuf::from(value.unwrap());
            if !wayland_socket_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_string(),
                    expected: "Wayland socket does not exist",
                });
            }
            cfg.wayland_socket_path = Some(wayland_socket_path);
        }
        #[cfg(feature = "wl-dmabuf")]
        "wayland-dmabuf" => cfg.wayland_dmabuf = true,
        "x-display" => {
            if cfg.x_display.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`x-display` already given".to_owned(),
                ));
            }
            cfg.x_display = Some(value.unwrap().to_owned());
        }
        "display-window-keyboard" => {
            cfg.display_window_keyboard = true;
        }
        "display-window-mouse" => {
            cfg.display_window_mouse = true;
        }
        "socket" => {
            if cfg.socket_path.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`socket` already given".to_owned(),
                ));
            }
            let mut socket_path = PathBuf::from(value.unwrap());
            // A directory gets a pid-derived socket file name inside it.
            if socket_path.is_dir() {
                socket_path.push(format!("crosvm-{}.sock", getpid()));
            }
            if socket_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: socket_path.to_string_lossy().into_owned(),
                    expected: "this socket path already exists",
                });
            }
            cfg.socket_path = Some(socket_path);
        }
        "disable-sandbox" => {
            cfg.sandbox = false;
        }
        "cid" => {
            if cfg.cid.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`cid` already given".to_owned(),
                ));
            }
            cfg.cid = Some(
                value
                    .unwrap()
                    .parse()
                    .map_err(|_| argument::Error::InvalidValue {
                        value: value.unwrap().to_owned(),
                        expected: "this value for `cid` must be an unsigned integer",
                    })?,
            );
        }
        "shared-dir" => {
            // This is formatted as multiple fields, each separated by ":". The first 2 fields are
            // fixed (src:tag). The rest may appear in any order:
            //
            // * type=TYPE - must be one of "p9" or "fs" (default: p9)
            // * uidmap=UIDMAP - a uid map in the format "inner outer count[,inner outer count]"
            //   (default: "0 <current euid> 1")
            // * gidmap=GIDMAP - a gid map in the same format as uidmap
            //   (default: "0 <current egid> 1")
            // * timeout=TIMEOUT - a timeout value in seconds, which indicates how long attributes
            //   and directory contents should be considered valid (default: 5)
            // * cache=CACHE - one of "never", "always", or "auto" (default: auto)
            // * writeback=BOOL - indicates whether writeback caching should be enabled (default: false)
            let param = value.unwrap();
            let mut components = param.split(':');
            let src =
                PathBuf::from(
                    components
                        .next()
                        .ok_or_else(|| argument::Error::InvalidValue {
                            value: param.to_owned(),
                            expected: "missing source path for `shared-dir`",
                        })?,
                );
            let tag = components
                .next()
                .ok_or_else(|| argument::Error::InvalidValue {
                    value: param.to_owned(),
                    expected: "missing tag for `shared-dir`",
                })?
                .to_owned();
            if !src.is_dir() {
                return Err(argument::Error::InvalidValue {
                    value: param.to_owned(),
                    expected: "source path for `shared-dir` must be a directory",
                });
            }
            let mut shared_dir = SharedDir {
                src,
                tag,
                ..Default::default()
            };
            for opt in components {
                let mut o = opt.splitn(2, '=');
                let kind = o.next().ok_or_else(|| argument::Error::InvalidValue {
                    value: opt.to_owned(),
                    expected: "`shared-dir` options must not be empty",
                })?;
                let value = o.next().ok_or_else(|| argument::Error::InvalidValue {
                    value: opt.to_owned(),
                    expected: "`shared-dir` options must be of the form `kind=value`",
                })?;
                match kind {
                    "type" => {
                        shared_dir.kind =
                            value.parse().map_err(|_| argument::Error::InvalidValue {
                                value: value.to_owned(),
                                expected: "`type` must be one of `fs` or `9p`",
                            })?
                    }
                    "uidmap" => shared_dir.uid_map = value.into(),
                    "gidmap" => shared_dir.gid_map = value.into(),
                    "timeout" => {
                        let seconds = value.parse().map_err(|_| argument::Error::InvalidValue {
                            value: value.to_owned(),
                            expected: "`timeout` must be an integer",
                        })?;
                        // One timeout value drives both entry and attribute caching.
                        // (Duration is Copy; the previous `.clone()` was redundant.)
                        let dur = Duration::from_secs(seconds);
                        shared_dir.cfg.entry_timeout = dur;
                        shared_dir.cfg.attr_timeout = dur;
                    }
                    "cache" => {
                        let policy = value.parse().map_err(|_| argument::Error::InvalidValue {
                            value: value.to_owned(),
                            expected: "`cache` must be one of `never`, `always`, or `auto`",
                        })?;
                        shared_dir.cfg.cache_policy = policy;
                    }
                    "writeback" => {
                        let writeback =
                            value.parse().map_err(|_| argument::Error::InvalidValue {
                                value: value.to_owned(),
                                expected: "`writeback` must be a boolean",
                            })?;
                        shared_dir.cfg.writeback = writeback;
                    }
                    _ => {
                        return Err(argument::Error::InvalidValue {
                            value: kind.to_owned(),
                            expected: "unrecognized option for `shared-dir`",
                        })
                    }
                }
            }
            cfg.shared_dirs.push(shared_dir);
        }
        "seccomp-policy-dir" => {
            // `value` is Some because we are in this match so it's safe to unwrap.
            cfg.seccomp_policy_dir = PathBuf::from(value.unwrap());
        }
        "seccomp-log-failures" => {
            cfg.seccomp_log_failures = true;
        }
        "plugin" => {
            if cfg.executable_path.is_some() {
                return Err(argument::Error::TooManyArguments(format!(
                    "A VM executable was already specified: {:?}",
                    cfg.executable_path
                )));
            }
            let plugin = PathBuf::from(value.unwrap().to_owned());
            if plugin.is_relative() {
                return Err(argument::Error::InvalidValue {
                    value: plugin.to_string_lossy().into_owned(),
                    expected: "the plugin path must be an absolute path",
                });
            }
            cfg.executable_path = Some(Executable::Plugin(plugin));
        }
        "plugin-root" => {
            cfg.plugin_root = Some(PathBuf::from(value.unwrap().to_owned()));
        }
        "plugin-mount" => {
            let mount = parse_plugin_mount_option(value.unwrap())?;
            cfg.plugin_mounts.push(mount);
        }
        "plugin-mount-file" => {
            // Each non-empty line of the file is a `plugin-mount` value.
            let file = File::open(value.unwrap()).map_err(|_| argument::Error::InvalidValue {
                value: value.unwrap().to_owned(),
                expected: "unable to open `plugin-mount-file` file",
            })?;
            let reader = BufReader::new(file);
            for l in reader.lines() {
                let line = l.unwrap();
                // NOTE(review): `trim_end_matches("#")` only strips trailing '#'
                // characters; if the intent is to ignore "#"-comments, this does
                // not do that — confirm the intended file format.
                let trimmed_line = line.trim_end_matches("#").trim();
                if !trimmed_line.is_empty() {
                    let mount = parse_plugin_mount_option(trimmed_line)?;
                    cfg.plugin_mounts.push(mount);
                }
            }
        }
        "plugin-gid-map" => {
            let map = parse_plugin_gid_map_option(value.unwrap())?;
            cfg.plugin_gid_maps.push(map);
        }
        "plugin-gid-map-file" => {
            // Each non-empty line of the file is a `plugin-gid-map` value.
            let file = File::open(value.unwrap()).map_err(|_| argument::Error::InvalidValue {
                value: value.unwrap().to_owned(),
                expected: "unable to open `plugin-gid-map-file` file",
            })?;
            let reader = BufReader::new(file);
            for l in reader.lines() {
                let line = l.unwrap();
                // NOTE(review): see `plugin-mount-file` — `trim_end_matches("#")`
                // only removes trailing '#' characters.
                let trimmed_line = line.trim_end_matches("#").trim();
                if !trimmed_line.is_empty() {
                    let map = parse_plugin_gid_map_option(trimmed_line)?;
                    cfg.plugin_gid_maps.push(map);
                }
            }
        }
        "vhost-net" => cfg.vhost_net = true,
        "tap-fd" => {
            cfg.tap_fd.push(
                value
                    .unwrap()
                    .parse()
                    .map_err(|_| argument::Error::InvalidValue {
                        value: value.unwrap().to_owned(),
                        expected: "this value for `tap-fd` must be an unsigned integer",
                    })?,
            );
        }
        #[cfg(feature = "gpu")]
        "gpu" => {
            let params = parse_gpu_options(value)?;
            cfg.gpu_parameters = Some(params);
        }
        "software-tpm" => {
            cfg.software_tpm = true;
        }
        "single-touch" => {
            // Format: PATH[:width[:height]].
            if cfg.virtio_single_touch.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`single-touch` already given".to_owned(),
                ));
            }
            let mut it = value.unwrap().split(":");
            let mut single_touch_spec =
                TouchDeviceOption::new(PathBuf::from(it.next().unwrap().to_owned()));
            if let Some(width) = it.next() {
                single_touch_spec.width = width.trim().parse().unwrap();
            }
            if let Some(height) = it.next() {
                single_touch_spec.height = height.trim().parse().unwrap();
            }
            cfg.virtio_single_touch = Some(single_touch_spec);
        }
        "trackpad" => {
            // Format: PATH[:width[:height]].
            if cfg.virtio_trackpad.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`trackpad` already given".to_owned(),
                ));
            }
            let mut it = value.unwrap().split(":");
            let mut trackpad_spec =
                TouchDeviceOption::new(PathBuf::from(it.next().unwrap().to_owned()));
            if let Some(width) = it.next() {
                trackpad_spec.width = width.trim().parse().unwrap();
            }
            if let Some(height) = it.next() {
                trackpad_spec.height = height.trim().parse().unwrap();
            }
            cfg.virtio_trackpad = Some(trackpad_spec);
        }
        "mouse" => {
            if cfg.virtio_mouse.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`mouse` already given".to_owned(),
                ));
            }
            cfg.virtio_mouse = Some(PathBuf::from(value.unwrap().to_owned()));
        }
        "keyboard" => {
            if cfg.virtio_keyboard.is_some() {
                return Err(argument::Error::TooManyArguments(
                    "`keyboard` already given".to_owned(),
                ));
            }
            cfg.virtio_keyboard = Some(PathBuf::from(value.unwrap().to_owned()));
        }
        "evdev" => {
            let dev_path = PathBuf::from(value.unwrap());
            if !dev_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_owned(),
                    expected: "this input device path does not exist",
                });
            }
            cfg.virtio_input_evdevs.push(dev_path);
        }
        "split-irqchip" => {
            cfg.split_irqchip = true;
        }
        "initrd" => {
            cfg.initrd_path = Some(PathBuf::from(value.unwrap().to_owned()));
        }
        "bios" => {
            if cfg.executable_path.is_some() {
                return Err(argument::Error::TooManyArguments(format!(
                    "A VM executable was already specified: {:?}",
                    cfg.executable_path
                )));
            }
            cfg.executable_path = Some(Executable::Bios(PathBuf::from(value.unwrap().to_owned())));
        }
        "vfio" => {
            let vfio_path = PathBuf::from(value.unwrap());
            if !vfio_path.exists() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_owned(),
                    expected: "the vfio path does not exist",
                });
            }
            if !vfio_path.is_dir() {
                return Err(argument::Error::InvalidValue {
                    value: value.unwrap().to_owned(),
                    expected: "the vfio path should be directory",
                });
            }
            cfg.vfio = Some(vfio_path);
        }
        "help" => return Err(argument::Error::PrintHelp),
        // The argument parser only dispatches names declared in `run_vm`'s table.
        _ => unreachable!(),
    }
    Ok(())
}
/// Parses the `crosvm run` command line into a `Config` and launches the VM.
///
/// On success the VM (or plugin) runs to completion and `Ok(())` is returned;
/// any argument or runtime error is printed/logged before returning `Err(())`.
fn run_vm(args: std::env::Args) -> std::result::Result<(), ()> {
    // Table of every argument `crosvm run` accepts; parsed occurrences are
    // routed into `cfg` by the closure handed to `set_arguments` below.
    let arguments =
        &[Argument::positional("KERNEL", "bzImage of kernel to run"),
          Argument::value("android-fstab", "PATH", "Path to Android fstab"),
          Argument::short_value('i', "initrd", "PATH", "Initial ramdisk to load."),
          Argument::short_value('p',
                                "params",
                                "PARAMS",
                                "Extra kernel or plugin command line arguments. Can be given more than once."),
          Argument::short_value('c', "cpus", "N", "Number of VCPUs. (default: 1)"),
          Argument::value("cpu-affinity", "CPUSET", "Comma-separated list of CPUs or CPU ranges to run VCPUs on. (e.g. 0,1-3,5) (default: no mask)"),
          Argument::short_value('m',
                                "mem",
                                "N",
                                "Amount of guest memory in MiB. (default: 256)"),
          Argument::short_value('r',
                                "root",
                                "PATH[,key=value[,key=value[,...]]",
                                "Path to a root disk image followed by optional comma-separated options.
Like `--disk` but adds appropriate kernel command line option.
See --disk for valid options."),
          Argument::value("rwroot", "PATH[,key=value[,key=value[,...]]", "Path to a writable root disk image followed by optional comma-separated options.
See --disk for valid options."),
          Argument::short_value('d', "disk", "PATH[,key=value[,key=value[,...]]", "Path to a disk image followed by optional comma-separated options.
Valid keys:
sparse=BOOL - Indicates whether the disk should support the discard operation (default: true)
block_size=BYTES - Set the reported block size of the disk (default: 512)"),
          Argument::value("qcow", "PATH", "Path to a qcow2 disk image. (Deprecated; use --disk instead.)"),
          Argument::value("rwdisk", "PATH[,key=value[,key=value[,...]]", "Path to a writable disk image followed by optional comma-separated options.
See --disk for valid options."),
          Argument::value("rwqcow", "PATH", "Path to a writable qcow2 disk image. (Deprecated; use --rwdisk instead.)"),
          Argument::value("rw-pmem-device", "PATH", "Path to a writable disk image."),
          Argument::value("pmem-device", "PATH", "Path to a disk image."),
          Argument::value("host_ip",
                          "IP",
                          "IP address to assign to host tap interface."),
          Argument::value("netmask", "NETMASK", "Netmask for VM subnet."),
          Argument::value("mac", "MAC", "MAC address for VM."),
          Argument::flag("cras-audio", "Add an audio device to the VM that plays samples through CRAS server"),
          Argument::flag("cras-capture", "Enable capturing audio from CRAS server to the cras-audio device"),
          Argument::flag("null-audio", "Add an audio device to the VM that plays samples to /dev/null"),
          Argument::value("serial",
                          "type=TYPE,[num=NUM,path=PATH,console,stdin]",
                          "Comma separated key=value pairs for setting up serial devices. Can be given more than once.
Possible key values:
type=(stdout,syslog,sink,file) - Where to route the serial device
num=(1,2,3,4) - Serial Device Number. If not provided, num will default to 1.
path=PATH - The path to the file to write to when type=file
console - Use this serial device as the guest console. Can only be given once. Will default to first serial port if not provided.
stdin - Direct standard input to this serial device. Can only be given once. Will default to first serial port if not provided.
"),
          Argument::value("syslog-tag", "TAG", "When logging to syslog, use the provided tag."),
          Argument::value("x-display", "DISPLAY", "X11 display name to use."),
          Argument::flag("display-window-keyboard", "Capture keyboard input from the display window."),
          // BUG FIX: this flag previously described itself as capturing
          // *keyboard* input — a copy-paste of the flag above.
          Argument::flag("display-window-mouse", "Capture mouse input from the display window."),
          Argument::value("wayland-sock", "PATH", "Path to the Wayland socket to use."),
          #[cfg(feature = "wl-dmabuf")]
          Argument::flag("wayland-dmabuf", "Enable support for DMABufs in Wayland device."),
          Argument::short_value('s',
                                "socket",
                                "PATH",
                                "Path to put the control socket. If PATH is a directory, a name will be generated."),
          Argument::flag("disable-sandbox", "Run all devices in one, non-sandboxed process."),
          Argument::value("cid", "CID", "Context ID for virtual sockets."),
          Argument::value("shared-dir", "PATH:TAG[:type=TYPE:writeback=BOOL:timeout=SECONDS:uidmap=UIDMAP:gidmap=GIDMAP:cache=CACHE]",
                          "Colon-separated options for configuring a directory to be shared with the VM.
The first field is the directory to be shared and the second field is the tag that the VM can use to identify the device.
The remaining fields are key=value pairs that may appear in any order. Valid keys are:
type=(p9, fs) - Indicates whether the directory should be shared via virtio-9p or virtio-fs (default: p9).
uidmap=UIDMAP - The uid map to use for the device's jail in the format \"inner outer count[,inner outer count]\" (default: 0 <current euid> 1).
gidmap=GIDMAP - The gid map to use for the device's jail in the format \"inner outer count[,inner outer count]\" (default: 0 <current egid> 1).
cache=(never, auto, always) - Indicates whether the VM can cache the contents of the shared directory (default: auto). When set to \"auto\" and the type is \"fs\", the VM will use close-to-open consistency for file contents.
timeout=SECONDS - How long the VM should consider file attributes and directory entries to be valid (default: 5). If the VM has exclusive access to the directory, then this should be a large value. If the directory can be modified by other processes, then this should be 0.
writeback=BOOL - Indicates whether the VM can use writeback caching (default: false). This is only safe to do when the VM has exclusive access to the files in a directory. Additionally, the server should have read permission for all files as the VM may issue read requests even for files that are opened write-only.
"),
          Argument::value("seccomp-policy-dir", "PATH", "Path to seccomp .policy files."),
          Argument::flag("seccomp-log-failures", "Instead of seccomp filter failures being fatal, they will be logged instead."),
          #[cfg(feature = "plugin")]
          Argument::value("plugin", "PATH", "Absolute path to plugin process to run under crosvm."),
          #[cfg(feature = "plugin")]
          Argument::value("plugin-root", "PATH", "Absolute path to a directory that will become root filesystem for the plugin process."),
          #[cfg(feature = "plugin")]
          Argument::value("plugin-mount", "PATH:PATH:BOOL", "Path to be mounted into the plugin's root filesystem. Can be given more than once."),
          #[cfg(feature = "plugin")]
          Argument::value("plugin-mount-file", "PATH", "Path to the file listing paths be mounted into the plugin's root filesystem. Can be given more than once."),
          #[cfg(feature = "plugin")]
          Argument::value("plugin-gid-map", "GID:GID:INT", "Supplemental GIDs that should be mapped in plugin jail. Can be given more than once."),
          #[cfg(feature = "plugin")]
          Argument::value("plugin-gid-map-file", "PATH", "Path to the file listing supplemental GIDs that should be mapped in plugin jail. Can be given more than once."),
          Argument::flag("vhost-net", "Use vhost for networking."),
          Argument::value("tap-fd",
                          "fd",
                          "File descriptor for configured tap device. A different virtual network card will be added each time this argument is given."),
          #[cfg(feature = "gpu")]
          Argument::flag_or_value("gpu",
                                  "[width=INT,height=INT]",
                                  "(EXPERIMENTAL) Comma separated key=value pairs for setting up a virtio-gpu device
Possible key values:
width=INT - The width of the virtual display connected to the virtio-gpu.
height=INT - The height of the virtual display connected to the virtio-gpu.
egl[=true|=false] - If the virtio-gpu backend should use a EGL context for rendering.
glx[=true|=false] - If the virtio-gpu backend should use a GLX context for rendering.
surfaceless[=true|=false] - If the virtio-gpu backend should use a surfaceless context for rendering.
"),
          #[cfg(feature = "tpm")]
          Argument::flag("software-tpm", "enable a software emulated trusted platform module device"),
          Argument::value("evdev", "PATH", "Path to an event device node. The device will be grabbed (unusable from the host) and made available to the guest with the same configuration it shows on the host"),
          Argument::value("single-touch", "PATH:WIDTH:HEIGHT", "Path to a socket from where to read single touch input events (such as those from a touchscreen) and write status updates to, optionally followed by width and height (defaults to 800x1280)."),
          Argument::value("trackpad", "PATH:WIDTH:HEIGHT", "Path to a socket from where to read trackpad input events and write status updates to, optionally followed by screen width and height (defaults to 800x1280)."),
          Argument::value("mouse", "PATH", "Path to a socket from where to read mouse input events and write status updates to."),
          Argument::value("keyboard", "PATH", "Path to a socket from where to read keyboard input events and write status updates to."),
          #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
          Argument::flag("split-irqchip", "(EXPERIMENTAL) enable split-irqchip support"),
          Argument::value("bios", "PATH", "Path to BIOS/firmware ROM"),
          Argument::value("vfio", "PATH", "Path to sysfs of pass through or mdev device"),
          Argument::short_flag('h', "help", "Print help message.")];
    let mut cfg = Config::default();
    // Parse every argument into `cfg`, then cross-validate options that
    // depend on each other (kernel presence, network triple, plugin-root).
    let match_res = set_arguments(args, &arguments[..], |name, value| {
        set_argument(&mut cfg, name, value)
    })
    .and_then(|_| {
        if cfg.executable_path.is_none() {
            return Err(argument::Error::ExpectedArgument("`KERNEL`".to_owned()));
        }
        // Network settings are all-or-nothing: supplying any of host_ip,
        // netmask, or mac requires the other two.
        if cfg.host_ip.is_some() || cfg.netmask.is_some() || cfg.mac_address.is_some() {
            if cfg.host_ip.is_none() {
                return Err(argument::Error::ExpectedArgument(
                    "`host_ip` missing from network config".to_owned(),
                ));
            }
            if cfg.netmask.is_none() {
                return Err(argument::Error::ExpectedArgument(
                    "`netmask` missing from network config".to_owned(),
                ));
            }
            if cfg.mac_address.is_none() {
                return Err(argument::Error::ExpectedArgument(
                    "`mac` missing from network config".to_owned(),
                ));
            }
        }
        if cfg.plugin_root.is_some() && !executable_is_plugin(&cfg.executable_path) {
            return Err(argument::Error::ExpectedArgument(
                "`plugin-root` requires `plugin`".to_owned(),
            ));
        }
        Ok(())
    });
    match match_res {
        // Plugin executables launch through the plugin runner rather than
        // the regular linux VM path.
        #[cfg(feature = "plugin")]
        Ok(()) if executable_is_plugin(&cfg.executable_path) => {
            match crosvm::plugin::run_config(cfg) {
                Ok(_) => {
                    info!("crosvm and plugin have exited normally");
                    Ok(())
                }
                Err(e) => {
                    error!("{}", e);
                    Err(())
                }
            }
        }
        Ok(()) => match linux::run_config(cfg) {
            Ok(_) => {
                info!("crosvm has exited normally");
                Ok(())
            }
            Err(e) => {
                error!("{}", e);
                Err(())
            }
        },
        Err(argument::Error::PrintHelp) => {
            print_help("crosvm run", "KERNEL", &arguments[..]);
            Ok(())
        }
        Err(e) => {
            println!("{}", e);
            Err(())
        }
    }
}
/// Sends `request` to the control socket at each path in `args` and returns
/// the last response successfully received.
///
/// Every socket is attempted even when an earlier one fails; the result is
/// `Err(())` only if no socket produced a response after the last attempt.
fn handle_request(
    request: &VmRequest,
    args: std::env::Args,
) -> std::result::Result<VmResponse, ()> {
    let mut return_result = Err(());
    for socket_path in args {
        match UnixSeqpacket::connect(&socket_path) {
            Ok(s) => {
                let socket: VmControlRequestSocket = MsgSocket::new(s);
                if let Err(e) = socket.send(request) {
                    error!(
                        "failed to send request to socket at '{}': {}",
                        socket_path, e
                    );
                    return_result = Err(());
                    continue;
                }
                match socket.recv() {
                    Ok(response) => return_result = Ok(response),
                    Err(e) => {
                        // BUG FIX: this is a receive failure; the original
                        // message duplicated the send-error text ("... at2").
                        error!(
                            "failed to recv response from socket at '{}': {}",
                            socket_path, e
                        );
                        return_result = Err(());
                        continue;
                    }
                }
            }
            Err(e) => {
                error!("failed to connect to socket at '{}': {}", socket_path, e);
                return_result = Err(());
            }
        }
    }
    return_result
}
/// Sends `request` to the sockets named in `args`, logging any response.
fn vms_request(request: &VmRequest, args: std::env::Args) -> std::result::Result<(), ()> {
    handle_request(request, args).map(|response| {
        info!("request response was {}", response);
    })
}
/// `crosvm stop VM_SOCKET...` — asks each listed instance to exit.
fn stop_vms(args: std::env::Args) -> std::result::Result<(), ()> {
    // With no sockets given there is nothing to stop; show usage instead.
    if args.len() != 0 {
        return vms_request(&VmRequest::Exit, args);
    }
    print_help("crosvm stop", "VM_SOCKET...", &[]);
    println!("Stops the crosvm instance listening on each `VM_SOCKET` given.");
    Err(())
}
/// `crosvm suspend VM_SOCKET...` — suspends each listed instance.
fn suspend_vms(args: std::env::Args) -> std::result::Result<(), ()> {
    // With no sockets given there is nothing to suspend; show usage instead.
    if args.len() != 0 {
        return vms_request(&VmRequest::Suspend, args);
    }
    print_help("crosvm suspend", "VM_SOCKET...", &[]);
    println!("Suspends the crosvm instance listening on each `VM_SOCKET` given.");
    Err(())
}
/// `crosvm resume VM_SOCKET...` — resumes each listed instance.
fn resume_vms(args: std::env::Args) -> std::result::Result<(), ()> {
    // With no sockets given there is nothing to resume; show usage instead.
    if args.len() != 0 {
        return vms_request(&VmRequest::Resume, args);
    }
    print_help("crosvm resume", "VM_SOCKET...", &[]);
    println!("Resumes the crosvm instance listening on each `VM_SOCKET` given.");
    Err(())
}
/// `crosvm balloon SIZE VM_SOCKET...` — adjusts the balloon device size.
fn balloon_vms(mut args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() < 2 {
        print_help("crosvm balloon", "SIZE VM_SOCKET...", &[]);
        // BUG FIX: help text previously misspelled "balloon" as "ballon".
        println!("Set the balloon size of the crosvm instance to `SIZE` bytes.");
        return Err(());
    }
    // First positional argument is the target balloon size in bytes.
    let num_bytes = match args.nth(0).unwrap().parse::<u64>() {
        Ok(n) => n,
        Err(_) => {
            error!("Failed to parse number of bytes");
            return Err(());
        }
    };
    let command = BalloonControlCommand::Adjust { num_bytes };
    // Remaining args are the control socket paths.
    vms_request(&VmRequest::BalloonCommand(command), args)
}
/// `crosvm create_qcow2 PATH SIZE` — creates an empty qcow2 disk image.
fn create_qcow2(mut args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() != 2 {
        print_help("crosvm create_qcow2", "PATH SIZE", &[]);
        println!("Create a new QCOW2 image at `PATH` of the specified `SIZE` in bytes.");
        return Err(());
    }
    let file_path = args.next().unwrap();
    let size: u64 = match args.next().unwrap().parse::<u64>() {
        Ok(n) => n,
        Err(_) => {
            error!("Failed to parse size of the disk.");
            return Err(());
        }
    };
    // Create (or truncate-open) the backing file read/write.
    let file = match OpenOptions::new()
        .create(true)
        .read(true)
        .write(true)
        .open(&file_path)
    {
        Ok(f) => f,
        Err(e) => {
            error!("Failed opening qcow file at '{}': {}", file_path, e);
            return Err(());
        }
    };
    // Writing the qcow2 header/metadata is what actually sizes the image.
    if let Err(e) = QcowFile::new(file, size) {
        error!("Failed to create qcow file at '{}': {}", file_path, e);
        return Err(());
    }
    Ok(())
}
/// `crosvm disk SUBCOMMAND ... VM_SOCKET...` — manages attached virtual disks.
fn disk_cmd(mut args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() < 2 {
        print_help("crosvm disk", "SUBCOMMAND VM_SOCKET...", &[]);
        println!("Manage attached virtual disk devices.");
        println!("Subcommands:");
        println!(" resize DISK_INDEX NEW_SIZE VM_SOCKET");
        return Err(());
    }
    // Length check above guarantees the subcommand argument exists.
    let subcommand = args.next().unwrap();
    let request = match subcommand.as_str() {
        "resize" => {
            let disk_index = match args.next().unwrap().parse::<usize>() {
                Ok(n) => n,
                Err(_) => {
                    error!("Failed to parse disk index");
                    return Err(());
                }
            };
            let new_size = match args.next().unwrap().parse::<u64>() {
                Ok(n) => n,
                Err(_) => {
                    error!("Failed to parse disk size");
                    return Err(());
                }
            };
            VmRequest::DiskCommand {
                disk_index,
                command: DiskControlCommand::Resize { new_size },
            }
        }
        _ => {
            error!("Unknown disk subcommand '{}'", subcommand);
            return Err(());
        }
    };
    // The remaining arguments are the control socket paths.
    vms_request(&request, args)
}
/// Errors possible while handling the `crosvm usb` family of subcommands.
enum ModifyUsbError {
    /// A required positional argument was missing; holds the argument's name.
    ArgMissing(&'static str),
    /// An argument (name, raw value) could not be parsed.
    ArgParse(&'static str, String),
    /// An integer argument (name, raw value) failed to parse.
    ArgParseInt(&'static str, String, ParseIntError),
    /// A raw file descriptor failed validation.
    FailedFdValidate(sys_util::Error),
    /// The given device path does not exist on the host.
    PathDoesNotExist(PathBuf),
    /// Communication over the control socket failed.
    SocketFailed,
    /// The VM replied with something other than a USB response.
    UnexpectedResponse(VmResponse),
    /// The subcommand word was not `attach`, `detach`, or `list`.
    UnknownCommand(String),
    /// The USB control operation itself reported an error result.
    UsbControl(UsbControlResult),
}
impl fmt::Display for ModifyUsbError {
    /// Renders a human-readable description of the USB-command error.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::ModifyUsbError::*;
        match self {
            ArgMissing(arg) => write!(f, "argument missing: {}", arg),
            ArgParse(arg, raw) => {
                write!(f, "failed to parse argument {} value `{}`", arg, raw)
            }
            ArgParseInt(arg, raw, err) => write!(
                f,
                "failed to parse integer argument {} value `{}`: {}",
                arg, raw, err
            ),
            FailedFdValidate(err) => write!(f, "failed to validate file descriptor: {}", err),
            PathDoesNotExist(path) => write!(f, "path `{}` does not exist", path.display()),
            SocketFailed => write!(f, "socket failed"),
            UnexpectedResponse(resp) => write!(f, "unexpected response: {}", resp),
            UnknownCommand(cmd) => write!(f, "unknown command: `{}`", cmd),
            UsbControl(res) => write!(f, "{}", res),
        }
    }
}
/// Shorthand result type used by the USB modification helpers below.
type ModifyUsbResult<T> = std::result::Result<T, ModifyUsbError>;
/// Parses a `BUS_ID:ADDR:VID:PID` specifier.
///
/// `BUS_ID` and `ADDR` are decimal `u8`s; `VID` and `PID` are hexadecimal
/// `u16`s. Fields beyond the fourth are ignored, matching the original
/// behavior.
fn parse_bus_id_addr(v: &str) -> ModifyUsbResult<(u8, u8, u16, u16)> {
    debug!("parse_bus_id_addr: {}", v);
    let mut ids = v.split(":");
    let (bus_str, addr_str, vid_str, pid_str) =
        match (ids.next(), ids.next(), ids.next(), ids.next()) {
            (Some(b), Some(a), Some(v_id), Some(p_id)) => (b, a, v_id, p_id),
            _ => {
                return Err(ModifyUsbError::ArgParse(
                    "BUS_ID_ADDR_BUS_NUM_DEV_NUM",
                    v.to_owned(),
                ))
            }
        };
    let bus_id = bus_str
        .parse::<u8>()
        .map_err(|e| ModifyUsbError::ArgParseInt("bus_id", bus_str.to_owned(), e))?;
    let addr = addr_str
        .parse::<u8>()
        .map_err(|e| ModifyUsbError::ArgParseInt("addr", addr_str.to_owned(), e))?;
    let vid = u16::from_str_radix(vid_str, 16)
        .map_err(|e| ModifyUsbError::ArgParseInt("vid", vid_str.to_owned(), e))?;
    let pid = u16::from_str_radix(pid_str, 16)
        .map_err(|e| ModifyUsbError::ArgParseInt("pid", pid_str.to_owned(), e))?;
    Ok((bus_id, addr, vid, pid))
}
/// Interprets the final component of `path` as a decimal raw file descriptor
/// number and validates that the FD is actually open in this process.
fn raw_fd_from_path(path: &Path) -> ModifyUsbResult<RawFd> {
    if !path.exists() {
        return Err(ModifyUsbError::PathDoesNotExist(path.to_owned()));
    }
    // The last path component must be UTF-8 so it can be parsed as a number.
    let fd_str = match path.file_name().and_then(|name| name.to_str()) {
        Some(s) => s,
        None => {
            return Err(ModifyUsbError::ArgParse(
                "USB_DEVICE_PATH",
                path.to_string_lossy().into_owned(),
            ))
        }
    };
    let raw_fd = fd_str
        .parse::<libc::c_int>()
        .map_err(|e| ModifyUsbError::ArgParseInt("USB_DEVICE_PATH", fd_str.to_owned(), e))?;
    validate_raw_fd(raw_fd).map_err(ModifyUsbError::FailedFdValidate)
}
/// `crosvm usb attach` — attaches a host USB device to the VM.
///
/// Expects `BUS_ID:ADDR:VID:PID`, then a device path (`-` for none; a
/// `/proc/self/fd/N` path reuses the already-open descriptor N), then the
/// control socket path(s).
fn usb_attach(mut args: std::env::Args) -> ModifyUsbResult<UsbControlResult> {
    let val = args
        .next()
        .ok_or(ModifyUsbError::ArgMissing("BUS_ID_ADDR_BUS_NUM_DEV_NUM"))?;
    let (bus, addr, vid, pid) = parse_bus_id_addr(&val)?;
    let dev_path = PathBuf::from(
        args.next()
            .ok_or(ModifyUsbError::ArgMissing("usb device path"))?,
    );
    // `-` means the request carries no device file at all.
    let usb_file: Option<File> = if dev_path == Path::new("-") {
        None
    } else if dev_path.parent() == Some(Path::new("/proc/self/fd")) {
        // Special case '/proc/self/fd/*' paths. The FD is already open, just use it.
        // Safe because we will validate |raw_fd|.
        Some(unsafe { File::from_raw_fd(raw_fd_from_path(&dev_path)?) })
    } else {
        Some(
            OpenOptions::new()
                .read(true)
                .write(true)
                .open(&dev_path)
                .map_err(|_| ModifyUsbError::UsbControl(UsbControlResult::FailedToOpenDevice))?,
        )
    };
    let request = VmRequest::UsbCommand(UsbControlCommand::AttachDevice {
        bus,
        addr,
        vid,
        pid,
        fd: usb_file.map(MaybeOwnedFd::Owned),
    });
    // Remaining args are socket paths; any successful response is returned.
    let response = handle_request(&request, args).map_err(|_| ModifyUsbError::SocketFailed)?;
    match response {
        VmResponse::UsbResponse(usb_resp) => Ok(usb_resp),
        r => Err(ModifyUsbError::UnexpectedResponse(r)),
    }
}
/// `crosvm usb detach PORT VM_SOCKET...` — detaches the device on `PORT`.
fn usb_detach(mut args: std::env::Args) -> ModifyUsbResult<UsbControlResult> {
    let port_arg = args.next().ok_or(ModifyUsbError::ArgMissing("PORT"))?;
    let port: u8 = port_arg
        .parse::<u8>()
        .map_err(|e| ModifyUsbError::ArgParseInt("PORT", port_arg.to_owned(), e))?;
    let request = VmRequest::UsbCommand(UsbControlCommand::DetachDevice { port });
    match handle_request(&request, args).map_err(|_| ModifyUsbError::SocketFailed)? {
        VmResponse::UsbResponse(usb_resp) => Ok(usb_resp),
        r => Err(ModifyUsbError::UnexpectedResponse(r)),
    }
}
/// `crosvm usb list VM_SOCKET...` — lists devices on every controllable port.
fn usb_list(args: std::env::Args) -> ModifyUsbResult<UsbControlResult> {
    // Ask about all port indices 0..USB_CONTROL_MAX_PORTS.
    let mut ports: [u8; USB_CONTROL_MAX_PORTS] = Default::default();
    for i in 0..ports.len() {
        ports[i] = i as u8;
    }
    let request = VmRequest::UsbCommand(UsbControlCommand::ListDevice { ports });
    match handle_request(&request, args).map_err(|_| ModifyUsbError::SocketFailed)? {
        VmResponse::UsbResponse(usb_resp) => Ok(usb_resp),
        r => Err(ModifyUsbError::UnexpectedResponse(r)),
    }
}
/// Dispatcher for the `crosvm usb` subcommands (`attach`/`detach`/`list`).
fn modify_usb(mut args: std::env::Args) -> std::result::Result<(), ()> {
    if args.len() < 2 {
        print_help("crosvm usb",
                   "[attach BUS_ID:ADDR:VENDOR_ID:PRODUCT_ID [USB_DEVICE_PATH|-] | detach PORT | list] VM_SOCKET...", &[]);
        return Err(());
    }
    // The length check above guarantees the subcommand word is present.
    let command = args.next().unwrap();
    let result = match command.as_ref() {
        "attach" => usb_attach(args),
        "detach" => usb_detach(args),
        "list" => usb_list(args),
        other => Err(ModifyUsbError::UnknownCommand(other.to_owned())),
    };
    // Print the outcome either way and collapse the error type to ().
    result
        .map(|response| println!("{}", response))
        .map_err(|e| println!("error {}", e))
}
/// Prints the top-level usage message listing the supported subcommands.
fn print_usage() {
    print_help("crosvm", "[stop|run]", &[]);
    println!("Commands:");
    println!(" stop - Stops crosvm instances via their control sockets.");
    // BUG FIX: `suspend`, `resume`, and `balloon` are dispatched by
    // crosvm_main but were missing from this help text.
    println!(" suspend - Suspends crosvm instances via their control sockets.");
    println!(" resume - Resumes crosvm instances via their control sockets.");
    println!(" run - Start a new crosvm instance.");
    println!(" balloon - Set balloon size of the crosvm instance.");
    println!(" create_qcow2 - Create a new qcow2 disk image file.");
    println!(" disk - Manage attached virtual disk devices.");
    println!(" usb - Manage attached virtual USB devices.");
}
/// Top-level driver: sets up syslog and the panic hook, dispatches the
/// subcommand, then reaps (or kills) child device processes before returning.
fn crosvm_main() -> std::result::Result<(), ()> {
    if let Err(e) = syslog::init() {
        println!("failed to initialize syslog: {}", e);
        return Err(());
    }
    panic_hook::set_panic_hook();
    let mut args = std::env::args();
    // argv[0] (the executable name) is discarded before dispatching.
    if args.next().is_none() {
        error!("expected executable name");
        return Err(());
    }
    // Past this point, usage of exit is in danger of leaking zombie processes.
    let ret = match args.next().as_ref().map(|a| a.as_ref()) {
        None => {
            print_usage();
            Ok(())
        }
        Some("stop") => stop_vms(args),
        Some("suspend") => suspend_vms(args),
        Some("resume") => resume_vms(args),
        Some("run") => run_vm(args),
        Some("balloon") => balloon_vms(args),
        Some("create_qcow2") => create_qcow2(args),
        Some("disk") => disk_cmd(args),
        Some("usb") => modify_usb(args),
        Some(c) => {
            println!("invalid subcommand: {:?}", c);
            print_usage();
            Err(())
        }
    };
    // Reap exit status from any child device processes. At this point, all devices should have been
    // dropped in the main process and told to shutdown. Try over a period of 100ms, since it may
    // take some time for the processes to shut down.
    if !wait_all_children() {
        // We gave them a chance, and it's too late.
        warn!("not all child processes have exited; sending SIGKILL");
        if let Err(e) = kill_process_group() {
            // We're now at the mercy of the OS to clean up after us.
            warn!("unable to kill all child processes: {}", e);
        }
    }
    // WARNING: Any code added after this point is not guaranteed to run
    // since we may forcibly kill this process (and its children) above.
    ret
}
/// Process entry point: translate `crosvm_main`'s result into an exit code.
fn main() {
    let exit_code = match crosvm_main() {
        Ok(()) => 0,
        Err(()) => 1,
    };
    std::process::exit(exit_code);
}
#[cfg(test)]
mod tests {
    //! Unit tests for the CPU-set and serial-option command line parsers.
    use super::*;
    #[test]
    fn parse_cpu_set_single() {
        assert_eq!(parse_cpu_set("123").expect("parse failed"), vec![123]);
    }
    #[test]
    fn parse_cpu_set_list() {
        assert_eq!(
            parse_cpu_set("0,1,2,3").expect("parse failed"),
            vec![0, 1, 2, 3]
        );
    }
    #[test]
    fn parse_cpu_set_range() {
        assert_eq!(
            parse_cpu_set("0-3").expect("parse failed"),
            vec![0, 1, 2, 3]
        );
    }
    #[test]
    fn parse_cpu_set_list_of_ranges() {
        assert_eq!(
            parse_cpu_set("3-4,7-9,18").expect("parse failed"),
            vec![3, 4, 7, 8, 9, 18]
        );
    }
    #[test]
    fn parse_cpu_set_repeated() {
        // For now, allow duplicates - they will be handled gracefully by the vec to cpu_set_t conversion.
        assert_eq!(parse_cpu_set("1,1,1").expect("parse failed"), vec![1, 1, 1]);
    }
    #[test]
    fn parse_cpu_set_negative() {
        // Negative CPU numbers are not allowed.
        parse_cpu_set("-3").expect_err("parse should have failed");
    }
    #[test]
    fn parse_cpu_set_reverse_range() {
        // Ranges must be from low to high.
        parse_cpu_set("5-2").expect_err("parse should have failed");
    }
    #[test]
    fn parse_cpu_set_open_range() {
        parse_cpu_set("3-").expect_err("parse should have failed");
    }
    #[test]
    fn parse_cpu_set_extra_comma() {
        parse_cpu_set("0,1,2,").expect_err("parse should have failed");
    }
    // BUG FIX: test name was misspelled `parse_serial_vaild`, and the expect
    // messages below misspelled "succeeded" as "succeded".
    #[test]
    fn parse_serial_valid() {
        parse_serial_options("type=syslog,num=1,console=true,stdin=true")
            .expect("parse should have succeeded");
    }
    #[test]
    fn parse_serial_valid_no_num() {
        parse_serial_options("type=syslog").expect("parse should have succeeded");
    }
    #[test]
    fn parse_serial_invalid_type() {
        parse_serial_options("type=wormhole,num=1").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_invalid_num_upper() {
        parse_serial_options("type=syslog,num=5").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_invalid_num_lower() {
        parse_serial_options("type=syslog,num=0").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_invalid_num_string() {
        parse_serial_options("type=syslog,num=number3").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_invalid_option() {
        parse_serial_options("type=syslog,speed=lightspeed").expect_err("parse should have failed");
    }
    #[test]
    fn parse_serial_invalid_two_stdin() {
        let mut config = Config::default();
        set_argument(&mut config, "serial", Some("num=1,type=stdout,stdin=true"))
            .expect("should parse the first serial argument");
        set_argument(&mut config, "serial", Some("num=2,type=stdout,stdin=true"))
            .expect_err("should fail to parse a second serial port connected to stdin");
    }
}
|
#![recursion_limit = "1024"]
#![cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity, trivial_regex))]
use std::ascii::AsciiExt;
use std::collections::HashMap;
use std::io::Write;
use std::io::BufWriter;
use std::io::stderr;
use std::fs::File;
use std::str;
use std::path::{PathBuf, Path};
use std::process::Command;
use std::vec::Vec;
use std::marker::Send;
extern crate argparse;
use argparse::{ArgumentParser, StoreTrue, StoreFalse, Store};
extern crate regex;
use regex::Regex;
extern crate rust_htslib;
use rust_htslib::bam::Read;
use rust_htslib::bam::record::Cigar;
use rust_htslib::bam::Reader;
#[macro_use]
extern crate error_chain;
mod intervaltree;
// Generates this crate's `Error`, `ErrorKind`, and `Result` types via the
// `error-chain` macro. Each `foreign_links` entry gets an automatic `From`
// conversion into `Error`; the custom `NoneError` kind is used to turn
// `Option::None` into an error.
error_chain!{
    foreign_links {
        ::std::io::Error, Io;
        ::std::str::Utf8Error, Utf8;
        ::regex::Error, Regex;
    }
    errors {
        NoneError
    }
}
/// Extension trait for converting `Option<T>` into this crate's `Result<T>`.
trait ToResult<T> {
    /// Returns `Ok(v)` for `Some(v)`; `None` becomes an error.
    fn r(self) -> Result<T>;
}
impl<T> ToResult<T> for Option<T> {
    /// Maps `None` to an `ErrorKind::NoneError`, passing `Some` through.
    fn r(self) -> Result<T> {
        self.ok_or_else(|| ErrorKind::NoneError.into())
    }
}
/// Converts a CIGAR operation list into (start, end) exon intervals on the
/// reference, starting from reference position `pos`, appending to `exons`.
///
/// Only `M` (Match) operations emit intervals; `N` (RefSkip) and `D` (Del)
/// advance the reference position without emitting; `I`/`S`/`H`/`P` are
/// ignored. Any other operation aborts with an error.
/// NOTE(review): if this rust-htslib version has `Equal`/`Diff` (`=`/`X`)
/// variants they would hit the error arm — confirm inputs never use them.
fn cigar2exons(exons: &mut Vec<(i32, i32)>, cigar: &[Cigar], pos: i32) -> Result<()> {
    let mut pos = pos;
    for op in cigar {
        match op {
            &Cigar::Match(length) => {
                // Advance first, then record the interval just consumed.
                pos += length as i32;
                exons.push((pos - length as i32, pos));
                Ok(())
            }
            &Cigar::RefSkip(length) |
            &Cigar::Del(length) => {
                // Consumes reference bases but produces no exon.
                pos += length as i32;
                Ok(())
            }
            // Operations that consume no reference bases.
            &Cigar::Ins(_) |
            &Cigar::SoftClip(_) |
            &Cigar::HardClip(_) |
            &Cigar::Pad(_) => Ok(()),
            c => Err(format!("Bad CIGAR string: {:?}", c)),
        }?;
    }
    Ok(())
}
/// Returns the bedgraph output filename for the given read-number/strand
/// combination, creating the file (and caching its handle in `fhs`) on first
/// use.
///
/// The name is `--out` (or the bam path minus extension), plus an optional
/// `.rN` read suffix and `.STRAND` suffix, ending in `.bedgraph`. When a
/// file is first created, a track line is written if `--trackline` is set.
fn open_file(options: &Options,
             read_number: i32,
             strand: &str,
             split_strand: &str,
             fhs: &mut HashMap<String, Option<File>>)
             -> Result<String> {
    // Base prefix: the bam file path with its extension stripped.
    let mut prefix = PathBuf::new();
    prefix.set_file_name(&options.bamfile);
    prefix.set_extension("");
    // Track name: explicit --trackname, else the prefix, plus suffixes.
    let track_name = vec![if !options.trackname.is_empty() {
        options.trackname.clone()
    } else {
        let p = prefix.as_path().to_str().ok_or("Failed to parse path!")?;
        p.to_string()
    },
    // `.rN` suffix when output is split by read number.
    if options.split_read && read_number > 0 {
        format!(".r{}", read_number)
    } else {
        "".to_string()
    },
    // Strand suffix when strand-splitting is active ("uu" means unstranded).
    if split_strand != "uu" && !strand.is_empty() {
        format!(".{}", strand)
    } else {
        "".to_string()
    }]
    .join("");
    // Output filename: same suffix scheme, rooted at --out when given.
    let filename = vec![if !options.out.is_empty() {
        options.out.clone()
    } else {
        let p = prefix.as_path().to_str().ok_or("Failed to parse path!")?;
        p.to_string()
    },
    if options.split_read && read_number > 0 {
        format!(".r{}", read_number)
    } else {
        "".to_string()
    },
    if split_strand != "uu" && !strand.is_empty() {
        format!(".{}", strand)
    } else {
        "".to_string()
    },
    ".bedgraph".to_string()]
    .join("");
    // initialize the file if needed
    if !fhs.contains_key(&filename) {
        let mut f = File::create(&filename)?;
        if options.trackline {
            writeln!(f,
                     "track type=bedGraph name=\"{}\" description=\"{}\" visibility=full",
                     track_name,
                     track_name)?;
        }
        fhs.insert(filename.clone(), Some(f));
    }
    Ok(filename)
}
/// Writes bedgraph records for one chromosome.
///
/// `chr` is the (length, name) pair of the reference sequence; `histogram`
/// maps (read number, strand) keys to per-base coverage vectors. Runs of
/// equal coverage are merged into single intervals; minus-strand coverage is
/// emitted negated. Positions beyond a histogram's length count as zero.
fn write_chr(options: &Options,
             chr: &(u32, String),
             histogram: &HashMap<(i32, String), Vec<i32>>,
             fhs: &mut HashMap<String, Option<File>>,
             split_strand: &str)
             -> Result<()> {
    for (key, histo) in histogram {
        let read_number = key.0;
        let strand = &key.1;
        let filename = open_file(options, read_number, strand, split_strand, fhs)?;
        let file = fhs.get_mut(&filename).r()?.as_mut().r()?;
        let mut writer = BufWriter::new(file);
        // scan the histogram to produce the bedgraph data
        let mut start: usize = 0;
        let mut end: usize = 0;
        let ref_length: usize = chr.0 as usize;
        // Coverage lookup that treats positions past the histogram as zero.
        let depth_at = |i: usize| if i < histo.len() { histo[i] } else { 0 };
        while start < ref_length {
            // Extend `end` across the run of equal coverage values.
            while depth_at(end) == depth_at(start) && end < ref_length {
                end += 1
            }
            let value = depth_at(start);
            // BUG FIX: the original indexed `histo[start]` directly inside
            // the writeln!, which panics for positions past the end of the
            // histogram when --zero is enabled; use the bounds-checked value.
            if options.zero || value > 0 {
                writeln!(writer,
                         "{}\t{}\t{}\t{}",
                         chr.1,
                         start,
                         end,
                         if strand == "-" { -value } else { value })?;
            }
            start = end;
        }
    }
    Ok(())
}
fn analyze_bam(options: &Options,
split_strand: &str,
autostrand_pass: bool,
intervals: &Option<HashMap<String, intervaltree::IntervalTree<u8>>>)
-> Result<()> {
if !Path::new(&options.bamfile).exists() {
return Err(format!("Bam file {} could not be found!", &options.bamfile).into());
}
let bam = (match Reader::from_path(&options.bamfile) {
Ok(reader) => Ok(reader),
Err(_) => Err("Error: BGZFError"),
})?;
let header = bam.header();
let mut refs: Vec<(u32, String)> = Vec::new();
refs.resize(header.target_count() as usize, (0, "".to_string()));
let target_names = header.target_names();
for target_name in target_names {
let tid = header.tid(target_name).r()?;
let target_len = header.target_len(tid).r()?;
let target_name = std::str::from_utf8(target_name)?;
refs[tid as usize] = (target_len, target_name.to_string());
}
if options.fixchr {
for r in &mut refs {
let regex = Regex::new(r"^(chr|Zv9_)")?;
if regex.is_match(&r.1) {
let refname = r.1.to_string();
r.1.clear();
r.1.push_str(&format!("chr{}", refname));
}
}
}
if autostrand_pass {
writeln!(stderr(),
"Running strand detection phase on {}",
options.bamfile)?;
} else {
writeln!(stderr(), "Building histograms for {}", options.bamfile)?;
}
// build a lookup map for the refseqs
let mut refmap: HashMap<String, usize> = HashMap::new();
for (i, _) in refs.iter().enumerate() {
refmap.insert(refs[i].1.to_string(), i);
}
let mut lastchr: i32 = -1;
let mut fhs: HashMap<String, Option<File>> = HashMap::new();
let mut histogram: HashMap<(i32, String), Vec<i32>> = HashMap::new();
let mut autostrand_totals: HashMap<char, i64> = HashMap::new();
autostrand_totals.insert('s', 0);
autostrand_totals.insert('r', 0);
let mut autostrand_totals2: HashMap<char, i64> = HashMap::new();
autostrand_totals2.insert('s', 0);
autostrand_totals2.insert('r', 0);
let mut read = rust_htslib::bam::record::Record::new();
while bam.read(&mut read).is_ok() {
// if we've hit a new chr, write out the bedgraph data and clear the histogram
if lastchr == -1 || read.tid() != lastchr {
if !autostrand_pass && !histogram.is_empty() && lastchr != -1 {
write_chr(options,
&refs[lastchr as usize],
&histogram,
&mut fhs,
split_strand)?;
}
histogram.clear();
lastchr = read.tid();
}
// skip this read if it's no good
let paired = read.is_paired();
let proper = read.is_proper_pair();
let primary = !read.is_secondary();
if (options.paired_only && !paired) || (options.primary_only && !primary) ||
(options.proper_only && !proper) {
continue;
}
// skip if it's not unique and we want unique alignments
if options.uniq {
let hits = read.aux("NH".to_string().as_bytes());
if hits == None || hits.r()?.integer() != 1 {
continue;
}
}
let mut exons: Vec<(i32, i32)> = Vec::new();
let mut get_exons: Vec<(i32, i32)> = Vec::new();
cigar2exons(&mut get_exons, &read.cigar(), read.pos())?;
if !options.split_exons && !get_exons.is_empty() {
let first = get_exons.get(0).r()?;
let last = get_exons.get(get_exons.len() - 1).r()?;
exons = vec![(first.0, last.1)];
}
let read_number = if read.is_secondary() { 2 } else { 1 };
// attempt to determine the strandedness of the transcript
// read numbers match, is not reverse, is not flipped
// let xs = read.aux("XS".as_bytes());
let strand =
//if xs.is_some() {
// str::from_utf8(xs.r()?.string())?
//} else
if read_number == 1 {
if split_strand.chars().nth(0).r()? == 'r' {
if read.is_reverse() { "+" } else { "-" }
} else if split_strand.chars().nth(0).r()? == 's' {
if read.is_reverse() { "-" } else { "+" }
} else {
""
}
} else if read_number == 2 {
if split_strand.chars().nth(1).r()? == 's' {
if read.is_reverse() { "-" } else { "+" }
} else if split_strand.chars().nth(1).r()? == 'r' {
if read.is_reverse() { "+" } else { "-" }
} else {
""
}
} else {
""
};
let read_num = if options.split_read { read_number } else { 0 };
let ref_length = refs[read.tid() as usize].0;
// add the read to the histogram
for exon in exons {
// try to determine the strandedness of the data
if autostrand_pass {
if intervals.is_some() {
let intervals = intervals.as_ref().r()?;
if intervals.contains_key(&refs[lastchr as usize].1) {
let mut overlapping_annot: Vec<intervaltree::Interval<u8>> = Vec::new();
intervals[&refs[lastchr as usize].1]
.find_overlapping(exon.0 + 1, exon.1, &mut overlapping_annot);
for interval in overlapping_annot {
let overlap_length = std::cmp::min(exon.1, interval.stop) -
std::cmp::max(exon.0, interval.start - 1);
let strandtype = if read.is_reverse() == (interval.value == b'-') {
's'
} else {
'r'
};
if read_number == 1 {
let at = autostrand_totals.get_mut(&strandtype).r()?;
*at += overlap_length as i64
} else if read_number == 2 {
let at2 = autostrand_totals2.get_mut(&strandtype).r()?;
*at2 += overlap_length as i64
}
}
}
}
} else {
let tuple = (read_num, strand.to_string());
if !histogram.contains_key(&tuple) {
histogram.insert(tuple.clone(), Vec::new());
}
// keep track of chromosome sizes
if ref_length < exon.1 as u32 {
refs[read.tid() as usize].0 = exon.1 as u32;
}
if histogram[&tuple].len() < ref_length as usize {
let h = histogram.get_mut(&tuple).r()?;
h.resize(ref_length as usize, 0);
}
for pos in exon.0..exon.1 {
let h = histogram.get_mut(&tuple).r()?;
(*h)[pos as usize] += 1;
}
}
}
}
if !autostrand_pass && !histogram.is_empty() && lastchr != -1 {
write_chr(options,
&refs[lastchr as usize],
&histogram,
&mut fhs,
split_strand)?;
}
// make sure empty files were created
if histogram.is_empty() && !autostrand_pass {
for read_number in if options.split_read {
vec![1, 2]
} else {
vec![0]
} {
for s in if split_strand != "uu" {
vec!["+", "-"]
} else {
vec![""]
} {
open_file(options, read_number, s, split_strand, &mut fhs)?;
}
}
}
// close the filehandles
for (_, fh) in &mut fhs {
*fh = None;
}
if autostrand_pass {
// get the read 1 and read2 totals
let mut total1: i64 = 0;
let mut total2: i64 = 0;
for i in &autostrand_totals {
total1 += *i.1;
}
for i in &autostrand_totals2 {
total2 += *i.1;
}
// figure out the best and second-best strand types for reads 1 and 2
let mut best1: (char, i64) = ('\0', 0);
let mut second_best1: (char, i64) = ('\0', 0);
let mut best2: (char, i64) = ('\0', 0);
let mut second_best2: (char, i64) = ('\0', 0);
for i in autostrand_totals {
if i.1 > 0 {
writeln!(stderr(),
"Total evidence for read 1 strand type {}: {}",
i.0,
i.1)?;
}
if best1.1 < i.1 {
second_best1 = best1;
best1 = i;
} else if second_best1.1 < i.1 {
second_best1 = i;
}
}
for i in autostrand_totals2 {
if i.1 > 0 {
writeln!(stderr(),
"Total evidence for read 2 strand type {}: {}",
i.0,
i.1)?;
}
if best2.1 < i.1 {
second_best2 = best2;
best2 = i;
} else if second_best2.1 < i.1 {
second_best2 = i;
}
}
let threshold: f64 = 0.0; //threshold isn't working, set to zero
let strand1 = if total1 > 0 {
if threshold < (best1.1 - second_best1.1) as f64 / (total1) as f64 {
best1.0
} else {
'u'
}
} else {
'u'
};
let strand2 = if total2 > 0 {
if threshold < (best2.1 - second_best2.1) as f64 / (total2) as f64 {
best2.0
} else {
'u'
}
} else {
'u'
};
let best_strand = format!("{}{}", strand1, strand2);
writeln!(stderr(),
"autostrand_pass found best strand type: {}",
best_strand)?;
// re-run analyzeBam with the strand type indicated
analyze_bam(options, &best_strand, false, intervals)?;
}
if !autostrand_pass && options.bigwig {
// Convert bedgraph file to bigwig file
for fh in &fhs {
// write the genome file for bigwigs
let genome_filename = format!("{}.genome", fh.0);
{
let mut genome_fh = File::create(&genome_filename)?;
for r in &refs {
writeln!(genome_fh, "{}\t{}", r.1, r.0)?;
}
}
// run bedGraphToBigWig
let regex = Regex::new(r"\.bedgraph$")?;
let bigwig_file = regex.replace(fh.0, ".bw");
let sorted_bedgraph = regex.replace(fh.0, ".sorted.bedgraph");
for command in vec![Command::new("sort")
.args(&["-k1,1", "-k2,2n", "-o", &sorted_bedgraph, fh.0])
.env("LC_COLLATE", "C"),
Command::new("bedGraphToBigWig")
.args(&[&sorted_bedgraph, &genome_filename, &bigwig_file])] {
let mut child = command.spawn()?;
let exit_code = child.wait()?;
if !exit_code.success() {
if exit_code.code().is_some() {
let code = exit_code.code().r()?;
return Err(format!("Nonzero exit code {} returned from \
command: {:?}",
code,
command)
.into());
} else {
return Err(format!("Command was interrupted: {:?}", command).into());
}
}
}
// remove the bedgraph file
std::fs::remove_file(fh.0)?;
// remove the sorted bedgraph file
std::fs::remove_file(sorted_bedgraph)?;
// remove the genome file
std::fs::remove_file(&genome_filename)?;
}
};
Ok(())
}
/// Command-line options controlling bam -> bedgraph/bigwig conversion.
/// Field meanings mirror the argparse help strings set up in `run()`.
struct Options {
    split_exons: bool,    // split alignments into exons using the CIGAR string
    split_read: bool,     // produce separate output per read number (r1/r2)
    zero: bool,           // pad output bedgraph with zero-coverage intervals
    paired_only: bool,    // only count paired alignments
    proper_only: bool,    // only count proper-paired alignments
    primary_only: bool,   // only count primary (non-secondary) alignments
    trackline: bool,      // emit a UCSC track line at the top of each file
    bigwig: bool,         // also convert bedgraph output to bigwig
    uniq: bool,           // only count unique alignments (NH:i:1)
    fixchr: bool,         // rewrite chromosome names to be UCSC-compatible
    bamfile: String,      // input BAM path
    trackname: String,    // explicit UCSC track name (default: bam prefix)
    out: String,          // output file prefix (default: bam prefix)
    autostrand: String,   // annotation BAM used to auto-detect strandedness
    split_strand: String, // two chars (read1, read2): u/s/r strand handling
}
/// Baseline configuration: every flag off, empty paths/names, and the
/// "uu" strand mode (both reads unstranded).
///
/// Implemented via the standard `Default` trait instead of an inherent
/// `default()` method; `Options::default()` call sites keep working
/// unchanged, and `Options` now also composes with generic code that
/// expects `T: Default` (e.g. struct-update syntax users).
impl Default for Options {
    fn default() -> Options {
        Options {
            split_exons: false,
            split_read: false,
            zero: false,
            paired_only: false,
            proper_only: false,
            primary_only: false,
            trackline: false,
            bigwig: false,
            uniq: false,
            fixchr: false,
            // String::new() for empty strings instead of "".to_string()
            bamfile: String::new(),
            trackname: String::new(),
            out: String::new(),
            autostrand: String::new(),
            // "uu" = unstranded for both read 1 and read 2
            split_strand: "uu".to_string(),
        }
    }
}
/// Parse the command line, optionally build per-chromosome interval trees
/// from the `--autostrand` annotation BAM, then invoke `analyze_bam` to
/// produce bedgraph (and optionally bigwig) output.
///
/// Errors on a malformed `--strand` value, a missing autostrand BAM, or
/// anything propagated from parsing/IO.
fn run() -> Result<()> {
    // enable stack traces
    std::env::set_var("RUST_BACKTRACE", "1");
    let mut options = Options { ..Options::default() };
    {
        // argument-parser scope: `ap` mutably borrows fields of `options`,
        // so it must be dropped before `options` is used again below
        let mut ap = ArgumentParser::new();
        ap.set_description("Convert a bam file into a bedgraph/bigwig file.");
        ap.refer(&mut options.bamfile)
            .add_argument("BAMFILE", Store, "Input BAM filename")
            .required();
        ap.refer(&mut options.split_exons)
            .add_option(&["--split"],
                        StoreTrue,
                        "Use CIGAR string to split alignment into separate exons (default)")
            .add_option(&["--nosplit"], StoreFalse, "");
        ap.refer(&mut options.autostrand)
            .add_option(&["--autostrand"],
                        Store,
                        "Attempt to determine the strandedness of the input data using an \
                         annotation file. Must be a .bam file.")
            .metavar("ANNOT_BAMFILE");
        ap.refer(&mut options.split_strand)
            .add_option(&["--strand"],
                        Store,
                        "Split output bedgraph by strand: Possible values: u s r uu us ur su ss \
                         sr ru rs rr, first char is read1, second is read2, u=unstranded, \
                         s=stranded, r=reverse")
            .metavar("[TYPE]");
        ap.refer(&mut options.split_read)
            .add_option(&["--read"],
                        StoreTrue,
                        "Split output bedgraph by read number")
            .add_option(&["--noread"], StoreFalse, "(default)");
        ap.refer(&mut options.zero)
            .add_option(&["--zero"], StoreTrue, "Pad output bedgraph with zeroes")
            .add_option(&["--nozero"], StoreFalse, "(default)");
        ap.refer(&mut options.fixchr)
            .add_option(&["--fixchr"],
                        StoreTrue,
                        "Transform chromosome names to be UCSC-compatible")
            .add_option(&["--nofixchr"], StoreFalse, "(default)");
        ap.refer(&mut options.paired_only)
            .add_option(&["--paired"],
                        StoreTrue,
                        "Only output paired read alignments")
            .add_option(&["--nopaired"], StoreFalse, "(default)");
        ap.refer(&mut options.proper_only)
            .add_option(&["--proper"],
                        StoreTrue,
                        "Only output proper-paired read alignments")
            .add_option(&["--noproper"], StoreFalse, "(default)");
        ap.refer(&mut options.primary_only)
            .add_option(&["--primary"], StoreTrue, "Only output primary alignments")
            .add_option(&["--noprimary"], StoreFalse, "(default)");
        ap.refer(&mut options.bigwig)
            .add_option(&["--bigwig"],
                        StoreTrue,
                        "Output bigwig files (requires bedGraphToBigWig in $PATH)")
            .add_option(&["--nobigwig"], StoreFalse, "(default)");
        ap.refer(&mut options.uniq)
            .add_option(&["--uniq"],
                        StoreTrue,
                        "Keep only unique alignments (NH:i:1)")
            .add_option(&["--nouniq"], StoreFalse, "(default)");
        ap.refer(&mut options.out)
            .add_option(&["--out"], Store, "Output file prefix")
            .metavar("FILE");
        ap.refer(&mut options.trackline)
            .add_option(&["--trackline"],
                        StoreTrue,
                        "Output a UCSC track line (default)")
            .add_option(&["--notrackline"], StoreFalse, "");
        ap.refer(&mut options.trackname)
            .add_option(&["--trackname"], Store, "Name of track for the track line")
            .metavar("TRACKNAME");
        // on a parse error, print usage ourselves and exit nonzero
        if ap.parse(std::env::args().collect(),
                    &mut std::io::sink(),
                    &mut std::io::sink())
            .is_err() {
            let name = if std::env::args().count() > 0 {
                std::env::args().nth(0).r()?
            } else {
                "unknown".to_string()
            };
            ap.print_help(&name, &mut stderr())?;
            std::process::exit(1);
        }
    }
    // normalize the strand spec: lowercase, and pad a single char with 'u'
    // (i.e. "--strand s" means "su": read2 unstranded)
    options.split_strand = options.split_strand.to_ascii_lowercase();
    if options.split_strand.len() == 1 {
        options.split_strand = options.split_strand + "u";
    }
    let regex = Regex::new(r"^[usr][usr]$")?;
    if !regex.is_match(&options.split_strand) {
        return Err(format!("Invalid value for split_strand: \"{}\": values must be \
                            one of: u s r uu us ur su ss sr ru rs rr",
                           options.split_strand)
            .into());
    }
    // read in the annotation file
    let mut intervals: Option<HashMap<String, intervaltree::IntervalTree<u8>>> = None;
    if !options.autostrand.is_empty() {
        if !Path::new(&options.autostrand).exists() {
            return Err(format!("Autostrand Bam file {} could not be found!",
                               &options.autostrand)
                .into());
        }
        let bam = match rust_htslib::bam::Reader::from_path(&options.autostrand) {
            Ok(r) => Ok(r),
            Err(_) => Err("BGZFError!"),
        }?;
        // build the (length, name) table for each reference sequence
        let header = bam.header();
        let mut refs: Vec<(u32, String)> = Vec::new();
        refs.resize(header.target_count() as usize, (0, "".to_string()));
        let target_names = header.target_names();
        for target_name in target_names {
            let tid = header.tid(target_name).r()?;
            let target_len = header.target_len(tid).r()?;
            let target_name = std::str::from_utf8(target_name)?;
            refs[tid as usize] = (target_len, target_name.to_string());
        }
        if options.fixchr {
            for r in &mut refs {
                // NOTE(review): this matches names that ALREADY start with
                // "chr"/"Zv9_" and prepends "chr" again (e.g. "chr1" ->
                // "chrchr1"); presumably the "chr" case should be negated —
                // confirm against upstream bam2bedgraph
                let regex = Regex::new(r"^(chr|Zv9_)")?;
                if regex.is_match(&r.1) {
                    let refname = r.1.to_string();
                    r.1.clear();
                    r.1.push_str(&format!("chr{}", refname));
                }
            }
        }
        // collect one interval (with strand byte '+'/'-') per annotation read
        let mut interval_lists: HashMap<String, Vec<intervaltree::Interval<u8>>> = HashMap::new();
        let mut read = rust_htslib::bam::record::Record::new();
        while bam.read(&mut read).is_ok() {
            let chr = refs[read.tid() as usize].1.clone();
            if !interval_lists.contains_key(&chr) {
                interval_lists.insert(chr.clone(), Vec::new());
            }
            let mut exons: Vec<(i32, i32)> = Vec::new();
            cigar2exons(&mut exons, &read.cigar(), read.pos())?;
            if !exons.is_empty() {
                let interval_list = interval_lists.get_mut(&chr).r()?;
                // interval spans from alignment start (1-based) to last exon end
                interval_list.push(intervaltree::Interval::new(read.pos() + 1,
                                                               exons[exons.len() - 1].1,
                                                               if read.is_reverse() {
                                                                   b'-'
                                                               } else {
                                                                   b'+'
                                                               }));
            }
        }
        // freeze each per-chromosome list into an interval tree
        for (chr, list) in &interval_lists {
            if intervals.is_none() {
                intervals = Some(HashMap::new());
            }
            let interval = intervals.as_mut().r()?;
            interval.insert(chr.clone(), intervaltree::IntervalTree::new_from(list));
        }
    }
    // // analyze the bam file and produce histograms
    if !options.autostrand.is_empty() {
        // make both stranded and unstranded files
        analyze_bam(&options,
                    &options.split_strand,
                    !options.autostrand.is_empty(),
                    &intervals)?;
        analyze_bam(&options, "uu", false, &intervals)?;
    } else {
        analyze_bam(&options,
                    &options.split_strand,
                    !options.autostrand.is_empty(),
                    &intervals)?;
    }
    Ok(())
}
fn main() {
let result = run();
if result.is_err() {
writeln!(stderr(),
"Backtrace: {:?}",
result.err().unwrap().backtrace())
.unwrap();
std::process::exit(1);
}
std::process::exit(0);
}
Improve error handling.
#![recursion_limit = "1024"]
#![cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity, trivial_regex))]
use std::ascii::AsciiExt;
use std::collections::HashMap;
use std::io::Write;
use std::io::BufWriter;
use std::io::stderr;
use std::fs::File;
use std::str;
use std::path::{PathBuf, Path};
use std::process::Command;
use std::vec::Vec;
extern crate argparse;
use argparse::{ArgumentParser, StoreTrue, StoreFalse, Store};
extern crate regex;
use regex::Regex;
extern crate rust_htslib;
use rust_htslib::bam::Read;
use rust_htslib::bam::record::Cigar;
use rust_htslib::bam::Reader;
#[macro_use]
extern crate error_chain;
mod intervaltree;
/// Error plumbing for the whole program, generated by `error_chain!`.
mod errors {
    error_chain!{
        // wrap common library errors so `?` can convert them automatically
        foreign_links {
            ::std::io::Error, Io;
            ::std::str::Utf8Error, Utf8;
            ::regex::Error, Regex;
        }
        errors {
            // raised when an `Option` unexpectedly held `None` (see `ToResult`)
            NoneError
        }
    }
    /// Convenience conversion from `Option<T>` to this crate's `Result<T>`,
    /// so `?` can be used on `Option` values via `.r()?`.
    pub trait ToResult<T> {
        fn r(self) -> Result<T>;
    }
    impl<T> ToResult<T> for Option<T> {
        fn r(self) -> Result<T> {
            match self {
                Some(v) => Ok(v),
                // `None` maps onto the dedicated NoneError kind
                None => Err(ErrorKind::NoneError.into()),
            }
        }
    }
}
use errors::*;
use errors::ToResult;
/// Translate a CIGAR string into reference-space exon intervals, appended
/// to `exons` as half-open `(start, end)` pairs starting at `pos`.
///
/// Match ops produce an exon; RefSkip/Del advance the reference cursor
/// without producing one; Ins/SoftClip/HardClip/Pad consume no reference.
/// Any other op is reported as a bad CIGAR string.
fn cigar2exons(exons: &mut Vec<(i32, i32)>, cigar: &[Cigar], pos: i32) -> Result<()> {
    let mut cursor = pos;
    for op in cigar {
        match *op {
            // aligned block: consumes reference and yields an exon
            Cigar::Match(length) => {
                let start = cursor;
                cursor += length as i32;
                exons.push((start, cursor));
            }
            // intron skip or deletion: consumes reference, no exon
            Cigar::RefSkip(length) |
            Cigar::Del(length) => {
                cursor += length as i32;
            }
            // these consume no reference sequence at all
            Cigar::Ins(_) |
            Cigar::SoftClip(_) |
            Cigar::HardClip(_) |
            Cigar::Pad(_) => {}
            // anything else is malformed input
            ref c => return Err(format!("Bad CIGAR string: {:?}", c).into()),
        }
    }
    Ok(())
}
/// Derive the bedgraph file name for a given read number / strand
/// combination, creating the file (with an optional UCSC track line) and
/// registering it in `fhs` the first time that name is requested.
/// Returns the file name so callers can look the handle up in `fhs`.
fn open_file(options: &Options,
             read_number: i32,
             strand: &str,
             split_strand: &str,
             fhs: &mut HashMap<String, Option<File>>)
             -> Result<String> {
    // bam file name with its extension stripped; default prefix for names
    let mut prefix = PathBuf::new();
    prefix.set_file_name(&options.bamfile);
    prefix.set_extension("");

    // suffix identifying the read number, e.g. ".r1", when splitting by read
    let read_part = if options.split_read && read_number > 0 {
        format!(".r{}", read_number)
    } else {
        String::new()
    };
    // suffix identifying the strand, e.g. ".+", when splitting by strand
    let strand_part = if split_strand != "uu" && !strand.is_empty() {
        format!(".{}", strand)
    } else {
        String::new()
    };

    // track name: explicit --trackname, or else the bam file prefix
    let mut track_name = if !options.trackname.is_empty() {
        options.trackname.clone()
    } else {
        prefix.as_path().to_str().ok_or("Failed to parse path!")?.to_string()
    };
    track_name.push_str(&read_part);
    track_name.push_str(&strand_part);

    // output file name: explicit --out prefix, or else the bam file prefix
    let mut filename = if !options.out.is_empty() {
        options.out.clone()
    } else {
        prefix.as_path().to_str().ok_or("Failed to parse path!")?.to_string()
    };
    filename.push_str(&read_part);
    filename.push_str(&strand_part);
    filename.push_str(".bedgraph");

    // create and register the file on first use
    if !fhs.contains_key(&filename) {
        let mut f = File::create(&filename)?;
        if options.trackline {
            writeln!(f,
                     "track type=bedGraph name=\"{}\" description=\"{}\" visibility=full",
                     track_name,
                     track_name)?;
        }
        fhs.insert(filename.clone(), Some(f));
    }
    Ok(filename)
}
/// Write one chromosome's coverage histograms as bedgraph intervals.
///
/// For each (read number, strand) histogram, runs of equal coverage are
/// merged into one `chrom\tstart\tend\tvalue` line; minus-strand values are
/// emitted negated. Zero-coverage runs are only written with `--zero`.
fn write_chr(options: &Options,
             chr: &(u32, String),
             histogram: &HashMap<(i32, String), Vec<i32>>,
             fhs: &mut HashMap<String, Option<File>>,
             split_strand: &str)
             -> Result<()> {
    for (key, histo) in histogram {
        let read_number = key.0;
        let strand = &key.1;
        // look up (creating if needed) the output file for this combination
        let filename = open_file(options, read_number, strand, split_strand, fhs)?;
        let mut f = fhs.get_mut(&filename).r()?;
        let file = f.as_mut().r()?;
        let mut writer = BufWriter::new(file);
        // scan the histogram to produce the bedgraph data
        let mut start: usize = 0;
        let mut end: usize = 0;
        let ref_length: usize = chr.0 as usize;
        while start < ref_length {
            // advance `end` across the run of positions with equal coverage;
            // positions past the histogram's end are treated as coverage 0
            while (if end < histo.len() { histo[end] } else { 0 }) ==
                  (if start < histo.len() { histo[start] } else { 0 }) &&
                  end < ref_length {
                end += 1
            }
            if options.zero || (if start < histo.len() { histo[start] } else { 0 }) > 0 {
                // NOTE(review): with --zero and start >= histo.len() the
                // unguarded `histo[start]` below can index out of bounds and
                // panic — confirm whether chr.0 can exceed the histogram length
                writeln!(writer,
                         "{}\t{}\t{}\t{}",
                         chr.1,
                         start,
                         end,
                         if strand == "-" {
                             -histo[start]
                         } else {
                             histo[start]
                         })?;
            }
            start = end;
        }
    }
    Ok(())
}
/// Scan `options.bamfile` in one of two modes:
///
/// * `autostrand_pass == true` — tally overlap evidence between reads and
///   the annotation `intervals` to guess the library's strandedness, then
///   recursively re-run itself with the detected strand type;
/// * otherwise — build per-(read number, strand) coverage histograms and
///   write them out per chromosome as bedgraph (and optionally bigwig).
fn analyze_bam(options: &Options,
               split_strand: &str,
               autostrand_pass: bool,
               intervals: &Option<HashMap<String, intervaltree::IntervalTree<u8>>>)
               -> Result<()> {
    if !Path::new(&options.bamfile).exists() {
        return Err(format!("Bam file {} could not be found!", &options.bamfile).into());
    }
    let bam = (match Reader::from_path(&options.bamfile) {
        Ok(reader) => Ok(reader),
        Err(_) => Err("Error: BGZFError"),
    })?;
    // build the (length, name) table for each reference sequence
    let header = bam.header();
    let mut refs: Vec<(u32, String)> = Vec::new();
    refs.resize(header.target_count() as usize, (0, "".to_string()));
    let target_names = header.target_names();
    for target_name in target_names {
        let tid = header.tid(target_name).r()?;
        let target_len = header.target_len(tid).r()?;
        let target_name = std::str::from_utf8(target_name)?;
        refs[tid as usize] = (target_len, target_name.to_string());
    }
    if options.fixchr {
        for r in &mut refs {
            // NOTE(review): this matches names ALREADY starting with
            // "chr"/"Zv9_" and prepends "chr" again (e.g. "chr1" ->
            // "chrchr1"); presumably the "chr" case should be negated —
            // confirm against upstream bam2bedgraph
            let regex = Regex::new(r"^(chr|Zv9_)")?;
            if regex.is_match(&r.1) {
                let refname = r.1.to_string();
                r.1.clear();
                r.1.push_str(&format!("chr{}", refname));
            }
        }
    }
    if autostrand_pass {
        writeln!(stderr(),
                 "Running strand detection phase on {}",
                 options.bamfile)?;
    } else {
        writeln!(stderr(), "Building histograms for {}", options.bamfile)?;
    }
    // build a lookup map for the refseqs
    // NOTE(review): `refmap` is built here but never read in this function
    let mut refmap: HashMap<String, usize> = HashMap::new();
    for (i, _) in refs.iter().enumerate() {
        refmap.insert(refs[i].1.to_string(), i);
    }
    let mut lastchr: i32 = -1;
    let mut fhs: HashMap<String, Option<File>> = HashMap::new();
    let mut histogram: HashMap<(i32, String), Vec<i32>> = HashMap::new();
    // strandedness evidence accumulators: 's' = same-strand, 'r' = reverse
    let mut autostrand_totals: HashMap<char, i64> = HashMap::new();
    autostrand_totals.insert('s', 0);
    autostrand_totals.insert('r', 0);
    let mut autostrand_totals2: HashMap<char, i64> = HashMap::new();
    autostrand_totals2.insert('s', 0);
    autostrand_totals2.insert('r', 0);
    let mut read = rust_htslib::bam::record::Record::new();
    while bam.read(&mut read).is_ok() {
        // if we've hit a new chr, write out the bedgraph data and clear the histogram
        if lastchr == -1 || read.tid() != lastchr {
            if !autostrand_pass && !histogram.is_empty() && lastchr != -1 {
                write_chr(options,
                          &refs[lastchr as usize],
                          &histogram,
                          &mut fhs,
                          split_strand)?;
            }
            histogram.clear();
            lastchr = read.tid();
        }
        // skip this read if it's no good
        let paired = read.is_paired();
        let proper = read.is_proper_pair();
        let primary = !read.is_secondary();
        if (options.paired_only && !paired) || (options.primary_only && !primary) ||
           (options.proper_only && !proper) {
            continue;
        }
        // skip if it's not unique and we want unique alignments
        if options.uniq {
            let hits = read.aux("NH".to_string().as_bytes());
            if hits == None || hits.r()?.integer() != 1 {
                continue;
            }
        }
        let mut exons: Vec<(i32, i32)> = Vec::new();
        let mut get_exons: Vec<(i32, i32)> = Vec::new();
        cigar2exons(&mut get_exons, &read.cigar(), read.pos())?;
        // without --split, collapse all exons into one spanning interval
        // NOTE(review): when `split_exons` IS set, `exons` is never populated
        // from `get_exons`, so split alignments contribute nothing below —
        // presumably an `exons = get_exons` branch is missing; confirm
        if !options.split_exons && !get_exons.is_empty() {
            let first = get_exons.get(0).r()?;
            let last = get_exons.get(get_exons.len() - 1).r()?;
            exons = vec![(first.0, last.1)];
        }
        // NOTE(review): `is_secondary()` is used to derive the read number;
        // presumably a mate flag (second-in-pair) was intended — confirm
        let read_number = if read.is_secondary() { 2 } else { 1 };
        // attempt to determine the strandedness of the transcript
        // read numbers match, is not reverse, is not flipped
        // let xs = read.aux("XS".as_bytes());
        let strand =
            //if xs.is_some() {
            //    str::from_utf8(xs.r()?.string())?
            //} else
            // map the per-read u/s/r code plus the reverse flag to "+"/"-"/""
            if read_number == 1 {
                if split_strand.chars().nth(0).r()? == 'r' {
                    if read.is_reverse() { "+" } else { "-" }
                } else if split_strand.chars().nth(0).r()? == 's' {
                    if read.is_reverse() { "-" } else { "+" }
                } else {
                    ""
                }
            } else if read_number == 2 {
                if split_strand.chars().nth(1).r()? == 's' {
                    if read.is_reverse() { "-" } else { "+" }
                } else if split_strand.chars().nth(1).r()? == 'r' {
                    if read.is_reverse() { "+" } else { "-" }
                } else {
                    ""
                }
            } else {
                ""
            };
        // histogram key uses read number only when --read splitting is on
        let read_num = if options.split_read { read_number } else { 0 };
        let ref_length = refs[read.tid() as usize].0;
        // add the read to the histogram
        for exon in exons {
            // try to determine the strandedness of the data
            if autostrand_pass {
                if intervals.is_some() {
                    let intervals = intervals.as_ref().r()?;
                    if intervals.contains_key(&refs[lastchr as usize].1) {
                        let mut overlapping_annot: Vec<intervaltree::Interval<u8>> = Vec::new();
                        intervals[&refs[lastchr as usize].1]
                            .find_overlapping(exon.0 + 1, exon.1, &mut overlapping_annot);
                        for interval in overlapping_annot {
                            // weight evidence by the number of overlapping bases
                            let overlap_length = std::cmp::min(exon.1, interval.stop) -
                                                 std::cmp::max(exon.0, interval.start - 1);
                            // 's' when read strand agrees with annotation, else 'r'
                            let strandtype = if read.is_reverse() == (interval.value == b'-') {
                                's'
                            } else {
                                'r'
                            };
                            if read_number == 1 {
                                let at = autostrand_totals.get_mut(&strandtype).r()?;
                                *at += overlap_length as i64
                            } else if read_number == 2 {
                                let at2 = autostrand_totals2.get_mut(&strandtype).r()?;
                                *at2 += overlap_length as i64
                            }
                        }
                    }
                }
            } else {
                let tuple = (read_num, strand.to_string());
                if !histogram.contains_key(&tuple) {
                    histogram.insert(tuple.clone(), Vec::new());
                }
                // keep track of chromosome sizes
                if ref_length < exon.1 as u32 {
                    refs[read.tid() as usize].0 = exon.1 as u32;
                }
                if histogram[&tuple].len() < ref_length as usize {
                    let h = histogram.get_mut(&tuple).r()?;
                    h.resize(ref_length as usize, 0);
                }
                // bump coverage for every base of the exon
                for pos in exon.0..exon.1 {
                    let h = histogram.get_mut(&tuple).r()?;
                    (*h)[pos as usize] += 1;
                }
            }
        }
    }
    // flush the final chromosome's histogram
    if !autostrand_pass && !histogram.is_empty() && lastchr != -1 {
        write_chr(options,
                  &refs[lastchr as usize],
                  &histogram,
                  &mut fhs,
                  split_strand)?;
    }
    // make sure empty files were created
    if histogram.is_empty() && !autostrand_pass {
        for read_number in if options.split_read {
            vec![1, 2]
        } else {
            vec![0]
        } {
            for s in if split_strand != "uu" {
                vec!["+", "-"]
            } else {
                vec![""]
            } {
                open_file(options, read_number, s, split_strand, &mut fhs)?;
            }
        }
    }
    // close the filehandles
    for (_, fh) in &mut fhs {
        *fh = None;
    }
    if autostrand_pass {
        // get the read 1 and read2 totals
        let mut total1: i64 = 0;
        let mut total2: i64 = 0;
        for i in &autostrand_totals {
            total1 += *i.1;
        }
        for i in &autostrand_totals2 {
            total2 += *i.1;
        }
        // figure out the best and second-best strand types for reads 1 and 2
        let mut best1: (char, i64) = ('\0', 0);
        let mut second_best1: (char, i64) = ('\0', 0);
        let mut best2: (char, i64) = ('\0', 0);
        let mut second_best2: (char, i64) = ('\0', 0);
        for i in autostrand_totals {
            if i.1 > 0 {
                writeln!(stderr(),
                         "Total evidence for read 1 strand type {}: {}",
                         i.0,
                         i.1)?;
            }
            if best1.1 < i.1 {
                second_best1 = best1;
                best1 = i;
            } else if second_best1.1 < i.1 {
                second_best1 = i;
            }
        }
        for i in autostrand_totals2 {
            if i.1 > 0 {
                writeln!(stderr(),
                         "Total evidence for read 2 strand type {}: {}",
                         i.0,
                         i.1)?;
            }
            if best2.1 < i.1 {
                second_best2 = best2;
                best2 = i;
            } else if second_best2.1 < i.1 {
                second_best2 = i;
            }
        }
        let threshold: f64 = 0.0; //threshold isn't working, set to zero
        // pick the winning strand type per read; 'u' when the margin over
        // the runner-up does not clear the threshold
        let strand1 = if total1 > 0 {
            if threshold < (best1.1 - second_best1.1) as f64 / (total1) as f64 {
                best1.0
            } else {
                'u'
            }
        } else {
            'u'
        };
        let strand2 = if total2 > 0 {
            if threshold < (best2.1 - second_best2.1) as f64 / (total2) as f64 {
                best2.0
            } else {
                'u'
            }
        } else {
            'u'
        };
        let best_strand = format!("{}{}", strand1, strand2);
        writeln!(stderr(),
                 "autostrand_pass found best strand type: {}",
                 best_strand)?;
        // re-run analyzeBam with the strand type indicated
        analyze_bam(options, &best_strand, false, intervals)?;
    }
    if !autostrand_pass && options.bigwig {
        // Convert bedgraph file to bigwig file
        for fh in &fhs {
            // write the genome file for bigwigs
            let genome_filename = format!("{}.genome", fh.0);
            {
                let mut genome_fh = File::create(&genome_filename)?;
                for r in &refs {
                    writeln!(genome_fh, "{}\t{}", r.1, r.0)?;
                }
            }
            // run bedGraphToBigWig
            let regex = Regex::new(r"\.bedgraph$")?;
            let bigwig_file = regex.replace(fh.0, ".bw");
            let sorted_bedgraph = regex.replace(fh.0, ".sorted.bedgraph");
            // sort with C collation (bedGraphToBigWig requires it), then convert
            for command in vec![Command::new("sort")
                                    .args(&["-k1,1", "-k2,2n", "-o", &sorted_bedgraph, fh.0])
                                    .env("LC_COLLATE", "C"),
                                Command::new("bedGraphToBigWig")
                                    .args(&[&sorted_bedgraph, &genome_filename, &bigwig_file])] {
                let mut child = command.spawn()?;
                let exit_code = child.wait()?;
                if !exit_code.success() {
                    if exit_code.code().is_some() {
                        let code = exit_code.code().r()?;
                        return Err(format!("Nonzero exit code {} returned from \
                                            command: {:?}",
                                           code,
                                           command)
                            .into());
                    } else {
                        // killed by a signal: no exit code available
                        return Err(format!("Command was interrupted: {:?}", command).into());
                    }
                }
            }
            // remove the bedgraph file
            std::fs::remove_file(fh.0)?;
            // remove the sorted bedgraph file
            std::fs::remove_file(sorted_bedgraph)?;
            // remove the genome file
            std::fs::remove_file(&genome_filename)?;
        }
    };
    Ok(())
}
/// Command-line options controlling bam -> bedgraph/bigwig conversion.
/// Field meanings mirror the argparse help strings set up in `run()`.
struct Options {
    split_exons: bool,    // split alignments into exons using the CIGAR string
    split_read: bool,     // produce separate output per read number (r1/r2)
    zero: bool,           // pad output bedgraph with zero-coverage intervals
    paired_only: bool,    // only count paired alignments
    proper_only: bool,    // only count proper-paired alignments
    primary_only: bool,   // only count primary (non-secondary) alignments
    trackline: bool,      // emit a UCSC track line at the top of each file
    bigwig: bool,         // also convert bedgraph output to bigwig
    uniq: bool,           // only count unique alignments (NH:i:1)
    fixchr: bool,         // rewrite chromosome names to be UCSC-compatible
    bamfile: String,      // input BAM path
    trackname: String,    // explicit UCSC track name (default: bam prefix)
    out: String,          // output file prefix (default: bam prefix)
    autostrand: String,   // annotation BAM used to auto-detect strandedness
    split_strand: String, // two chars (read1, read2): u/s/r strand handling
}
/// Baseline configuration: every flag off, empty paths/names, and the
/// "uu" strand mode (both reads unstranded).
///
/// Implemented via the standard `Default` trait instead of an inherent
/// `default()` method; `Options::default()` call sites keep working
/// unchanged, and `Options` now also composes with generic code that
/// expects `T: Default` (e.g. struct-update syntax users).
impl Default for Options {
    fn default() -> Options {
        Options {
            split_exons: false,
            split_read: false,
            zero: false,
            paired_only: false,
            proper_only: false,
            primary_only: false,
            trackline: false,
            bigwig: false,
            uniq: false,
            fixchr: false,
            // String::new() for empty strings instead of "".to_string()
            bamfile: String::new(),
            trackname: String::new(),
            out: String::new(),
            autostrand: String::new(),
            // "uu" = unstranded for both read 1 and read 2
            split_strand: "uu".to_string(),
        }
    }
}
/// Parse the command line, optionally build per-chromosome interval trees
/// from the `--autostrand` annotation BAM, then invoke `analyze_bam` to
/// produce the bedgraph/bigwig output.
///
/// Errors on a malformed `--strand` value, a missing autostrand BAM, or
/// anything propagated from parsing/IO.
fn run() -> Result<()> {
    let mut options = Options { ..Options::default() };
    {
        // argument-parser scope: `ap` mutably borrows fields of `options`,
        // so it must be dropped before `options` is used again below
        let mut ap = ArgumentParser::new();
        ap.set_description("Convert a bam file into a bedgraph/bigwig file.");
        ap.refer(&mut options.bamfile)
            .add_argument("BAMFILE", Store, "Input BAM filename")
            .required();
        ap.refer(&mut options.split_exons)
            .add_option(&["--split"],
                        StoreTrue,
                        "Use CIGAR string to split alignment into separate exons (default)")
            .add_option(&["--nosplit"], StoreFalse, "");
        ap.refer(&mut options.autostrand)
            .add_option(&["--autostrand"],
                        Store,
                        "Attempt to determine the strandedness of the input data using an \
                         annotation file. Must be a .bam file.")
            .metavar("ANNOT_BAMFILE");
        ap.refer(&mut options.split_strand)
            .add_option(&["--strand"],
                        Store,
                        "Split output bedgraph by strand: Possible values: u s r uu us ur su ss \
                         sr ru rs rr, first char is read1, second is read2, u=unstranded, \
                         s=stranded, r=reverse")
            .metavar("[TYPE]");
        ap.refer(&mut options.split_read)
            .add_option(&["--read"],
                        StoreTrue,
                        "Split output bedgraph by read number")
            .add_option(&["--noread"], StoreFalse, "(default)");
        ap.refer(&mut options.zero)
            .add_option(&["--zero"], StoreTrue, "Pad output bedgraph with zeroes")
            .add_option(&["--nozero"], StoreFalse, "(default)");
        ap.refer(&mut options.fixchr)
            .add_option(&["--fixchr"],
                        StoreTrue,
                        "Transform chromosome names to be UCSC-compatible")
            .add_option(&["--nofixchr"], StoreFalse, "(default)");
        ap.refer(&mut options.paired_only)
            .add_option(&["--paired"],
                        StoreTrue,
                        "Only output paired read alignments")
            .add_option(&["--nopaired"], StoreFalse, "(default)");
        ap.refer(&mut options.proper_only)
            .add_option(&["--proper"],
                        StoreTrue,
                        "Only output proper-paired read alignments")
            .add_option(&["--noproper"], StoreFalse, "(default)");
        ap.refer(&mut options.primary_only)
            .add_option(&["--primary"], StoreTrue, "Only output primary alignments")
            .add_option(&["--noprimary"], StoreFalse, "(default)");
        ap.refer(&mut options.bigwig)
            .add_option(&["--bigwig"],
                        StoreTrue,
                        "Output bigwig files (requires bedGraphToBigWig in $PATH)")
            .add_option(&["--nobigwig"], StoreFalse, "(default)");
        ap.refer(&mut options.uniq)
            .add_option(&["--uniq"],
                        StoreTrue,
                        "Keep only unique alignments (NH:i:1)")
            .add_option(&["--nouniq"], StoreFalse, "(default)");
        ap.refer(&mut options.out)
            .add_option(&["--out"], Store, "Output file prefix")
            .metavar("FILE");
        ap.refer(&mut options.trackline)
            .add_option(&["--trackline"],
                        StoreTrue,
                        "Output a UCSC track line (default)")
            .add_option(&["--notrackline"], StoreFalse, "");
        ap.refer(&mut options.trackname)
            .add_option(&["--trackname"], Store, "Name of track for the track line")
            .metavar("TRACKNAME");
        // on a parse error, print usage ourselves and exit nonzero
        if ap.parse(std::env::args().collect(),
                    &mut std::io::sink(),
                    &mut std::io::sink())
            .is_err() {
            let name = if std::env::args().count() > 0 {
                std::env::args().nth(0).r()?
            } else {
                "unknown".to_string()
            };
            ap.print_help(&name, &mut stderr())?;
            std::process::exit(1);
        }
    }
    // normalize the strand spec: lowercase, and pad a single char with 'u'
    // (i.e. "--strand s" means "su": read2 unstranded)
    options.split_strand = options.split_strand.to_ascii_lowercase();
    if options.split_strand.len() == 1 {
        options.split_strand = options.split_strand + "u";
    }
    let regex = Regex::new(r"^[usr][usr]$")?;
    if !regex.is_match(&options.split_strand) {
        return Err(format!("Invalid value for split_strand: \"{}\": values must be \
                            one of: u s r uu us ur su ss sr ru rs rr",
                           options.split_strand)
            .into());
    }
    // read in the annotation file
    let mut intervals: Option<HashMap<String, intervaltree::IntervalTree<u8>>> = None;
    if !options.autostrand.is_empty() {
        if !Path::new(&options.autostrand).exists() {
            return Err(format!("Autostrand Bam file {} could not be found!",
                               &options.autostrand)
                .into());
        }
        let bam = match rust_htslib::bam::Reader::from_path(&options.autostrand) {
            Ok(r) => Ok(r),
            Err(_) => Err("BGZFError!"),
        }?;
        // build the (length, name) table for each reference sequence
        let header = bam.header();
        let mut refs: Vec<(u32, String)> = Vec::new();
        refs.resize(header.target_count() as usize, (0, "".to_string()));
        let target_names = header.target_names();
        for target_name in target_names {
            let tid = header.tid(target_name).r()?;
            let target_len = header.target_len(tid).r()?;
            let target_name = std::str::from_utf8(target_name)?;
            refs[tid as usize] = (target_len, target_name.to_string());
        }
        if options.fixchr {
            for r in &mut refs {
                // NOTE(review): this matches names ALREADY starting with
                // "chr"/"Zv9_" and prepends "chr" again (e.g. "chr1" ->
                // "chrchr1"); presumably the "chr" case should be negated —
                // confirm against upstream bam2bedgraph
                let regex = Regex::new(r"^(chr|Zv9_)")?;
                if regex.is_match(&r.1) {
                    let refname = r.1.to_string();
                    r.1.clear();
                    r.1.push_str(&format!("chr{}", refname));
                }
            }
        }
        // collect one interval (with strand byte '+'/'-') per annotation read
        let mut interval_lists: HashMap<String, Vec<intervaltree::Interval<u8>>> = HashMap::new();
        let mut read = rust_htslib::bam::record::Record::new();
        while bam.read(&mut read).is_ok() {
            let chr = refs[read.tid() as usize].1.clone();
            if !interval_lists.contains_key(&chr) {
                interval_lists.insert(chr.clone(), Vec::new());
            }
            let mut exons: Vec<(i32, i32)> = Vec::new();
            cigar2exons(&mut exons, &read.cigar(), read.pos())?;
            if !exons.is_empty() {
                let interval_list = interval_lists.get_mut(&chr).r()?;
                // interval spans from alignment start (1-based) to last exon end
                interval_list.push(intervaltree::Interval::new(read.pos() + 1,
                                                               exons[exons.len() - 1].1,
                                                               if read.is_reverse() {
                                                                   b'-'
                                                               } else {
                                                                   b'+'
                                                               }));
            }
        }
        // freeze each per-chromosome list into an interval tree
        for (chr, list) in &interval_lists {
            if intervals.is_none() {
                intervals = Some(HashMap::new());
            }
            let interval = intervals.as_mut().r()?;
            interval.insert(chr.clone(), intervaltree::IntervalTree::new_from(list));
        }
    }
    // // analyze the bam file and produce histograms
    if !options.autostrand.is_empty() {
        // make both stranded and unstranded files
        analyze_bam(&options,
                    &options.split_strand,
                    !options.autostrand.is_empty(),
                    &intervals)?;
        analyze_bam(&options, "uu", false, &intervals)?;
    } else {
        analyze_bam(&options,
                    &options.split_strand,
                    !options.autostrand.is_empty(),
                    &intervals)?;
    }
    Ok(())
}
/// Program entry point: enables backtrace capture, invokes `run()`, and on
/// failure reports the full error-chain (message, causes, and — when one
/// was captured — the backtrace) before exiting nonzero.
fn main() {
    // enable stack traces
    std::env::set_var("RUST_BACKTRACE", "1");
    match run() {
        Ok(_) => {}
        Err(ref e) => {
            // head of the error-chain
            println!("error: {}", e);
            // remaining links of the chain
            for cause in e.iter().skip(1) {
                println!("caused by: {}", cause);
            }
            // a backtrace is only present when RUST_BACKTRACE was set
            if let Some(backtrace) = e.backtrace() {
                println!("backtrace: {:?}", backtrace);
            }
            ::std::process::exit(1);
        }
    }
}
|
#![feature(core)]
#![feature(io)]
#![feature(net)]
#![feature(old_path)]
extern crate hyper;
extern crate "rustc-serialize" as rustc_serialize;
use std::io::prelude::*;
use std::fs::File;
use std::net::IpAddr;
use hyper::Server;
use hyper::server::Request;
use hyper::server::Response;
use hyper::net::Fresh;
use hyper::server::Handler;
use rustc_serialize::json::decode;
/// Top-level configuration loaded from config.json: all configured hooks.
#[derive(RustcDecodable)]
pub struct HookConfiguration {
    hooks: Vec<HookConfig>, // one entry per repository
}
/// Associates a repository name with the action to run when its hook fires.
#[derive(RustcDecodable)]
pub struct HookConfig {
    name: String,       // repository name, matched against the hook payload
    action: HookAction, // action passed to Daemon::deploy on a match
}
/// A deploy action loaded from configuration.
#[derive(RustcDecodable)]
pub struct HookAction {
    script: String, // script to run for this hook (currently only logged)
    pwd: String,    // presumably the working directory for the script — TODO confirm
}
/// Incoming git webhook payload (only the fields this daemon needs).
#[derive(RustcDecodable)]
pub struct GitHook {
    before: String,         // presumably the pre-update commit id — confirm against payload
    after: String,          // presumably the post-update commit id — confirm against payload
    repository: Repository, // repository the event refers to
}
/// Repository information embedded in the hook payload.
#[derive(RustcDecodable)]
pub struct Repository {
    name: String, // matched against HookConfig.name to select a hook
    url: String,  // repository URL (not read anywhere in this file)
}
/// HTTP handler state: the hook configuration loaded at startup.
pub struct Daemon {
    config: HookConfiguration,
}
impl Daemon {
    /// Run the deploy action for a matched hook.
    /// NOTE(review): this currently only logs the hook name and its script;
    /// it does not execute `hk.action.script` — confirm whether execution is
    /// implemented elsewhere or still TODO.
    fn deploy(&self, hk: &HookConfig) {
        println!("Processing {}", hk.name);
        println!("{:?}", hk.action.script );
    }
}
impl Handler for Daemon {
fn handle(&self, req: Request, res: Response<Fresh>) {
let mut s = String::new();
let mut myreq = req;
let err = myreq.read_to_string(&mut s);
// match err {
// Err(e) => println!("Failed to read file: {:?}", e),
// _ => {
// let decoded: GitHook = decode(s.as_slice()).unwrap();
// let repo_name = decoded.repository.name;
// println!("Repository {}", repo_name);
// match self.config.hooks.iter().filter(|&binding| binding.name == repo_name).next() {
// Some(hk) => self.deploy(hk),
// None => println!("No hook for {}", repo_name),
// }
// }
// }
let mut res = res.start().unwrap();
res.write_all(b"OK.").unwrap();
res.end().unwrap();
}
}
/// Entry point: loads `config.json`, decodes it into a `HookConfiguration`,
/// and serves the webhook daemon on 127.0.0.1:5000.
fn main() {
let mut json_config = String::new();
let config_location = &Path::new("config.json");
// Read the whole config file into `json_config`, aborting with a
// diagnostic on any I/O error.
// NOTE(review): `err.detail()` and `as_slice()` are pre-1.0 APIs; this
// file targets an old nightly toolchain (see the #![feature] flags).
match File::open(config_location) {
Err(err) => panic!("Error during config file read: {:?}. {} {}",
config_location, err.description(), err.detail().unwrap_or("".to_string())),
Ok(icf) => {
let mut config_file = icf;
config_file.read_to_string(&mut json_config).ok().unwrap()
},
};
// Decode the JSON config; abort on a malformed file.
let config: HookConfiguration = match decode(json_config.as_slice()) {
Err(err) => panic!("{}", err),
Ok(content) => content,
};
let d = Daemon{config: config};
let port = 5000;
println!("Starting up, listening on port {}", port);
// Blocks forever serving HTTP requests with `Daemon` as the handler.
Server::new(d).listen(IpAddr::new_v4(127, 0, 0, 1), port).unwrap();
}
working code
#![feature(core)]
#![feature(io)]
#![feature(net)]
#![feature(old_path)]
extern crate hyper;
extern crate "rustc-serialize" as rustc_serialize;
use std::io::prelude::*;
use std::fs::File;
use std::net::IpAddr;
use hyper::Server;
use hyper::server::Request;
use hyper::server::Response;
use hyper::net::Fresh;
use hyper::server::Handler;
use rustc_serialize::json::decode;
// Root of config.json: the list of configured webhooks.
#[derive(RustcDecodable)]
pub struct HookConfiguration {
hooks: Vec<HookConfig>,
}
// One webhook binding: repository name -> action to run when it fires.
#[derive(RustcDecodable)]
pub struct HookConfig {
name: String,
action: HookAction,
}
// Action executed for a matched hook: a script and its working directory.
#[derive(RustcDecodable)]
pub struct HookAction {
script: String,
pwd: String,
}
// Incoming git push payload (subset of a GitHub-style webhook JSON body).
#[derive(RustcDecodable)]
pub struct GitHook {
before: String,
after: String,
repository: Repository,
}
// Repository identification inside the push payload.
#[derive(RustcDecodable)]
pub struct Repository {
name: String,
url: String,
}
// HTTP handler state: the parsed hook configuration.
pub struct Daemon {
config: HookConfiguration,
}
impl Daemon {
// Runs the configured action for a matched hook.
// Currently only logs the hook name and its script; actual execution
// is not implemented yet.
fn deploy(&self, hk: &HookConfig) {
println!("Processing {}", hk.name);
println!("{:?}", hk.action.script );
}
}
/// HTTP handler: decodes the request body as a git push payload and runs
/// the hook configured for that repository, then always answers "OK.".
impl Handler for Daemon {
fn handle(&self, req: Request, res: Response<Fresh>) {
let mut s = String::new();
let mut myreq = req;
match myreq.read_to_string(&mut s) {
Ok(_) => {
// NOTE(review): unwrap() panics the handler on a malformed
// (untrusted) payload — consider responding with an error.
let decoded: GitHook = decode(s.as_slice()).unwrap();
let repo_name = decoded.repository.name;
println!("Repository {}", repo_name);
// First hook whose name matches the repository, if any.
match self.config.hooks.iter().filter(|&binding| binding.name == repo_name).next() {
Some(hk) => self.deploy(hk),
None => println!("No hook for {}", repo_name),
}
},
// Read errors are deliberately ignored; the client still gets "OK.".
_ => {}
}
let mut res = res.start().unwrap();
res.write_all(b"OK.").unwrap();
res.end().unwrap();
}
}
/// Entry point: loads `config.json`, decodes it into a `HookConfiguration`,
/// and serves the webhook daemon on 127.0.0.1:5000.
fn main() {
let mut json_config = String::new();
let config_location = &Path::new("config.json");
// Read the whole config file into `json_config`, aborting with a
// diagnostic on any I/O error.
// NOTE(review): `err.detail()` and `as_slice()` are pre-1.0 APIs; this
// file targets an old nightly toolchain (see the #![feature] flags).
match File::open(config_location) {
Err(err) => panic!("Error during config file read: {:?}. {} {}",
config_location, err.description(), err.detail().unwrap_or("".to_string())),
Ok(icf) => {
let mut config_file = icf;
config_file.read_to_string(&mut json_config).ok().unwrap()
},
};
// Decode the JSON config; abort on a malformed file.
let config: HookConfiguration = match decode(json_config.as_slice()) {
Err(err) => panic!("{}", err),
Ok(content) => content,
};
let d = Daemon{config: config};
let port = 5000;
println!("Starting up, listening on port {}", port);
// Blocks forever serving HTTP requests with `Daemon` as the handler.
Server::new(d).listen(IpAddr::new_v4(127, 0, 0, 1), port).unwrap();
}
extern crate byteorder;
extern crate camera_controllers;
extern crate docopt;
extern crate piston;
extern crate flate2;
extern crate fps_counter;
#[macro_use]
extern crate gfx;
extern crate gfx_device_gl;
extern crate gfx_voxel;
extern crate image;
extern crate libc;
extern crate memmap;
extern crate rustc_serialize;
extern crate sdl2;
extern crate sdl2_window;
extern crate shader_version;
extern crate time;
extern crate vecmath;
extern crate zip;
use gfx::traits::{Device, Stream, StreamFactory};
// Reexport modules from gfx_voxel while stuff is moving
// from Hematite to the library.
pub use gfx_voxel::{ array, cube };
use std::cell::RefCell;
use std::cmp::max;
use std::f32::consts::PI;
use std::f32::INFINITY;
use std::fs::File;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use array::*;
use docopt::Docopt;
use piston::event_loop::{ Events, EventLoop };
use flate2::read::GzDecoder;
use sdl2_window::Sdl2Window;
use shader::Renderer;
use vecmath::{ vec3_add, vec3_scale, vec3_normalized };
use piston::window::{ Size, Window, AdvancedWindow, OpenGLWindow,
WindowSettings };
pub mod minecraft;
pub mod chunk;
pub mod shader;
use minecraft::*;
use minecraft::biome::Biomes;
use minecraft::block_state::BlockStates;
// Docopt help text; docopt derives the CLI parser from this string.
static USAGE: &'static str = "
hematite, Minecraft made in Rust!
Usage:
hematite [options] <world>
Options:
-p, --path Fully qualified path for world folder.
--mcversion=<version> Minecraft version [default: 1.8.3].
";
// Command-line arguments decoded by docopt (arg_/flag_ naming convention).
#[derive(RustcDecodable)]
struct Args {
// Positional <world>: world name, or a full path when --path is given.
arg_world: String,
// True when -p/--path was passed.
flag_path: bool,
// Minecraft asset version to fetch (default 1.8.3).
flag_mcversion: String,
}
/// Hematite entry point: loads a Minecraft world (level.dat + one region
/// file around the player), builds chunk vertex buffers lazily, and runs
/// the render/update event loop with a first-person camera.
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|dopt| dopt.decode())
.unwrap_or_else(|e| e.exit());
// Automagically pull MC assets
minecraft::fetch_assets(&args.flag_mcversion);
// Automagically expand path if world is located at
// $MINECRAFT_ROOT/saves/<world_name>
let world = if args.flag_path {
PathBuf::from(&args.arg_world)
} else {
let mut mc_path = minecraft::vanilla_root_path();
mc_path.push("saves");
mc_path.push(args.arg_world);
mc_path
};
// Read and parse level.dat (gzip-compressed NBT).
let file_name = PathBuf::from(world.join("level.dat"));
let level_reader = GzDecoder::new(File::open(file_name).unwrap()).unwrap();
let level = minecraft::nbt::Nbt::from_reader(level_reader).unwrap();
println!("{:?}", level);
// Player position/rotation from the NBT tree; `Array::from_iter`,
// `.x()`, `.z()` and `.map` come from the gfx_voxel array helpers.
let player_pos: [f32; 3] = Array::from_iter(
level["Data"]["Player"]["Pos"]
.as_double_list().unwrap().iter().map(|&x| x as f32)
);
let player_chunk = [player_pos.x(), player_pos.z()]
.map(|x| (x / 16.0).floor() as i32);
let player_rot = level["Data"]["Player"]["Rotation"]
.as_float_list().unwrap();
let player_yaw = player_rot[0];
let player_pitch = player_rot[1];
// Region coordinates: 32 chunks per region (>> 5).
let regions = player_chunk.map(|x| x >> 5);
let region_file = world.join(
format!("region/r.{}.{}.mca", regions[0], regions[1])
);
let region = minecraft::region::Region::open(&region_file).unwrap();
let loading_title = format!(
"Hematite loading... - {}",
world.file_name().unwrap().to_str().unwrap()
);
let mut window: Sdl2Window = WindowSettings::new(
loading_title,
Size { width: 854, height: 480 })
.fullscreen(false)
.exit_on_esc(true)
.samples(0)
.vsync(false)
.build()
.unwrap();
let (mut device, mut factory) = gfx_device_gl::create(|s|
window.get_proc_address(s) as *const _
);
let Size { width: w, height: h } = window.size();
let frame = factory.make_fake_output(w as u16, h as u16);
let stream: gfx::OwnedStream<gfx_device_gl::Device, _> = factory.create_stream(frame);
let assets = Path::new("./assets");
// Load biomes.
let biomes = Biomes::load(&assets);
// Load block state definitions and models.
let block_states = BlockStates::load(&assets, &mut factory);
let mut renderer = Renderer::new(factory, stream, block_states.texture.handle());
let mut chunk_manager = chunk::ChunkManager::new();
println!("Started loading chunks...");
// Load a 16x16 chunk window centered (clamped) on the player's chunk.
let c_bases = player_chunk.map(|x| max(0, (x & 0x1f) - 8) as u8);
for cz in c_bases[1]..c_bases[1] + 16 {
for cx in c_bases[0]..c_bases[0] + 16 {
match region.get_chunk_column(cx, cz) {
Some(column) => {
// Translate region-local chunk coords to world chunk coords.
let (cx, cz) = (
cx as i32 + regions[0] * 32,
cz as i32 + regions[1] * 32
);
chunk_manager.add_chunk_column(cx, cz, column)
}
None => {}
}
}
}
println!("Finished loading chunks.");
let projection_mat = camera_controllers::CameraPerspective {
fov: 70.0,
near_clip: 0.1,
far_clip: 1000.0,
aspect_ratio: {
let Size { width: w, height: h } = window.size();
(w as f32) / (h as f32)
}
}.projection();
renderer.set_projection(projection_mat);
let mut first_person_settings = camera_controllers::FirstPersonSettings::keyboard_wasd();
first_person_settings.speed_horizontal = 8.0;
first_person_settings.speed_vertical = 4.0;
let mut first_person = camera_controllers::FirstPerson::new(
player_pos,
first_person_settings
);
// Convert Minecraft's degree-based rotation to radians.
first_person.yaw = PI - player_yaw / 180.0 * PI;
first_person.pitch = player_pitch / 180.0 * PI;
let mut fps_counter = fps_counter::FPSCounter::new();
// Chunks whose vertex buffers still need to be built (filled during
// Update events, closest-to-player first).
let mut pending_chunks = vec![];
chunk_manager.each_chunk_and_neighbors(
|coords, buffer, chunks, column_biomes| {
pending_chunks.push((coords, buffer, chunks, column_biomes));
}
);
let mut capture_cursor = false;
println!("Press C to capture mouse");
let mut staging_buffer = vec![];
let ref window = Rc::new(RefCell::new(window));
for e in window.clone().events()
.ups(120)
.max_fps(10_000)
{
use piston::input::Button::Keyboard;
use piston::input::Input::{ Move, Press };
use piston::input::keyboard::Key;
use piston::input::Motion::MouseRelative;
use piston::input::Event;
match e {
Event::Render(_) => {
// Apply the same y/z camera offset vanilla minecraft has.
let mut camera = first_person.camera(0.0);
camera.position[1] += 1.62;
let mut xz_forward = camera.forward;
xz_forward[1] = 0.0;
xz_forward = vec3_normalized(xz_forward);
// NOTE(review): scaling by 0.0 adds a zero vector, i.e. a no-op —
// intentional? (cf. the reverted "Camera shifting" change).
camera.position = vec3_add(
camera.position,
vec3_scale(xz_forward, 0.0)
);
let view_mat = camera.orthogonal();
renderer.set_view(view_mat);
renderer.clear();
let mut num_chunks: usize = 0;
let mut num_sorted_chunks: usize = 0;
let mut num_total_chunks: usize = 0;
let start_time = time::precise_time_ns();
// Frustum-cull each chunk by projecting its 8 corners to clip
// space and testing the resulting screen-space bounding box.
chunk_manager.each_chunk(|cx, cy, cz, _, buffer| {
match buffer.borrow_mut().as_mut() {
Some(buffer) => {
num_total_chunks += 1;
let inf = INFINITY;
let mut bb_min = [inf, inf, inf];
let mut bb_max = [-inf, -inf, -inf];
let xyz = [cx, cy, cz].map(|x| x as f32 * 16.0);
for &dx in [0.0, 16.0].iter() {
for &dy in [0.0, 16.0].iter() {
for &dz in [0.0, 16.0].iter() {
use vecmath::col_mat4_transform;
let v = vec3_add(xyz, [dx, dy, dz]);
let xyzw = col_mat4_transform(view_mat, [v[0], v[1], v[2], 1.0]);
let v = col_mat4_transform(projection_mat, xyzw);
let xyz = vec3_scale([v[0], v[1], v[2]], 1.0 / v[3]);
bb_min = Array::from_fn(|i| bb_min[i].min(xyz[i]));
bb_max = Array::from_fn(|i| bb_max[i].max(xyz[i]));
}
}
}
// A chunk is culled on an axis when its whole box lies on
// one side of the frustum (|coord| >= 1 in NDC).
let cull_bits: [bool; 3] = Array::from_fn(|i| {
let (min, max) = (bb_min[i], bb_max[i]);
min.signum() == max.signum()
&& min.abs().min(max.abs()) >= 1.0
});
if !cull_bits.iter().any(|&cull| cull) {
renderer.render(buffer);
num_chunks += 1;
if bb_min[0] < 0.0 && bb_max[0] > 0.0
|| bb_min[1] < 0.0 && bb_max[1] > 0.0 {
num_sorted_chunks += 1;
}
}
}
None => {}
}
});
let end_time = time::precise_time_ns();
renderer.stream.flush(&mut device);
let frame_end_time = time::precise_time_ns();
let fps = fps_counter.tick();
let title = format!(
"Hematite sort={} render={} total={} in {:.2}ms+{:.2}ms @ {}FPS - {}",
num_sorted_chunks,
num_chunks,
num_total_chunks,
(end_time - start_time) as f64 / 1e6,
(frame_end_time - end_time) as f64 / 1e6,
fps, world.file_name().unwrap().to_str().unwrap()
);
window.borrow_mut().set_title(title);
}
Event::AfterRender(_) => {
device.cleanup();
}
Event::Update(_) => {
use std::i32;
// HACK(eddyb) find the closest chunk to the player.
// The pending vector should be sorted instead.
let pp = first_person.position.map(|x| (x / 16.0).floor() as i32);
let closest = pending_chunks.iter().enumerate().fold(
(None, i32::max_value()),
|(best_i, best_dist), (i, &(cc, _, _, _))| {
let xyz = [cc[0] - pp[0], cc[1] - pp[1], cc[2] - pp[2]]
.map(|x| x * x);
let dist = xyz[0] + xyz[1] + xyz[2];
if dist < best_dist {
(Some(i), dist)
} else {
(best_i, best_dist)
}
}
).0;
let pending = closest.and_then(|i| {
// Vec swap_remove doesn't return Option anymore
match pending_chunks.len() {
0 => None,
_ => Some(pending_chunks.swap_remove(i))
}
});
// Build at most one chunk's vertex buffer per update tick.
match pending {
Some((coords, buffer, chunks, column_biomes)) => {
minecraft::block_state::fill_buffer(
&block_states, &biomes, &mut staging_buffer,
coords, chunks, column_biomes
);
*buffer.borrow_mut() = Some(
renderer.create_buffer(&staging_buffer[..])
);
staging_buffer.clear();
if pending_chunks.is_empty() {
println!("Finished filling chunk vertex buffers.");
}
}
None => {}
}
}
Event::Input(Press(Keyboard(Key::C))) => {
println!("Turned cursor capture {}",
if capture_cursor { "off" } else { "on" });
capture_cursor = !capture_cursor;
window.borrow_mut().set_capture_cursor(capture_cursor);
}
Event::Input(Move(MouseRelative(_, _))) => {
if !capture_cursor {
// Don't send the mouse event to the FPS controller.
continue;
}
}
_ => {}
}
first_person.event(&e);
}
}
Revert "Fixed Camera shifting Bug"
extern crate byteorder;
extern crate camera_controllers;
extern crate docopt;
extern crate piston;
extern crate flate2;
extern crate fps_counter;
#[macro_use]
extern crate gfx;
extern crate gfx_device_gl;
extern crate gfx_voxel;
extern crate image;
extern crate libc;
extern crate memmap;
extern crate rustc_serialize;
extern crate sdl2;
extern crate sdl2_window;
extern crate shader_version;
extern crate time;
extern crate vecmath;
extern crate zip;
use gfx::traits::{Device, Stream, StreamFactory};
// Reexport modules from gfx_voxel while stuff is moving
// from Hematite to the library.
pub use gfx_voxel::{ array, cube };
use std::cell::RefCell;
use std::cmp::max;
use std::f32::consts::PI;
use std::f32::INFINITY;
use std::fs::File;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use array::*;
use docopt::Docopt;
use piston::event_loop::{ Events, EventLoop };
use flate2::read::GzDecoder;
use sdl2_window::Sdl2Window;
use shader::Renderer;
use vecmath::{ vec3_add, vec3_scale, vec3_normalized };
use piston::window::{ Size, Window, AdvancedWindow, OpenGLWindow,
WindowSettings };
pub mod minecraft;
pub mod chunk;
pub mod shader;
use minecraft::*;
use minecraft::biome::Biomes;
use minecraft::block_state::BlockStates;
// Docopt help text; docopt derives the CLI parser from this string.
static USAGE: &'static str = "
hematite, Minecraft made in Rust!
Usage:
hematite [options] <world>
Options:
-p, --path Fully qualified path for world folder.
--mcversion=<version> Minecraft version [default: 1.8.3].
";
// Command-line arguments decoded by docopt (arg_/flag_ naming convention).
#[derive(RustcDecodable)]
struct Args {
// Positional <world>: world name, or a full path when --path is given.
arg_world: String,
// True when -p/--path was passed.
flag_path: bool,
// Minecraft asset version to fetch (default 1.8.3).
flag_mcversion: String,
}
/// Hematite entry point: loads a Minecraft world (level.dat + one region
/// file around the player), builds chunk vertex buffers lazily, and runs
/// the render/update event loop with a first-person camera.
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|dopt| dopt.decode())
.unwrap_or_else(|e| e.exit());
// Automagically pull MC assets
minecraft::fetch_assets(&args.flag_mcversion);
// Automagically expand path if world is located at
// $MINECRAFT_ROOT/saves/<world_name>
let world = if args.flag_path {
PathBuf::from(&args.arg_world)
} else {
let mut mc_path = minecraft::vanilla_root_path();
mc_path.push("saves");
mc_path.push(args.arg_world);
mc_path
};
// Read and parse level.dat (gzip-compressed NBT).
let file_name = PathBuf::from(world.join("level.dat"));
let level_reader = GzDecoder::new(File::open(file_name).unwrap()).unwrap();
let level = minecraft::nbt::Nbt::from_reader(level_reader).unwrap();
println!("{:?}", level);
// Player position/rotation from the NBT tree; `Array::from_iter`,
// `.x()`, `.z()` and `.map` come from the gfx_voxel array helpers.
let player_pos: [f32; 3] = Array::from_iter(
level["Data"]["Player"]["Pos"]
.as_double_list().unwrap().iter().map(|&x| x as f32)
);
let player_chunk = [player_pos.x(), player_pos.z()]
.map(|x| (x / 16.0).floor() as i32);
let player_rot = level["Data"]["Player"]["Rotation"]
.as_float_list().unwrap();
let player_yaw = player_rot[0];
let player_pitch = player_rot[1];
// Region coordinates: 32 chunks per region (>> 5).
let regions = player_chunk.map(|x| x >> 5);
let region_file = world.join(
format!("region/r.{}.{}.mca", regions[0], regions[1])
);
let region = minecraft::region::Region::open(&region_file).unwrap();
let loading_title = format!(
"Hematite loading... - {}",
world.file_name().unwrap().to_str().unwrap()
);
let mut window: Sdl2Window = WindowSettings::new(
loading_title,
Size { width: 854, height: 480 })
.fullscreen(false)
.exit_on_esc(true)
.samples(0)
.vsync(false)
.build()
.unwrap();
let (mut device, mut factory) = gfx_device_gl::create(|s|
window.get_proc_address(s) as *const _
);
let Size { width: w, height: h } = window.size();
let frame = factory.make_fake_output(w as u16, h as u16);
let stream: gfx::OwnedStream<gfx_device_gl::Device, _> = factory.create_stream(frame);
let assets = Path::new("./assets");
// Load biomes.
let biomes = Biomes::load(&assets);
// Load block state definitions and models.
let block_states = BlockStates::load(&assets, &mut factory);
let mut renderer = Renderer::new(factory, stream, block_states.texture.handle());
let mut chunk_manager = chunk::ChunkManager::new();
println!("Started loading chunks...");
// Load a 16x16 chunk window centered (clamped) on the player's chunk.
let c_bases = player_chunk.map(|x| max(0, (x & 0x1f) - 8) as u8);
for cz in c_bases[1]..c_bases[1] + 16 {
for cx in c_bases[0]..c_bases[0] + 16 {
match region.get_chunk_column(cx, cz) {
Some(column) => {
// Translate region-local chunk coords to world chunk coords.
let (cx, cz) = (
cx as i32 + regions[0] * 32,
cz as i32 + regions[1] * 32
);
chunk_manager.add_chunk_column(cx, cz, column)
}
None => {}
}
}
}
println!("Finished loading chunks.");
let projection_mat = camera_controllers::CameraPerspective {
fov: 70.0,
near_clip: 0.1,
far_clip: 1000.0,
aspect_ratio: {
let Size { width: w, height: h } = window.size();
(w as f32) / (h as f32)
}
}.projection();
renderer.set_projection(projection_mat);
let mut first_person_settings = camera_controllers::FirstPersonSettings::keyboard_wasd();
first_person_settings.speed_horizontal = 8.0;
first_person_settings.speed_vertical = 4.0;
let mut first_person = camera_controllers::FirstPerson::new(
player_pos,
first_person_settings
);
// Convert Minecraft's degree-based rotation to radians.
first_person.yaw = PI - player_yaw / 180.0 * PI;
first_person.pitch = player_pitch / 180.0 * PI;
let mut fps_counter = fps_counter::FPSCounter::new();
// Chunks whose vertex buffers still need to be built (filled during
// Update events, closest-to-player first).
let mut pending_chunks = vec![];
chunk_manager.each_chunk_and_neighbors(
|coords, buffer, chunks, column_biomes| {
pending_chunks.push((coords, buffer, chunks, column_biomes));
}
);
let mut capture_cursor = false;
println!("Press C to capture mouse");
let mut staging_buffer = vec![];
let ref window = Rc::new(RefCell::new(window));
for e in window.clone().events()
.ups(120)
.max_fps(10_000)
{
use piston::input::Button::Keyboard;
use piston::input::Input::{ Move, Press };
use piston::input::keyboard::Key;
use piston::input::Motion::MouseRelative;
use piston::input::Event;
match e {
Event::Render(_) => {
// Apply the same y/z camera offset vanilla minecraft has.
let mut camera = first_person.camera(0.0);
camera.position[1] += 1.62;
let mut xz_forward = camera.forward;
xz_forward[1] = 0.0;
xz_forward = vec3_normalized(xz_forward);
// Shift the camera 0.1 units along the horizontal view direction.
camera.position = vec3_add(
camera.position,
vec3_scale(xz_forward, 0.1)
);
let view_mat = camera.orthogonal();
renderer.set_view(view_mat);
renderer.clear();
let mut num_chunks: usize = 0;
let mut num_sorted_chunks: usize = 0;
let mut num_total_chunks: usize = 0;
let start_time = time::precise_time_ns();
// Frustum-cull each chunk by projecting its 8 corners to clip
// space and testing the resulting screen-space bounding box.
chunk_manager.each_chunk(|cx, cy, cz, _, buffer| {
match buffer.borrow_mut().as_mut() {
Some(buffer) => {
num_total_chunks += 1;
let inf = INFINITY;
let mut bb_min = [inf, inf, inf];
let mut bb_max = [-inf, -inf, -inf];
let xyz = [cx, cy, cz].map(|x| x as f32 * 16.0);
for &dx in [0.0, 16.0].iter() {
for &dy in [0.0, 16.0].iter() {
for &dz in [0.0, 16.0].iter() {
use vecmath::col_mat4_transform;
let v = vec3_add(xyz, [dx, dy, dz]);
let xyzw = col_mat4_transform(view_mat, [v[0], v[1], v[2], 1.0]);
let v = col_mat4_transform(projection_mat, xyzw);
let xyz = vec3_scale([v[0], v[1], v[2]], 1.0 / v[3]);
bb_min = Array::from_fn(|i| bb_min[i].min(xyz[i]));
bb_max = Array::from_fn(|i| bb_max[i].max(xyz[i]));
}
}
}
// A chunk is culled on an axis when its whole box lies on
// one side of the frustum (|coord| >= 1 in NDC).
let cull_bits: [bool; 3] = Array::from_fn(|i| {
let (min, max) = (bb_min[i], bb_max[i]);
min.signum() == max.signum()
&& min.abs().min(max.abs()) >= 1.0
});
if !cull_bits.iter().any(|&cull| cull) {
renderer.render(buffer);
num_chunks += 1;
if bb_min[0] < 0.0 && bb_max[0] > 0.0
|| bb_min[1] < 0.0 && bb_max[1] > 0.0 {
num_sorted_chunks += 1;
}
}
}
None => {}
}
});
let end_time = time::precise_time_ns();
renderer.stream.flush(&mut device);
let frame_end_time = time::precise_time_ns();
let fps = fps_counter.tick();
let title = format!(
"Hematite sort={} render={} total={} in {:.2}ms+{:.2}ms @ {}FPS - {}",
num_sorted_chunks,
num_chunks,
num_total_chunks,
(end_time - start_time) as f64 / 1e6,
(frame_end_time - end_time) as f64 / 1e6,
fps, world.file_name().unwrap().to_str().unwrap()
);
window.borrow_mut().set_title(title);
}
Event::AfterRender(_) => {
device.cleanup();
}
Event::Update(_) => {
use std::i32;
// HACK(eddyb) find the closest chunk to the player.
// The pending vector should be sorted instead.
let pp = first_person.position.map(|x| (x / 16.0).floor() as i32);
let closest = pending_chunks.iter().enumerate().fold(
(None, i32::max_value()),
|(best_i, best_dist), (i, &(cc, _, _, _))| {
let xyz = [cc[0] - pp[0], cc[1] - pp[1], cc[2] - pp[2]]
.map(|x| x * x);
let dist = xyz[0] + xyz[1] + xyz[2];
if dist < best_dist {
(Some(i), dist)
} else {
(best_i, best_dist)
}
}
).0;
let pending = closest.and_then(|i| {
// Vec swap_remove doesn't return Option anymore
match pending_chunks.len() {
0 => None,
_ => Some(pending_chunks.swap_remove(i))
}
});
// Build at most one chunk's vertex buffer per update tick.
match pending {
Some((coords, buffer, chunks, column_biomes)) => {
minecraft::block_state::fill_buffer(
&block_states, &biomes, &mut staging_buffer,
coords, chunks, column_biomes
);
*buffer.borrow_mut() = Some(
renderer.create_buffer(&staging_buffer[..])
);
staging_buffer.clear();
if pending_chunks.is_empty() {
println!("Finished filling chunk vertex buffers.");
}
}
None => {}
}
}
Event::Input(Press(Keyboard(Key::C))) => {
println!("Turned cursor capture {}",
if capture_cursor { "off" } else { "on" });
capture_cursor = !capture_cursor;
window.borrow_mut().set_capture_cursor(capture_cursor);
}
Event::Input(Move(MouseRelative(_, _))) => {
if !capture_cursor {
// Don't send the mouse event to the FPS controller.
continue;
}
}
_ => {}
}
first_person.event(&e);
}
}
|
extern crate byteorder;
extern crate flate2;
use std::io;
use std::io::prelude::*;
use std::fs::File;
use std::mem;
use byteorder::{LittleEndian, WriteBytesExt};
use flate2::Compression;
use flate2::Flush;
use flate2::Compress;
use flate2::Status;
// In-memory description of an AoE2-style scenario (.scx) file header.
// Lifetimes borrow all strings from the caller.
struct ScenHeader<'a> {
version: &'a[u8; 4],
header_type: i32,
timestamp: i32,
instructions: &'a str,
players: Vec<Player<'a>>,
filename: &'a str,
messages: ScenMessages<'a>,
image: ScenImage<'a>,
map_size: u32
}
// Per-player setup: activity/human flags, civilization id, starting resources.
struct Player<'a> {
name: &'a str,
active: u32,
human: u32,
civilization: u32,
resources: BaseResources
}
// The six scenario message texts shown in the in-game UI.
struct ScenMessages<'a> {
objectives: &'a str,
hints: &'a str,
scouts: &'a str,
history: &'a str,
victory: &'a str,
loss: &'a str,
}
// Metadata for an (optionally embedded) scenario image.
struct ScenImage<'a> {
filename: &'a str,
included: bool,
width: i32,
height: i32,
include: i16
}
// Starting resource stockpiles for one player.
struct BaseResources {
gold: u32,
wood: u32,
food: u32,
stone: u32,
ore: u32,
}
// One map tile: terrain id and elevation.
struct MapTile {
terrain: u8,
elevation: u8,
}
impl<'a> ScenHeader<'a> {
    /// Serializes the scenario into the on-disk .scx layout: a plain
    /// header (version, lengths, instructions, player count) followed by
    /// a deflate-compressed body holding players, messages, diplomacy,
    /// map tiles and unit sections.
    ///
    /// Returns the complete file contents, or the first I/O error hit
    /// while writing into the in-memory buffers. Fixes vs. the previous
    /// revision: the final `write_all` of the compressed body had its
    /// `Result` silently dropped (now propagated with `try!`), and
    /// unused/non-snake-case loop variables were renamed to `_`.
    fn to_bytes(&self) -> Result<Vec<u8>, io::Error> {
        let mut buf = vec![];
        let instructions_length = self.instructions.len() as i32;
        // 20 bytes of fixed header fields + the instructions text.
        let header_length = 20 + instructions_length;
        try!(buf.write(self.version));
        try!(buf.write_i32::<LittleEndian>(header_length));
        try!(buf.write_i32::<LittleEndian>(self.header_type));
        try!(buf.write_i32::<LittleEndian>(self.timestamp));
        try!(buf.write_i32::<LittleEndian>(instructions_length));
        try!(buf.write(self.instructions.as_bytes()));
        try!(buf.write_i32::<LittleEndian>(0));
        try!(buf.write_i32::<LittleEndian>(self.players.len() as i32));
        // Everything below goes into the compressed section.
        let mut zlib_buf = vec![];
        try!(zlib_buf.write_u32::<LittleEndian>(19246));
        try!(zlib_buf.write_f32::<LittleEndian>(1.22 /* UserPatch */));
        // Player names: 16 fixed 256-byte zero-padded slots.
        for i in 0..16 {
            if self.players.len() > i {
                let name = self.players[i].name;
                try!(zlib_buf.write_all(name.as_bytes()));
                try!(zlib_buf.write_all(&vec![0; 256 - name.len()]));
            } else {
                try!(zlib_buf.write_all(&vec![0; 256]));
            }
        }
        // Player name IDs in the string table (0 = none); both the used
        // and unused branches wrote 0, so the branch was collapsed.
        for _ in 0..16 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        // Active/human/civilization flags; trailing constant 4 is unknown.
        for i in 0..16 {
            if self.players.len() <= i {
                // Unused slot: inactive, AI-controlled, civilization 0.
                try!(zlib_buf.write_u32::<LittleEndian>(0));
                try!(zlib_buf.write_u32::<LittleEndian>(0));
                try!(zlib_buf.write_u32::<LittleEndian>(0));
                try!(zlib_buf.write_u32::<LittleEndian>(4));
                continue;
            }
            try!(zlib_buf.write_u32::<LittleEndian>(self.players[i].active));
            try!(zlib_buf.write_u32::<LittleEndian>(self.players[i].human));
            try!(zlib_buf.write_u32::<LittleEndian>(self.players[i].civilization));
            try!(zlib_buf.write_u32::<LittleEndian>(4));
        }
        try!(zlib_buf.write_u32::<LittleEndian>(1));
        try!(zlib_buf.write_all(&[0]));
        try!(zlib_buf.write_f32::<LittleEndian>(-1.0));
        // Original scenario filename, length-prefixed.
        try!(zlib_buf.write_u16::<LittleEndian>(self.filename.len() as u16));
        try!(zlib_buf.write_all(self.filename.as_bytes()));
        try!(zlib_buf.write_all(
            &try!(self.messages.to_bytes())
        ));
        // cinematics (none)
        try!(zlib_buf.write_u16::<LittleEndian>(0));
        try!(zlib_buf.write_u16::<LittleEndian>(0));
        try!(zlib_buf.write_u16::<LittleEndian>(0));
        try!(zlib_buf.write_all(
            &try!(self.image.to_bytes())
        ));
        for _ in 0..16 {
            // two 0-length strings
            try!(zlib_buf.write_u16::<LittleEndian>(0));
            try!(zlib_buf.write_u16::<LittleEndian>(0));
        }
        // Player AI names
        for _ in 0..8 {
            try!(zlib_buf.write_u16::<LittleEndian>(0));
        }
        // Unused players
        for _ in 0..8 {
            let name = "RandomGame";
            try!(zlib_buf.write_u16::<LittleEndian>(name.len() as u16));
            try!(zlib_buf.write(&name.as_bytes()));
        }
        // AI source code
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
            try!(zlib_buf.write_i32::<LittleEndian>(0));
            // 0-length AI source code string
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        // Source code for unused players
        for _ in 0..(3 * 8) {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        // AI type
        for _ in 0..8 {
            try!(zlib_buf.write(&[0]));
        }
        // Unused players
        try!(zlib_buf.write(&[0; 8]));
        // Separator
        try!(zlib_buf.write_u32::<LittleEndian>(0xFFFFFF9D));
        // Resources
        for i in 0..16 {
            if self.players.len() > i {
                let p = &self.players[i];
                try!(zlib_buf.write_u32::<LittleEndian>(p.resources.food));
                try!(zlib_buf.write_u32::<LittleEndian>(p.resources.wood));
                try!(zlib_buf.write_u32::<LittleEndian>(p.resources.gold));
                try!(zlib_buf.write_u32::<LittleEndian>(p.resources.stone));
                try!(zlib_buf.write_u32::<LittleEndian>(p.resources.ore));
                try!(zlib_buf.write_u32::<LittleEndian>(0 /* ??? */));
            } else {
                // Unused players
                try!(zlib_buf.write(&vec![0; 6 * mem::size_of::<u32>()]));
            }
        }
        // Separator
        try!(zlib_buf.write_u32::<LittleEndian>(0xFFFFFF9D));
        // Scenario goals: 10 * int32
        // Conquest; unknown; Relics; unknown; Exploration; unknown;
        // All; Mode; Score; Time Limit
        let scenario_goals_size = 10 * mem::size_of::<i32>();
        try!(zlib_buf.write(&vec![0; scenario_goals_size]));
        // Diplomacy: 16x16 from/to stance matrix, all neutral (0).
        for _ in 0..16 {
            for _ in 0..16 {
                try!(zlib_buf.write_i32::<LittleEndian>(0));
            }
        }
        // ???
        try!(zlib_buf.write(&[0; 11520]));
        // Separator
        try!(zlib_buf.write_u32::<LittleEndian>(0xFFFFFF9D));
        // Allied victory
        for _ in 0..16 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        // Technology count??
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Technology something??
        for _ in 0..(16 * 30) {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Unit count??
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Unit something??
        for _ in 0..(16 * 30) {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Building count??
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Buildings something??
        for _ in 0..(16 * 20) {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // ???
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        // All Techs
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        // Starting age
        for _ in 0..8 {
            try!(zlib_buf.write_u32::<LittleEndian>(0));
        }
        // Gaia
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        // Unused
        for _ in 1..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Separator
        try!(zlib_buf.write_u32::<LittleEndian>(0xFFFFFF9D));
        // Camera
        try!(zlib_buf.write_i32::<LittleEndian>(0 /* x */));
        try!(zlib_buf.write_i32::<LittleEndian>(0 /* y */));
        // AI type
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        // Map tiles: square map, striped terrain pattern, elevation 1.
        try!(zlib_buf.write_u32::<LittleEndian>(self.map_size));
        try!(zlib_buf.write_u32::<LittleEndian>(self.map_size));
        for x in 0..self.map_size {
            for y in 0..self.map_size {
                try!(zlib_buf.write(
                    &try!(MapTile::new(((x + y) % 40) as u8, 1).to_bytes())
                ));
            }
        }
        // Units sections
        try!(zlib_buf.write_u32::<LittleEndian>(9));
        // Resources again??
        for i in 0..8 {
            if self.players.len() > i {
                let p = &self.players[i];
                try!(zlib_buf.write_f32::<LittleEndian>(p.resources.food as f32));
                try!(zlib_buf.write_f32::<LittleEndian>(p.resources.wood as f32));
                try!(zlib_buf.write_f32::<LittleEndian>(p.resources.gold as f32));
                try!(zlib_buf.write_f32::<LittleEndian>(p.resources.stone as f32));
                try!(zlib_buf.write_f32::<LittleEndian>(p.resources.ore as f32));
                try!(zlib_buf.write_f32::<LittleEndian>(0.0 /* ??? */));
                try!(zlib_buf.write_f32::<LittleEndian>(0.0 /* population */));
            } else {
                // Unused players
                try!(zlib_buf.write(&vec![0; 7 * mem::size_of::<f32>()]));
            }
        }
        for _ in 0..9 {
            // Zero units (unit serialization not implemented yet):
            // for unit in players[i].units:
            //     putFloat(unit.x)
            //     putFloat(unit.y)
            //     putFloat(unit.unknown1)
            //     putUInt32(unit.id)
            //     putUInt16(unit.type)
            //     putInt8(unit.unknown2)
            //     putFloat(unit.angle)
            //     putUInt16(unit.frame)
            //     putInt32(unit.inId)
            try!(zlib_buf.write_u32::<LittleEndian>(0));
        }
        // Playable players
        try!(zlib_buf.write_u32::<LittleEndian>(9));
        for player in 1..9 {
            let name = "Promisory";
            try!(zlib_buf.write_i16::<LittleEndian>(name.len() as i16));
            try!(zlib_buf.write(&name.as_bytes()));
            try!(zlib_buf.write_f32::<LittleEndian>(101.0 /* camera x */));
            try!(zlib_buf.write_f32::<LittleEndian>(101.0 /* camera y */));
            try!(zlib_buf.write_i16::<LittleEndian>(101 /* ?? */));
            try!(zlib_buf.write_i16::<LittleEndian>(101 /* ?? */));
            try!(zlib_buf.write(&[0])); // allied victory (again?)
            // Diplomacy again
            try!(zlib_buf.write_u16::<LittleEndian>(9));
            try!(zlib_buf.write(&[0; 9]));
            // Diplo to gaia?? from gaia?
            for _ in 0..9 {
                try!(zlib_buf.write_i32::<LittleEndian>(0));
            }
            // Player colour
            try!(zlib_buf.write_u32::<LittleEndian>(player));
            // ???
            try!(zlib_buf.write_f32::<LittleEndian>(2.0));
            try!(zlib_buf.write_u16::<LittleEndian>(0));
            // ???
            try!(zlib_buf.write(&[0; 8 + 7]));
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        try!(zlib_buf.write_u32::<LittleEndian>(0x9999999A));
        try!(zlib_buf.write_u32::<LittleEndian>(0x3FF99999));
        try!(zlib_buf.write(&[0]));
        // Triggers
        try!(zlib_buf.write_i32::<LittleEndian>(0));
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        // Deflate the body (raw stream, no zlib header) and append it.
        let mut compressed_buf = vec![];
        compressed_buf.reserve(zlib_buf.len());
        let mut compressor = Compress::new(Compression::Default, false);
        match compressor.compress_vec(&zlib_buf, &mut compressed_buf, Flush::Sync) {
            Status::Ok => println!("hoi"),
            Status::BufError => panic!("BufError"),
            Status::StreamEnd => panic!("StreamEnd"),
        };
        println!("compressed: {} -> {}", zlib_buf.len(), compressed_buf.len());
        // Propagate write errors like every other write in this function
        // (the Result was previously dropped on the floor).
        try!(buf.write_all(&compressed_buf));
        Ok(buf)
    }
}
/// Constructors for `Player`.
impl<'a> Player<'a> {
    /// Returns an inactive placeholder player: empty name, AI-controlled,
    /// civilization 0, and a zeroed resource stockpile.
    fn empty<'p>() -> Player<'p> {
        let no_resources = BaseResources {
            gold: 0,
            wood: 0,
            food: 0,
            stone: 0,
            ore: 0,
        };
        Player {
            name: "",
            active: 0,
            human: 0,
            civilization: 0,
            resources: no_resources,
        }
    }
}
impl<'a> ScenMessages<'a> {
fn to_bytes(&self) -> Result<Vec<u8>, io::Error> {
let mut buf = vec![];
for _ in 0..6 {
try!(buf.write_i32::<LittleEndian>(0));
}
try!(buf.write_u16::<LittleEndian>(self.objectives.len() as u16));
try!(buf.write(&self.objectives.as_bytes()));
try!(buf.write_u16::<LittleEndian>(self.hints.len() as u16));
try!(buf.write(&self.hints.as_bytes()));
try!(buf.write_u16::<LittleEndian>(self.victory.len() as u16));
try!(buf.write(&self.victory.as_bytes()));
try!(buf.write_u16::<LittleEndian>(self.loss.len() as u16));
try!(buf.write(&self.loss.as_bytes()));
try!(buf.write_u16::<LittleEndian>(self.history.len() as u16));
try!(buf.write(&self.history.as_bytes()));
try!(buf.write_u16::<LittleEndian>(self.scouts.len() as u16));
try!(buf.write(&self.scouts.as_bytes()));
Ok(buf)
}
}
/// Serialization of the scenario image header.
impl <'a> ScenImage<'a> {
    /// Encodes the length-prefixed image filename, the inclusion flag as
    /// an i32 (1/0), the dimensions, and the `include` marker.
    fn to_bytes(&self) -> Result<Vec<u8>, io::Error> {
        let mut buf = vec![];
        try!(buf.write_u16::<LittleEndian>(self.filename.len() as u16));
        try!(buf.write(&self.filename.as_bytes()));
        let included_flag = if self.included { 1 } else { 0 };
        try!(buf.write_i32::<LittleEndian>(included_flag));
        try!(buf.write_i32::<LittleEndian>(self.width));
        try!(buf.write_i32::<LittleEndian>(self.height));
        try!(buf.write_i16::<LittleEndian>(self.include));
        Ok(buf)
    }
}
impl MapTile {
    /// Create a tile with the given terrain id and elevation.
    fn new(terrain: u8, elevation: u8) -> MapTile {
        MapTile {
            terrain: terrain,
            elevation: elevation,
        }
    }

    /// Encode the tile in its 3-byte on-disk form:
    /// terrain, elevation, and a constant zero padding byte.
    fn to_bytes(&self) -> Result<[u8; 3], io::Error> {
        let encoded = [self.terrain, self.elevation, 0];
        Ok(encoded)
    }
}
/// Build a demo two-player scenario header and write it to `filename`.
fn test(filename: &str) -> Result<(), io::Error> {
    let mut buf = try!(File::create(filename));
    let header = ScenHeader {
        version: b"1.21",
        header_type: 2,
        timestamp: 1451422223,
        instructions: "Build a fancy-pants base!",
        filename: filename,
        players: vec![
            Player {
                name: "Hello World, from Rust!",
                active: 1,
                human: 2,
                civilization: 1,
                resources: BaseResources {
                    wood: 100,
                    food: 200,
                    gold: 300,
                    stone: 400,
                    ore: 0,
                },
            },
            Player {
                name: "Filthy Opponent",
                active: 1,
                human: 0,
                civilization: 18,
                resources: BaseResources {
                    wood: 200,
                    food: 200,
                    gold: 100,
                    stone: 200,
                    ore: 0,
                },
            },
        ],
        messages: ScenMessages {
            objectives: "",
            hints: "",
            scouts: "",
            history: "",
            victory: "",
            loss: "",
        },
        image: ScenImage {
            filename: "",
            included: false,
            width: 0,
            height: 0,
            include: 1,
        },
        map_size: 220,
    };
    // write_all already returns io::Result<()>; the former `.map(|_| ())`
    // was a redundant identity mapping.
    buf.write_all(&try!(header.to_bytes()))
}
/// Entry point: write the demo scenario, aborting loudly on I/O failure.
fn main() {
    if let Err(e) = test("Test Scenario.scx") {
        panic!("oops {}", e)
    }
}
fix warnings
extern crate byteorder;
extern crate flate2;
use std::io;
use std::io::prelude::*;
use std::fs::File;
use std::mem;
use byteorder::{LittleEndian, WriteBytesExt};
use flate2::Compression;
use flate2::Flush;
use flate2::Compress;
use flate2::Status;
// Top-level description of a scenario (.scx) file; serialized by to_bytes().
struct ScenHeader<'a> {
    version: &'a[u8; 4],     // 4-byte ASCII version tag, e.g. b"1.21"
    header_type: i32,
    timestamp: i32,          // creation time, Unix seconds
    instructions: &'a str,
    players: Vec<Player<'a>>,
    filename: &'a str,
    messages: ScenMessages<'a>,
    image: ScenImage<'a>,
    map_size: u32            // map is map_size x map_size tiles
}
// One player slot; at most 16 are written to disk.
struct Player<'a> {
    name: &'a str,
    active: u32,
    human: u32,              // NOTE(review): appears to be a flag/enum — confirm meaning of value 2
    civilization: u32,
    resources: BaseResources
}
// Free-form scenario text shown in the editor/game.
struct ScenMessages<'a> {
    objectives: &'a str,
    hints: &'a str,
    scouts: &'a str,
    history: &'a str,
    victory: &'a str,
    loss: &'a str,
}
// Optional bitmap embedded in the scenario.
struct ScenImage<'a> {
    filename: &'a str,
    included: bool,
    width: i32,
    height: i32,
    include: i16
}
// Starting stockpiles for one player.
struct BaseResources {
    gold: u32,
    wood: u32,
    food: u32,
    stone: u32,
    ore: u32,
}
// A single map cell; serialized as 3 bytes (terrain, elevation, 0).
struct MapTile {
    terrain: u8,
    elevation: u8,
}
impl<'a> ScenHeader<'a> {
    /// Serialize the scenario into the .scx wire format: a plain-text header
    /// followed by a deflate-compressed body containing player data,
    /// messages, diplomacy tables, map tiles and unit sections.
    ///
    /// Changes vs. original: unused loop variables renamed to `_` (silences
    /// compiler warnings, matching the file's "fix warnings" intent) and
    /// `write` replaced by `write_all` so no payload can be truncated by a
    /// partial write.
    fn to_bytes(&self) -> Result<Vec<u8>, io::Error> {
        let mut buf = vec![];
        let instructions_length = self.instructions.len() as i32;
        // Fixed header fields are 20 bytes; instructions are variable.
        let header_length = 20 + instructions_length;
        try!(buf.write_all(self.version));
        try!(buf.write_i32::<LittleEndian>(header_length));
        try!(buf.write_i32::<LittleEndian>(self.header_type));
        try!(buf.write_i32::<LittleEndian>(self.timestamp));
        try!(buf.write_i32::<LittleEndian>(instructions_length));
        try!(buf.write_all(self.instructions.as_bytes()));
        try!(buf.write_i32::<LittleEndian>(0));
        try!(buf.write_i32::<LittleEndian>(self.players.len() as i32));
        // Everything below is accumulated uncompressed, then deflated at the end.
        let mut zlib_buf = vec![];
        try!(zlib_buf.write_u32::<LittleEndian>(19246));
        try!(zlib_buf.write_f32::<LittleEndian>(1.22 /* UserPatch */));
        // 16 fixed-width (256-byte, zero-padded) player name slots.
        for i in 0..16 {
            if self.players.len() > i {
                let name = self.players[i].name;
                try!(zlib_buf.write_all(name.as_bytes()));
                try!(zlib_buf.write_all(&vec![0; 256 - name.len()]));
            } else {
                try!(zlib_buf.write_all(&vec![0; 256]));
            }
        }
        for i in 0..16 {
            if self.players.len() > i {
                // player name ID in string table
                try!(zlib_buf.write_i32::<LittleEndian>(0));
            } else {
                try!(zlib_buf.write_i32::<LittleEndian>(0));
            }
        }
        // Per-player status block: active, human, civilization, constant 4.
        for i in 0..16 {
            if self.players.len() <= i {
                try!(zlib_buf.write_u32::<LittleEndian>(0));
                try!(zlib_buf.write_u32::<LittleEndian>(0));
                try!(zlib_buf.write_u32::<LittleEndian>(0));
                try!(zlib_buf.write_u32::<LittleEndian>(4));
                continue;
            }
            try!(zlib_buf.write_u32::<LittleEndian>(self.players[i].active));
            try!(zlib_buf.write_u32::<LittleEndian>(self.players[i].human));
            try!(zlib_buf.write_u32::<LittleEndian>(self.players[i].civilization));
            try!(zlib_buf.write_u32::<LittleEndian>(4));
        }
        try!(zlib_buf.write_u32::<LittleEndian>(1));
        try!(zlib_buf.write_all(&[0]));
        try!(zlib_buf.write_f32::<LittleEndian>(-1.0));
        try!(zlib_buf.write_u16::<LittleEndian>(self.filename.len() as u16));
        try!(zlib_buf.write_all(self.filename.as_bytes()));
        try!(zlib_buf.write_all(
            &try!(self.messages.to_bytes())
        ));
        // cinematics
        try!(zlib_buf.write_u16::<LittleEndian>(0));
        try!(zlib_buf.write_u16::<LittleEndian>(0));
        try!(zlib_buf.write_u16::<LittleEndian>(0));
        try!(zlib_buf.write_all(
            &try!(self.image.to_bytes())
        ));
        for _ in 0..16 {
            // two 0-length strings
            try!(zlib_buf.write_u16::<LittleEndian>(0));
            try!(zlib_buf.write_u16::<LittleEndian>(0));
        }
        // Player AI names
        for _ in 0..8 {
            try!(zlib_buf.write_u16::<LittleEndian>(0));
        }
        // Unused players
        for _ in 0..8 {
            let name = "RandomGame";
            try!(zlib_buf.write_u16::<LittleEndian>(name.len() as u16));
            try!(zlib_buf.write_all(name.as_bytes()));
        }
        // AI source code
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
            try!(zlib_buf.write_i32::<LittleEndian>(0));
            // 0-length AI source code string
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        // Source code for unused players
        for _ in 0..(3 * 8) {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        // AI type
        for _ in 0..8 {
            try!(zlib_buf.write_all(&[0]));
        }
        // Unused players
        try!(zlib_buf.write_all(&[0; 8]));
        // Separator
        try!(zlib_buf.write_u32::<LittleEndian>(0xFFFFFF9D));
        // Resources
        for i in 0..16 {
            if self.players.len() > i {
                let p = &self.players[i];
                try!(zlib_buf.write_u32::<LittleEndian>(p.resources.food));
                try!(zlib_buf.write_u32::<LittleEndian>(p.resources.wood));
                try!(zlib_buf.write_u32::<LittleEndian>(p.resources.gold));
                try!(zlib_buf.write_u32::<LittleEndian>(p.resources.stone));
                try!(zlib_buf.write_u32::<LittleEndian>(p.resources.ore));
                try!(zlib_buf.write_u32::<LittleEndian>(0 /* ??? */));
            }
            else {
                // Unused players
                try!(zlib_buf.write_all(&vec![0; 6 * mem::size_of::<u32>()]));
            }
        }
        // Separator
        try!(zlib_buf.write_u32::<LittleEndian>(0xFFFFFF9D));
        // Scenario goals: 10 * int32
        // Conquest; unknown; Relics; unknown; Exploration; unknown;
        // All; Mode; Score; Time Limit
        let scenario_goals_size = 10 * mem::size_of::<i32>();
        try!(zlib_buf.write_all(&vec![0; scenario_goals_size]));
        // Diplomacy (16x16 from/to matrix, all neutral)
        for _ in 0..16 {
            for _ in 0..16 {
                try!(zlib_buf.write_i32::<LittleEndian>(0));
            }
        }
        // ???
        try!(zlib_buf.write_all(&[0; 11520]));
        // Separator
        try!(zlib_buf.write_u32::<LittleEndian>(0xFFFFFF9D));
        // Allied victory
        for _ in 0..16 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        // Technology count??
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Technology something??
        for _ in 0..(16 * 30) {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Unit count??
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Unit something??
        for _ in 0..(16 * 30) {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Building count??
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(0));
        }
        for _ in 0..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Buildings something??
        for _ in 0..(16 * 20) {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // ???
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        // All Techs
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        // Starting age
        for _ in 0..8 {
            try!(zlib_buf.write_u32::<LittleEndian>(0));
        }
        // Gaia
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        // Unused
        for _ in 1..8 {
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        // Separator
        try!(zlib_buf.write_u32::<LittleEndian>(0xFFFFFF9D));
        // Camera
        try!(zlib_buf.write_i32::<LittleEndian>(0 /* x */));
        try!(zlib_buf.write_i32::<LittleEndian>(0 /* y */));
        // AI type
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        // Map tiles
        try!(zlib_buf.write_u32::<LittleEndian>(self.map_size));
        try!(zlib_buf.write_u32::<LittleEndian>(self.map_size));
        for x in 0..self.map_size {
            for y in 0..self.map_size {
                try!(zlib_buf.write_all(
                    &try!(MapTile::new(((x + y) % 40) as u8, 1).to_bytes())
                ));
            }
        }
        // Units sections
        try!(zlib_buf.write_u32::<LittleEndian>(9));
        // Resources again??
        for i in 0..8 {
            if self.players.len() > i {
                let p = &self.players[i];
                try!(zlib_buf.write_f32::<LittleEndian>(p.resources.food as f32));
                try!(zlib_buf.write_f32::<LittleEndian>(p.resources.wood as f32));
                try!(zlib_buf.write_f32::<LittleEndian>(p.resources.gold as f32));
                try!(zlib_buf.write_f32::<LittleEndian>(p.resources.stone as f32));
                try!(zlib_buf.write_f32::<LittleEndian>(p.resources.ore as f32));
                try!(zlib_buf.write_f32::<LittleEndian>(0.0 /* ??? */));
                try!(zlib_buf.write_f32::<LittleEndian>(0.0 /* population */));
            }
            else {
                // Unused players
                try!(zlib_buf.write_all(&vec![0; 7 * mem::size_of::<f32>()]));
            }
        }
        for _ in 0..9 {
            // Zero units
            try!(zlib_buf.write_u32::<LittleEndian>(0));
            // for unit in players[i].units:
            //   putFloat(unit.x)
            //   putFloat(unit.y)
            //   putFloat(unit.unknown1)
            //   putUInt32(unit.id)
            //   putUInt16(unit.type)
            //   putInt8(unit.unknown2)
            //   putFloat(unit.angle)
            //   putUInt16(unit.frame)
            //   putInt32(unit.inId)
        }
        // Playable players
        try!(zlib_buf.write_u32::<LittleEndian>(9));
        for player in 1..9 {
            let name = "Promisory";
            try!(zlib_buf.write_i16::<LittleEndian>(name.len() as i16));
            try!(zlib_buf.write_all(name.as_bytes()));
            try!(zlib_buf.write_f32::<LittleEndian>(101.0 /* camera x */));
            try!(zlib_buf.write_f32::<LittleEndian>(101.0 /* camera y */));
            try!(zlib_buf.write_i16::<LittleEndian>(101 /* ?? */));
            try!(zlib_buf.write_i16::<LittleEndian>(101 /* ?? */));
            try!(zlib_buf.write_all(&[0])); // allied victory (again?)
            // Diplomacy again
            try!(zlib_buf.write_u16::<LittleEndian>(9));
            try!(zlib_buf.write_all(&[0; 9]));
            // Diplo to gaia?? from gaia?
            for _ in 0..9 {
                try!(zlib_buf.write_i32::<LittleEndian>(0));
            }
            // Player colour
            try!(zlib_buf.write_u32::<LittleEndian>(player));
            // ???
            try!(zlib_buf.write_f32::<LittleEndian>(2.0));
            try!(zlib_buf.write_u16::<LittleEndian>(0));
            // ???
            try!(zlib_buf.write_all(&[0; 8 + 7]));
            try!(zlib_buf.write_i32::<LittleEndian>(-1));
        }
        try!(zlib_buf.write_u32::<LittleEndian>(0x9999999A));
        try!(zlib_buf.write_u32::<LittleEndian>(0x3FF99999));
        try!(zlib_buf.write_all(&[0]));
        // Triggers
        try!(zlib_buf.write_i32::<LittleEndian>(0));
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        try!(zlib_buf.write_u32::<LittleEndian>(0));
        // Compress the body and append it after the plain-text header.
        let mut compressed_buf = vec![];
        compressed_buf.reserve(zlib_buf.len());
        let mut compressor = Compress::new(Compression::Default, false);
        match compressor.compress_vec(&zlib_buf, &mut compressed_buf, Flush::Sync) {
            Status::Ok => println!("hoi"),
            Status::BufError => panic!("BufError"),
            Status::StreamEnd => panic!("StreamEnd"),
        };
        println!("compressed: {} -> {}", zlib_buf.len(), compressed_buf.len());
        try!(buf.write_all(&compressed_buf));
        Ok(buf)
    }
}
impl<'a> Player<'a> {
    // Placeholder player slot: no name, inactive, non-human, zeroed resources.
    fn empty<'b>() -> Player<'b> {
        Player {
            name: "",
            active: 0,
            human: 0,
            civilization: 0,
            resources: BaseResources {
                wood: 0,
                food: 0,
                gold: 0,
                stone: 0,
                ore: 0,
            },
        }
    }
}
impl<'a> ScenMessages<'a> {
    /// Serialize the scenario message strings as u16-length-prefixed byte
    /// strings, preceded by six reserved i32 string-table slots.
    ///
    /// NOTE: the on-disk order (objectives, hints, victory, loss, history,
    /// scouts) intentionally differs from the struct's field order.
    fn to_bytes(&self) -> Result<Vec<u8>, io::Error> {
        let mut buf = vec![];
        // Six unused string-table IDs.
        for _ in 0..6 {
            try!(buf.write_i32::<LittleEndian>(0));
        }
        // Use write_all instead of write: Write::write is allowed to perform
        // a partial write, which would silently truncate the payload.
        try!(buf.write_u16::<LittleEndian>(self.objectives.len() as u16));
        try!(buf.write_all(self.objectives.as_bytes()));
        try!(buf.write_u16::<LittleEndian>(self.hints.len() as u16));
        try!(buf.write_all(self.hints.as_bytes()));
        try!(buf.write_u16::<LittleEndian>(self.victory.len() as u16));
        try!(buf.write_all(self.victory.as_bytes()));
        try!(buf.write_u16::<LittleEndian>(self.loss.len() as u16));
        try!(buf.write_all(self.loss.as_bytes()));
        try!(buf.write_u16::<LittleEndian>(self.history.len() as u16));
        try!(buf.write_all(self.history.as_bytes()));
        try!(buf.write_u16::<LittleEndian>(self.scouts.len() as u16));
        try!(buf.write_all(self.scouts.as_bytes()));
        Ok(buf)
    }
}
impl <'a> ScenImage<'a> {
    /// Serialize the embedded-image header: u16 length-prefixed filename,
    /// inclusion flag (i32 0/1), dimensions, and the i16 `include` field.
    fn to_bytes(&self) -> Result<Vec<u8>, io::Error> {
        let mut buf = vec![];
        try!(buf.write_u16::<LittleEndian>(self.filename.len() as u16));
        // write_all guarantees the whole filename is emitted; plain write()
        // may write fewer bytes than requested.
        try!(buf.write_all(self.filename.as_bytes()));
        try!(buf.write_i32::<LittleEndian>(if self.included { 1 } else { 0 }));
        try!(buf.write_i32::<LittleEndian>(self.width));
        try!(buf.write_i32::<LittleEndian>(self.height));
        try!(buf.write_i16::<LittleEndian>(self.include));
        Ok(buf)
    }
}
impl MapTile {
    // Construct a tile from a terrain id and an elevation level.
    fn new(terrain: u8, elevation: u8) -> MapTile {
        MapTile {
            terrain: terrain,
            elevation: elevation,
        }
    }
    // 3-byte on-disk encoding: terrain, elevation, constant zero pad.
    fn to_bytes(&self) -> Result<[u8; 3], io::Error> {
        Ok([ self.terrain, self.elevation, 0 ])
    }
}
/// Build a demo two-player scenario header and write it to `filename`.
fn test(filename: &str) -> Result<(), io::Error> {
    let mut buf = try!(File::create(filename));
    let header = ScenHeader {
        version: b"1.21",
        header_type: 2,
        timestamp: 1451422223,
        instructions: "Build a fancy-pants base!",
        filename: filename,
        players: vec![
            Player {
                name: "Hello World, from Rust!",
                active: 1,
                human: 2,
                civilization: 1,
                resources: BaseResources {
                    wood: 100,
                    food: 200,
                    gold: 300,
                    stone: 400,
                    ore: 0,
                },
            },
            Player {
                name: "Filthy Opponent",
                active: 1,
                human: 0,
                civilization: 18,
                resources: BaseResources {
                    wood: 200,
                    food: 200,
                    gold: 100,
                    stone: 200,
                    ore: 0,
                },
            },
        ],
        messages: ScenMessages {
            objectives: "",
            hints: "",
            scouts: "",
            history: "",
            victory: "",
            loss: "",
        },
        image: ScenImage {
            filename: "",
            included: false,
            width: 0,
            height: 0,
            include: 1,
        },
        map_size: 220,
    };
    // write_all already returns io::Result<()>; the former `.map(|_| ())`
    // was a redundant identity mapping.
    buf.write_all(&try!(header.to_bytes()))
}
/// Entry point: write the demo scenario, aborting loudly on I/O failure.
fn main() {
    if let Err(e) = test("Test Scenario.scx") {
        panic!("oops {}", e)
    }
}
|
/// Entry point: print the canonical greeting to stdout.
fn main() {
    let greeting = "Hello, world!";
    println!("{}", greeting);
}
Basic server, doesn't yet respond
use std::net::TcpListener;
use std::io::Read;
/// Accept TCP connections on an OS-assigned loopback port and dump each
/// client's bytes to stdout. Never replies; reads until the peer closes.
fn main() {
    // Bind failure is fatal-but-graceful: report and exit.
    let listener = match TcpListener::bind("127.0.0.1:0") {
        Ok(l) => l,
        Err(error) => {
            println!("Couldn't open the listener: {}", error);
            return;
        }
    };
    println!("{:?}", listener);
    loop {
        match listener.accept() {
            Err(error) => println!("Couldn't accept a connection: {}", error),
            Ok((mut stream, addr)) => {
                println!("{:?}@{}", stream, addr);
                let mut contents: Vec<u8> = Vec::new();
                // Blocks until the client shuts down its write side.
                match stream.read_to_end(&mut contents) {
                    Ok(read) => println!("{} = \"{:?}\"", read, contents),
                    Err(error) => println!("Couldn't read from the stream: {}", error),
                }
            }
        }
    }
}
|
#![feature(slice_bytes)]
extern crate libc;
extern crate errno;
extern crate rustc_serialize; //debug hex dump
use std::io::Write;
use rustc_serialize::hex::ToHex;
const ETH_P_ALL: u16 = 0x0003;
const ETH_P_ARP: u16 = 0x0806;
const IFNAMSIZ: usize = 16; // net/if.h
const SIOCGIFINDEX: libc::c_int = 0x8933;
const RECV_BUF_LEN: usize = 1542;
// Handed to ioctl(2) by pointer: #[repr(C)] is required so the field layout
// matches the kernel's struct ifreq (rustc may otherwise reorder fields).
// The later revision of this file already carries this attribute.
#[repr(C)]
#[allow(non_camel_case_types)]
struct ifreq {
    ifr_name: [u8; IFNAMSIZ],   // interface name, NUL-padded
    ifr_ifindex: libc::c_int    // filled in by SIOCGIFINDEX
}
// Resolve an interface name (e.g. "eth0") to its kernel index via the
// SIOCGIFINDEX ioctl on `sock`. Panics on ioctl failure.
// NOTE(review): copy_memory presumably panics if ifname exceeds IFNAMSIZ
// bytes — confirm; also slice::bytes and libc::funcs are pre-1.0-era
// unstable APIs.
fn ifindex_from_ifname(ifname: &str, sock: libc::c_int) -> libc::c_int {
    let mut ifr = ifreq {
        ifr_name: [0; IFNAMSIZ],
        ifr_ifindex: 0
    };
    std::slice::bytes::copy_memory(ifname.as_bytes(), &mut ifr.ifr_name);
    if unsafe { libc::funcs::bsd44::ioctl(sock, SIOCGIFINDEX, &ifr) } == -1 {
        let err = errno::errno();
        panic!("Error getting ifindex: {} ({})", err, err.0);
    }
    // Kernel wrote the index into the same struct.
    return ifr.ifr_ifindex;
}
// Open an AF_PACKET raw socket bound to LISTEN_IFACE, receive one frame and
// hex-dump its source MAC and payload. (This revision builds a BPF program
// but never attaches it; see the later revision for the setsockopt call.)
fn main() {
    // tcpdump -dd arp and incoming
    let bpf_filter_arp_incoming: [sock_filter; 6] = [
        sock_filter { code: 0x28, jt: 0, jf: 0, k: 0x0000000c },
        sock_filter { code: 0x15, jt: 0, jf: 3, k: 0x00000806 },
        sock_filter { code: 0x28, jt: 0, jf: 0, k: 0xfffff004 },
        sock_filter { code: 0x15, jt: 1, jf: 0, k: 0x00000004 },
        sock_filter { code: 0x06, jt: 0, jf: 0, k: 0x00040000 },
        sock_filter { code: 0x06, jt: 0, jf: 0, k: 0x00000000 },
    ];
    let bpf_filter_arp_incoming_prog: sock_fprog = sock_fprog {
        len: 6,
        filter: bpf_filter_arp_incoming.as_ptr()
    };
    let mut args = std::env::args();
    if args.len() != 3 {
        std::io::stderr().write("Usage: arpmasqd LISTEN_IFACE SEND_ADDR\n".as_ref()).unwrap();
        panic!("number of arguments");
    }
    // Skip argv[0] (program name).
    args.next();
    //unwrap is ok because arg count has been checked
    let listen_iface = args.next().unwrap();
    let send_addr = args.next().unwrap();
    // ETH_P_ALL must be big-endian in the socket() protocol argument.
    let listen_socket = unsafe { libc::socket(libc::AF_PACKET, libc::SOCK_RAW, ETH_P_ALL.to_be() as i32) };
    if listen_socket == -1 {
        let err = errno::errno();
        println!("maybe try: sudo setcap CAP_NET_RAW+eip arpmasqd");
        panic!("Error opening socket: {} ({})", err, err.0);
    }
    let listen_sockaddr = libc::sockaddr_ll {
        sll_family: libc::AF_PACKET as u16,
        sll_protocol: ETH_P_ALL.to_be(),
        sll_ifindex: ifindex_from_ifname(&listen_iface, listen_socket),
        sll_hatype: 0,
        sll_pkttype: 0,
        sll_halen: 0,
        sll_addr: [0; 8]
    };
    let bind_result = unsafe { libc::bind(listen_socket, std::mem::transmute(&listen_sockaddr), std::mem::size_of_val(&listen_sockaddr) as u32) };
    if bind_result == -1 {
        let err = errno::errno();
        panic!("Error binding socket: {} ({})", err, err.0);
    }
    // 1542 bytes: enough for a max-size Ethernet frame.
    let buf = [0u8; RECV_BUF_LEN];
    let mut recv_sockaddr = unsafe { std::mem::zeroed::<libc::sockaddr_ll>() };
    let mut recv_sockaddr_len: u32 = std::mem::size_of_val(&recv_sockaddr) as u32;
    let recv_result = unsafe { libc::recvfrom(listen_socket, std::mem::transmute(&buf), RECV_BUF_LEN as u64, 0, std::mem::transmute(&recv_sockaddr), &mut recv_sockaddr_len) };
    if recv_result == -1 {
        let err = errno::errno();
        panic!("Error in recvfrom: {} ({})", err, err.0);
    }
    // First 6 bytes of sll_addr hold the sender's MAC.
    println!("From: {}", recv_sockaddr.sll_addr[0..6].to_hex());
    println!("Data: {}", buf[0..(recv_result as usize)].to_hex());
}
added BPF filter
#![feature(slice_bytes)]
extern crate libc;
extern crate errno;
extern crate rustc_serialize; //debug hex dump
use std::io::Write;
use rustc_serialize::hex::ToHex;
const ETH_P_ALL: u16 = 0x0003;
const ETH_P_ARP: u16 = 0x0806;
const IFNAMSIZ: usize = 16; // net/if.h
const SIOCGIFINDEX: libc::c_int = 0x8933;
const RECV_BUF_LEN: usize = 1542;
const SO_ATTACH_FILTER: libc::c_int = 26;
// One classic-BPF instruction, mirroring the kernel's struct sock_filter
// (linux/filter.h); #[repr(C)] keeps the layout ABI-compatible.
#[repr(C)]
#[allow(non_camel_case_types)]
struct sock_filter { /* Filter block */
    code: u16, /* Actual filter code */
    jt: u8, /* Jump true */
    jf: u8, /* Jump false */
    k: u32 /* Generic multiuse field */
}
// BPF program descriptor passed to setsockopt(SO_ATTACH_FILTER).
#[repr(C)]
#[allow(non_camel_case_types)]
#[derive(Debug)]
struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
    len: u16, /* Number of filter blocks */
    filter: *const sock_filter
}
// Argument struct for the SIOCGIFINDEX ioctl (net/if.h struct ifreq).
#[repr(C)]
#[allow(non_camel_case_types)]
struct ifreq {
    ifr_name: [u8; IFNAMSIZ],
    ifr_ifindex: libc::c_int
}
// Resolve an interface name (e.g. "eth0") to its kernel index via the
// SIOCGIFINDEX ioctl on `sock`. Panics on ioctl failure.
// NOTE(review): copy_memory presumably panics if ifname exceeds IFNAMSIZ
// bytes — confirm; also slice::bytes and libc::funcs are pre-1.0-era
// unstable APIs.
fn ifindex_from_ifname(ifname: &str, sock: libc::c_int) -> libc::c_int {
    let mut ifr = ifreq {
        ifr_name: [0; IFNAMSIZ],
        ifr_ifindex: 0
    };
    std::slice::bytes::copy_memory(ifname.as_bytes(), &mut ifr.ifr_name);
    if unsafe { libc::funcs::bsd44::ioctl(sock, SIOCGIFINDEX, &ifr) } == -1 {
        let err = errno::errno();
        panic!("Error getting ifindex: {} ({})", err, err.0);
    }
    // Kernel wrote the index into the same struct.
    return ifr.ifr_ifindex;
}
// Open an AF_PACKET raw socket bound to LISTEN_IFACE, attach a BPF filter
// matching incoming ARP frames, receive one frame and hex-dump its source
// MAC and payload.
fn main() {
    // tcpdump -dd arp and incoming
    let bpf_filter_arp_incoming: [sock_filter; 6] = [
        sock_filter { code: 0x28, jt: 0, jf: 0, k: 0x0000000c },
        sock_filter { code: 0x15, jt: 0, jf: 3, k: 0x00000806 },
        sock_filter { code: 0x28, jt: 0, jf: 0, k: 0xfffff004 },
        sock_filter { code: 0x15, jt: 1, jf: 0, k: 0x00000004 },
        sock_filter { code: 0x06, jt: 0, jf: 0, k: 0x00040000 },
        sock_filter { code: 0x06, jt: 0, jf: 0, k: 0x00000000 },
    ];
    let bpf_filter_arp_incoming_prog: sock_fprog = sock_fprog {
        len: 6,
        filter: bpf_filter_arp_incoming.as_ptr()
    };
    let mut args = std::env::args();
    if args.len() != 3 {
        std::io::stderr().write("Usage: arpmasqd LISTEN_IFACE SEND_ADDR\n".as_ref()).unwrap();
        panic!("number of arguments");
    }
    // Skip argv[0] (program name).
    args.next();
    //unwrap is ok because arg count has been checked
    let listen_iface = args.next().unwrap();
    let send_addr = args.next().unwrap();
    // ETH_P_ALL must be big-endian in the socket() protocol argument.
    let listen_socket = unsafe { libc::socket(libc::AF_PACKET, libc::SOCK_RAW, ETH_P_ALL.to_be() as i32) };
    if listen_socket == -1 {
        let err = errno::errno();
        println!("maybe try: sudo setcap CAP_NET_RAW+eip arpmasqd");
        panic!("Error opening socket: {} ({})", err, err.0);
    }
    // Attach the ARP-only filter before bind so unrelated traffic is dropped
    // in-kernel.
    let attach_filter_res = unsafe { libc::setsockopt(listen_socket, libc::SOL_SOCKET, SO_ATTACH_FILTER, std::mem::transmute(&bpf_filter_arp_incoming_prog), std::mem::size_of_val(&bpf_filter_arp_incoming_prog) as u32) };
    if attach_filter_res == -1 {
        let err = errno::errno();
        panic!("Error attaching filter: {} ({})", err, err.0);
    }
    let listen_sockaddr = libc::sockaddr_ll {
        sll_family: libc::AF_PACKET as u16,
        sll_protocol: ETH_P_ALL.to_be(),
        sll_ifindex: ifindex_from_ifname(&listen_iface, listen_socket),
        sll_hatype: 0,
        sll_pkttype: 0,
        sll_halen: 0,
        sll_addr: [0; 8]
    };
    let bind_result = unsafe { libc::bind(listen_socket, std::mem::transmute(&listen_sockaddr), std::mem::size_of_val(&listen_sockaddr) as u32) };
    if bind_result == -1 {
        let err = errno::errno();
        panic!("Error binding socket: {} ({})", err, err.0);
    }
    // 1542 bytes: enough for a max-size Ethernet frame.
    let buf = [0u8; RECV_BUF_LEN];
    let mut recv_sockaddr = unsafe { std::mem::zeroed::<libc::sockaddr_ll>() };
    let mut recv_sockaddr_len: u32 = std::mem::size_of_val(&recv_sockaddr) as u32;
    let recv_result = unsafe { libc::recvfrom(listen_socket, std::mem::transmute(&buf), RECV_BUF_LEN as u64, 0, std::mem::transmute(&recv_sockaddr), &mut recv_sockaddr_len) };
    if recv_result == -1 {
        let err = errno::errno();
        panic!("Error in recvfrom: {} ({})", err, err.0);
    }
    // First 6 bytes of sll_addr hold the sender's MAC.
    println!("From: {}", recv_sockaddr.sll_addr[0..6].to_hex());
    println!("Data: {}", buf[0..(recv_result as usize)].to_hex());
}
|
use csv;
use indicatif;
use lazy_static;
use prettytable;
use regex;
use rusqlite;
use rustyline;
use std::env;
use std::error::Error;
use std::fs::File;
/// Normalize a CSV header cell into a safe SQL column name: strip any
/// parenthesized segments, lowercase, trim, map separator characters
/// (space, '.', '-', '/') to underscores and drop question marks.
fn _normalize_col(col: &str) -> String {
    lazy_static::lazy_static! {
        static ref RE: regex::Regex = regex::Regex::new(r"\(.*?\)").unwrap();
    }
    let lowered = RE.replace_all(col, "").to_lowercase();
    lowered
        .trim()
        .chars()
        .filter_map(|c| match c {
            ' ' | '.' | '-' | '/' => Some('_'),
            '?' => None,
            other => Some(other),
        })
        .collect()
}
/// Create `table_name` with one varchar column per entry of `cols`.
/// Panics if the DDL fails (e.g. the table already exists).
fn _create_table(db: &mut rusqlite::Connection, table_name: &str, cols: &[String]) {
    let column_defs: Vec<String> = cols.iter().map(|c| format!("{} varchar", c)).collect();
    let ddl = format!("CREATE TABLE {} ({})", table_name, column_defs.join(", "));
    let no_params = &[] as &[&dyn rusqlite::types::ToSql];
    db.execute(&ddl, no_params).unwrap();
}
// Load the CSV at `path` into a new table `table_name` (all columns
// varchar) and return the normalized column names. This revision panics on
// any I/O/CSV/SQL error (the return type leaves no way to propagate them).
fn _load_table_from_path(
    db: &mut rusqlite::Connection,
    table_name: &str,
    path: String,
) -> Vec<String> {
    let mut num_rows = 0;
    let f = File::open(path).unwrap();
    // File size drives the progress bar; position().byte() advances it.
    let file_size = f.metadata().unwrap().len();
    let mut reader = csv::Reader::from_reader(f);
    // Normalize headers and de-duplicate repeats by suffixing _1, _2, ...
    let normalized_cols =
        reader
            .headers()
            .unwrap()
            .iter()
            .map(_normalize_col)
            .fold(vec![], |mut v, orig_col| {
                let mut col = orig_col.clone();
                let mut i = 1;
                while v.contains(&col) {
                    col = format!("{}_{}", orig_col, i);
                    i += 1
                }
                v.push(col);
                v
            });
    _create_table(db, table_name, &normalized_cols);
    // One positional placeholder per column.
    let insert_query = format!(
        "INSERT INTO {} VALUES ({})",
        table_name,
        normalized_cols
            .iter()
            .map(|_| "?")
            .collect::<Vec<_>>()
            .join(", ")
    );
    let pb = indicatif::ProgressBar::new(file_size);
    pb.set_style(
        indicatif::ProgressStyle::default_bar()
            .template("[{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({eta})")
            .progress_chars("#>-"),
    );
    let mut records = reader.records();
    // Single transaction for the whole load: one commit instead of one per row.
    let tx = db.transaction().unwrap();
    {
        let mut stmt = tx.prepare(&insert_query).unwrap();
        while let Some(row) = records.next() {
            stmt.execute(&row.unwrap()).unwrap();
            num_rows += 1;
            // Update the bar only every 10k rows to keep overhead low.
            if num_rows % 10000 == 0 {
                pb.set_position(records.reader().position().byte())
            }
        }
    }
    tx.commit().unwrap();
    pb.finish();
    println!(
        "Loaded {} rows into {}({})",
        num_rows,
        table_name,
        normalized_cols.join(", "),
    );
    normalized_cols
}
// Adapter that coerces any SQLite value to a display String, so result rows
// can be rendered without knowing column types.
struct FromAnySqlType {
    value: String,
}
impl rusqlite::types::FromSql for FromAnySqlType {
    fn column_result(
        value: rusqlite::types::ValueRef<'_>,
    ) -> Result<FromAnySqlType, rusqlite::types::FromSqlError> {
        let result = match value {
            rusqlite::types::ValueRef::Null => "null".to_string(),
            rusqlite::types::ValueRef::Integer(v) => v.to_string(),
            rusqlite::types::ValueRef::Real(v) => v.to_string(),
            // Blobs are assumed to be UTF-8 text; panics otherwise.
            rusqlite::types::ValueRef::Blob(v) | rusqlite::types::ValueRef::Text(v) => {
                String::from_utf8(v.to_vec()).unwrap()
            }
        };
        Ok(FromAnySqlType { value: result })
    }
}
/// Compile `query` on the connection, mapping any rusqlite error to a
/// printable String for the REPL.
fn _prepare_query<'a>(
    conn: &'a mut rusqlite::Connection,
    query: &str,
) -> Result<rusqlite::Statement<'a>, String> {
    // Error::description() is deprecated (since Rust 1.42) and typically
    // returns a generic static string; Display gives the full message.
    conn.prepare(&query).map_err(|e| e.to_string())
}
// Run `line` as SQL and pretty-print the result set to stdout.
// Returns Err only for prepare failures; row iteration stops silently on
// the first row error.
fn _handle_query(conn: &mut rusqlite::Connection, line: &str) -> Result<(), String> {
    let mut stmt = _prepare_query(conn, line)?;
    let mut table = prettytable::Table::new();
    // Header row from the statement's column names.
    let mut title_row = prettytable::Row::new(vec![]);
    for col in stmt.column_names() {
        title_row.add_cell(prettytable::Cell::new(col));
    }
    table.set_titles(title_row);
    table.set_format(*prettytable::format::consts::FORMAT_NO_LINESEP_WITH_TITLE);
    let mut results = stmt.query(&[] as &[&dyn rusqlite::types::ToSql]).unwrap();
    while let Ok(Some(r)) = results.next() {
        let mut row = prettytable::Row::new(vec![]);
        for i in 0..r.column_count() {
            // FromAnySqlType stringifies regardless of the column's type.
            let cell: FromAnySqlType = r.get(i).unwrap();
            row.add_cell(prettytable::Cell::new(&cell.value));
        }
        table.add_row(row);
    }
    table.printstd();
    Ok(())
}
// Handle a `.export(file) SQL` REPL command: run SQL and stream the result
// set (header + rows) into a CSV file at the captured path.
fn _handle_export(conn: &mut rusqlite::Connection, line: &str) -> Result<(), String> {
    lazy_static::lazy_static! {
        // Capture 1: destination path; capture 2: the SQL to run.
        static ref RE: regex::Regex = regex::Regex::new(r"^\.export\(([\w_\-\./]+)\) (.*)").unwrap();
    }
    let caps = RE
        .captures(line)
        .ok_or_else(|| "Must match `.export(file-name) SQL`".to_owned())?;
    let destination_path = &caps[1];
    let query = &caps[2];
    let mut stmt = _prepare_query(conn, query)?;
    let mut writer = csv::Writer::from_path(destination_path).unwrap();
    // Header row first, then one CSV record per result row.
    writer.write_record(stmt.column_names()).unwrap();
    let mut results = stmt.query(&[] as &[&dyn rusqlite::types::ToSql]).unwrap();
    while let Ok(Some(r)) = results.next() {
        writer
            .write_record((0..r.column_count()).map(|i| {
                let cell: FromAnySqlType = r.get(i).unwrap();
                cell.value
            }))
            .unwrap();
    }
    Ok(())
}
/// Dispatch one REPL line: `.export(...)` commands go to the CSV exporter,
/// anything else runs as a plain query. Errors are printed, never propagated.
fn _process_query(conn: &mut rusqlite::Connection, line: &str) {
    let outcome = if line.starts_with(".export") {
        _handle_export(conn, line)
    } else {
        _handle_query(conn, line)
    };
    match outcome {
        Ok(()) => {}
        Err(e) => println!("{}", e),
    }
}
// rustyline helper that completes the word at the cursor against a fixed
// vocabulary (SQL keywords plus loaded column names).
struct SimpleWordCompleter {
    words: Vec<String>,
}
// Characters that delimit a completable word.
static BREAK_CHARS: [u8; 4] = [b' ', b'(', b')', b','];
impl SimpleWordCompleter {
    fn new(words: Vec<String>) -> SimpleWordCompleter {
        SimpleWordCompleter { words }
    }
}
impl rustyline::Helper for SimpleWordCompleter {}
// No inline hints.
impl rustyline::hint::Hinter for SimpleWordCompleter {
    fn hint(&self, _line: &str, _pos: usize, _ctx: &rustyline::Context<'_>) -> Option<String> {
        None
    }
}
// Default (no-op) highlighting.
impl rustyline::highlight::Highlighter for SimpleWordCompleter {}
impl rustyline::completion::Completer for SimpleWordCompleter {
    type Candidate = String;
    // Prefix-match the word under the cursor against the vocabulary.
    fn complete(
        &self,
        line: &str,
        pos: usize,
        _ctx: &rustyline::Context<'_>,
    ) -> rustyline::Result<(usize, Vec<String>)> {
        let (start, word) = rustyline::completion::extract_word(line, pos, None, &BREAK_CHARS);
        let matches = self
            .words
            .iter()
            .filter(|w| w.starts_with(word))
            .cloned()
            .collect();
        Ok((start, matches))
    }
}
// REPL entry point: load each CSV given on the command line into an
// in-memory SQLite database (table `t`, or `t1`, `t2`, ... for several
// files), then read-eval-print queries with completion and history.
fn main() {
    let mut paths = env::args().skip(1);
    let mut conn = rusqlite::Connection::open_in_memory().unwrap();
    // Completion vocabulary: SQL keywords plus every loaded column name.
    let mut base_words = [
        "distinct", "select", "from", "group", "by", "order", "where", "count", "limit", "offset",
        ".export",
    ]
    .iter()
    .map(|s| s.to_string())
    .collect::<Vec<String>>();
    if paths.len() == 1 {
        let mut col_names = _load_table_from_path(&mut conn, "t", paths.next().unwrap());
        base_words.append(&mut col_names);
    } else {
        for (idx, path) in paths.enumerate() {
            let mut col_names = _load_table_from_path(&mut conn, &format!("t{}", idx + 1), path);
            base_words.append(&mut col_names);
        }
    }
    let completer = SimpleWordCompleter::new(base_words);
    let mut rl = rustyline::Editor::new();
    rl.set_helper(Some(completer));
    loop {
        match rl.readline("> ") {
            Ok(line) => {
                if line.trim().is_empty() {
                    continue;
                }
                _process_query(&mut conn, &line);
                rl.add_history_entry(line);
            }
            // Ctrl-C clears the current line; Ctrl-D exits.
            Err(rustyline::error::ReadlineError::Interrupted) => {
                println!("Interrupted");
                continue;
            }
            Err(rustyline::error::ReadlineError::Eof) => {
                break;
            }
            Err(err) => {
                println!("Error: {}", err);
                break;
            }
        }
    }
}
Handle errors slightly better
use csv;
use indicatif;
use lazy_static;
use prettytable;
use regex;
use rusqlite;
use rustyline;
use std::env;
use std::error::Error;
use std::fs::File;
// Normalize a CSV header cell into a safe SQL column name: strip any
// parenthesized segments, lowercase, trim, map separators to underscores
// and drop question marks.
fn _normalize_col(col: &str) -> String {
    lazy_static::lazy_static! {
        static ref RE: regex::Regex = regex::Regex::new(r"\(.*?\)").unwrap();
    }
    RE.replace_all(col, "")
        .to_lowercase()
        .trim()
        .replace(" ", "_")
        .replace(".", "_")
        .replace("-", "_")
        .replace("/", "_")
        .replace("?", "")
}
// Create `table_name` with one varchar column per entry of `cols`.
// Panics if the DDL fails (e.g. the table already exists).
fn _create_table(db: &mut rusqlite::Connection, table_name: &str, cols: &[String]) {
    let create_columns = cols
        .iter()
        .map(|c| format!("{} varchar", c))
        .collect::<Vec<String>>()
        .join(", ");
    db.execute(
        &format!("CREATE TABLE {} ({})", table_name, create_columns),
        &[] as &[&dyn rusqlite::types::ToSql],
    )
    .unwrap();
}
/// Load the CSV at `path` into a new table `table_name` (all columns
/// varchar), showing a byte-based progress bar, and return the normalized
/// column names.
///
/// # Errors
/// Propagates file, CSV and SQLite errors. The function already returned
/// `Result`, but internally still `unwrap()`ed most fallible calls — those
/// panics bypassed the caller's error handling and are now forwarded with
/// `?` instead.
fn _load_table_from_path(
    db: &mut rusqlite::Connection,
    table_name: &str,
    path: String,
) -> Result<Vec<String>, Box<dyn std::error::Error>> {
    let mut num_rows = 0;
    let f = File::open(path)?;
    // File size drives the progress bar; position().byte() advances it.
    let file_size = f.metadata()?.len();
    let mut reader = csv::Reader::from_reader(f);
    // Normalize headers and de-duplicate repeats by suffixing _1, _2, ...
    let normalized_cols =
        reader
            .headers()?
            .iter()
            .map(_normalize_col)
            .fold(vec![], |mut v, orig_col| {
                let mut col = orig_col.clone();
                let mut i = 1;
                while v.contains(&col) {
                    col = format!("{}_{}", orig_col, i);
                    i += 1
                }
                v.push(col);
                v
            });
    _create_table(db, table_name, &normalized_cols);
    // One positional placeholder per column.
    let insert_query = format!(
        "INSERT INTO {} VALUES ({})",
        table_name,
        normalized_cols
            .iter()
            .map(|_| "?")
            .collect::<Vec<_>>()
            .join(", ")
    );
    let pb = indicatif::ProgressBar::new(file_size);
    pb.set_style(
        indicatif::ProgressStyle::default_bar()
            .template("[{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({eta})")
            .progress_chars("#>-"),
    );
    let mut records = reader.records();
    // Single transaction for the whole load: one commit instead of one per row.
    let tx = db.transaction()?;
    {
        let mut stmt = tx.prepare(&insert_query)?;
        while let Some(row) = records.next() {
            let record = row?;
            stmt.execute(&record)?;
            num_rows += 1;
            // Update the bar only every 10k rows to keep overhead low.
            if num_rows % 10000 == 0 {
                pb.set_position(records.reader().position().byte())
            }
        }
    }
    tx.commit()?;
    pb.finish();
    println!(
        "Loaded {} rows into {}({})",
        num_rows,
        table_name,
        normalized_cols.join(", "),
    );
    Ok(normalized_cols)
}
// Adapter that coerces any SQLite value to a display String, so result rows
// can be rendered without knowing column types.
struct FromAnySqlType {
    value: String,
}
impl rusqlite::types::FromSql for FromAnySqlType {
    fn column_result(
        value: rusqlite::types::ValueRef<'_>,
    ) -> Result<FromAnySqlType, rusqlite::types::FromSqlError> {
        let result = match value {
            rusqlite::types::ValueRef::Null => "null".to_string(),
            rusqlite::types::ValueRef::Integer(v) => v.to_string(),
            rusqlite::types::ValueRef::Real(v) => v.to_string(),
            // Blobs are assumed to be UTF-8 text; panics otherwise.
            rusqlite::types::ValueRef::Blob(v) | rusqlite::types::ValueRef::Text(v) => {
                String::from_utf8(v.to_vec()).unwrap()
            }
        };
        Ok(FromAnySqlType { value: result })
    }
}
/// Compile `query` on the connection, mapping any rusqlite error to a
/// printable String for the REPL.
fn _prepare_query<'a>(
    conn: &'a mut rusqlite::Connection,
    query: &str,
) -> Result<rusqlite::Statement<'a>, String> {
    // Error::description() is deprecated (since Rust 1.42) and typically
    // returns a generic static string; Display gives the full message.
    conn.prepare(&query).map_err(|e| e.to_string())
}
// Run `line` as SQL and pretty-print the result set to stdout.
// Returns Err only for prepare failures; row iteration stops silently on
// the first row error.
fn _handle_query(conn: &mut rusqlite::Connection, line: &str) -> Result<(), String> {
    let mut stmt = _prepare_query(conn, line)?;
    let mut table = prettytable::Table::new();
    // Header row from the statement's column names.
    let mut title_row = prettytable::Row::new(vec![]);
    for col in stmt.column_names() {
        title_row.add_cell(prettytable::Cell::new(col));
    }
    table.set_titles(title_row);
    table.set_format(*prettytable::format::consts::FORMAT_NO_LINESEP_WITH_TITLE);
    let mut results = stmt.query(&[] as &[&dyn rusqlite::types::ToSql]).unwrap();
    while let Ok(Some(r)) = results.next() {
        let mut row = prettytable::Row::new(vec![]);
        for i in 0..r.column_count() {
            // FromAnySqlType stringifies regardless of the column's type.
            let cell: FromAnySqlType = r.get(i).unwrap();
            row.add_cell(prettytable::Cell::new(&cell.value));
        }
        table.add_row(row);
    }
    table.printstd();
    Ok(())
}
// Handle a `.export(file) SQL` REPL command: run SQL and stream the result
// set (header + rows) into a CSV file at the captured path.
fn _handle_export(conn: &mut rusqlite::Connection, line: &str) -> Result<(), String> {
    lazy_static::lazy_static! {
        // Capture 1: destination path; capture 2: the SQL to run.
        static ref RE: regex::Regex = regex::Regex::new(r"^\.export\(([\w_\-\./]+)\) (.*)").unwrap();
    }
    let caps = RE
        .captures(line)
        .ok_or_else(|| "Must match `.export(file-name) SQL`".to_owned())?;
    let destination_path = &caps[1];
    let query = &caps[2];
    let mut stmt = _prepare_query(conn, query)?;
    let mut writer = csv::Writer::from_path(destination_path).unwrap();
    // Header row first, then one CSV record per result row.
    writer.write_record(stmt.column_names()).unwrap();
    let mut results = stmt.query(&[] as &[&dyn rusqlite::types::ToSql]).unwrap();
    while let Ok(Some(r)) = results.next() {
        writer
            .write_record((0..r.column_count()).map(|i| {
                let cell: FromAnySqlType = r.get(i).unwrap();
                cell.value
            }))
            .unwrap();
    }
    Ok(())
}
/// Dispatch one REPL line: `.export(...)` commands go to the CSV exporter,
/// anything else runs as a plain query. Errors are printed, never propagated.
fn _process_query(conn: &mut rusqlite::Connection, line: &str) {
    let outcome = if line.starts_with(".export") {
        _handle_export(conn, line)
    } else {
        _handle_query(conn, line)
    };
    match outcome {
        Ok(()) => {}
        Err(e) => println!("{}", e),
    }
}
/// Tab-completion helper: completes the word under the cursor against a
/// fixed vocabulary (SQL keywords plus column names loaded from the inputs).
struct SimpleWordCompleter {
words: Vec<String>,
}
// Bytes that terminate a word for completion purposes.
static BREAK_CHARS: [u8; 4] = [b' ', b'(', b')', b','];
impl SimpleWordCompleter {
/// Wrap a pre-built word list; the list is used as-is (no sort/dedup here).
fn new(words: Vec<String>) -> SimpleWordCompleter {
SimpleWordCompleter { words }
}
}
// Marker impls rustyline requires of a Helper; hinting and highlighting are no-ops.
impl rustyline::Helper for SimpleWordCompleter {}
impl rustyline::hint::Hinter for SimpleWordCompleter {
/// No inline hints are offered.
fn hint(&self, _line: &str, _pos: usize, _ctx: &rustyline::Context<'_>) -> Option<String> {
None
}
}
impl rustyline::highlight::Highlighter for SimpleWordCompleter {}
impl rustyline::completion::Completer for SimpleWordCompleter {
    type Candidate = String;
    /// Extract the word under the cursor (delimited by BREAK_CHARS) and
    /// return every known word that starts with it, plus the word's start
    /// offset so rustyline can splice the completion in.
    fn complete(
        &self,
        line: &str,
        pos: usize,
        _ctx: &rustyline::Context<'_>,
    ) -> rustyline::Result<(usize, Vec<String>)> {
        let (word_start, prefix) =
            rustyline::completion::extract_word(line, pos, None, &BREAK_CHARS);
        let mut candidates = Vec::new();
        for candidate in &self.words {
            if candidate.starts_with(prefix) {
                candidates.push(candidate.clone());
            }
        }
        Ok((word_start, candidates))
    }
}
/// Entry point: load each file argument into an in-memory SQLite table
/// (table `t` for a single file, `t1`..`tN` for several) and run a
/// readline-driven SQL REPL with word completion.
fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut paths = env::args().skip(1);
let mut conn = rusqlite::Connection::open_in_memory().unwrap();
// Seed the completer with SQL keywords and the `.export` meta-command.
let mut base_words = [
"distinct", "select", "from", "group", "by", "order", "where", "count", "limit", "offset",
".export",
]
.iter()
.map(|s| s.to_string())
.collect::<Vec<String>>();
if paths.len() == 1 {
// Single input loads into table `t`; its column names join the completion list.
// NOTE(review): _load_table_from_path is defined elsewhere in this file.
let mut col_names = _load_table_from_path(&mut conn, "t", paths.next().unwrap())?;
base_words.append(&mut col_names);
} else {
// Multiple inputs become tables t1, t2, ... in argument order.
for (idx, path) in paths.enumerate() {
let mut col_names = _load_table_from_path(&mut conn, &format!("t{}", idx + 1), path)?;
base_words.append(&mut col_names);
}
}
let completer = SimpleWordCompleter::new(base_words);
let mut rl = rustyline::Editor::new();
rl.set_helper(Some(completer));
loop {
match rl.readline("> ") {
Ok(line) => {
// Blank lines are ignored and kept out of history.
if line.trim().is_empty() {
continue;
}
_process_query(&mut conn, &line);
rl.add_history_entry(line);
}
Err(rustyline::error::ReadlineError::Interrupted) => {
// Ctrl-C abandons the current line but keeps the REPL alive.
println!("Interrupted");
continue;
}
Err(rustyline::error::ReadlineError::Eof) => {
// Ctrl-D exits cleanly.
break;
}
Err(err) => {
println!("Error: {}", err);
break;
}
}
}
Ok(())
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate js;
extern crate libc;
extern crate rustc_serialize;
extern crate mio;
use std::ffi::CStr;
use std::ptr;
use std::str;
use std::io::prelude::*;
use std::fs::File;
use js::{JSCLASS_RESERVED_SLOTS_MASK,JSCLASS_RESERVED_SLOTS_SHIFT,JSCLASS_GLOBAL_SLOT_COUNT,JSCLASS_IS_GLOBAL};
use js::jsapi::JS_GlobalObjectTraceHook;
use js::jsapi::{CallArgs,CompartmentOptions,OnNewGlobalHookOption,Rooted,Value};
use js::jsapi::{JS_DefineFunction,JS_Init,JS_InitStandardClasses,JS_NewGlobalObject,JS_EncodeStringToUTF8,JS_ReportError,JS_ReportPendingException,JS_CallFunctionName,CurrentGlobalOrNull,JS_SetReservedSlot,JS_GetReservedSlot};
use js::jsapi::{JSAutoCompartment,JSAutoRequest,JSContext,JSClass};
use js::jsapi::{JS_SetGCParameter, JSGCParamKey, JSGCMode};
use js::jsapi::{RootedValue, HandleObject, HandleValue, HandleValueArray};
use js::jsval::{UndefinedValue,DoubleValue,PrivateValue};
use js::rust::Runtime;
use rustc_serialize::json;
use mio::{EventLoop, Handler};
// JSClass describing the global object. The flags reserve the slot count
// SpiderMonkey requires of a global; reserved slot 0 is later used to stash
// a raw pointer to the mio event loop (see main and callback below).
static CLASS: &'static JSClass = &JSClass {
name: b"test\0" as *const u8 as *const libc::c_char,
flags: JSCLASS_IS_GLOBAL | ((JSCLASS_GLOBAL_SLOT_COUNT & JSCLASS_RESERVED_SLOTS_MASK) << JSCLASS_RESERVED_SLOTS_SHIFT),
addProperty: None,
delProperty: None,
getProperty: None,
setProperty: None,
enumerate: None,
resolve: None,
convert: None,
finalize: None,
call: None,
hasInstance: None,
construct: None,
// GC hook so the global's slots (incl. our private pointer) stay traced.
trace: Some(JS_GlobalObjectTraceHook),
reserved: [0 as *mut _; 25]
};
// JSON payload sent from JS via _send(): an identifying timestamp plus the
// delay in milliseconds to schedule.
#[derive(RustcDecodable, RustcEncodable)]
struct Timeout {
timestamp: u64,
timeout: u64
}
// mio handler; owns the JS runtime so timer expiries can call back into JS.
struct EventLoopHandler {
rt: Runtime
}
impl Handler for EventLoopHandler {
type Timeout = u64;
type Message = ();
/// Timer expiry: invokes the JS global function `_recv(timestamp)` so the
/// script side can resolve the matching scheduled callback.
fn timeout(&mut self, event_loop: &mut EventLoop<EventLoopHandler>, timestamp: u64) {
let cx = self.rt.cx();
let _ar = JSAutoRequest::new(cx);
unsafe {
let global = CurrentGlobalOrNull(cx);
let mut rval = Rooted::new(cx, UndefinedValue());
assert!(!global.is_null());
// Root the global before re-entering JS so the GC can trace it.
let global_root = Rooted::new(cx, global);
let elems = [DoubleValue(timestamp as f64)];
let args = HandleValueArray{ length_: 1, elements_: &elems as *const Value };
// NOTE(review): the call's return value/rval are deliberately ignored.
JS_CallFunctionName(cx, global_root.handle(), b"_recv\0".as_ptr() as *const libc::c_char, &args, rval.handle_mut());
}
//event_loop.shutdown();
}
}
/// Handle a `_send` message from JS: decode the Timeout JSON and schedule it
/// on the mio event loop whose raw pointer lives in the global's reserved
/// slot 0 (stored there by main).
fn callback(cx: *mut JSContext, message: &str) {
let timeout_msg: Timeout = json::decode(message).unwrap();
let _ar = JSAutoRequest::new(cx);
unsafe {
let global = CurrentGlobalOrNull(cx);
assert!(!global.is_null());
//let _ac = JSAutoCompartment::new(cx, global);
let value = JS_GetReservedSlot(global, 0);
assert!(!value.is_undefined());
// Round-trip the private pointer back to the event loop set up in main.
let event_loop = value.to_private() as *mut EventLoop<EventLoopHandler>;
(*event_loop).timeout_ms(timeout_msg.timestamp, timeout_msg.timeout);
};
}
fn main() {
unsafe {
JS_Init();
}
let runtime = Runtime::new();
let context = runtime.cx();
let h_option = OnNewGlobalHookOption::FireOnNewGlobalHook;
let c_option = CompartmentOptions::default();
let _ar = JSAutoRequest::new(context);
let global = unsafe { JS_NewGlobalObject(context, CLASS, ptr::null_mut(), h_option, &c_option) };
let global_root = Rooted::new(context, global);
let global = global_root.handle();
let _ac = JSAutoCompartment::new(context, global.get());
unsafe {
JS_SetGCParameter(runtime.rt(), JSGCParamKey::JSGC_MODE, JSGCMode::JSGC_MODE_INCREMENTAL as u32);
JS_InitStandardClasses(context, global);
let send_fn = JS_DefineFunction(context, global, b"_send\0".as_ptr() as *const libc::c_char,
Some(send), 1, 0);
assert!(!send_fn.is_null());
let print_fn = JS_DefineFunction(context, global, b"_print\0".as_ptr() as *const libc::c_char,
Some(print), 1, 0);
assert!(!print_fn.is_null());
}
let mut event_loop = EventLoop::new().unwrap();
let mut boxed_event_loop = Box::new(event_loop);
let mut handler = EventLoopHandler { rt: runtime };
let box_ptr = Box::into_raw(boxed_event_loop);
unsafe {
JS_SetReservedSlot(global.get(), 0,
PrivateValue(box_ptr as *const libc::c_void));
boxed_event_loop = Box::from_raw(box_ptr);
}
let mut f = File::open("src/bootstrap.js").unwrap();
let mut source = String::new();
f.read_to_string(&mut source);
handler.rt.evaluate_script(global, source, "bootstrap.js".to_string(), 1);
let _ = &boxed_event_loop.run(&mut handler);
}
/// JS-native `_send(json)`: requires exactly one argument, stringifies it,
/// and forwards the UTF-8 text to `callback` for timer scheduling.
/// Returns false (with a reported error) on arity mismatch.
unsafe extern "C" fn send(cx: *mut JSContext, argc: u32, vp: *mut Value) -> bool {
let args = CallArgs::from_vp(vp, argc);
if args._base.argc_ != 1 {
JS_ReportError(cx, b"_send() requires exactly 1 argument\0".as_ptr() as *const libc::c_char);
return false;
}
let arg = args.get(0);
let js = js::rust::ToString(cx, arg);
// Root the JSString before encoding so the GC cannot move/collect it.
let message_root = Rooted::new(cx, js);
let message = JS_EncodeStringToUTF8(cx, message_root.handle());
let message = CStr::from_ptr(message);
callback(cx, str::from_utf8(message.to_bytes()).unwrap());
args.rval().set(UndefinedValue());
return true;
}
/// JS-native `_print(...)`: stringify every argument, join with single
/// spaces, and write the line to stdout. Always succeeds with `undefined`.
unsafe extern "C" fn print(cx: *mut JSContext, argc: u32, vp: *mut Value) -> bool {
    let call_args = CallArgs::from_vp(vp, argc);
    let mut rendered: Vec<String> = Vec::with_capacity(call_args._base.argc_ as usize);
    for idx in 0..call_args._base.argc_ {
        rendered.push(fmt_js_value(cx, call_args.get(idx)));
    }
    println!("{}", rendered.join(" "));
    call_args.rval().set(UndefinedValue());
    true
}
/// Convert an arbitrary JS value to an owned Rust String via the engine's
/// ToString + UTF-8 encoding. Panics if the encoded bytes are not UTF-8.
fn fmt_js_value(cx: *mut JSContext, val: HandleValue) -> String {
    unsafe {
        let js_str = js::rust::ToString(cx, val);
        // Root the JSString across the encode call.
        let root = Rooted::new(cx, js_str);
        let c_buf = JS_EncodeStringToUTF8(cx, root.handle());
        str::from_utf8(CStr::from_ptr(c_buf).to_bytes())
            .unwrap()
            .to_string()
    }
}
Fix warnings and add error messages
extern crate js;
extern crate libc;
extern crate rustc_serialize;
extern crate mio;
use std::ffi::CStr;
use std::ptr;
use std::str;
use std::io::prelude::*;
use std::fs::File;
use js::{JSCLASS_RESERVED_SLOTS_MASK,JSCLASS_RESERVED_SLOTS_SHIFT,JSCLASS_GLOBAL_SLOT_COUNT,JSCLASS_IS_GLOBAL};
use js::jsapi::JS_GlobalObjectTraceHook;
use js::jsapi::{CallArgs,CompartmentOptions,OnNewGlobalHookOption,Rooted,Value};
use js::jsapi::{JS_DefineFunction,JS_Init,JS_InitStandardClasses,JS_NewGlobalObject,JS_EncodeStringToUTF8,JS_ReportError,JS_ReportPendingException,JS_CallFunctionName,CurrentGlobalOrNull,JS_SetReservedSlot,JS_GetReservedSlot};
use js::jsapi::{JSAutoCompartment,JSAutoRequest,JSContext,JSClass};
use js::jsapi::{JS_SetGCParameter,JSGCParamKey,JSGCMode};
use js::jsapi::{HandleValue,HandleValueArray};
use js::jsval::{UndefinedValue,DoubleValue,PrivateValue};
use js::rust::Runtime;
use rustc_serialize::json;
use mio::{EventLoop,Handler};
// JSClass describing the global object. The flags reserve the slot count
// SpiderMonkey requires of a global; reserved slot 0 later carries a raw
// pointer to the mio event loop (see main and callback below).
static CLASS: &'static JSClass = &JSClass {
name: b"Global\0" as *const u8 as *const libc::c_char,
flags: JSCLASS_IS_GLOBAL | ((JSCLASS_GLOBAL_SLOT_COUNT & JSCLASS_RESERVED_SLOTS_MASK) << JSCLASS_RESERVED_SLOTS_SHIFT),
addProperty: None,
delProperty: None,
getProperty: None,
setProperty: None,
enumerate: None,
resolve: None,
convert: None,
finalize: None,
call: None,
hasInstance: None,
construct: None,
// GC hook so the global's slots (incl. our private pointer) stay traced.
trace: Some(JS_GlobalObjectTraceHook),
reserved: [0 as *mut _; 25]
};
// JSON payload sent from JS via _send(): an identifying timestamp plus the
// delay in milliseconds to schedule.
#[derive(RustcDecodable, RustcEncodable)]
struct Timeout {
timestamp: u64,
timeout: u64
}
// mio handler; owns the JS runtime so timer expiries can call back into JS.
struct EventLoopHandler {
rt: Runtime
}
impl Handler for EventLoopHandler {
type Timeout = u64;
type Message = ();
/// Timer expiry: invokes the JS global function `_recv(timestamp)` so the
/// script side can resolve the matching scheduled callback.
fn timeout(&mut self, event_loop: &mut EventLoop<EventLoopHandler>, timestamp: u64) {
let cx = self.rt.cx();
let _ar = JSAutoRequest::new(cx);
unsafe {
let global = CurrentGlobalOrNull(cx);
let mut rval = Rooted::new(cx, UndefinedValue());
assert!(!global.is_null());
// Root the global before re-entering JS so the GC can trace it.
let global_root = Rooted::new(cx, global);
let elems = [DoubleValue(timestamp as f64)];
let args = HandleValueArray{ length_: 1, elements_: &elems as *const Value };
// NOTE(review): the call's return value/rval are deliberately ignored.
JS_CallFunctionName(cx, global_root.handle(), b"_recv\0".as_ptr() as *const libc::c_char, &args, rval.handle_mut());
}
//event_loop.shutdown();
}
}
/// Handle a `_send` message from JS: decode the Timeout JSON and schedule it
/// on the mio event loop whose raw pointer lives in the global's reserved
/// slot 0 (stored there by main).
fn callback(cx: *mut JSContext, message: &str) {
let timeout_msg: Timeout = json::decode(message).unwrap();
let _ar = JSAutoRequest::new(cx);
unsafe {
let global = CurrentGlobalOrNull(cx);
assert!(!global.is_null());
let value = JS_GetReservedSlot(global, 0);
assert!(!value.is_undefined());
// Round-trip the private pointer back to the event loop set up in main.
let event_loop = value.to_private() as *mut EventLoop<EventLoopHandler>;
// Scheduling result intentionally discarded (best-effort timer).
let _ = (*event_loop).timeout_ms(timeout_msg.timestamp, timeout_msg.timeout);
};
}
/// Boot SpiderMonkey, expose `_send`/`_print` to JS, stash the mio event
/// loop in the global's reserved slot 0, evaluate src/bootstrap.js (failing
/// loudly with the pending JS exception reported), then run the event loop.
fn main() {
unsafe {
JS_Init();
}
let runtime = Runtime::new();
let cx = runtime.cx();
let h_option = OnNewGlobalHookOption::FireOnNewGlobalHook;
let c_option = CompartmentOptions::default();
let _ar = JSAutoRequest::new(cx);
let global = unsafe { JS_NewGlobalObject(cx, CLASS, ptr::null_mut(), h_option, &c_option) };
// Root the new global for the rest of main; all JS runs in its compartment.
let global_root = Rooted::new(cx, global);
let global = global_root.handle();
let _ac = JSAutoCompartment::new(cx, global.get());
unsafe {
JS_SetGCParameter(runtime.rt(), JSGCParamKey::JSGC_MODE, JSGCMode::JSGC_MODE_INCREMENTAL as u32);
JS_InitStandardClasses(cx, global);
// Native bridge functions callable from script.
let send_fn = JS_DefineFunction(cx, global, b"_send\0".as_ptr() as *const libc::c_char,
Some(send), 1, 0);
assert!(!send_fn.is_null());
let print_fn = JS_DefineFunction(cx, global, b"_print\0".as_ptr() as *const libc::c_char,
Some(print), 1, 0);
assert!(!print_fn.is_null());
}
let event_loop = EventLoop::new().unwrap();
let mut boxed_event_loop = Box::new(event_loop);
let mut handler = EventLoopHandler { rt: runtime };
// Leak the box just long enough to store the raw pointer in slot 0, then
// immediately re-own it so it is freed when main returns.
let box_ptr = Box::into_raw(boxed_event_loop);
unsafe {
JS_SetReservedSlot(global.get(), 0,
PrivateValue(box_ptr as *const libc::c_void));
boxed_event_loop = Box::from_raw(box_ptr);
}
let mut file = match File::open("src/bootstrap.js") {
Err(_) => panic!("Error opening file"),
Ok(file) => file
};
let mut source = String::new();
if let Err(_) = file.read_to_string(&mut source) {
panic!("Error reading file");
};
// Report the pending JS exception before dying so the script error is visible.
match handler.rt.evaluate_script(global, source, "bootstrap.js".to_string(), 1) {
Err(_) => unsafe { JS_ReportPendingException(cx); panic!("Error executing JS") },
_ => ()
};
let _ = &boxed_event_loop.run(&mut handler);
}
/// JS-native `_send(json)`: requires exactly one argument, stringifies it,
/// and forwards the UTF-8 text to `callback` for timer scheduling.
/// Returns false (with a reported error) on arity mismatch.
unsafe extern "C" fn send(cx: *mut JSContext, argc: u32, vp: *mut Value) -> bool {
let args = CallArgs::from_vp(vp, argc);
if args._base.argc_ != 1 {
JS_ReportError(cx, b"_send() requires exactly 1 argument\0".as_ptr() as *const libc::c_char);
return false;
}
let arg = args.get(0);
let js = js::rust::ToString(cx, arg);
// Root the JSString before encoding so the GC cannot move/collect it.
let message_root = Rooted::new(cx, js);
let message = JS_EncodeStringToUTF8(cx, message_root.handle());
let message = CStr::from_ptr(message);
callback(cx, str::from_utf8(message.to_bytes()).unwrap());
args.rval().set(UndefinedValue());
return true;
}
/// JS-native `_print(...)`: stringify every argument, join with single
/// spaces, and write the line to stdout. Always succeeds with `undefined`.
unsafe extern "C" fn print(cx: *mut JSContext, argc: u32, vp: *mut Value) -> bool {
    let call_args = CallArgs::from_vp(vp, argc);
    let mut pieces: Vec<String> = Vec::new();
    for idx in 0..call_args._base.argc_ {
        pieces.push(fmt_js_value(cx, call_args.get(idx)));
    }
    let joined = pieces.join(" ");
    println!("{}", joined);
    call_args.rval().set(UndefinedValue());
    true
}
/// Convert an arbitrary JS value to an owned Rust String via the engine's
/// ToString + UTF-8 encoding. Panics if the encoded bytes are not UTF-8.
fn fmt_js_value(cx: *mut JSContext, val: HandleValue) -> String {
    unsafe {
        let js_str = js::rust::ToString(cx, val);
        // Root the JSString across the encode call.
        let root = Rooted::new(cx, js_str);
        let encoded = JS_EncodeStringToUTF8(cx, root.handle());
        str::from_utf8(CStr::from_ptr(encoded).to_bytes())
            .unwrap()
            .to_string()
    }
}
|
extern crate hyper;
extern crate rustc_serialize;
extern crate chrono;
extern crate regex;
extern crate env_logger;
#[macro_use] extern crate log;
extern crate docopt;
use std::ascii::AsciiExt;
use std::io::Read;
use std::env;
use std::collections::HashMap;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use std::fmt;
use std::error::Error;
use std::result;
use chrono::naive::datetime::NaiveDateTime;
use chrono::offset::local::Local;
use chrono::offset::fixed::FixedOffset;
use chrono::datetime::DateTime;
use chrono::Datelike;
use rustc_serialize::json;
use rustc_serialize::Decodable;
use hyper::client::Client;
use regex::Regex;
use docopt::Docopt;
// Docopt usage string; its layout defines the CLI grammar.
// Fix: "Washington" was misspelled "Washing" in the user-visible title.
static USAGE: &'static str = "
Washington State Ferry Schedules
Usage:
wsf [options] <from> <to>
wsf -h
<from> and <to> are a prefix of the departing terminal and arriving
terminal, respectively. For example 'wsf sea ba' is equivalent to
'wsf Seattle \"Bainbridge Island\"'.
Options:
-h --help Show this screen.
";
/// Docopt-decoded CLI arguments: case-insensitive prefixes of the
/// departing and arriving terminal names.
#[derive(Debug, RustcDecodable)]
struct Args {
arg_from: String,
arg_to: String,
}
/// Resolve the two terminal-name prefixes, print today's remaining sailings
/// between them, and persist the session cache.
///
/// Fixes: `s.terminals()`, `s.schedule()` and the matched terminal ids were
/// previously `unwrap`ped, so a network failure or a prefix matching no
/// terminal panicked; all of those now propagate as `CliError`.
fn run() -> Result<()> {
    try!(env_logger::init());
    let args: Args = Docopt::new(USAGE)
        .and_then(|d| d.decode())
        .unwrap_or_else(|e| e.exit());
    // Case-insensitive prefix matching against terminal descriptions.
    let from_in: &str = &args.arg_from.to_ascii_lowercase();
    let to_in: &str = &args.arg_to.to_ascii_lowercase();
    // pull in api key at *build* time from environment
    let mut s = Session::new(env!("WSDOT_API_KEY"));
    let now = Local::now();
    let mut from: Option<i32> = None;
    let mut to: Option<i32> = None;
    let terminals = try!(s.terminals());
    for terminal in terminals.iter() {
        if terminal.Description.to_ascii_lowercase().starts_with(&from_in) {
            from = Some(terminal.TerminalID);
        }
        if terminal.Description.to_ascii_lowercase().starts_with(&to_in) {
            to = Some(terminal.TerminalID);
        }
    }
    // A prefix that matches nothing is a user error, not a crash.
    let from = try!(from.ok_or_else(|| CliError::BrokenReality(format!("no terminal matches '{}'", from_in))));
    let to = try!(to.ok_or_else(|| CliError::BrokenReality(format!("no terminal matches '{}'", to_in))));
    let tc = try!(s.schedule(from, to));
    // Only sailings that have not yet departed.
    for time in tc.Times.iter() {
        if time.depart_time() > now {
            println!("{}\t{}\t{}\t{}",
                     time.depart_time().time(),
                     tc.DepartingTerminalName,
                     tc.ArrivingTerminalName,
                     time.VesselName );
        }
    }
    s.save_cache()
}
/// Run the CLI; on failure print the error and exit non-zero.
///
/// Fix: the error branch previously printed an empty string and discarded
/// `e`. Debug formatting is used because it is derived for `CliError`
/// (the hand-written `Display` impl is unimplemented and would panic).
fn main() {
    match run() {
        Ok(_) => {},
        Err(e) => {
            println!("Error: {:?}", e);
            std::process::exit(1);
        }
    }
}
/// WSDOT API session: an HTTP client plus a JSON file cache at ~/.wsf.cache.
/// `offline` is set when the startup connectivity probe fails; the session
/// then serves whatever it can from the cache alone.
struct Session {
api_key: String,
client: Client,
cache: Cache,
// Server-reported cache flush marker, compared against the cached copy.
cacheflushdate: String,
cache_path: String,
offline: bool,
}
impl Session {
    /// Build a session: load the on-disk cache, then probe /cacheflushdate
    /// once. A failed probe marks the session offline so later calls fall
    /// back to the cache.
    fn new(api_key: &str) -> Session {
        let mut cache_path: PathBuf = env::home_dir().unwrap();
        cache_path.push(".wsf.cache");
        let cache_path = format!("{}", cache_path.display());
        let mut s = Session {
            api_key: api_key.to_string(),
            client: Client::new(),
            cache: Cache::load(&cache_path),
            cacheflushdate: String::new(),
            cache_path: cache_path,
            offline: false,
        };
        // `format!` with no interpolation was a pointless allocation round
        // trip; build the owned path string directly.
        s.offline = match s.get::<String>("/cacheflushdate".to_string()) {
            Ok(cfd) => {
                s.cacheflushdate = cfd;
                false
            },
            Err(_) => true,
        };
        s
    }
    /// Persist the cache (terminals + sailings + flush marker) as JSON.
    fn save_cache(&mut self) -> Result<()> {
        self.cache.cache_flush_date = self.cacheflushdate.clone();
        let mut f = try!(File::create(&self.cache_path));
        let encoded = try!(json::encode(&self.cache));
        Ok(try!(f.write_all(encoded.as_bytes())))
    }
    /// GET a REST path and JSON-decode the response body into T.
    /// Panics (assert) if the server answers with a non-200 status.
    fn get<T: Decodable>(&self, path: String) -> Result<T> {
        let url = &format!("http://www.wsdot.wa.gov/ferries/api/schedule/rest{}?apiaccesscode={}",
                           path,
                           self.api_key);
        let mut res = try!(self.client.get(url).send());
        assert_eq!(res.status, hyper::Ok);
        let mut buf = String::new();
        try!(res.read_to_string(&mut buf));
        Ok(try!(json::decode::<T>(&buf)))
    }
    /// All terminals: served from cache when offline or the cache is
    /// current, otherwise fetched fresh and written back to the cache.
    fn terminals(&mut self) -> Result<Vec<Terminal>> {
        if self.offline || (self.cache.cache_flush_date == self.cacheflushdate) {
            return Ok(self.cache.terminals.clone())
        }
        else {
            let now = Local::today();
            let path = format!("/terminals/{}-{}-{}", now.year(), now.month(), now.day());
            let routes: Vec<Terminal> = try!(self.get(path));
            self.cache.terminals = routes.clone();
            return Ok(routes);
        }
    }
    /// Today's sailings between two terminal ids, served from cache when
    /// possible. When the server-side cache has flushed, all locally cached
    /// sailings are invalidated before refetching.
    fn schedule(&mut self, from: i32, to: i32) -> Result<TerminalCombo> {
        let mut cache_is_stale = true;
        let cache_key = format!("{} {}", from, to);
        if self.offline || (self.cache.cache_flush_date == self.cacheflushdate) {
            if self.cache.sailings.contains_key(&cache_key) {
                // cache is up to date and has route!
                // unwrap is correct as we checked for entry first
                return Ok(self.cache.sailings.get(&cache_key).unwrap().clone());
            }
            else {
                // cache is up to date, but we don't have this route in it
                cache_is_stale = false;
            }
        }
        if cache_is_stale {
            self.cache.sailings.clear();
        }
        let now = Local::now();
        let path = format!("/schedule/{}-{}-{}/{}/{}",
                           now.year(), now.month(), now.day(), from, to);
        let schedule: Schedule = try!(self.get(path));
        self.cache.sailings.insert(cache_key, schedule.TerminalCombos[0].clone());
        Ok(schedule.TerminalCombos[0].clone())
    }
}
/// On-disk cache payload. `cache_flush_date` mirrors the server's
/// /cacheflushdate value so staleness can be detected with one comparison.
#[derive(RustcDecodable, RustcEncodable, Debug)]
struct Cache {
terminals: Vec<Terminal>,
// Keyed by "{from} {to}" terminal-id pairs (see Session::schedule).
sailings: HashMap<String, TerminalCombo>,
cache_flush_date: String,
}
impl Cache {
    /// Load the cache from `path`. Any failure -- missing file, unreadable
    /// contents, or a corrupt/incompatible JSON payload -- yields a fresh
    /// empty cache. (The old code unwrapped both the read and the decode,
    /// so a corrupt cache file crashed the program instead of self-healing.)
    fn load(path: &String) -> Cache {
        // Helper: the pristine fallback cache.
        fn empty() -> Cache {
            Cache {
                terminals: vec![],
                sailings: HashMap::new(),
                cache_flush_date: String::new(),
            }
        }
        match File::open(path) {
            Ok(mut f) => {
                let mut s = String::new();
                if f.read_to_string(&mut s).is_err() {
                    return empty();
                }
                match json::decode(&s) {
                    Ok(cache) => cache,
                    Err(_) => empty(),
                }
            }
            Err(_) => empty(),
        }
    }
}
// Field names mirror the WSDOT JSON schema, hence the non_snake_case allows.
#[allow(non_snake_case)]
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Terminal {
TerminalID: i32,
Description: String,
}
#[allow(non_snake_case)]
#[derive(RustcDecodable, RustcEncodable, Clone, Debug)]
struct SailingTime {
// Raw WSDOT date string, e.g. "/Date(1436318400000-0700)/".
DepartingTime: String,
ArrivingTime: Option<String>,
VesselName: String,
}
impl SailingTime {
// parse date strings of form "/Date(1436318400000-0700)/"
/// Departure time converted to the local timezone.
/// NOTE(review): the pattern hard-codes a '-' sign and whole-second
/// millisecond values ("...000)"), so only west-of-UTC offsets parse;
/// any other input panics via the unwraps below.
fn depart_time(&self) -> DateTime<Local> {
let re = Regex::new(r"^/Date\((\d{10})000-(\d{2})(\d{2})\)/$").unwrap();
let caps = re.captures(&self.DepartingTime).unwrap();
let epoch: i64 = caps.at(1).unwrap().parse().unwrap();
let tz_hours: i32 = caps.at(2).unwrap().parse().unwrap();
let tz_minutes: i32 = caps.at(3).unwrap().parse().unwrap();
let nd = NaiveDateTime::from_timestamp(epoch, 0);
// west(): offset west of UTC, matching the literal '-' in the pattern.
let tz = FixedOffset::west((tz_hours * 3600) + (tz_minutes * 60));
let fotz: DateTime<FixedOffset> = DateTime::from_utc(nd, tz);
fotz.with_timezone(&Local)
}
}
// One route's sailings for the day, as returned inside a Schedule.
#[allow(non_snake_case)]
#[derive(RustcDecodable, RustcEncodable, Clone, Debug)]
struct TerminalCombo {
Times: Vec<SailingTime>,
DepartingTerminalName: String,
ArrivingTerminalName: String,
}
#[allow(non_snake_case)]
#[derive(RustcDecodable, RustcEncodable, Debug)]
struct Schedule {
TerminalCombos: Vec<TerminalCombo>,
}
/// Crate-wide result alias over the unified CLI error type.
type Result<T> = result::Result<T, CliError>;
/// One variant per failure domain; BrokenReality is a free-form catch-all.
#[derive(Debug)]
enum CliError {
Log(log::SetLoggerError),
Parse(rustc_serialize::json::DecoderError),
SaveCache(rustc_serialize::json::EncoderError),
Http(hyper::error::Error),
Io(std::io::Error),
BrokenReality(String),
}
// From conversions so `try!` can lift each library error into CliError.
impl From<rustc_serialize::json::EncoderError> for CliError {
fn from(err: rustc_serialize::json::EncoderError) -> CliError {
CliError::SaveCache(err)
}
}
impl From<log::SetLoggerError> for CliError {
fn from(err: log::SetLoggerError) -> CliError {
CliError::Log(err)
}
}
impl From<hyper::error::Error> for CliError {
fn from(err: hyper::error::Error) -> CliError {
CliError::Http(err)
}
}
impl From<std::io::Error> for CliError {
fn from(err: std::io::Error) -> CliError {
CliError::Io(err)
}
}
impl From<rustc_serialize::json::DecoderError> for CliError {
fn from(err: rustc_serialize::json::DecoderError) -> CliError {
CliError::Parse(err)
}
}
impl fmt::Display for CliError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unimplemented!();
/*
TODO implement
match *self {
CliError::Io(ref err) => err.fmt(f),
CliError::Csv(ref err) => err.fmt(f),
CliError::NotFound => write!(f, "No matching cities with a \
population were found."),
}
*/
write!(f, "oops!")
}
}
impl Error for CliError {
fn description(&self) -> &str {
unimplemented!();
/*
TODO implement
match *self {
CliError::Io(ref err) => err.description(),
CliError::Csv(ref err) => err.description(),
CliError::NotFound => "not found",
}
*/
"broke and went boom"
}
}
better way to convert str to String
extern crate hyper;
extern crate rustc_serialize;
extern crate chrono;
extern crate regex;
extern crate env_logger;
#[macro_use] extern crate log;
extern crate docopt;
use std::ascii::AsciiExt;
use std::io::Read;
use std::env;
use std::collections::HashMap;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use std::fmt;
use std::error::Error;
use std::result;
use chrono::naive::datetime::NaiveDateTime;
use chrono::offset::local::Local;
use chrono::offset::fixed::FixedOffset;
use chrono::datetime::DateTime;
use chrono::Datelike;
use rustc_serialize::json;
use rustc_serialize::Decodable;
use hyper::client::Client;
use regex::Regex;
use docopt::Docopt;
// Docopt usage string; its layout defines the CLI grammar.
// Fix: "Washington" was misspelled "Washing" in the user-visible title.
static USAGE: &'static str = "
Washington State Ferry Schedules
Usage:
wsf [options] <from> <to>
wsf -h
<from> and <to> are a prefix of the departing terminal and arriving
terminal, respectively. For example 'wsf sea ba' is equivalent to
'wsf Seattle \"Bainbridge Island\"'.
Options:
-h --help Show this screen.
";
/// Docopt-decoded CLI arguments: case-insensitive prefixes of the
/// departing and arriving terminal names.
#[derive(Debug, RustcDecodable)]
struct Args {
arg_from: String,
arg_to: String,
}
/// Resolve the two terminal-name prefixes, print today's remaining sailings
/// between them, and persist the session cache.
///
/// Fixes: `s.terminals()`, `s.schedule()` and the matched terminal ids were
/// previously `unwrap`ped, so a network failure or a prefix matching no
/// terminal panicked; all of those now propagate as `CliError`.
fn run() -> Result<()> {
    try!(env_logger::init());
    let args: Args = Docopt::new(USAGE)
        .and_then(|d| d.decode())
        .unwrap_or_else(|e| e.exit());
    // Case-insensitive prefix matching against terminal descriptions.
    let from_in: &str = &args.arg_from.to_ascii_lowercase();
    let to_in: &str = &args.arg_to.to_ascii_lowercase();
    // pull in api key at *build* time from environment
    let mut s = Session::new(env!("WSDOT_API_KEY"));
    let now = Local::now();
    let mut from: Option<i32> = None;
    let mut to: Option<i32> = None;
    let terminals = try!(s.terminals());
    for terminal in terminals.iter() {
        if terminal.Description.to_ascii_lowercase().starts_with(&from_in) {
            from = Some(terminal.TerminalID);
        }
        if terminal.Description.to_ascii_lowercase().starts_with(&to_in) {
            to = Some(terminal.TerminalID);
        }
    }
    // A prefix that matches nothing is a user error, not a crash.
    let from = try!(from.ok_or_else(|| CliError::BrokenReality(format!("no terminal matches '{}'", from_in))));
    let to = try!(to.ok_or_else(|| CliError::BrokenReality(format!("no terminal matches '{}'", to_in))));
    let tc = try!(s.schedule(from, to));
    // Only sailings that have not yet departed.
    for time in tc.Times.iter() {
        if time.depart_time() > now {
            println!("{}\t{}\t{}\t{}",
                     time.depart_time().time(),
                     tc.DepartingTerminalName,
                     tc.ArrivingTerminalName,
                     time.VesselName );
        }
    }
    s.save_cache()
}
/// Run the CLI; on failure print the error and exit non-zero.
///
/// Fix: the error branch previously printed an empty string and discarded
/// `e`. Debug formatting is used because it is derived for `CliError`
/// (the hand-written `Display` impl is unimplemented and would panic).
fn main() {
    match run() {
        Ok(_) => {},
        Err(e) => {
            println!("Error: {:?}", e);
            std::process::exit(1);
        }
    }
}
/// WSDOT API session: an HTTP client plus a JSON file cache at ~/.wsf.cache.
/// `offline` is set when the startup connectivity probe fails; the session
/// then serves whatever it can from the cache alone.
struct Session {
api_key: String,
client: Client,
cache: Cache,
// Server-reported cache flush marker, compared against the cached copy.
cacheflushdate: String,
cache_path: String,
offline: bool,
}
impl Session {
/// Build a session: load the on-disk cache, then probe /cacheflushdate
/// once. A failed probe marks the session offline so later calls fall
/// back to the cache.
fn new(api_key: &str) -> Session {
let mut cache_path: PathBuf = env::home_dir().unwrap();
cache_path.push(".wsf.cache");
let cache_path = format!("{}", cache_path.display());
let mut s = Session {
api_key: api_key.to_string(),
client: Client::new(),
cache: Cache::load(&cache_path),
cacheflushdate: String::new(),
cache_path: cache_path,
offline: false,
};
// Probe connectivity; a success also records the server's flush marker.
s.offline = match s.get::<String>("/cacheflushdate".to_owned()) {
Ok(cfd) => {
s.cacheflushdate = cfd;
false
},
Err(_) => true,
};
s
}
/// Persist the cache (terminals + sailings + flush marker) as JSON.
fn save_cache(&mut self) -> Result<()> {
self.cache.cache_flush_date = self.cacheflushdate.clone();
let mut f = try!(File::create(&self.cache_path));
let encoded = try!(json::encode(&self.cache));
Ok(try!(f.write_all(encoded.as_bytes())))
}
/// GET a REST path and JSON-decode the response body into T.
/// Panics (assert) if the server answers with a non-200 status.
fn get<T: Decodable>(&self, path: String) -> Result<T> {
let url = &format!("http://www.wsdot.wa.gov/ferries/api/schedule/rest{}?apiaccesscode={}",
path,
self.api_key);
let mut res = try!(self.client.get(url).send());
assert_eq!(res.status, hyper::Ok);
let mut buf = String::new();
try!(res.read_to_string(&mut buf));
Ok(try!(json::decode::<T>(&buf)))
}
/// All terminals: served from cache when offline or the cache is
/// current, otherwise fetched fresh and written back to the cache.
fn terminals(&mut self) -> Result<Vec<Terminal>> {
if self.offline || (self.cache.cache_flush_date == self.cacheflushdate) {
return Ok(self.cache.terminals.clone())
}
else {
let now = Local::today();
let path = format!("/terminals/{}-{}-{}", now.year(), now.month(), now.day());
let routes: Vec<Terminal> = try!(self.get(path));
self.cache.terminals = routes.clone();
return Ok(routes);
}
}
/// Today's sailings between two terminal ids, served from cache when
/// possible. When the server-side cache has flushed, all locally cached
/// sailings are invalidated before refetching.
fn schedule(&mut self, from: i32, to: i32) -> Result<TerminalCombo> {
let mut cache_is_stale = true;
let cache_key = format!("{} {}", from, to);
if self.offline || (self.cache.cache_flush_date == self.cacheflushdate) {
if self.cache.sailings.contains_key(&cache_key) {
// cache is up to date and has route!
// unwrap is correct as we checked for enry first
return Ok(self.cache.sailings.get(&cache_key).unwrap().clone());
}
else {
// cache is up to date, but we don't have this route in it
cache_is_stale = false;
}
}
if cache_is_stale {
self.cache.sailings.clear();
}
let now = Local::now();
let path = format!("/schedule/{}-{}-{}/{}/{}",
now.year(), now.month(), now.day(), from, to);
let schedule: Schedule = try!(self.get(path));
self.cache.sailings.insert(cache_key, schedule.TerminalCombos[0].clone());
Ok(schedule.TerminalCombos[0].clone())
}
}
/// On-disk cache payload. `cache_flush_date` mirrors the server's
/// /cacheflushdate value so staleness can be detected with one comparison.
#[derive(RustcDecodable, RustcEncodable, Debug)]
struct Cache {
terminals: Vec<Terminal>,
// Keyed by "{from} {to}" terminal-id pairs (see Session::schedule).
sailings: HashMap<String, TerminalCombo>,
cache_flush_date: String,
}
impl Cache {
    /// Load the cache from `path`. Any failure -- missing file, unreadable
    /// contents, or a corrupt/incompatible JSON payload -- yields a fresh
    /// empty cache. (The old code unwrapped both the read and the decode,
    /// so a corrupt cache file crashed the program instead of self-healing.)
    fn load(path: &String) -> Cache {
        // Helper: the pristine fallback cache.
        fn empty() -> Cache {
            Cache {
                terminals: vec![],
                sailings: HashMap::new(),
                cache_flush_date: String::new(),
            }
        }
        match File::open(path) {
            Ok(mut f) => {
                let mut s = String::new();
                if f.read_to_string(&mut s).is_err() {
                    return empty();
                }
                match json::decode(&s) {
                    Ok(cache) => cache,
                    Err(_) => empty(),
                }
            }
            Err(_) => empty(),
        }
    }
}
// Field names mirror the WSDOT JSON schema, hence the non_snake_case allows.
#[allow(non_snake_case)]
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Terminal {
TerminalID: i32,
Description: String,
}
#[allow(non_snake_case)]
#[derive(RustcDecodable, RustcEncodable, Clone, Debug)]
struct SailingTime {
// Raw WSDOT date string, e.g. "/Date(1436318400000-0700)/".
DepartingTime: String,
ArrivingTime: Option<String>,
VesselName: String,
}
impl SailingTime {
// parse date strings of form "/Date(1436318400000-0700)/"
/// Departure time converted to the local timezone.
/// NOTE(review): the pattern hard-codes a '-' sign and whole-second
/// millisecond values ("...000)"), so only west-of-UTC offsets parse;
/// any other input panics via the unwraps below.
fn depart_time(&self) -> DateTime<Local> {
let re = Regex::new(r"^/Date\((\d{10})000-(\d{2})(\d{2})\)/$").unwrap();
let caps = re.captures(&self.DepartingTime).unwrap();
let epoch: i64 = caps.at(1).unwrap().parse().unwrap();
let tz_hours: i32 = caps.at(2).unwrap().parse().unwrap();
let tz_minutes: i32 = caps.at(3).unwrap().parse().unwrap();
let nd = NaiveDateTime::from_timestamp(epoch, 0);
// west(): offset west of UTC, matching the literal '-' in the pattern.
let tz = FixedOffset::west((tz_hours * 3600) + (tz_minutes * 60));
let fotz: DateTime<FixedOffset> = DateTime::from_utc(nd, tz);
fotz.with_timezone(&Local)
}
}
// One route's sailings for the day, as returned inside a Schedule.
#[allow(non_snake_case)]
#[derive(RustcDecodable, RustcEncodable, Clone, Debug)]
struct TerminalCombo {
Times: Vec<SailingTime>,
DepartingTerminalName: String,
ArrivingTerminalName: String,
}
#[allow(non_snake_case)]
#[derive(RustcDecodable, RustcEncodable, Debug)]
struct Schedule {
TerminalCombos: Vec<TerminalCombo>,
}
/// Crate-wide result alias over the unified CLI error type.
type Result<T> = result::Result<T, CliError>;
/// One variant per failure domain; BrokenReality is a free-form catch-all.
#[derive(Debug)]
enum CliError {
Log(log::SetLoggerError),
Parse(rustc_serialize::json::DecoderError),
SaveCache(rustc_serialize::json::EncoderError),
Http(hyper::error::Error),
Io(std::io::Error),
BrokenReality(String),
}
// From conversions so `try!` can lift each library error into CliError.
impl From<rustc_serialize::json::EncoderError> for CliError {
fn from(err: rustc_serialize::json::EncoderError) -> CliError {
CliError::SaveCache(err)
}
}
impl From<log::SetLoggerError> for CliError {
fn from(err: log::SetLoggerError) -> CliError {
CliError::Log(err)
}
}
impl From<hyper::error::Error> for CliError {
fn from(err: hyper::error::Error) -> CliError {
CliError::Http(err)
}
}
impl From<std::io::Error> for CliError {
fn from(err: std::io::Error) -> CliError {
CliError::Io(err)
}
}
impl From<rustc_serialize::json::DecoderError> for CliError {
fn from(err: rustc_serialize::json::DecoderError) -> CliError {
CliError::Parse(err)
}
}
impl fmt::Display for CliError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unimplemented!();
/*
TODO implement
match *self {
CliError::Io(ref err) => err.fmt(f),
CliError::Csv(ref err) => err.fmt(f),
CliError::NotFound => write!(f, "No matching cities with a \
population were found."),
}
*/
write!(f, "oops!")
}
}
impl Error for CliError {
fn description(&self) -> &str {
unimplemented!();
/*
TODO implement
match *self {
CliError::Io(ref err) => err.description(),
CliError::Csv(ref err) => err.description(),
CliError::NotFound => "not found",
}
*/
"broke and went boom"
}
}
|
#![recursion_limit="100"]
#[macro_use]
extern crate version;
#[macro_use]
extern crate debug_unreachable;
#[macro_use]
extern crate chomp;
extern crate chrono;
extern crate colored;
extern crate clap;
// TODO: clean up imports
use std::path::{Path, PathBuf};
use std::fs::{File, Metadata};
use std::collections::{HashMap, HashSet, BTreeMap};
use std::ascii::{AsciiExt};
use std::env;
use std::process;
use std::marker::PhantomData;
use colored::*;
use clap::{Arg, App, SubCommand};
// use chrono::*;
use chrono::offset::utc::UTC;
use chrono::offset::{TimeZone, Offset};
use chrono::offset::local::Local;
use chrono::datetime::DateTime;
use chrono::naive::datetime::NaiveDateTime;
use chrono::naive::date::NaiveDate;
use chrono::naive::time::NaiveTime;
use chrono::duration::Duration;
// TODO: reorg this
use chomp::{SimpleResult, Error, ParseError, ParseResult};
use chomp::primitives::{InputBuffer, IntoInner, State};
use chomp::{Input, U8Result, parse_only};
use chomp::buffer::{Source, Stream, StreamError};
use chomp::{take_while1, token};
use chomp::parsers::{string, eof, any, satisfy};
use chomp::combinators::{or, many_till, many, many1, skip_many, skip_many1, look_ahead, option, sep_by};
use chomp::ascii::{is_whitespace, decimal, digit};
// use chomp::*;
fn main() {
let version: &str = &format!("v{} (semver.org)", version!());
let cmd_matches = App::new("gtdtxt")
.version(version) // semver semantics
.about("CLI app to parse a human-readable text file for managing GTD workflow")
.author("Alberto Leal <mailforalberto@gmail.com> (github.com/dashed)")
.arg(
Arg::with_name("due-within")
.next_line_help(true)
.help("Display tasks due within a time duration.{n}Example: 2 days 4 hrs")
.short("w")
.long("due-within")
.required(false)
.takes_value(true)
.multiple(false)
)
.arg(
Arg::with_name("hide-by-default")
.help("Hide tasks by default. Usage of flags / options are necessary to display tasks.")
.short("x")
.long("hide-by-default")
.required(false)
)
.arg(
Arg::with_name("show-overdue")
.help("Show overdue tasks. Used with --hide-by-default")
.short("a")
.long("show-overdue")
.required(false)
)
.arg(
Arg::with_name("show-incomplete")
.help("Show incomplete tasks. Used with --hide-by-default")
.short("b")
.long("show-incomplete")
.required(false)
)
.arg(
Arg::with_name("show-flagged")
.help("Show flagged tasks. Used with --hide-by-default")
.short("e")
.long("show-flagged")
.required(false)
)
.arg(
Arg::with_name("show-nonproject-tasks")
.help("Show tasks that are not in a project. Used with --hide-by-default")
.short("g")
.long("show-nonproject-tasks")
.required(false)
)
.arg(
Arg::with_name("show-project-tasks")
.help("Show tasks that are not in a project. Used with --hide-by-default")
.short("j")
.long("show-project-tasks")
.required(false)
)
.arg(
Arg::with_name("hide-overdue")
.help("Hide overdue tasks.")
.short("o")
.long("hide-overdue")
.required(false)
)
.arg(
Arg::with_name("show-done")
.help("Show completed tasks.")
.short("d")
.long("show-done")
.required(false)
)
.arg(
Arg::with_name("show-deferred")
.help("Reveal deferred tasks.")
.short("r")
.long("show-deferred")
.required(false)
)
.arg(
Arg::with_name("show-incubate")
.help("Show incubated tasks.")
.short("i")
.long("show-incubate")
.required(false)
)
.arg(
Arg::with_name("hide-incomplete")
.help("Hide incomplete tasks.")
.short("I")
.long("hide-incomplete")
.required(false)
)
.arg(
Arg::with_name("validate")
.help("Validate file and suppress any output.")
.short("q")
.long("validate")
.required(false)
)
.arg(
Arg::with_name("hide-nonproject-tasks")
.help("Hide tasks not belonging to a project.")
.short("n")
.long("hide-nonproject-tasks")
.required(false)
)
.arg(
Arg::with_name("show-only-flagged")
.help("Show only flagged tasks.")
.short("f")
.long("show-only-flagged")
.required(false)
)
.arg(
Arg::with_name("hide-flagged")
.help("Hide flagged tasks.")
.short("F")
.long("hide-flagged")
.required(false)
)
.arg(
Arg::with_name("sort-overdue-by-priority")
.help("Sort overdue tasks by priority.")
.short("z")
.long("sort-overdue-by-priority")
.required(false)
)
.arg(
Arg::with_name("filter-by-project")
.next_line_help(true)
.help("Filter using given project path.{n}Example: path / to / project")
.short("p")
.long("filter-by-project")
.required(false)
.takes_value(true)
.multiple(true)
.validator(|path| {
let path = path.trim();
if path.len() <= 0 {
return Err(String::from("invalid project path"));
}
return Ok(());
})
)
.arg(
Arg::with_name("show-with-project")
.next_line_help(true)
.help("Show tasks with given project path.\
Used with --hide-by-default{n}\
Example: path / to / project")
.short("k")
.long("show-with-project")
.required(false)
.takes_value(true)
.multiple(true)
.validator(|path| {
let path = path.trim();
if path.len() <= 0 {
return Err(String::from("invalid project path"));
}
return Ok(());
})
)
.arg(
Arg::with_name("filter-by-tag")
.next_line_help(true)
.help("Filter using given tag or list of comma separated tags.{n}Example: chore, art, to watch")
.short("t")
.long("filter-by-tag")
.required(false)
.takes_value(true)
.multiple(true)
.validator(|tag| {
let tag = tag.trim();
if tag.len() <= 0 {
return Err(String::from("invalid tag"));
}
return Ok(());
})
)
.arg(
Arg::with_name("filter-by-context")
.next_line_help(true)
.help("Filter using given context or list of comma separated contexts.{n}Example: phone, computer, internet connection, office")
.short("c")
.long("filter-by-context")
.required(false)
.takes_value(true)
.multiple(true)
.validator(|context| {
let context = context.trim();
if context.len() <= 0 {
return Err(String::from("invalid context"));
}
return Ok(());
})
)
.arg(
Arg::with_name("path to gtdtxt file")
.help("Path to gtdtxt file.")
.required(true)
.index(1)
.validator(|gtdtxt_file| {
let gtdtxt_file = gtdtxt_file.trim();
if gtdtxt_file.len() <= 0 {
return Err(String::from("invalid path to file"));
} else {
return Ok(());
}
})
).get_matches();
let path_to_file: String = cmd_matches.value_of("path to gtdtxt file")
.unwrap()
.trim()
.to_string();
let base_root = format!("{}", env::current_dir().unwrap().display());
let mut journal = GTD::new(base_root);
// due within filter
if let Some(due_within) = cmd_matches.value_of("due-within") {
let due_within = due_within.trim();
match parse_only(|i| multiple_time_range(i), due_within.as_bytes()) {
Ok(mut result) => {
journal.due_within = Duration::seconds(result as i64);
},
Err(e) => {
println!("Unable to parse value to option `--due-within`: {}", due_within);
process::exit(1);
// TODO: refactor
// panic!("{:?}", e);
}
}
}
// project path filters
if let Some(project_paths) = cmd_matches.values_of("filter-by-project") {
for project_path in project_paths {
match parse_only(|i| string_list(i, b'/'), project_path.as_bytes()) {
Ok(mut result) => {
journal.add_project_filter(&mut result);
},
Err(e) => {
// TODO: refactor
panic!("{:?}", e);
}
}
}
}
if let Some(project_paths) = cmd_matches.values_of("show-with-project") {
for project_path in project_paths {
match parse_only(|i| string_list(i, b'/'), project_path.as_bytes()) {
Ok(mut result) => {
journal.add_project_whitelist(&mut result);
},
Err(e) => {
// TODO: refactor
panic!("{:?}", e);
}
}
}
}
// tag filters
if let Some(tags) = cmd_matches.values_of("filter-by-tag") {
for tag in tags {
match parse_only(|i| string_list(i, b','), tag.as_bytes()) {
Ok(mut result) => {
if result.len() > 0 {
journal.filter_by_tags = true;
}
journal.add_tag_filters(result);
},
Err(e) => {
// TODO: refactor
panic!("{:?}", e);
}
}
}
}
// context filters
if let Some(contexts) = cmd_matches.values_of("filter-by-context") {
for context in contexts {
match parse_only(|i| string_list(i, b','), context.as_bytes()) {
Ok(mut result) => {
if result.len() > 0 {
journal.filter_by_contexts = true;
}
journal.add_context_filters(result);
},
Err(e) => {
// TODO: refactor
panic!("{:?}", e);
}
}
}
}
// flags
journal.sort_overdue_by_priority = cmd_matches.is_present("sort-overdue-by-priority");
journal.hide_flagged = cmd_matches.is_present("hide-flagged");
journal.show_only_flagged = cmd_matches.is_present("show-only-flagged");
journal.show_done = cmd_matches.is_present("show-done");
journal.show_incubate = cmd_matches.is_present("show-incubate");
journal.show_deferred = cmd_matches.is_present("show-deferred");
journal.hide_overdue = cmd_matches.is_present("hide-overdue");
journal.hide_nonproject_tasks = cmd_matches.is_present("hide-nonproject-tasks");
journal.hide_incomplete = cmd_matches.is_present("hide-incomplete");
journal.hide_tasks_by_default = cmd_matches.is_present("hide-by-default");
journal.show_overdue = cmd_matches.is_present("show-overdue");
journal.show_incomplete = cmd_matches.is_present("show-incomplete");
journal.show_flagged = cmd_matches.is_present("show-flagged");
journal.show_nonproject_tasks = cmd_matches.is_present("show-nonproject-tasks");
journal.show_project_tasks = cmd_matches.is_present("show-project-tasks");
parse_file(None, path_to_file.clone(), &mut journal);
let journal: GTD = journal;
if cmd_matches.is_present("validate") {
println!("{:>20} {}", "Tasks found".purple(), format!("{}", journal.tasks.len()).bold().purple());
println!("File validated.");
return;
}
// Display tasks
let mut display_divider = false;
if journal.due_within.num_seconds() > 0 {
println!("{:>11} {} {}",
"",
"Displaying tasks due within".bold().white(),
Timerange::new(journal.due_within.num_seconds() as u64).print(10).white().bold()
);
display_divider = true;
}
if journal.show_only_flagged {
println!("{:>11} {}",
"",
"Displaying only flagged tasks.".bold().white()
);
display_divider = true;
} else if journal.hide_flagged {
println!("{:>11} {}",
"",
"Hiding flagged tasks.".bold().white()
);
display_divider = true;
}
if display_divider {
println!("");
}
let mut print_line: bool = false;
let mut num_displayed: u32 = 0;
let mut num_overdue = 0;
let mut num_deferred = 0;
let mut num_done = 0;
// display tasks that are overdue
for (_, bucket) in journal.overdue.iter() {
if bucket.len() <= 0 {
continue;
}
num_overdue += bucket.len();
if !journal.hide_overdue {
if print_line {
println!("");
}
num_displayed = num_displayed + print_vector_of_tasks(&journal, bucket);
if !print_line && num_displayed > 0 {
print_line = true;
}
}
}
// display inbox ordered by priority.
// incubated tasks are not included
for (_, inbox) in journal.inbox.iter() {
if inbox.len() <= 0 {
continue;
}
if print_line {
println!("");
}
num_displayed = num_displayed + print_vector_of_tasks(&journal, inbox);
if !print_line && num_displayed > 0 {
print_line = true;
}
}
// display deferred tasks ordered by priority
for (_, deferred) in journal.deferred.iter() {
if deferred.len() <= 0 {
continue;
}
num_deferred += deferred.len();
if journal.show_deferred || journal.hide_tasks_by_default {
if print_line {
println!("");
}
num_displayed = num_displayed + print_vector_of_tasks(&journal, deferred);
if !print_line && num_displayed > 0 {
print_line = true;
}
}
}
// display completed tasks
for (_, done) in journal.done.iter() {
if done.len() <= 0 {
continue;
}
num_done += done.len();
if journal.show_done || journal.hide_tasks_by_default {
if print_line {
println!("");
}
num_displayed = num_displayed + print_vector_of_tasks(&journal, done);
if !print_line && num_displayed > 0 {
print_line = true;
}
}
}
if num_displayed > 0 {
println!("");
}
println!(" {}",
"Tasks completed in the past week (tracked using `done:`)".purple().bold()
);
let mut days_ago = 0;
loop {
print!("{:>11} {}",
format!("{} {}", days_ago, "days ago").purple(),
format!("|").purple()
);
if days_ago >= 7 {
break;
}
days_ago = days_ago + 1;
}
println!("");
let mut days_ago = 0;
loop {
let items_num = match journal.pulse.get(&days_ago) {
None => 0,
Some(bucket) => {
(*bucket).len()
}
};
print!("{:>11} {}",
format!("{}", items_num).bold().purple(),
format!("|").purple()
);
if days_ago >= 7 {
break;
}
days_ago = days_ago + 1;
}
println!("");
println!("");
println!("{:>20} {}",
"Tasks overdue".purple(),
format!("{}", num_overdue).bold().purple()
);
println!("{:>20} {}",
"Tasks deferred".purple(),
format!("{}", num_deferred).bold().purple()
);
println!("{:>20} {}",
"Tasks complete".purple(),
format!("{}", num_done).bold().purple()
);
println!("{:>20} {}",
"Tasks found".purple(),
format!("{}", journal.tasks.len()).bold().purple()
);
println!("{:>20} {}",
"Tasks not displayed".purple(),
format!("{}", journal.tasks.len() as u32 - num_displayed).bold().purple()
);
println!("{:>20} {}",
"Tasks displayed".purple(),
format!("{}", num_displayed).bold().purple()
);
println!("{:>20} {}",
"Executed at".purple(),
format!("{}", Local::now().naive_local().format("%B %-d, %Y %-l:%M:%S %p")).purple()
);
}
/* printers */
fn print_vector_of_tasks(journal: >D, inbox: &Vec<i32>) -> u32 {
let mut print_line: bool = false;
let mut num_displayed: u32 = 0;
for task_id in inbox {
if print_line {
println!("");
}
let task: &Task = journal.tasks.get(task_id).unwrap();
print_task(journal, task);
num_displayed = num_displayed + 1;
if !print_line {
print_line = true;
}
}
num_displayed
}
fn print_task(journal: >D, task: &Task) {
if task.flag && !journal.show_only_flagged {
println!("{:>11} ",
"Flagged".bold().yellow()
);
}
match task.title {
None => {
println!("Missing task title (i.e. `task: <title>`) in task block found {}",
task.debug_range_string()
);
process::exit(1);
},
Some(ref title) => {
println!("{:>11} {}", "Task:".blue().bold(), title);
}
}
match task.status {
None => {},
Some(ref status) => {
let status_string = match status {
&Status::Done => {
"Done".green()
},
&Status::NotDone => {
"Not Done".red().bold()
},
&Status::Incubate => {
"Incubate".purple()
}
};
println!("{:>11} {}", "Status:".bold().blue(), status_string);
}
}
match task.created_at {
None => {},
Some(ref created_at) => {
let rel_time = relative_time(created_at.timestamp(), Local::now().naive_local().timestamp());
let rel_time = match rel_time {
RelativeTime::Now(_, rel_time) => {
format!("({})", rel_time)
},
RelativeTime::Past(_, rel_time) => {
format!("({})", rel_time)
},
RelativeTime::Future(_, rel_time) => {
format!("({})", rel_time)
}
};
println!("{:>11} {} {}",
"Added at:".bold().blue(),
created_at.format("%B %-d, %Y %-l:%M %p"),
rel_time
);
}
}
match task.done_at {
None => {},
Some(ref done_at) => {
let rel_time = relative_time(done_at.timestamp(), Local::now().naive_local().timestamp());
let rel_time = match rel_time {
RelativeTime::Now(_, rel_time) => {
format!("({})", rel_time)
},
RelativeTime::Past(_, rel_time) => {
format!("({})", rel_time)
},
RelativeTime::Future(_, rel_time) => {
format!("({})", rel_time)
}
};
println!("{:>11} {} {}",
"Done at:".bold().blue(),
done_at.format("%B %-d, %Y %-l:%M %p"),
rel_time
);
}
}
match task.defer {
None => {},
Some(ref defer) => {
match defer {
&Defer::Forever => {
println!("{:>11} {}",
"Defer till:".bold().blue(),
"Forever".bold().green()
);
},
&Defer::Until(defer_till) => {
let rel_time = relative_time(defer_till.timestamp(), Local::now().naive_local().timestamp());
let rel_time = match rel_time {
RelativeTime::Now(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.red()
},
RelativeTime::Past(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().red()
},
RelativeTime::Future(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().green()
}
};
println!("{:>11} {} {}",
"Defer till:".bold().blue(),
defer_till.format("%B %-d, %Y %-l:%M %p"),
rel_time
);
}
}
}
}
match task.due_at {
None => {},
Some(ref due_at) => {
let rel_time = relative_time(due_at.timestamp(), Local::now().naive_local().timestamp());
let rel_time = match rel_time {
RelativeTime::Now(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.red()
},
RelativeTime::Past(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().red()
},
RelativeTime::Future(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().green()
}
};
println!("{:>11} {} {}",
"Due at:".bold().blue(),
due_at.format("%B %-d, %Y %-l:%M %p"),
rel_time
);
}
}
match task.source_file {
None => unsafe { debug_unreachable!() },
Some(ref path) => {
let path = match Path::new(path).strip_prefix(&journal.base_root) {
Err(_) => {
format!("{}", path)
},
Ok(path) => {
format!("./{}", path.display())
}
};
println!("{:>11} {}",
"File:".bold().blue(),
path
);
}
};
println!("{:>11} Lines {} to {}",
"Located:".bold().blue(),
task.task_block_range_start,
task.task_block_range_end
);
match task.tags {
None => {},
Some(ref tags) => {
println!("{:>11} {}",
"Tags:".bold().blue(),
tags.join(", ")
);
}
}
match task.contexts {
None => {},
Some(ref contexts) => {
println!("{:>11} {}",
"Contexts:".bold().blue(),
contexts.join(", ")
);
}
}
match task.project {
None => {},
Some(ref project_path) => {
println!("{:>11} {}",
"Project:".bold().blue(),
project_path.join(" / ")
);
}
}
if task.time > 0 {
println!("{:>11} {}",
"Time spent:".bold().blue(),
Timerange::new(task.time).print(2)
);
}
if task.has_chain() {
let chain_at: NaiveDateTime = task.get_chain();
let rel_time = relative_time(chain_at.timestamp(), Local::now().naive_local().timestamp());
let rel_time = match rel_time {
RelativeTime::Now(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.red()
},
RelativeTime::Past(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().red()
},
RelativeTime::Future(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().green()
}
};
println!("{:>11} {} {}",
"Last chain:".bold().blue(),
chain_at.format("%B %-d, %Y %-l:%M %p"),
rel_time
);
}
if task.priority != 0 {
println!("{:>11} {}", "Priority:".bold().blue(), task.priority);
}
match task.note {
None => {},
Some(ref note) => {
println!("{:>11} {}",
"Notes:".bold().blue(),
note
);
}
}
}
/* data structures */

// One node of the project-path filter trie: an interior node carrying its
// children, or a leaf marking the end of a filter path.
#[derive(Debug)]
enum NodeType {
    Node(Tree),
    Leaf
}

// index project filters
// Trie level mapping one project-path segment to its subtree.
type Tree = HashMap<String, NodeType>;
// Completion state parsed from a task block's `status:` line.
#[derive(Debug)]
enum Status {
    Done,
    Incubate,
    NotDone
}
// One parsed task block from a gtdtxt file. All `Option` fields are absent
// until the corresponding line of the block has been parsed.
#[derive(Debug)]
struct Task {
    /* debug*/
    // 1-based line numbers delimiting this task block in its source file.
    task_block_range_start: u64,
    task_block_range_end: u64,

    /* props */
    title: Option<String>,
    note: Option<String>,
    created_at: Option<NaiveDateTime>,
    done_at: Option<NaiveDateTime>,
    // `chain:` datetimes, keyed in ascending order; the value is unused.
    chains: Option<BTreeMap<NaiveDateTime, bool>>,
    due_at: Option<NaiveDateTime>,
    defer: Option<Defer>,
    status: Option<Status>,
    // project path segments, e.g. ["path", "to", "project"]
    project: Option<Vec<String>>,
    contexts: Option<Vec<String>>,
    tags: Option<Vec<String>>,
    priority: i64,
    // accumulated `time:` entries (unit defined by the parser — presumably
    // seconds, since it is fed to Timerange::new; TODO confirm)
    time: u64,
    flag: bool,
    // canonicalized path of the file this task was parsed from
    source_file: Option<String>
}
impl Task {
    // Create an empty task whose block starts (and, until extended by the
    // parser, ends) at the given 1-based line number.
    fn new(task_block_range_start: u64) -> Task {
        Task {
            task_block_range_start: task_block_range_start,
            task_block_range_end: task_block_range_start,

            /* props */
            title: None,
            note: None,
            created_at: None,
            done_at: None,
            chains: None,
            due_at: None,
            defer: None,
            status: None,
            project: None,
            contexts: None,
            tags: None,
            priority: 0,
            time: 0,
            flag: false,
            source_file: None
        }
    }

    // True iff the task's status is explicitly `done`.
    fn is_done(&self) -> bool {
        // Unit-variant patterns bind nothing, so matching the borrowed place
        // by value is fine here.
        match self.status {
            Some(Status::Done) => true,
            _ => false
        }
    }

    // True iff at least one `chain:` datetime has been recorded.
    fn has_chain(&self) -> bool {
        match self.chains {
            None => false,
            Some(ref tree) => !tree.is_empty()
        }
    }

    // Most recent `chain:` datetime. Must only be called after `has_chain()`
    // returned true; otherwise this hits a debug-unreachable branch / panics
    // on the empty map.
    fn get_chain(&self) -> NaiveDateTime {
        match self.chains {
            None => unsafe { debug_unreachable!() },
            Some(ref tree) => {
                // BTreeMap iterates in ascending key order, so the last key
                // is the latest chain datetime.
                // see: http://stackoverflow.com/a/33699340/412627
                let (key, _) = tree.iter().next_back().unwrap();
                key.clone()
            }
        }
    }

    // Human-readable location of this task block, for error messages.
    fn debug_range_string(&self) -> String {
        if self.task_block_range_start == self.task_block_range_end {
            return format!("on line {}", self.task_block_range_start);
        }
        format!("between lines {} and {}",
            self.task_block_range_start,
            self.task_block_range_end
        )
    }
}
// The journal: all parsed tasks plus the display/filter configuration taken
// from the command line. The BTreeMap buckets keep tasks grouped by encoded
// priority (or by time-to-due for `overdue`) in display order.
#[derive(Debug)]
struct GTD {
    /* debug */
    // the line of the last task block line parsed
    previous_task_block_line: u64,

    /* flag/switches */
    hide_flagged: bool,
    show_only_flagged: bool,
    show_done: bool,
    show_incubate: bool,
    show_deferred: bool,
    hide_overdue: bool,
    hide_nonproject_tasks: bool,
    hide_incomplete: bool,
    // trie of project paths to exclude (--filter-by-project)
    project_filter: Tree,
    // trie of project paths to force-show (--show-with-project)
    project_whitelist: Tree,
    sort_overdue_by_priority: bool,
    filter_by_tags: bool,
    filter_by_contexts: bool,
    // tasks due within this window are treated as overdue (--due-within)
    due_within: Duration,
    hide_tasks_by_default: bool,
    show_overdue: bool,
    show_incomplete: bool,
    show_flagged: bool,
    show_nonproject_tasks: bool,
    show_project_tasks: bool,

    /* data */
    // working directory the program started in; used to relativize paths
    base_root: String,
    // track files opened
    opened_files: HashSet<String>,
    // path to file -> vector of task ids
    files_with_completed_tasks: HashMap<String, Vec<i32>>,
    // days-ago (0..=7) -> ids of tasks completed that day
    pulse: HashMap<i64, Vec<i32>>,
    // tag filter set (--filter-by-tag)
    tags: HashSet<String>,
    // context filter set (--filter-by-context)
    contexts: HashSet<String>,
    // lookup table for tasks
    tasks: HashMap<i32, Task>,

    // this contains any tasks that are overdue
    // timestamp difference -> task id
    overdue: BTreeMap<i64, Vec<i32>>,

    // this contains any tasks that are either due soon
    // timestamp difference -> task id
    // due_soon: BTreeMap<i64, Vec<i32>>,

    // inbox contain any tasks that do not have a project
    // priority -> vector of task ids ordered by recent appearance
    inbox: BTreeMap<i64, Vec<i32>>,

    // this contains any tasks that are inactive
    // priority -> vector of task ids ordered by recent appearance
    deferred: BTreeMap<i64, Vec<i32>>,

    // this contains any tasks that are completed
    // priority -> vector of task ids ordered by recent appearance
    done: BTreeMap<i64, Vec<i32>>
}
impl GTD {
    // Construct an empty journal rooted at `base_root` (used to relativize
    // file paths in output). Priority-0 buckets are pre-created for the
    // inbox/done/deferred maps so the display loops always have them.
    fn new(base_root: String) -> GTD {
        let mut inbox = BTreeMap::new();
        // inbox at priority 0
        inbox.insert(0, Vec::new());
        let inbox = inbox;

        let mut done = BTreeMap::new();
        // done bucket at priority 0
        done.insert(0, Vec::new());
        let done = done;

        let mut deferred = BTreeMap::new();
        // deferred bucket at priority 0
        deferred.insert(0, Vec::new());
        let deferred = deferred;

        GTD {
            /* error output */
            previous_task_block_line: 0,

            /* options */
            hide_flagged: false,
            show_only_flagged: false,
            show_done: false,
            show_incubate: false,
            show_deferred: false,
            hide_overdue: false,
            hide_nonproject_tasks: false,
            hide_incomplete: false,
            project_filter: HashMap::new(),
            project_whitelist: HashMap::new(),
            sort_overdue_by_priority: false,
            filter_by_tags: false,
            filter_by_contexts: false,
            due_within: Duration::seconds(0),
            hide_tasks_by_default: false,
            show_overdue: false,
            show_incomplete: false,
            show_flagged: false,
            show_nonproject_tasks: false,
            show_project_tasks: false,

            /* data */
            base_root: base_root,
            opened_files: HashSet::new(),
            files_with_completed_tasks: HashMap::new(),
            pulse: HashMap::new(),
            tags: HashSet::new(),
            contexts: HashSet::new(),
            tasks: HashMap::new(),
            inbox: inbox,
            done: done,
            deferred: deferred,
            overdue: BTreeMap::new()
        }
    }

    // Register tags from one --filter-by-tag occurrence.
    fn add_tag_filters(&mut self, tags: Vec<String>) {
        for tag in tags {
            self.tags.insert(tag);
        }
    }

    // True iff any of the task's tags is in the tag filter set.
    fn have_tags(&mut self, tags: &Vec<String>) -> bool {
        for tag in tags {
            if self.tags.contains(tag) {
                return true;
            }
        }
        return false;
    }

    // Register contexts from one --filter-by-context occurrence.
    fn add_context_filters(&mut self, contexts: Vec<String>) {
        for context in contexts {
            self.contexts.insert(context);
        }
    }

    // True iff any of the task's contexts is in the context filter set.
    fn have_contexts(&mut self, contexts: &Vec<String>) -> bool {
        for context in contexts {
            if self.contexts.contains(context) {
                return true;
            }
        }
        return false;
    }

    // Index a project path (segments) into the exclusion trie.
    fn add_project_filter(&mut self, path: &mut Vec<String>) {
        traverse(path, &mut self.project_filter);
    }

    // Index a project path (segments) into the whitelist trie.
    fn add_project_whitelist(&mut self, path: &mut Vec<String>) {
        traverse(path, &mut self.project_whitelist);
    }

    fn has_project_filters(&mut self) -> bool {
        self.project_filter.len() > 0
    }

    fn has_project_whitelist(&mut self) -> bool {
        self.project_whitelist.len() > 0
    }

    // True iff the task's project path matches (a prefix in) the filter trie.
    fn should_filter_project(&mut self, path: &Vec<String>) -> bool {
        return subpath_exists_in_tree(&(self.project_filter), path);
    }

    // True iff the task's project path matches (a prefix in) the whitelist trie.
    fn should_whitelist_project(&mut self, path: &Vec<String>) -> bool {
        return subpath_exists_in_tree(&(self.project_whitelist), path);
    }

    // Validate a freshly parsed task, update bookkeeping (pulse, per-file
    // completion tracking), sort it into a display bucket, and store it in
    // the lookup table. Exits the process on invalid tasks.
    fn add_task(&mut self, task: Task) {
        // TODO: is this the best placement for this?
        // Close the block range at the last task-block line seen by the parser.
        let mut task = task;
        task.task_block_range_end = self.previous_task_block_line;
        let task = task;

        // validation
        if task.title.is_none() {
            println!("Missing task title (i.e. `task: <title>`) in task block found {}",
                task.debug_range_string()
            );
            process::exit(1);
        }

        let new_id = self.next_task_id();

        match task.done_at {
            None => {},
            Some(ref done_at) => {
                // A `done:` datetime without `status: done` is an error.
                if !task.is_done() {
                    println!("In file: {}", task.source_file.as_ref().unwrap());
                    println!("Task is incorrectly given a `done` datetime found at {}",
                        task.debug_range_string()
                    );
                    println!("Mayhaps you forgot to add: 'status: done'");
                    process::exit(1);
                } else {
                    self.add_to_pulse(done_at, new_id);
                }
            }
        };

        // track completed task by its source file
        match task.status {
            None => {},
            Some(ref status) => {
                match status {
                    &Status::Done => {
                        match task.source_file {
                            None => unsafe { debug_unreachable!() },
                            Some(ref source_file) => {
                                match self.files_with_completed_tasks.get_mut(source_file) {
                                    None => unsafe { debug_unreachable!() },
                                    Some(bucket) => {
                                        (*bucket).push(new_id);
                                    }
                                }
                            }
                        }
                    },
                    _ => {}
                }
            }
        }

        // sort tasks into the proper data structure that shall be displayed
        // to the user
        // TODO: refactor eventually
        if self.hide_tasks_by_default {
            // hide task unless it satisfy [whitelist] filters
            self.add_task_default_hidden(&task, new_id);
        } else {
            // default behaviour
            self.add_task_default(&task, new_id);
        }

        // add task to look-up table
        self.tasks.insert(new_id, task);
    }

    // Bucketing used with --hide-by-default: a task is only displayed when
    // one of the show-* switches or the project whitelist vouches for it.
    fn add_task_default_hidden(&mut self, task: &Task, new_id: i32) {
        if self.should_hide_task(&task) {
            return;
        }

        // Reasons a task earns visibility despite hide-by-default.
        let mut shall_show: bool = task.tags.is_some() ||
            task.contexts.is_some() ||
            task.project.is_some() ||
            self.show_only_flagged && task.flag ||
            self.show_flagged && task.flag ||
            self.show_nonproject_tasks && task.project.is_none() ||
            self.show_project_tasks && task.project.is_some();

        if self.has_project_whitelist() {
            let should_whitelist: bool = match task.project {
                Some(ref project_path) => {
                    self.should_whitelist_project(project_path)
                },
                // TODO: need flag to control this
                None => true
            };
            if should_whitelist {
                shall_show = true;
            };
        };

        // sort task by status and priority
        match task.status {
            None => {
                if self.hide_incomplete {
                    // hide task
                } else if self.is_overdue(&task) {
                    if self.show_overdue || task.due_at.is_some() || shall_show {
                        self.add_to_overdue(&task, new_id);
                    }
                } else if !self.should_defer(&task) {
                    if self.show_incomplete || shall_show {
                        // add task to inbox
                        self.add_to_inbox(task.priority, new_id);
                    }
                } else {
                    if self.show_deferred || shall_show {
                        self.add_to_deferred(task.priority, new_id);
                    }
                }
            },
            Some(ref status) => {
                match status {
                    &Status::NotDone => {
                        if self.hide_incomplete {
                            // hide task
                        } else if self.is_overdue(&task) {
                            if self.show_overdue || task.due_at.is_some() || shall_show {
                                self.add_to_overdue(&task, new_id);
                            }
                        } else if !self.should_defer(&task) {
                            if self.show_incomplete || shall_show {
                                // add task to inbox
                                self.add_to_inbox(task.priority, new_id);
                            }
                        } else {
                            if self.show_deferred || shall_show {
                                self.add_to_deferred(task.priority, new_id);
                            }
                        }
                    },
                    &Status::Incubate => {
                        if self.hide_incomplete {
                            // hide task
                        } else if self.is_overdue(&task) {
                            if self.show_overdue || task.due_at.is_some() || shall_show {
                                self.add_to_overdue(&task, new_id);
                            }
                        } else if !self.should_defer(&task) {
                            if self.show_incomplete || shall_show {
                                // add task to inbox
                                self.add_to_inbox(task.priority, new_id);
                            }
                        } else {
                            if self.show_deferred || shall_show {
                                self.add_to_deferred(task.priority, new_id);
                            }
                        }
                    },
                    &Status::Done => {
                        if self.show_done || shall_show {
                            self.add_to_done(task.priority, new_id);
                        }
                    }
                }
            }
        }
    }

    // Default bucketing (show-by-default): overdue > inbox > deferred > done,
    // keyed on the task's status and dates.
    fn add_task_default(&mut self, task: &Task, new_id: i32) {
        if self.should_hide_task(&task) {
            return;
        }

        // sort task by status and priority
        match task.status {
            None => {
                if self.hide_incomplete {
                    // hide task
                } else if self.is_overdue(&task) {
                    self.add_to_overdue(&task, new_id);
                } else if !self.should_defer(&task) {
                    // add task to inbox
                    self.add_to_inbox(task.priority, new_id);
                } else {
                    self.add_to_deferred(task.priority, new_id);
                }
            },
            Some(ref status) => {
                match status {
                    &Status::NotDone => {
                        if self.hide_incomplete {
                            // hide task
                        } else if self.is_overdue(&task) {
                            self.add_to_overdue(&task, new_id);
                        } else if !self.should_defer(&task) {
                            // add task to inbox
                            self.add_to_inbox(task.priority, new_id);
                        } else {
                            self.add_to_deferred(task.priority, new_id);
                        }
                    },
                    &Status::Incubate => {
                        if self.hide_incomplete {
                            // hide task
                        } else if self.is_overdue(&task) {
                            self.add_to_overdue(&task, new_id);
                        } else if !self.should_defer(&task) {
                            if self.show_incubate {
                                // add task to inbox
                                self.add_to_inbox(task.priority, new_id);
                            }
                        } else {
                            self.add_to_deferred(task.priority, new_id);
                        }
                    },
                    &Status::Done => {
                        self.add_to_done(task.priority, new_id);
                    }
                }
            }
        }
    }

    // Apply the exclusion filters (project/tag/context/flag switches).
    // Returns true when the task must not be displayed at all.
    fn should_hide_task(&mut self, task: &Task) -> bool {
        if self.hide_nonproject_tasks && !task.project.is_some() {
            return true;
        }

        if self.filter_by_tags {
            match task.tags {
                None => {
                    // TODO: need flag to control this
                    return true;
                },
                Some(ref tags) => {
                    if !self.have_tags(tags) {
                        return true;
                    }
                }
            }
        }

        if self.filter_by_contexts {
            match task.contexts {
                None => {
                    // TODO: need flag to control this
                    return true;
                },
                Some(ref contexts) => {
                    if !self.have_contexts(contexts) {
                        return true;
                    }
                }
            }
        }

        // invariant: task belongs to a project
        // if necessary, apply any project path apply filters
        if self.has_project_filters() {
            let should_filter: bool = match task.project {
                Some(ref project_path) => {
                    self.should_filter_project(project_path)
                },
                // TODO: need flag to control this
                None => true
            };
            if should_filter {
                return true;
            }
        }

        if self.show_only_flagged {
            return !task.flag;
        }

        if self.show_flagged && task.flag {
            return false;
        }

        if self.hide_flagged {
            return task.flag;
        }

        // TODO: redundant; remove
        // if self.show_project_tasks && task.project.is_some() {
        //     return false;
        // }

        return false;
    }

    // True iff the task is deferred: forever, or until a datetime still in
    // the future.
    fn should_defer(&mut self, task: &Task) -> bool {
        // TODO: necessary??
        // if self.show_deferred {
        //     return false;
        // }
        match task.defer {
            None => false,
            Some(ref defer) => {
                match defer {
                    &Defer::Forever => true,
                    &Defer::Until(defer_till) => {
                        defer_till.timestamp() > Local::now().naive_local().timestamp()
                    }
                }
            }
        }
    }

    // Record a completed task into the pulse histogram, bucketed by how many
    // whole days ago it was completed (only within the last 7 days).
    fn add_to_pulse(&mut self, done_at: &NaiveDateTime, task_id: i32) {
        let diff = Local::now().naive_local().timestamp() - done_at.timestamp();
        if !(0 <= diff && diff <= chrono::Duration::days(7).num_seconds()) {
            return;
        }

        let diff = diff as f64;
        let sec_per_minute: f64 = 60f64;
        let sec_per_hour: f64 = sec_per_minute * 60f64;
        let sec_per_day: f64 = sec_per_hour * 24f64;
        let days_ago = (diff / sec_per_day).floor() as i64;

        if !self.pulse.contains_key(&days_ago) {
            self.pulse.insert(days_ago, Vec::new());
        }

        match self.pulse.get_mut(&days_ago) {
            // message previously (incorrectly) referenced journal.overdue
            None => unsafe { debug_unreachable!("journal.pulse missing expected bucket") },
            Some(bucket) => {
                (*bucket).push(task_id);
            }
        }
    }

    // True iff the task's due datetime falls within `due_within` of now
    // (i.e. already past, or due soon enough to be treated as overdue).
    fn is_overdue(&mut self, task: &Task) -> bool {
        match task.due_at {
            None => false,
            Some(ref due_at) => {
                (Local::now().naive_local().timestamp() + self.due_within.num_seconds()) >= due_at.timestamp()
            }
        }
    }

    // Bucket an overdue task, keyed either by encoded priority or by
    // negated time-to-due so the most overdue tasks sort first.
    fn add_to_overdue(&mut self, task: &Task, task_id: i32) {
        match task.due_at {
            None => {
                return;
            },
            Some(ref due_at) => {
                let rel_time = due_at.timestamp() - Local::now().naive_local().timestamp();

                let encoded_key = if self.sort_overdue_by_priority {
                    GTD::encode_priority(task.priority) as i64
                } else {
                    // largest negative numbers appear first
                    -rel_time
                };

                if !self.overdue.contains_key(&encoded_key) {
                    self.overdue.insert(encoded_key, Vec::new());
                }

                match self.overdue.get_mut(&encoded_key) {
                    None => unsafe { debug_unreachable!("journal.overdue missing expected bucket") },
                    Some(bucket) => {
                        (*bucket).push(task_id);
                    }
                }
            }
        }
    }

    fn add_to_inbox(&mut self, task_priority: i64, task_id: i32) {
        self.ensure_priority_inbox(task_priority);

        let task_priority: i64 = GTD::encode_priority(task_priority);
        match self.inbox.get_mut(&task_priority) {
            None => unsafe { debug_unreachable!("add_to_inbox: expected priority bucket not found") },
            Some(inbox) => {
                (*inbox).push(task_id);
            }
        }
    }

    fn add_to_deferred(&mut self, task_priority: i64, task_id: i32) {
        self.ensure_priority_deferred(task_priority);

        let task_priority: i64 = GTD::encode_priority(task_priority);
        match self.deferred.get_mut(&task_priority) {
            None => unsafe { debug_unreachable!("add_to_deferred: expected priority bucket not found") },
            Some(deferred) => {
                (*deferred).push(task_id);
            }
        }
    }

    fn add_to_done(&mut self, task_priority: i64, task_id: i32) {
        self.ensure_priority_done(task_priority);

        let task_priority: i64 = GTD::encode_priority(task_priority);
        match self.done.get_mut(&task_priority) {
            None => unsafe { debug_unreachable!("add_to_done: expected priority bucket not found") },
            Some(done) => {
                (*done).push(task_id);
            }
        }
    }

    // Next sequential task id (1-based, validated by to_task_id).
    fn next_task_id(&mut self) -> i32 {
        to_task_id(self.tasks.len() + 1) as i32
    }

    // TODO: refactor
    fn ensure_priority_inbox(&mut self, priority: i64) {
        let priority = GTD::encode_priority(priority);
        if !self.inbox.contains_key(&priority) {
            self.inbox.insert(priority, Vec::new());
        }
    }

    fn ensure_priority_deferred(&mut self, priority: i64) {
        let priority = GTD::encode_priority(priority);
        if !self.deferred.contains_key(&priority) {
            self.deferred.insert(priority, Vec::new());
        }
    }

    fn ensure_priority_done(&mut self, priority: i64) {
        let priority = GTD::encode_priority(priority);
        if !self.done.contains_key(&priority) {
            self.done.insert(priority, Vec::new());
        }
    }

    // TODO: refactor
    // Priorities are negated so higher priorities sort first in the BTreeMaps
    // (which iterate in ascending key order).
    fn encode_priority(priority: i64) -> i64 {
        -priority
    }

    fn decode_priority(priority: i64) -> i64 {
        -priority
    }
}
/* gtdtxt file parser */
fn parse_file(parent_file: Option<String>, path_to_file_str: String, journal: &mut GTD) {
let path_to_file: &Path = Path::new(&path_to_file_str);
if !path_to_file.is_file() {
// TODO: return Err(...)
match parent_file {
None => {},
Some(parent_file) => {
println!("In file: {}",
parent_file
);
}
};
println!("Path is not a file: {}",
path_to_file_str
);
process::exit(1);
}
// fetch path to file
let tracked_path = match path_to_file.canonicalize() {
Ok(resolved) => {
let resolved: PathBuf = resolved;
format!("{}", resolved.display())
},
Err(e) => {
panic!("{:?}", e);
}
};
if journal.opened_files.contains(&tracked_path) {
println!("Cyclic includes detected; file already opened: {}", tracked_path);
process::exit(1);
}
let file: File = File::open(path_to_file).ok().expect("Failed to open file");
// track this opened file to ensure we're not opening the same file twice
journal.opened_files.insert(tracked_path.clone());
// save current working directory
let old_working_directory = format!("{}", env::current_dir().unwrap().display());
// set new current working dir
let parent_dir: String = {
let parent_dir = Path::new(&tracked_path).parent().unwrap();
format!("{}", parent_dir.display())
};
if !env::set_current_dir(&parent_dir).is_ok() {
println!("Unable to change working directory to: {}", parent_dir);
process::exit(1);
}
journal.files_with_completed_tasks.insert(tracked_path.clone(), Vec::new());
let mut num_of_lines_parsed = 0;
// parse gtdtxt file
let mut input = Source::new(file);
// directive switches
let mut file_shall_not_contain_completed_tasks: bool = false;
// initial state
let mut previous_state: ParseState = ParseState::Start;
loop {
let mut n = Numbering::new(LineNumber::new(), line_token_parser);
// If we could implement FnMut for Numbering then we would be good, but we need to wrap now:
let mut m = |i| n.parse(i);
match input.parse(m) {
Ok((lines_parsed, line)) => {
// amend behaviour of newline counting
let lines_parsed = if lines_parsed == 0 {
1
} else {
lines_parsed
};
num_of_lines_parsed += lines_parsed;
match line {
LineToken::Task(task_block_line) => {
// mark this line as previous task block seen
journal.previous_task_block_line = num_of_lines_parsed;
let current_task: &mut Task = match previous_state {
ParseState::Task(ref mut task) => {
task
},
_ => {
let mut new_task: Task = Task::new(num_of_lines_parsed);
new_task.source_file = Some(tracked_path.clone());
previous_state = ParseState::Task(new_task);
// TODO: possible to refactor this in a better way?
match previous_state {
ParseState::Task(ref mut task) => {
task
},
_ => unsafe { debug_unreachable!() }
}
}
};
match task_block_line {
TaskBlock::Title(title) => {
current_task.title = Some(title);
},
TaskBlock::Note(note) => {
current_task.note = Some(note);
},
TaskBlock::Project(project) => {
if project.len() > 0 {
current_task.project = Some(project);
} else {
current_task.project = None;
}
},
TaskBlock::Created(created_at) => {
let created_at: NaiveDateTime = created_at;
current_task.created_at = Some(created_at);
},
TaskBlock::Done(done_at) => {
let done_at: NaiveDateTime = done_at;
current_task.done_at = Some(done_at);
},
TaskBlock::Chain(chain_at) => {
let chain_at: NaiveDateTime = chain_at;
match current_task.chains {
None => {
let mut tree = BTreeMap::new();
tree.insert(chain_at, true);
current_task.chains = Some(tree);
},
Some(ref mut tree) => {
tree.insert(chain_at, true);
}
};
},
TaskBlock::Status(status) => {
current_task.status = Some(status);
},
TaskBlock::Due(due_at) => {
let due_at: NaiveDateTime = due_at;
current_task.due_at = Some(due_at);
},
TaskBlock::Defer(defer) => {
current_task.defer = Some(defer);
},
TaskBlock::Contexts(contexts) => {
if contexts.len() > 0 {
current_task.contexts = Some(contexts);
} else {
current_task.contexts = None;
}
},
TaskBlock::Tags(tags) => {
if tags.len() > 0 {
current_task.tags = Some(tags);
} else {
current_task.tags = None;
}
},
TaskBlock::Time(time) => {
current_task.time += time;
},
TaskBlock::ID(id) => {
// println!("id: '{}'", id);
// TODO: complete
},
TaskBlock::Priority(priority) => {
current_task.priority = priority
},
TaskBlock::Flag(flag) => {
current_task.flag = flag;
}
};
},
LineToken::Directive(directive_line) => {
match previous_state {
ParseState::Task(task) => {
journal.add_task(task);
},
_ => {}
};
previous_state = ParseState::Directive;
match directive_line {
Directive::Include(path_to_file) => {
parse_file(Some(tracked_path.clone()), path_to_file, journal);
},
Directive::ShouldNotContainCompletedTasks(result) => {
file_shall_not_contain_completed_tasks = result;
}
};
},
LineToken::PreBlock => {
// println!("preblock");
match previous_state {
ParseState::Task(task) => {
journal.add_task(task);
},
_ => {}
};
previous_state = ParseState::PreBlock;
},
LineToken::TaskSeparator => {
// println!("TaskSeparator");
match previous_state {
ParseState::Task(task) => {
journal.add_task(task);
},
_ => {}
};
previous_state = ParseState::TaskSeparator;
}
};
},
Err(StreamError::Retry) => {
// Needed to refill buffer when necessary
},
Err(StreamError::EndOfInput) => {
break;
},
Err(e) => {
// println!("{:?}", e);
println!("Error parsing starting at line {} in file: {}", num_of_lines_parsed + 1, tracked_path);
process::exit(1);
}
}
};
match previous_state {
ParseState::Task(task) => {
journal.add_task(task);
},
_ => {}
};
match journal.files_with_completed_tasks.get_mut(&tracked_path) {
None => unsafe { debug_unreachable!() },
Some(bucket) => {
if (*bucket).len() > 0 && file_shall_not_contain_completed_tasks {
println!("Found {} completed tasks that are not supposed to be in file: {}",
(*bucket).len(),
tracked_path);
let task: &Task = journal.tasks.get((*bucket).first().unwrap()).unwrap();
println!("Found a completed task at lines: {} to {}",
task.task_block_range_start,
task.task_block_range_end
);
process::exit(1);
}
}
}
journal.opened_files.remove(&tracked_path);
// restore current working dir
if !env::set_current_dir(&old_working_directory).is_ok() {
println!("Unable to change working directory to: {}", old_working_directory);
process::exit(1);
}
}
/* parsers */
// state machine:
// Start = PreBlock | Task | Directive | TaskSeparator
// PreBlock = PreBlock | Task | Directive | TaskSeparator
// TaskSeparator = PreBlock | Task | Directive | TaskSeparator
// Task = Task | PreBlock | TaskSeparator
// Directive = Directive | PreBlock | TaskSeparator
/// Parser state carried across lines. `Task` holds the task currently
/// being accumulated; any non-task line flushes it into the journal.
#[derive(Debug)]
enum ParseState {
    Start,
    PreBlock,
    Task(Task),
    Directive,
    TaskSeparator
}
/// One fully-parsed line of input, as produced by `line_token_parser`.
#[derive(Debug)]
enum LineToken {
    Task(TaskBlock),
    Directive(Directive),
    PreBlock,
    TaskSeparator
}
/// Classifies one line into a `LineToken`. Lines starting at column 0
/// (no leading whitespace) may be a task separator, a task attribute,
/// or a directive; everything else falls through to `pre_block`.
fn line_token_parser(input: Input<u8>) -> U8Result<LineToken> {
    or(input,
        |i| parse!{i;
            // this line shall not begin with any whitespace
            look_ahead(|i| satisfy(i, |c| !is_whitespace(c)));
            let line: LineToken = task_seperators() <|>
                task_block() <|>
                directives();
            ret line
        },
        |i| pre_block(i)
    )
}
/* preblock */
/// Consumes whitespace and `/* ... */` comment blocks until either a
/// one-line comment or a line/input terminator, yielding `PreBlock`.
fn pre_block(i: Input<u8>) -> U8Result<LineToken> {
    parse!{i;
        /*
            consume comment blocks or whitespace till
            one line comments or terminating
        */
        let line: Vec<()> = many_till(
            |i| or(i,
                |i| whitespace(i),
                |i| comments_block(i)
            ),
            |i| or(i,
                |i| comments_one_line(i),
                |i| terminating(i)
            )
        );
        ret LineToken::PreBlock;
    }
}
/* task block */
/// Defer value for a task: hidden forever, or hidden until a datetime.
#[derive(Debug)]
enum Defer {
    Forever,
    Until(NaiveDateTime)
}
// tokens from parser
/// One `key: value` attribute line inside a task block.
#[derive(Debug)]
enum TaskBlock {
    Title(String),
    Created(NaiveDateTime),
    Done(NaiveDateTime),
    Chain(NaiveDateTime),
    Due(NaiveDateTime),
    Defer(Defer),
    Priority(i64),
    // accumulated seconds spent on the task
    Time(u64),
    Project(Vec<String>),
    Status(Status),
    Contexts(Vec<String>),
    Tags(Vec<String>),
    Flag(bool),
    Note(String),
    // TODO: complete
    ID(String)
}
/// Tries each task-attribute parser in order and wraps the first match
/// in `LineToken::Task`. Order matters: more specific keywords must be
/// attempted before shorter prefixes.
fn task_block(i: Input<u8>) -> U8Result<LineToken> {
    parse!{i;
        let line: TaskBlock = task_title() <|>
            task_note() <|>
            task_priority() <|>
            task_project() <|>
            task_flag() <|>
            task_created() <|>
            task_done() <|>
            task_chain() <|>
            task_status() <|>
            task_due() <|>
            task_defer() <|>
            task_tags() <|>
            task_contexts() <|>
            task_time() <|>
            task_id();
        ret LineToken::Task(line)
    }
}
/// Parses `task:`/`todo:`/`action:`/`item:` followed by a non-empty
/// title (trimmed, lossily decoded as UTF-8).
fn task_title(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        // aliases
        string_ignore_case("task".as_bytes()) <|>
        string_ignore_case("todo".as_bytes()) <|>
        string_ignore_case("action".as_bytes()) <|>
        string_ignore_case("item".as_bytes());
        token(b':');
        let line = non_empty_line();
        ret {
            let title: String = format!("{}", String::from_utf8_lossy(line.as_slice()).trim());
            TaskBlock::Title(title)
        }
    }
}
/// Parses `note:`/`notes:`/`description:`/`desc:` plus an optional
/// multi-line body. Continuation lines must be indented (start with
/// space/tab); blank lines inside the note are preserved as newlines,
/// and continuation text is re-indented with an 11-column pad.
fn task_note(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        // aliases
        string_ignore_case("notes".as_bytes()) <|>
        string_ignore_case("note".as_bytes()) <|>
        string_ignore_case("description".as_bytes()) <|>
        string_ignore_case("desc".as_bytes());
        token(b':');
        skip_many(|i| space_or_tab(i));
        // first line of the note may be empty (note body starts below)
        let line = or(
            |i| non_empty_line(i),
            |i| parse!{i;
                terminating();
                ret {
                    let line: Vec<u8> = vec![];
                    line
                }
            }
        );
        let other_lines: Vec<String> = many(
            |i| or(i,
                |i| parse!{i;
                    // allow empty lines in note
                    let nothing: Vec<()> = many(|i| parse!{i;
                        let nothing: Vec<()> = many_till(|i| space_or_tab(i), |i| end_of_line(i));
                        ret ()
                    });
                    space_or_tab();
                    let line = non_empty_line();
                    ret {
                        // one '\n' per skipped blank line, then pad to column 11
                        let filler = String::from_utf8(vec![b'\n'; nothing.len()]).ok().unwrap();
                        let line: String = format!("{}{:>11} {}",
                            filler,
                            "",
                            String::from_utf8_lossy(line.as_slice()).trim()
                        );
                        line
                    }
                },
                |i| parse!{i;
                    space_or_tab();
                    let line = non_empty_line();
                    ret {
                        let line: String = format!("{:>11} {}",
                            "",
                            String::from_utf8_lossy(line.as_slice()).trim()
                        );
                        line
                    }
                }
            )
        );
        ret {
            let line: String = format!("{}", String::from_utf8_lossy(line.as_slice()).trim());
            let other_lines = other_lines.join("\n");
            // stitch the head line and continuation lines together
            let note = if other_lines.len() > 0 {
                if line.len() > 0 {
                    format!("{}\n{}", line, other_lines)
                } else {
                    format!("{}", other_lines.trim())
                }
            } else {
                format!("{}", line)
            };
            TaskBlock::Note(note)
        }
    }
}
/// Parses `time:` followed by one or more time ranges (e.g. `1h 30m`),
/// summed into seconds.
fn task_time(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("time".as_bytes());
        token(b':');
        // require a non-empty value without consuming it
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let time: u64 = multiple_time_range();
        let nothing: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Time(time)
    }
}
/// Parses `priority:` with a signed or unsigned decimal value.
fn task_priority(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("priority".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let priority: i64 = signed_decimal() <|> decimal();
        let nothing: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Priority(priority)
    }
}
/// Parses `project:` with a `/`-separated path of project names.
fn task_project(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("project".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        let list = string_list(b'/');
        ret TaskBlock::Project(list)
    }
}
/// Parses `flag:` with a yes/no/true/false value.
fn task_flag(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("flag".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let input = bool_option_parser();
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Flag(input)
    }
}
/// Parses `created at:`/`created:`/`date:`/`added at:`/`added:` with a
/// datetime value (time defaults to 00:00 when absent).
fn task_created(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("created at".as_bytes()) <|>
        string_ignore_case("created".as_bytes()) <|>
        string_ignore_case("date".as_bytes()) <|>
        string_ignore_case("added at".as_bytes()) <|>
        string_ignore_case("added".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let created_at = parse_datetime(false);
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Created(created_at)
    }
}
/// Parses `done at:`/`done:`/`completed:`/`complete:` with a datetime.
fn task_done(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("done at".as_bytes()) <|>
        string_ignore_case("done".as_bytes()) <|>
        string_ignore_case("completed".as_bytes()) <|>
        string_ignore_case("complete".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let done_at = parse_datetime(false);
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Done(done_at)
    }
}
/// Parses `chain:` with a datetime; chains accumulate in a BTreeMap on
/// the task (see the caller's `TaskBlock::Chain` arm).
fn task_chain(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("chain".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let chain_at = parse_datetime(false);
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Chain(chain_at)
    }
}
/// Maps a status keyword onto a `Status` variant:
/// done/complete/finished/... -> Done, hide/incubate/someday/... ->
/// Incubate, active/pending/in progress/... -> NotDone.
fn parse_status(input: Input<u8>) -> U8Result<Status> {
    or(input,
        |i| parse!{i;
            string_ignore_case("done".as_bytes()) <|>
            string_ignore_case("complete".as_bytes()) <|>
            string_ignore_case("finished".as_bytes()) <|>
            string_ignore_case("finish".as_bytes()) <|>
            string_ignore_case("fin".as_bytes());
            ret Status::Done
        },
        |i| or(i,
            |i| parse!{i;
                string_ignore_case("hide".as_bytes()) <|>
                string_ignore_case("hidden".as_bytes()) <|>
                string_ignore_case("incubate".as_bytes()) <|>
                string_ignore_case("later".as_bytes()) <|>
                string_ignore_case("someday".as_bytes()) <|>
                string_ignore_case("inactive".as_bytes()) <|>
                string_ignore_case("not active".as_bytes());
                ret Status::Incubate
            },
            |i| parse!{i;
                string_ignore_case("active".as_bytes()) <|>
                string_ignore_case("not done".as_bytes()) <|>
                string_ignore_case("progress".as_bytes()) <|>
                string_ignore_case("in progress".as_bytes()) <|>
                string_ignore_case("in-progress".as_bytes()) <|>
                string_ignore_case("pending".as_bytes()) <|>
                string_ignore_case("is active".as_bytes());
                ret Status::NotDone
            }
        )
    )
}
/// Parses `status:` with one of the keywords accepted by `parse_status`.
fn task_status(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("status".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let status = parse_status();
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Status(status)
    }
}
/// Parses `due:` with a datetime; a bare date defaults to 23:59
/// (end of day) via `parse_datetime(true)`.
fn task_due(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("due".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let due_at = parse_datetime(true);
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Due(due_at)
    }
}
/// Parses `defer:`/`hide:` (and their `till`/`until` aliases) with
/// either the literal `forever` or a datetime. Alias order matters:
/// longer keywords are attempted before their prefixes.
fn task_defer(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("defer till".as_bytes()) <|>
        string_ignore_case("defer until".as_bytes()) <|>
        string_ignore_case("defer".as_bytes()) <|>
        string_ignore_case("hide until".as_bytes()) <|>
        string_ignore_case("hidden".as_bytes()) <|>
        string_ignore_case("hide till".as_bytes()) <|>
        string_ignore_case("hide".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let defer = or(
            |i| parse!{i;
                string_ignore_case("forever".as_bytes());
                ret Defer::Forever
            },
            |i| parse!{i;
                let defer_till = parse_datetime(false);
                ret Defer::Until(defer_till)
            }
        );
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Defer(defer)
    }
}
/// Parses `contexts:`/`context:` with a comma-separated list.
fn task_contexts(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("contexts".as_bytes()) <|>
        string_ignore_case("context".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        let list = string_list(b',');
        ret TaskBlock::Contexts(list)
    }
}
/// Parses `tags:`/`tag:` with a comma-separated list.
fn task_tags(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("tags".as_bytes()) <|>
        string_ignore_case("tag".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        let list = string_list(b',');
        ret TaskBlock::Tags(list)
    }
}
/// Parses `id:` with a free-form (trimmed) identifier string.
fn task_id(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("id".as_bytes());
        token(b':');
        let line = non_empty_line();
        ret {
            let id: String = format!("{}", String::from_utf8_lossy(line.as_slice()).trim());
            TaskBlock::ID(id)
        }
    }
}
/* directives */
/// File-level directives: `include:` pulls in another gtdtxt file;
/// `file_no_done_tasks:` asserts the file holds no completed tasks.
#[derive(Debug)]
enum Directive {
    Include(String),
    ShouldNotContainCompletedTasks(bool)
}
/// Tries each directive parser and wraps the result in
/// `LineToken::Directive`.
fn directives(input: Input<u8>) -> U8Result<LineToken> {
    parse!{input;
        let line: Directive = directive_include() <|>
            directive_not_contain_done_tasks();
        ret {
            LineToken::Directive(line)
        }
    }
}
/// Parses `include:` with a (trimmed) path to another file.
fn directive_include(input: Input<u8>) -> U8Result<Directive> {
    parse!{input;
        string_ignore_case("include".as_bytes());
        token(b':');
        skip_many(|i| space_or_tab(i));
        let line = non_empty_line();
        ret {
            let path_to_file: String = format!("{}", String::from_utf8_lossy(line.as_slice()).trim());
            Directive::Include(path_to_file)
        }
    }
}
/// Parses `file_no_done_tasks:` with a boolean value.
fn directive_not_contain_done_tasks(input: Input<u8>) -> U8Result<Directive> {
    parse!{input;
        string_ignore_case("file_no_done_tasks".as_bytes());
        token(b':');
        skip_many(|i| space_or_tab(i));
        let input = bool_option_parser();
        let nothing: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret Directive::ShouldNotContainCompletedTasks(input)
    }
}
/* lines */
/// A single parsed line: either empty/whitespace-only, or its raw bytes.
enum Line {
    Empty,
    NonEmpty(Vec<u8>)
}
/// Parses one line and fails (as incomplete input) unless it carries
/// at least one byte of content.
fn non_empty_line(i: Input<u8>) -> U8Result<Vec<u8>> {
    parse_line(i)
        .bind(parse_non_empty_line)
}
// TODO: bother moving as closure?
/// Accepts the output of `parse_line` only when it carried at least one
/// byte; empty results are reported as incomplete input so `or`-chains
/// can backtrack.
fn parse_non_empty_line(i: Input<u8>, above: Line) -> U8Result<Vec<u8>> {
    match above {
        // a line with actual content: hand its bytes through
        Line::NonEmpty(line) if !line.is_empty() => i.ret(line),
        // empty line, or non-empty wrapper around zero bytes:
        // demand at least one more u8 token
        _ => i.incomplete(1),
    }
}
/// Reads one line up to a terminator. Leading whitespace is skipped, so
/// a whitespace-only line comes back as `Line::NonEmpty` with no bytes.
fn parse_line(i: Input<u8>) -> U8Result<Line> {
    // many_till(i, any, |i| terminating(i))
    or(i,
        |i| parse!{i;
            terminating();
            ret Line::Empty
        },
        |i| parse!{i;
            // lines with just whitespace are probably not interesting
            // TODO: consider space_or_tab?
            skip_many(|i| whitespace(i));
            let line: Vec<u8> = many_till(any, |i| terminating(i));
            ret Line::NonEmpty(line)
        }
    )
}
/* task separator */
/// Matches a horizontal-rule line made of at least four repeats of one
/// of the listed characters (e.g. `----`, `====`).
/// NOTE(review): "seperators" is a typo of "separators", but the name
/// is referenced by `line_token_parser`, so it is kept as-is.
fn task_seperators(input: Input<u8>) -> U8Result<LineToken> {
    parse!{input;
        parse_task_separator("-".as_bytes()) <|>
        parse_task_separator("=".as_bytes()) <|>
        parse_task_separator("_".as_bytes()) <|>
        // TODO: necessary?
        parse_task_separator("#".as_bytes()) <|>
        parse_task_separator("/".as_bytes()) <|>
        parse_task_separator(":".as_bytes()) <|>
        parse_task_separator("~".as_bytes()) <|>
        parse_task_separator("*".as_bytes());
        ret {
            LineToken::TaskSeparator
        }
    }
}
/// Matches four mandatory repeats of `token`, then any further repeats,
/// then trailing whitespace up to the line terminator.
fn parse_task_separator<'a>(input: Input<'a, u8>, token: &[u8])
    -> SimpleResult<'a, u8, ()> {
    parse!{input;
        match_four_tokens(token);
        skip_many(|i| string(i, token));
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret ()
    }
}
/* comments */
/// Consumes a one-line comment introduced by `//`, `#`, or `;` through
/// the end of the line.
fn comments_one_line(i: Input<u8>) -> U8Result<()> {
    parse!{i;
        or(
            |i| string(i, "//".as_bytes()),
            |i| or(i,
                |i| string(i, "#".as_bytes()),
                |i| string(i, ";".as_bytes())
            )
        );
        let line: Vec<u8> = many_till(|i| any(i), |i| terminating(i));
        ret ()
    }
}
/// Consumes a `/* ... */` block comment (may span multiple lines).
fn comments_block(i: Input<u8>) -> U8Result<()> {
    parse!{i;
        string("/*".as_bytes());
        let line: Vec<u8> = many_till(|i| any(i), |i| string(i, "*/".as_bytes()));
        ret ()
    }
}
/* delimited list parser */
/// Parses the rest of the line as a `delim`-separated list of strings;
/// items are trimmed and empty items are dropped. The final segment
/// (after the last delimiter) is collected separately by `many_till`.
fn string_list(input: Input<u8>, delim: u8) -> U8Result<Vec<String>> {
    parse!{input;
        skip_many(|i| space_or_tab(i));
        // TODO: custom delimeter option
        let raw_list = delim_sep_list(delim);
        let last_item: Vec<u8> = many_till(|i| any(i), |i| terminating(i));
        ret {
            let mut new_list: Vec<String> = Vec::new();
            for item in &raw_list {
                let item: String = format!("{}", String::from_utf8_lossy(item.as_slice()).trim());
                if item.len() > 0 {
                    new_list.push(item);
                }
            }
            let last_item: String = format!("{}", String::from_utf8_lossy(last_item.as_slice()).trim());
            if last_item.len() > 0 {
                new_list.push(last_item);
            }
            new_list
        }
    }
}
/// Parses zero or more delimiter-terminated items, skipping any leading
/// delimiters.
fn delim_sep_list(i: Input<u8>, delim: u8) -> U8Result<Vec<Vec<u8>>> {
    parse!{i;
        skip_many(|i| token(i, delim));
        let list: Vec<Vec<u8>> = many(|i| delim_sep_item(i, delim));
        ret list
    }
}
/// Parses one item: bytes up to the next delimiter (stopping at line
/// terminators via `non_terminating`), swallowing surrounding delimiters.
fn delim_sep_item(i: Input<u8>, delim: u8) -> U8Result<Vec<u8>> {
    parse!{i;
        skip_many(|i| token(i, delim));
        let item: Vec<u8> = many_till(|i| non_terminating(i), |i| token(i, delim));
        skip_many(|i| token(i, delim));
        ret item
    }
}
/* misc parsers */
/// Parses a boolean keyword: `yes`/`true` -> true, `no`/`false` -> false
/// (case-insensitive).
fn bool_option_parser(i: Input<u8>) -> U8Result<bool> {
    or(i,
        |i| parse!{i;
            string_ignore_case("yes".as_bytes()) <|>
            string_ignore_case("true".as_bytes());
            ret true
        },
        |i| parse!{i;
            string_ignore_case("no".as_bytes()) <|>
            string_ignore_case("false".as_bytes());
            ret false
        }
    )
}
/// Matches exactly four consecutive occurrences of `token` (the minimum
/// run length for a task-separator line).
fn match_four_tokens<'a>(input: Input<'a, u8>, token: &[u8])
    -> SimpleResult<'a, u8, ()> {
    parse!{input;
        string(token);
        string(token);
        string(token);
        string(token);
        ret ()
    }
}
/// Consumes one whitespace byte (as defined by `is_whitespace`,
/// declared elsewhere in this file).
fn whitespace(i: Input<u8>) -> U8Result<()> {
    parse!{i;
        satisfy(|c| is_whitespace(c));
        ret ()
    }
}
/// Consumes one space or tab byte (horizontal whitespace only).
fn space_or_tab(input: Input<u8>) -> U8Result<()> {
    parse!{input;
        or(
            |i| token(i, b' '),
            |i| token(i, b'\t')
        );
        ret ()
    }
}
/// Consumes one byte only if the input is NOT at a line terminator or
/// EOF; at a terminator it reports incomplete so `many_till` can stop.
fn non_terminating(i: Input<u8>) -> U8Result<u8> {
    or(i,
        |i| parse!{i;
            terminating();
            ret None
        },
        |i| parse!{i;
            let something = any();
            ret Some(something)
        }
    )
    .bind(|i, above: Option<u8>| {
        match above {
            None => {
                return i.incomplete(1);
            },
            Some(c) => {
                return i.ret(c);
            }
        }
    })
}
// match eof or various eol
/// Matches an end-of-line sequence or end of input.
fn terminating(i: Input<u8>) -> U8Result<()> {
    or(i,
        |i| parse!{i;
            end_of_line();
            ret ()
        },
        // NOTE: eof should be matched last
        |i| eof(i)
    )
}
// Source: https://en.wikipedia.org/wiki/Newline#Unicode
/// Matches any Unicode newline sequence, trying `\r\n` first so the
/// pair is not split into separate `\r` and `\n` matches.
fn end_of_line(i: Input<u8>) -> U8Result<&[u8]> {
    // TODO: bother to refactor using parse! macro with <|> operator?
    or(i,
        |i| parse!{i;
            token(b'\r');
            token(b'\n');
            ret "\r\n".as_bytes()
        },
        |i| or(i,
            |i| parse!{i;
                token(b'\n');
                ret "\n".as_bytes()
            },
            |i| or(i,
                |i| parse!{i;
                    token(b'\r');
                    ret "\r".as_bytes()
                },
                |i| or(i,
                    |i| parse!{i;
                        string("\u{2028}".as_bytes());
                        ret "\u{2028}".as_bytes()
                    },
                    |i| or(i,
                        |i| parse!{i;
                            string("\u{2029}".as_bytes());
                            ret "\u{2029}".as_bytes()
                        },
                        |i| or(i,
                            |i| parse!{i;
                                string("\u{000B}".as_bytes());
                                ret "\u{000B}".as_bytes()
                            },
                            |i| or(i,
                                |i| parse!{i;
                                    string("\u{000C}".as_bytes());
                                    ret "\u{000C}".as_bytes()
                                },
                                |i| parse!{i;
                                    string("\u{0085}".as_bytes());
                                    ret "\u{0085}".as_bytes()
                                }
                            )
                        )
                    )
                )
            )
        )
    )
}
/* time range parsers */
/// Parses one or more whitespace-separated time ranges (e.g. `1h 30m`)
/// and returns their total in seconds.
fn multiple_time_range(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        let time: Vec<u64> = many1(|i| parse!{
            i;
            skip_many(|i| space_or_tab(i));
            let range = time_range();
            ret range
        });
        ret {
            // sum all parsed ranges; `Iterator::sum` replaces the old
            // manual fold-with-mutation
            let total: u64 = time.iter().sum();
            total
        }
    }
}
/// Parses a single `<number> <unit>` range (e.g. `90 min`) and returns
/// the value converted to seconds via the unit multiplier.
fn time_range(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        let range: u64 = decimal();
        skip_many(|i| space_or_tab(i));
        let multiplier = time_range_unit_minutes() <|>
            time_range_unit_hours() <|>
            time_range_unit_days() <|>
            time_range_unit_seconds();
        ret {
            range * multiplier
        }
    }
}
/// Matches a "seconds" unit keyword; multiplier 1.
fn time_range_unit_seconds(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        string_ignore_case("seconds".as_bytes()) <|>
        string_ignore_case("second".as_bytes()) <|>
        string_ignore_case("secs".as_bytes()) <|>
        string_ignore_case("sec".as_bytes()) <|>
        string_ignore_case("s".as_bytes());
        ret 1
    }
}
/// Matches a "minutes" unit keyword; multiplier 60.
fn time_range_unit_minutes(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        string_ignore_case("minutes".as_bytes()) <|>
        string_ignore_case("minute".as_bytes()) <|>
        string_ignore_case("mins".as_bytes()) <|>
        string_ignore_case("min".as_bytes()) <|>
        string_ignore_case("m".as_bytes());
        // 60 seconds in a minute
        ret 60
    }
}
/// Matches an "hours" unit keyword; multiplier 3600.
fn time_range_unit_hours(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        string_ignore_case("hours".as_bytes()) <|>
        string_ignore_case("hour".as_bytes()) <|>
        string_ignore_case("hrs".as_bytes()) <|>
        string_ignore_case("hr".as_bytes()) <|>
        string_ignore_case("h".as_bytes());
        // 3600 seconds in an hour
        ret 3600
    }
}
/// Matches a "days" unit keyword; multiplier 86400.
fn time_range_unit_days(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        string_ignore_case("days".as_bytes()) <|>
        string_ignore_case("day".as_bytes()) <|>
        string_ignore_case("dys".as_bytes()) <|>
        string_ignore_case("dy".as_bytes()) <|>
        string_ignore_case("d".as_bytes());
        // 86400 seconds in a day
        ret 86400
    }
}
/* datetime parsers */
/// AM/PM marker for 12-hour clock input.
enum Meridiem {
    AM,
    PM
}
/// A wall-clock time (seconds are always 0 in this parser).
struct Time {
    // 24-hour format.
    // range from 0 to 23
    hour: u32,
    minute: u32
}
/// A calendar date as parsed, validated per-field only (day is not
/// checked against the month).
struct ParsedDate {
    // between 1 and 31
    day: u32,
    // between 1 and 12
    month: u32,
    // at least 1
    year: i32
}
/// Intermediate date+time pair before conversion to `NaiveDateTime`.
struct ParsedDateTime {
    time: Time,
    date: ParsedDate
}
/// Parses `<time> <date>`, `<date> <time>`, or a bare `<date>`. For a
/// bare date, `end_of_day` selects 23:59 rather than 00:00 as the time.
/// NOTE(review): day/month are only validated individually upstream, so
/// an impossible date like "feb 31 2016" reaches
/// `NaiveDate::from_ymd`, which panics on invalid dates — confirm this
/// is acceptable for malformed user input.
fn parse_datetime(i: Input<u8>, end_of_day: bool) -> U8Result<NaiveDateTime> {
    or(i,
        |i| parse!{i;
            let time = parse_time();
            skip_many1(|i| space_or_tab(i));
            let date = parse_date();
            ret ParsedDateTime {
                time: time,
                date: date
            }
        },
        |i| or(i,
            |i| parse!{i;
                let date = parse_date();
                skip_many1(|i| space_or_tab(i));
                let time = parse_time();
                ret ParsedDateTime {
                    time: time,
                    date: date
                }
            },
            |i| parse!{i;
                let date = parse_date();
                ret {
                    if end_of_day {
                        ParsedDateTime {
                            date: date,
                            time: Time {
                                hour: 23,
                                minute: 59
                            }
                        }
                    } else {
                        ParsedDateTime {
                            date: date,
                            time: Time {
                                hour: 0,
                                minute: 0
                            }
                        }
                    }
                }
            }
        )
    )
    .bind(|i, above: ParsedDateTime| {
        let date = NaiveDate::from_ymd(above.date.year, above.date.month, above.date.day);
        let time = NaiveTime::from_hms(above.time.hour, above.time.minute, 0);
        let date_time = NaiveDateTime::new(date, time);
        i.ret(date_time)
    })
}
/// Parses a date in `<month name> <day>[,] <year>` form, e.g.
/// `july 4, 2016` or `jul 4 2016`.
fn parse_date(i: Input<u8>) -> U8Result<ParsedDate> {
    parse!{i;
        let month = parse_months();
        skip_many1(|i| space_or_tab(i));
        let day = parse_day();
        // separator between day and year: a comma (optionally padded)
        // or plain whitespace
        or(
            |i| parse!{i;
                skip_many(|i| space_or_tab(i));
                token(b',');
                skip_many(|i| space_or_tab(i));
                ret ()
            },
            |i| parse!{i;
                skip_many1(|i| space_or_tab(i));
                ret ()
            }
        );
        let year = parse_year();
        ret ParsedDate {
            month: month,
            day: day,
            year: year
        }
    }
}
// 5pm
// 5:00pm
// 17:00
/// Parses a time of day in any supported form: bare hour with AM/PM,
/// 12-hour clock with minutes, or 24-hour clock (incl. military time).
fn parse_time(i: Input<u8>) -> U8Result<Time> {
    parse!{i;
        let time = simple_time() <|>
            parse_12_hour_clock() <|>
            parse_24_hour_clock();
        ret time
    }
}
/// Parses `<hour> am|pm` (no minutes), converting to 24-hour form:
/// 12am -> 0, 12pm stays 12, other pm hours gain 12.
fn simple_time(i: Input<u8>) -> U8Result<Time> {
    parse!{i;
        let hour = parse_12_hour();
        skip_many(|i| space_or_tab(i));
        let ampm: Meridiem = parse_am_pm();
        ret {
            let mut hour: u32 = hour;
            match ampm {
                Meridiem::AM => {
                    if hour == 12 {
                        hour = 0;
                    }
                },
                Meridiem::PM => {
                    if hour != 12 {
                        // 1 to 11
                        hour = hour + 12;
                    }
                }
            };
            Time {
                hour: hour,
                minute: 0
            }
        }
    }
}
/// Parses `<hour>:<minute> am|pm`, converting to 24-hour form with the
/// same 12am/12pm rules as `simple_time`.
fn parse_12_hour_clock(i: Input<u8>) -> U8Result<Time> {
    parse!{i;
        let hour = parse_12_hour();
        token(b':');
        let minute = parse_minute();
        skip_many(|i| space_or_tab(i));
        let ampm: Meridiem = parse_am_pm();
        ret {
            let mut hour: u32 = hour;
            match ampm {
                Meridiem::AM => {
                    if hour == 12 {
                        hour = 0;
                    }
                },
                Meridiem::PM => {
                    if hour != 12 {
                        // 1 to 11
                        hour = hour + 12;
                    }
                }
            };
            Time {
                hour: hour,
                minute: minute
            }
        }
    }
}
/// Parses a case-insensitive `am` or `pm` marker.
fn parse_am_pm(i: Input<u8>) -> U8Result<Meridiem> {
    or(i,
        |i| parse!{i;
            string_ignore_case("pm".as_bytes());
            ret Meridiem::PM;
        },
        |i| parse!{i;
            string_ignore_case("am".as_bytes());
            ret Meridiem::AM;
        }
    )
}
/// Parses a 24-hour clock time: `HH:MM`, or compact military time
/// (`1730` / `730`) as a fallback.
fn parse_24_hour_clock(i: Input<u8>) -> U8Result<Time> {
    or(i,
        |i| parse!{i;
            let hour: u32 = parse_24_hour();
            token(b':');
            let minute: u32 = parse_minute();
            ret Time {
                hour: hour,
                minute: minute
            }
        },
        |i| military_time(i)
    )
}
/// Parses compact military time into `Time`: four digits are HHMM,
/// three digits are HMM. Out-of-range values are rejected.
fn military_time(i: Input<u8>) -> U8Result<Time> {
    // TODO: refactor; haha...
    or(i,
        |i| parse!{i;
            let hour_2: u8 = digit();
            let hour_1: u8 = digit();
            let min_2: u8 = digit();
            let min_1: u8 = digit();
            ret {
                // digits arrive as ASCII bytes; subtract 48 (b'0')
                let hour_2: u32 = hour_2 as u32 - 48;
                let hour_1: u32 = hour_1 as u32 - 48;
                let hour = hour_2 * 10 + hour_1;
                let min_2: u32 = min_2 as u32 - 48;
                let min_1: u32 = min_1 as u32 - 48;
                let min = min_2 * 10 + min_1;
                Time {
                    hour: hour,
                    minute: min
                }
            }
        },
        |i| parse!{i;
            let hour_1: u8 = digit();
            let min_2: u8 = digit();
            let min_1: u8 = digit();
            ret {
                let hour_1: u32 = hour_1 as u32 - 48;
                let hour = hour_1;
                let min_2: u32 = min_2 as u32 - 48;
                let min_1: u32 = min_1 as u32 - 48;
                let min = min_2 * 10 + min_1;
                Time {
                    hour: hour,
                    minute: min
                }
            }
        }
    )
    .bind(|i, above:Time| {
        // `hour`/`minute` are unsigned, so `0 <= x` was always true;
        // only the upper bounds need checking
        if above.hour <= 23 && above.minute <= 59 {
            return i.ret(above);
        }
        // TODO: right usize?
        return i.incomplete(1);
    })
}
/// Parses a one- or two-digit hour in 0..=23.
fn parse_24_hour(i: Input<u8>) -> U8Result<u32> {
    up_to_two_digits(i)
        .bind(|i, above:u32| {
            // `above` is unsigned; only the upper bound needs checking
            if above <= 23 {
                return i.ret(above);
            }
            // TODO: right usize?
            return i.incomplete(1);
        })
}
/// Parses a one- or two-digit hour in 1..=12.
fn parse_12_hour(i: Input<u8>) -> U8Result<u32> {
    up_to_two_digits(i)
        .bind(|i, above:u32| {
            if 1 <= above && above <= 12 {
                return i.ret(above);
            }
            // TODO: right usize?
            return i.incomplete(1);
        })
}
/// Parses a two-digit minute in 0..=59.
fn parse_minute(i: Input<u8>) -> U8Result<u32> {
    two_digits(i)
        .bind(|i, above:u32| {
            // `above` is unsigned; only the upper bound needs checking
            if above <= 59 {
                return i.ret(above);
            }
            // TODO: right usize?
            return i.incomplete(1);
        })
}
/// Parses a positive decimal year (zero is rejected).
fn parse_year(i: Input<u8>) -> U8Result<i32> {
    decimal::<u32>(i)
        .bind(|i, above:u32| {
            // `above` is unsigned, so `<= 0` could only mean zero
            if above == 0 {
                // TODO: right usize?
                return i.incomplete(1);
            }
            i.ret(above as i32)
        })
}
/// Parses a day-of-month in 1..=31 (not validated against the month).
fn parse_day(i: Input<u8>) -> U8Result<u32> {
    up_to_two_digits(i)
        .bind(|i, above:u32| {
            // `above` is unsigned, so `<= 0` could only mean zero
            if above == 0 || above > 31 {
                // TODO: right usize?
                return i.incomplete(1);
            }
            i.ret(above)
        })
}
/// Parses a month name or abbreviation (case-insensitive) into 1..=12.
/// Full names are tried before their abbreviations so e.g. "march" is
/// not consumed as just "mar".
fn parse_months(i: Input<u8>) -> U8Result<u32> {
    parse!{i;
        let month: u32 =
            resolve_month("january", 1) <|>
            resolve_month("jan", 1) <|>
            resolve_month("february", 2) <|>
            resolve_month("feb", 2) <|>
            resolve_month("march", 3) <|>
            resolve_month("mar", 3) <|>
            resolve_month("april", 4) <|>
            resolve_month("apr", 4) <|>
            resolve_month("may", 5) <|>
            resolve_month("june", 6) <|>
            resolve_month("jun", 6) <|>
            resolve_month("july", 7) <|>
            resolve_month("jul", 7) <|>
            resolve_month("august", 8) <|>
            resolve_month("aug", 8) <|>
            resolve_month("september", 9) <|>
            resolve_month("sept", 9) <|>
            resolve_month("sep", 9) <|>
            resolve_month("october", 10) <|>
            resolve_month("oct", 10) <|>
            resolve_month("november", 11) <|>
            resolve_month("nov", 11) <|>
            resolve_month("december", 12) <|>
            resolve_month("dec", 12);
        ret month;
    }
}
/// Matches `month` case-insensitively and yields `ret_val` (its number).
fn resolve_month<'a>(i: Input<'a, u8>, month: &str, ret_val: u32) -> SimpleResult<'a, u8, u32> {
    parse!{i;
        string_ignore_case(month.as_bytes());
        ret ret_val
    }
}
/// Parses one or two ASCII digits into a number (two-digit form tried
/// first). The `- 48` converts an ASCII digit byte (b'0') to its value.
fn up_to_two_digits(i: Input<u8>) -> U8Result<u32> {
    or(i,
        |i| parse!{i;
            let first_digit: u8 = digit();
            let second_digit: u8 = digit();
            ret {
                let first_digit: u32 = first_digit as u32 - 48;
                let second_digit: u32 = second_digit as u32 - 48;
                let resolved: u32 = first_digit * 10 + second_digit;
                resolved
            }
        },
        |i| parse!{i;
            let first_digit: u8 = digit();
            ret {
                let resolved: u32 = first_digit as u32 - 48;
                resolved
            }
        }
    )
}
/// Parses exactly two ASCII digits into a number.
fn two_digits(i: Input<u8>) -> U8Result<u32> {
    parse!{i;
        let first_digit: u8 = digit();
        let second_digit: u8 = digit();
        ret {
            let first_digit: u32 = first_digit as u32 - 48;
            let second_digit: u32 = second_digit as u32 - 48;
            let resolved: u32 = first_digit * 10 + second_digit;
            resolved
        }
    }
}
/// Parses a decimal with a mandatory `-` or `+` sign prefix.
/// (Unsigned decimals are handled by the bare `decimal()` alternative
/// at the call sites.)
fn signed_decimal(input: Input<u8>) -> U8Result<i64> {
    parse!{input;
        let sign: i64 = or(
            |i| parse!{i;
                token(b'-');
                ret -1
            },
            |i| parse!{i;
                token(b'+');
                ret 1
            }
        );
        let num: i64 = decimal();
        ret {
            sign * num
        }
    }
}
fn string_ignore_case<'a>(i: Input<'a, u8>, s: &[u8])
-> SimpleResult<'a, u8, &'a [u8]> {
let b = i.buffer();
if s.len() > b.len() {
return i.incomplete(s.len() - b.len());
}
let d = &b[..s.len()];
for j in 0..s.len() {
if !(s[j]).eq_ignore_ascii_case(&(d[j])) {
return i.replace(&b[j..]).err(Error::expected(d[j]))
}
}
i.replace(&b[s.len()..]).ret(d)
}
/* helpers */
// Derives the next task id from the current task count.
// NOTE(review): the `as i32` cast silently truncates for counts above
// i32::MAX — practically unreachable, but worth confirming.
fn to_task_id(len: usize) -> i32 {
    len as i32
}
/// A signed elapsed duration plus a human-readable description,
/// classified by whether `to` is after, equal to, or before `from`.
enum RelativeTime {
    Future(i64, String),
    Now(i64, String),
    Past(i64, String)
}
// src: http://stackoverflow.com/a/6109105/412627
/// Describes the gap between two unix-style timestamps (same unit as
/// the caller uses, treated as seconds by `Timerange`).
/// NOTE(review): the `Now` variant also says "{} ago" — looks like a
/// copy-paste from the `Past` arm; confirm intended wording.
fn relative_time(from: i64, to: i64) -> RelativeTime {
    let elapsed_num: u64 = (to - from).abs() as u64;
    // at most two unit terms, e.g. "1 hour and 5 minutes"
    let range = Timerange::new(elapsed_num).print(2);
    let elapsed_num = elapsed_num as i64;
    if to > from {
        return RelativeTime::Past(elapsed_num, format!("{} ago", range));
    } else if to == from {
        return RelativeTime::Now(elapsed_num, format!("{} ago", range));
    } else {
        return RelativeTime::Future(elapsed_num, format!("{} into the future", range));
    }
}
/// A duration in seconds, pretty-printable as a coarse human-readable
/// string (e.g. "1 hour and 5 minutes").
struct Timerange {
    // total seconds
    range: u64
}
impl Timerange {
    fn new(range: u64) -> Timerange {
        Timerange {
            range: range
        }
    }
    /// Splits `self.range` into (whole units, leftover seconds, unit
    /// name) using the largest fitting unit. Months are approximated as
    /// 30 days and years as 365 days. The unit name is pluralized when
    /// the count exceeds 1 (note: 0 yields the singular, e.g. "0 second").
    fn floor_time_unit(&self) -> (u64, u64, String) {
        let sec_per_minute: f64 = 60f64;
        let sec_per_hour: f64 = sec_per_minute * 60f64;
        let sec_per_day: f64 = sec_per_hour * 24f64;
        let sec_per_month: f64 = sec_per_day * 30f64;
        let sec_per_year: f64 = sec_per_day * 365f64;
        let mut elapsed = self.range as f64;
        let mut remainder: f64 = 0f64;
        let unit;
        if elapsed < sec_per_minute {
            unit = "second";
        } else if elapsed < sec_per_hour {
            remainder = elapsed % sec_per_minute;
            elapsed = (elapsed / sec_per_minute).floor();
            unit = "minute"
        } else if elapsed < sec_per_day {
            remainder = elapsed % sec_per_hour;
            elapsed = (elapsed / sec_per_hour).floor();
            unit = "hour"
        } else if elapsed < sec_per_month {
            remainder = elapsed % sec_per_day;
            elapsed = (elapsed / sec_per_day).floor();
            unit = "day"
        } else if elapsed < sec_per_year {
            remainder = elapsed % sec_per_month;
            elapsed = (elapsed / sec_per_month).floor();
            unit = "month"
        } else {
            remainder = elapsed % sec_per_year;
            elapsed = (elapsed / sec_per_year).floor();
            unit = "year"
        }
        // pluralize
        let unit = if elapsed <= 1f64 {
            format!("{}", unit)
        } else {
            format!("{}s", unit)
        };
        let elapsed = elapsed as u64;
        let remainder = remainder as u64;
        return (elapsed, remainder, unit);
    }
    /// Renders the range as up to `depth` unit terms, recursing on the
    /// remainder; the final pair of terms is joined with "and".
    fn print(&self, depth: u32) -> String {
        let (elapsed, remainder, unit) = self.floor_time_unit();
        // nothing left over, or the caller asked for a single term
        // (`remainder` is unsigned, so the old `<= 0` meant `== 0`)
        if remainder == 0 || depth <= 1 {
            return format!("{} {}", elapsed, unit);
        }
        let pretty_remainder = Timerange::new(remainder).print(depth - 1);
        if remainder < 60 || depth <= 2 {
            return format!("{} {} and {}", elapsed, unit, pretty_remainder);
        }
        return format!("{} {} {}", elapsed, unit, pretty_remainder);
    }
}
// TODO: refactor
/// Inserts `path` into `tree`, creating intermediate `Node`s as needed.
/// A path component that was previously a `Leaf` is promoted to a
/// `Node` when a longer path passes through it; the replacement subtree
/// is built recursively from the remaining components.
fn traverse(path: &mut [String], tree: &mut Tree) {
    if path.len() <= 0 {
        return;
    }
    match path.split_first_mut() {
        None => unsafe { debug_unreachable!() },
        Some((first, rest)) => {
            // ensure the component exists; new components start as leaves
            if !tree.contains_key(first) {
                tree.insert(first.clone(), NodeType::Leaf);
            }
            if rest.len() <= 0 {
                return;
            } else {
                // a Leaf must become a Node to hold the rest of the path;
                // an existing Node is left in place (NOTE(review): its
                // existing children are NOT extended with `rest` here)
                let should_replace: bool = match tree.get_mut(first) {
                    None => unsafe { debug_unreachable!("add_project_filter: NodeType not found") },
                    Some(node_type) => {
                        match node_type {
                            &mut NodeType::Leaf => {
                                true
                            },
                            &mut NodeType::Node(_) => {
                                false
                            }
                        }
                    }
                };
                if should_replace {
                    let mut new_tree: Tree = HashMap::new();
                    {
                        let _new_tree = &mut new_tree;
                        traverse(rest, _new_tree);
                    };
                    tree.insert(first.clone(), NodeType::Node(new_tree));
                }
            }
        }
    }
}
/// Walks `path` down `tree`.
/// NOTE(review): despite the name, this returns `true` when a component
/// is MISSING from the tree and `false` when the walk reaches a `Leaf`
/// or consumes the whole path — i.e. it appears to answer "does this
/// path escape every recorded prefix?". Confirm against the call sites
/// before relying on the name.
fn subpath_exists_in_tree(tree: &Tree, path: &Vec<String>) -> bool {
    let mut current = tree;
    for path_item in path {
        if !current.contains_key(path_item) {
            return true;
        }
        match current.get(path_item) {
            None => {
                return true;
            },
            Some(node_type) => {
                match node_type {
                    &NodeType::Leaf => {
                        // path is super path
                        return false;
                    },
                    &NodeType::Node(ref tree) => {
                        current = tree;
                    }
                }
            }
        };
    }
    return false;
}
/*
    Adapted from: https://gist.github.com/m4rw3r/1f43559dcd73bf46e845
    Thanks to github.com/m4rw3r for wrapping parsers for line number tracking!
*/
/// Strategy for tracking a position (e.g. a line number) from the
/// tokens a wrapped parser consumes.
pub trait NumberingType {
    type Token;
    type Position;
    /// Fold a slice of consumed tokens into the tracked position.
    fn update(&mut self, &[Self::Token]);
    /// Report the current position value.
    fn position(&self) -> Self::Position;
}
/// Zero-based line counter: counts `\n` bytes seen so far.
#[derive(Debug)]
pub struct LineNumber(u64);
impl LineNumber {
    pub fn new() -> Self { LineNumber(0) }
}
impl NumberingType for LineNumber {
    type Token = u8;
    type Position = u64;
    fn update(&mut self, consumed: &[Self::Token]) {
        // add one for every newline byte in the consumed slice
        let newlines = consumed.iter().filter(|&&byte| byte == b'\n').count();
        self.0 += newlines as u64;
    }
    fn position(&self) -> Self::Position {
        self.0
    }
}
/// Wraps a chomp parser so that every successful or failed parse also
/// updates a `NumberingType` (e.g. line counting) from the consumed
/// input, and pairs each parse result with the current position.
#[derive(Debug)]
pub struct Numbering<'i, T, P, R, E>
    where T: NumberingType,
          P: FnMut(Input<'i, T::Token>) -> ParseResult<'i, T::Token, R, E>,
          R: 'i,
          E: 'i,
          <T as NumberingType>::Token: 'i {
    parser: P,
    numbering: T,
    // ties R and E to lifetime 'i without storing either
    _re: PhantomData<&'i (R, E)>,
}
impl<'i, N, P, R, E> Numbering<'i, N, P, R, E>
    where N: NumberingType,
          P: FnMut(Input<'i, N::Token>) -> ParseResult<'i, N::Token, R, E>,
          R: 'i,
          E: 'i,
          <N as NumberingType>::Position: std::fmt::Debug,
          <N as NumberingType>::Token: 'i {
    pub fn new(n: N, p: P) -> Self {
        Numbering {
            parser: p,
            numbering: n,
            _re: PhantomData,
        }
    }
    /// Runs the inner parser and, on success or error, feeds the slice
    /// of consumed input (original buffer minus the remainder) into the
    /// numbering before returning `(position, result)`.
    pub fn parse(&mut self, i: Input<'i, N::Token>) -> ParseResult<'i, N::Token, (N::Position, R), E> {
        use chomp::primitives::InputBuffer;
        use chomp::primitives::InputClone;
        use chomp::primitives::IntoInner;
        use chomp::primitives::State;
        let buf = i.clone();
        match (self.parser)(i.clone()).into_inner() {
            State::Data(remainder, t) => {
                // consumed = original length - remaining length
                self.numbering.update(&buf.buffer()[..buf.buffer().len() - remainder.buffer().len()]);
                let pos = self.numbering.position();
                remainder.ret((pos, t))
            },
            State::Error(remainder, e) => {
                self.numbering.update(&buf.buffer()[..buf.buffer().len() - remainder.len()]);
                buf.replace(remainder).err(e)
            },
            State::Incomplete(n) => buf.incomplete(n)
        }
    }
}
// Source: https://gist.github.com/dashed/9d18b7e4cc351a7feabc89897a58baff
#[test]
// Exercises Numbering + LineNumber over a buffered chomp stream: each
// two-byte parse must report the number of newlines consumed so far, and
// the wrapped parser must be invoked exactly once per attempt.
fn line_numbering() {
    use chomp::take;
    use std::cell::Cell;
    use chomp::buffer::{IntoStream, Stream, StreamError};

    let mut data = b"abc\nc\n\ndef".into_stream();

    // Just some state to make sure we are called the correct number of times:
    let i = Cell::new(0);

    let p = |d| {
        i.set(i.get() + 1);

        take(d, 2)
    };

    let mut n = Numbering::new(LineNumber::new(), p);

    // If we could implement FnMut for Numbering then we would be good, but we need to wrap now:
    let mut m = |i| n.parse(i);

    // Each pair is (line count after the chunk, the 2-byte chunk itself).
    assert_eq!(data.parse(&mut m), Ok((0, &b"ab"[..])));
    assert_eq!(i.get(), 1);
    assert_eq!(data.parse(&mut m), Ok((1, &b"c\n"[..])));
    assert_eq!(i.get(), 2);
    assert_eq!(data.parse(&mut m), Ok((2, &b"c\n"[..])));
    assert_eq!(i.get(), 3);
    assert_eq!(data.parse(&mut m), Ok((3, &b"\nd"[..])));
    assert_eq!(i.get(), 4);
    assert_eq!(data.parse(&mut m), Ok((3, &b"ef"[..])));
    assert_eq!(i.get(), 5);
    assert_eq!(data.parse(&mut m), Err(StreamError::EndOfInput));
    assert_eq!(i.get(), 5);
}
// Renamed: --filter-by-project to --only-with-project
#![recursion_limit="100"]
#[macro_use]
extern crate version;
#[macro_use]
extern crate debug_unreachable;
#[macro_use]
extern crate chomp;
extern crate chrono;
extern crate colored;
extern crate clap;
// TODO: clean up imports
use std::path::{Path, PathBuf};
use std::fs::{File, Metadata};
use std::collections::{HashMap, HashSet, BTreeMap};
use std::ascii::{AsciiExt};
use std::env;
use std::process;
use std::marker::PhantomData;
use colored::*;
use clap::{Arg, App, SubCommand};
// use chrono::*;
use chrono::offset::utc::UTC;
use chrono::offset::{TimeZone, Offset};
use chrono::offset::local::Local;
use chrono::datetime::DateTime;
use chrono::naive::datetime::NaiveDateTime;
use chrono::naive::date::NaiveDate;
use chrono::naive::time::NaiveTime;
use chrono::duration::Duration;
// TODO: reorg this
use chomp::{SimpleResult, Error, ParseError, ParseResult};
use chomp::primitives::{InputBuffer, IntoInner, State};
use chomp::{Input, U8Result, parse_only};
use chomp::buffer::{Source, Stream, StreamError};
use chomp::{take_while1, token};
use chomp::parsers::{string, eof, any, satisfy};
use chomp::combinators::{or, many_till, many, many1, skip_many, skip_many1, look_ahead, option, sep_by};
use chomp::ascii::{is_whitespace, decimal, digit};
// use chomp::*;
/// CLI entry point: build the clap argument matcher, configure a `GTD`
/// journal from the flags, parse the gtdtxt file, then print the matching
/// tasks followed by pulse and summary statistics.
///
/// Exits with status 1 on unparseable option values or invalid files.
fn main() {

    let version: &str = &format!("v{} (semver.org)", version!());

    let cmd_matches = App::new("gtdtxt")
        .version(version) // semver semantics
        .about("CLI app to parse a human-readable text file for managing GTD workflow")
        .author("Alberto Leal <mailforalberto@gmail.com> (github.com/dashed)")
        .arg(
            Arg::with_name("due-within")
                .next_line_help(true)
                .help("Display tasks due within a time duration.{n}Example: 2 days 4 hrs")
                .short("w")
                .long("due-within")
                .required(false)
                .takes_value(true)
                .multiple(false)
        )
        .arg(
            Arg::with_name("hide-by-default")
                .help("Hide tasks by default. Usage of flags / options are necessary to display tasks.")
                .short("x")
                .long("hide-by-default")
                .required(false)
        )
        .arg(
            Arg::with_name("show-overdue")
                .help("Show overdue tasks. Used with --hide-by-default")
                .short("a")
                .long("show-overdue")
                .required(false)
        )
        .arg(
            Arg::with_name("show-incomplete")
                .help("Show incomplete tasks. Used with --hide-by-default")
                .short("b")
                .long("show-incomplete")
                .required(false)
        )
        .arg(
            Arg::with_name("show-flagged")
                .help("Show flagged tasks. Used with --hide-by-default")
                .short("e")
                .long("show-flagged")
                .required(false)
        )
        .arg(
            Arg::with_name("show-nonproject-tasks")
                .help("Show tasks that are not in a project. Used with --hide-by-default")
                .short("g")
                .long("show-nonproject-tasks")
                .required(false)
        )
        .arg(
            Arg::with_name("show-project-tasks")
                // Fixed copy-paste from show-nonproject-tasks: this flag
                // shows tasks that ARE in a project.
                .help("Show tasks that are in a project. Used with --hide-by-default")
                .short("j")
                .long("show-project-tasks")
                .required(false)
        )
        .arg(
            Arg::with_name("hide-overdue")
                .help("Hide overdue tasks.")
                .short("o")
                .long("hide-overdue")
                .required(false)
        )
        .arg(
            Arg::with_name("show-done")
                .help("Show completed tasks.")
                .short("d")
                .long("show-done")
                .required(false)
        )
        .arg(
            Arg::with_name("show-deferred")
                .help("Reveal deferred tasks.")
                .short("r")
                .long("show-deferred")
                .required(false)
        )
        .arg(
            Arg::with_name("show-incubate")
                .help("Show incubated tasks.")
                .short("i")
                .long("show-incubate")
                .required(false)
        )
        .arg(
            Arg::with_name("hide-incomplete")
                .help("Hide incomplete tasks.")
                .short("I")
                .long("hide-incomplete")
                .required(false)
        )
        .arg(
            Arg::with_name("validate")
                .help("Validate file and suppress any output.")
                .short("q")
                .long("validate")
                .required(false)
        )
        .arg(
            Arg::with_name("hide-nonproject-tasks")
                .help("Hide tasks not belonging to a project.")
                .short("n")
                .long("hide-nonproject-tasks")
                .required(false)
        )
        .arg(
            Arg::with_name("show-only-flagged")
                .help("Show only flagged tasks.")
                .short("f")
                .long("show-only-flagged")
                .required(false)
        )
        .arg(
            Arg::with_name("hide-flagged")
                .help("Hide flagged tasks.")
                .short("F")
                .long("hide-flagged")
                .required(false)
        )
        .arg(
            Arg::with_name("sort-overdue-by-priority")
                .help("Sort overdue tasks by priority.")
                .short("z")
                .long("sort-overdue-by-priority")
                .required(false)
        )
        .arg(
            Arg::with_name("only-with-project")
                .next_line_help(true)
                .help("Show only tasks with given project path.{n}\
                       Example: path / to / project")
                .short("p")
                .long("only-with-project")
                .required(false)
                .takes_value(true)
                .multiple(true)
                .validator(|path| {
                    let path = path.trim();
                    if path.len() <= 0 {
                        return Err(String::from("invalid project path"));
                    }
                    return Ok(());
                })
        )
        .arg(
            Arg::with_name("show-with-project")
                .next_line_help(true)
                // Fixed missing space: the continuation previously rendered
                // "project path.Used with ...".
                .help("Show tasks with given project path. \
                       Used with --hide-by-default{n}\
                       Example: path / to / project")
                .short("k")
                .long("show-with-project")
                .required(false)
                .takes_value(true)
                .multiple(true)
                .validator(|path| {
                    let path = path.trim();
                    if path.len() <= 0 {
                        return Err(String::from("invalid project path"));
                    }
                    return Ok(());
                })
        )
        .arg(
            Arg::with_name("filter-by-tag")
                .next_line_help(true)
                .help("Filter using given tag or list of comma separated tags.{n}Example: chore, art, to watch")
                .short("t")
                .long("filter-by-tag")
                .required(false)
                .takes_value(true)
                .multiple(true)
                .validator(|tag| {
                    let tag = tag.trim();
                    if tag.len() <= 0 {
                        return Err(String::from("invalid tag"));
                    }
                    return Ok(());
                })
        )
        .arg(
            Arg::with_name("filter-by-context")
                .next_line_help(true)
                .help("Filter using given context or list of comma separated contexts.{n}Example: phone, computer, internet connection, office")
                .short("c")
                .long("filter-by-context")
                .required(false)
                .takes_value(true)
                .multiple(true)
                .validator(|context| {
                    let context = context.trim();
                    if context.len() <= 0 {
                        return Err(String::from("invalid context"));
                    }
                    return Ok(());
                })
        )
        .arg(
            Arg::with_name("path to gtdtxt file")
                .help("Path to gtdtxt file.")
                .required(true)
                .index(1)
                .validator(|gtdtxt_file| {
                    let gtdtxt_file = gtdtxt_file.trim();
                    if gtdtxt_file.len() <= 0 {
                        return Err(String::from("invalid path to file"));
                    } else {
                        return Ok(());
                    }
                })
        ).get_matches();

    let path_to_file: String = cmd_matches.value_of("path to gtdtxt file")
        .unwrap()
        .trim()
        .to_string();

    let base_root = format!("{}", env::current_dir().unwrap().display());
    let mut journal = GTD::new(base_root);

    // due within filter
    if let Some(due_within) = cmd_matches.value_of("due-within") {
        let due_within = due_within.trim();

        match parse_only(|i| multiple_time_range(i), due_within.as_bytes()) {
            // `mut` was unused here; the parsed total (seconds) is consumed
            // directly.
            Ok(result) => {
                journal.due_within = Duration::seconds(result as i64);
            },
            Err(e) => {
                println!("Unable to parse value to option `--due-within`: {}", due_within);
                process::exit(1);
                // TODO: refactor
                // panic!("{:?}", e);
            }
        }
    }

    // project path filters
    if let Some(project_paths) = cmd_matches.values_of("only-with-project") {
        for project_path in project_paths {
            match parse_only(|i| string_list(i, b'/'), project_path.as_bytes()) {
                Ok(mut result) => {
                    journal.add_project_only_filter(&mut result);
                },
                Err(e) => {
                    // TODO: refactor
                    panic!("{:?}", e);
                }
            }
        }
    }

    if let Some(project_paths) = cmd_matches.values_of("show-with-project") {
        for project_path in project_paths {
            match parse_only(|i| string_list(i, b'/'), project_path.as_bytes()) {
                Ok(mut result) => {
                    journal.add_project_whitelist(&mut result);
                },
                Err(e) => {
                    // TODO: refactor
                    panic!("{:?}", e);
                }
            }
        }
    }

    // tag filters
    if let Some(tags) = cmd_matches.values_of("filter-by-tag") {
        for tag in tags {
            match parse_only(|i| string_list(i, b','), tag.as_bytes()) {
                Ok(mut result) => {
                    if result.len() > 0 {
                        journal.filter_by_tags = true;
                    }
                    journal.add_tag_filters(result);
                },
                Err(e) => {
                    // TODO: refactor
                    panic!("{:?}", e);
                }
            }
        }
    }

    // context filters
    if let Some(contexts) = cmd_matches.values_of("filter-by-context") {
        for context in contexts {
            match parse_only(|i| string_list(i, b','), context.as_bytes()) {
                Ok(mut result) => {
                    if result.len() > 0 {
                        journal.filter_by_contexts = true;
                    }
                    journal.add_context_filters(result);
                },
                Err(e) => {
                    // TODO: refactor
                    panic!("{:?}", e);
                }
            }
        }
    }

    // flags
    journal.sort_overdue_by_priority = cmd_matches.is_present("sort-overdue-by-priority");
    journal.hide_flagged = cmd_matches.is_present("hide-flagged");
    journal.show_only_flagged = cmd_matches.is_present("show-only-flagged");
    journal.show_done = cmd_matches.is_present("show-done");
    journal.show_incubate = cmd_matches.is_present("show-incubate");
    journal.show_deferred = cmd_matches.is_present("show-deferred");
    journal.hide_overdue = cmd_matches.is_present("hide-overdue");
    journal.hide_nonproject_tasks = cmd_matches.is_present("hide-nonproject-tasks");
    journal.hide_incomplete = cmd_matches.is_present("hide-incomplete");
    journal.hide_tasks_by_default = cmd_matches.is_present("hide-by-default");
    journal.show_overdue = cmd_matches.is_present("show-overdue");
    journal.show_incomplete = cmd_matches.is_present("show-incomplete");
    journal.show_flagged = cmd_matches.is_present("show-flagged");
    journal.show_nonproject_tasks = cmd_matches.is_present("show-nonproject-tasks");
    journal.show_project_tasks = cmd_matches.is_present("show-project-tasks");

    parse_file(None, path_to_file.clone(), &mut journal);

    // Journal is fully populated; freeze it.
    let journal: GTD = journal;

    if cmd_matches.is_present("validate") {
        println!("{:>20} {}", "Tasks found".purple(), format!("{}", journal.tasks.len()).bold().purple());
        println!("File validated.");
        return;
    }

    // Display tasks

    let mut display_divider = false;

    if journal.due_within.num_seconds() > 0 {
        println!("{:>11} {} {}",
            "",
            "Displaying tasks due within".bold().white(),
            Timerange::new(journal.due_within.num_seconds() as u64).print(10).white().bold()
        );
        display_divider = true;
    }

    if journal.show_only_flagged {
        println!("{:>11} {}",
            "",
            "Displaying only flagged tasks.".bold().white()
        );
        display_divider = true;
    } else if journal.hide_flagged {
        println!("{:>11} {}",
            "",
            "Hiding flagged tasks.".bold().white()
        );
        display_divider = true;
    }

    if display_divider {
        println!("");
    }

    // `print_line` controls the blank-line separator between task groups.
    let mut print_line: bool = false;
    let mut num_displayed: u32 = 0;
    let mut num_overdue = 0;
    let mut num_deferred = 0;
    let mut num_done = 0;

    // display tasks that are overdue
    for (_, bucket) in journal.overdue.iter() {
        if bucket.len() <= 0 {
            continue;
        }
        num_overdue += bucket.len();
        if !journal.hide_overdue {
            if print_line {
                println!("");
            }
            num_displayed = num_displayed + print_vector_of_tasks(&journal, bucket);
            if !print_line && num_displayed > 0 {
                print_line = true;
            }
        }
    }

    // display inbox ordered by priority.
    // incubated tasks are not included
    for (_, inbox) in journal.inbox.iter() {
        if inbox.len() <= 0 {
            continue;
        }
        if print_line {
            println!("");
        }
        num_displayed = num_displayed + print_vector_of_tasks(&journal, inbox);
        if !print_line && num_displayed > 0 {
            print_line = true;
        }
    }

    // display deferred tasks ordered by priority
    for (_, deferred) in journal.deferred.iter() {
        if deferred.len() <= 0 {
            continue;
        }
        num_deferred += deferred.len();
        if journal.show_deferred || journal.hide_tasks_by_default {
            if print_line {
                println!("");
            }
            num_displayed = num_displayed + print_vector_of_tasks(&journal, deferred);
            if !print_line && num_displayed > 0 {
                print_line = true;
            }
        }
    }

    // display completed tasks
    for (_, done) in journal.done.iter() {
        if done.len() <= 0 {
            continue;
        }
        num_done += done.len();
        if journal.show_done || journal.hide_tasks_by_default {
            if print_line {
                println!("");
            }
            num_displayed = num_displayed + print_vector_of_tasks(&journal, done);
            if !print_line && num_displayed > 0 {
                print_line = true;
            }
        }
    }

    if num_displayed > 0 {
        println!("");
    }

    // Pulse chart: header row of "N days ago" labels...
    println!(" {}",
        "Tasks completed in the past week (tracked using `done:`)".purple().bold()
    );

    let mut days_ago = 0;
    loop {
        print!("{:>11} {}",
            format!("{} {}", days_ago, "days ago").purple(),
            format!("|").purple()
        );
        if days_ago >= 7 {
            break;
        }
        days_ago = days_ago + 1;
    }
    println!("");

    // ...and a matching row of per-day completion counts.
    let mut days_ago = 0;
    loop {
        let items_num = match journal.pulse.get(&days_ago) {
            None => 0,
            Some(bucket) => {
                (*bucket).len()
            }
        };
        print!("{:>11} {}",
            format!("{}", items_num).bold().purple(),
            format!("|").purple()
        );
        if days_ago >= 7 {
            break;
        }
        days_ago = days_ago + 1;
    }
    println!("");

    // Summary statistics.
    println!("");
    println!("{:>20} {}",
        "Tasks overdue".purple(),
        format!("{}", num_overdue).bold().purple()
    );
    println!("{:>20} {}",
        "Tasks deferred".purple(),
        format!("{}", num_deferred).bold().purple()
    );
    println!("{:>20} {}",
        "Tasks complete".purple(),
        format!("{}", num_done).bold().purple()
    );
    println!("{:>20} {}",
        "Tasks found".purple(),
        format!("{}", journal.tasks.len()).bold().purple()
    );
    println!("{:>20} {}",
        "Tasks not displayed".purple(),
        format!("{}", journal.tasks.len() as u32 - num_displayed).bold().purple()
    );
    println!("{:>20} {}",
        "Tasks displayed".purple(),
        format!("{}", num_displayed).bold().purple()
    );
    println!("{:>20} {}",
        "Executed at".purple(),
        format!("{}", Local::now().naive_local().format("%B %-d, %Y %-l:%M:%S %p")).purple()
    );
}
/* printers */
fn print_vector_of_tasks(journal: >D, inbox: &Vec<i32>) -> u32 {
let mut print_line: bool = false;
let mut num_displayed: u32 = 0;
for task_id in inbox {
if print_line {
println!("");
}
let task: &Task = journal.tasks.get(task_id).unwrap();
print_task(journal, task);
num_displayed = num_displayed + 1;
if !print_line {
print_line = true;
}
}
num_displayed
}
fn print_task(journal: >D, task: &Task) {
if task.flag && !journal.show_only_flagged {
println!("{:>11} ",
"Flagged".bold().yellow()
);
}
match task.title {
None => {
println!("Missing task title (i.e. `task: <title>`) in task block found {}",
task.debug_range_string()
);
process::exit(1);
},
Some(ref title) => {
println!("{:>11} {}", "Task:".blue().bold(), title);
}
}
match task.status {
None => {},
Some(ref status) => {
let status_string = match status {
&Status::Done => {
"Done".green()
},
&Status::NotDone => {
"Not Done".red().bold()
},
&Status::Incubate => {
"Incubate".purple()
}
};
println!("{:>11} {}", "Status:".bold().blue(), status_string);
}
}
match task.created_at {
None => {},
Some(ref created_at) => {
let rel_time = relative_time(created_at.timestamp(), Local::now().naive_local().timestamp());
let rel_time = match rel_time {
RelativeTime::Now(_, rel_time) => {
format!("({})", rel_time)
},
RelativeTime::Past(_, rel_time) => {
format!("({})", rel_time)
},
RelativeTime::Future(_, rel_time) => {
format!("({})", rel_time)
}
};
println!("{:>11} {} {}",
"Added at:".bold().blue(),
created_at.format("%B %-d, %Y %-l:%M %p"),
rel_time
);
}
}
match task.done_at {
None => {},
Some(ref done_at) => {
let rel_time = relative_time(done_at.timestamp(), Local::now().naive_local().timestamp());
let rel_time = match rel_time {
RelativeTime::Now(_, rel_time) => {
format!("({})", rel_time)
},
RelativeTime::Past(_, rel_time) => {
format!("({})", rel_time)
},
RelativeTime::Future(_, rel_time) => {
format!("({})", rel_time)
}
};
println!("{:>11} {} {}",
"Done at:".bold().blue(),
done_at.format("%B %-d, %Y %-l:%M %p"),
rel_time
);
}
}
match task.defer {
None => {},
Some(ref defer) => {
match defer {
&Defer::Forever => {
println!("{:>11} {}",
"Defer till:".bold().blue(),
"Forever".bold().green()
);
},
&Defer::Until(defer_till) => {
let rel_time = relative_time(defer_till.timestamp(), Local::now().naive_local().timestamp());
let rel_time = match rel_time {
RelativeTime::Now(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.red()
},
RelativeTime::Past(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().red()
},
RelativeTime::Future(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().green()
}
};
println!("{:>11} {} {}",
"Defer till:".bold().blue(),
defer_till.format("%B %-d, %Y %-l:%M %p"),
rel_time
);
}
}
}
}
match task.due_at {
None => {},
Some(ref due_at) => {
let rel_time = relative_time(due_at.timestamp(), Local::now().naive_local().timestamp());
let rel_time = match rel_time {
RelativeTime::Now(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.red()
},
RelativeTime::Past(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().red()
},
RelativeTime::Future(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().green()
}
};
println!("{:>11} {} {}",
"Due at:".bold().blue(),
due_at.format("%B %-d, %Y %-l:%M %p"),
rel_time
);
}
}
match task.source_file {
None => unsafe { debug_unreachable!() },
Some(ref path) => {
let path = match Path::new(path).strip_prefix(&journal.base_root) {
Err(_) => {
format!("{}", path)
},
Ok(path) => {
format!("./{}", path.display())
}
};
println!("{:>11} {}",
"File:".bold().blue(),
path
);
}
};
println!("{:>11} Lines {} to {}",
"Located:".bold().blue(),
task.task_block_range_start,
task.task_block_range_end
);
match task.tags {
None => {},
Some(ref tags) => {
println!("{:>11} {}",
"Tags:".bold().blue(),
tags.join(", ")
);
}
}
match task.contexts {
None => {},
Some(ref contexts) => {
println!("{:>11} {}",
"Contexts:".bold().blue(),
contexts.join(", ")
);
}
}
match task.project {
None => {},
Some(ref project_path) => {
println!("{:>11} {}",
"Project:".bold().blue(),
project_path.join(" / ")
);
}
}
if task.time > 0 {
println!("{:>11} {}",
"Time spent:".bold().blue(),
Timerange::new(task.time).print(2)
);
}
if task.has_chain() {
let chain_at: NaiveDateTime = task.get_chain();
let rel_time = relative_time(chain_at.timestamp(), Local::now().naive_local().timestamp());
let rel_time = match rel_time {
RelativeTime::Now(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.red()
},
RelativeTime::Past(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().red()
},
RelativeTime::Future(_, rel_time) => {
let rel_time = format!("({})", rel_time);
rel_time.bold().green()
}
};
println!("{:>11} {} {}",
"Last chain:".bold().blue(),
chain_at.format("%B %-d, %Y %-l:%M %p"),
rel_time
);
}
if task.priority != 0 {
println!("{:>11} {}", "Priority:".bold().blue(), task.priority);
}
match task.note {
None => {},
Some(ref note) => {
println!("{:>11} {}",
"Notes:".bold().blue(),
note
);
}
}
}
/* data structures */
/// One level of a project-path filter tree: an interior `Node` maps further
/// path components; a `Leaf` marks the end of an indexed filter path.
#[derive(Debug)]
enum NodeType {
    Node(Tree),
    Leaf
}
// index project filters
/// Maps one project-path component to the subtree beneath it.
type Tree = HashMap<String, NodeType>;
/// Completion state of a task (the `status:` field of a task block).
#[derive(Debug)]
enum Status {
    Done,
    Incubate,
    NotDone
}
/// One parsed task block from a gtdtxt file.
#[derive(Debug)]
struct Task {
    /* debug*/
    // First and last source lines of the task block; used for the
    // "between lines X and Y" diagnostics.
    task_block_range_start: u64,
    task_block_range_end: u64,

    /* props */
    title: Option<String>,
    note: Option<String>,
    created_at: Option<NaiveDateTime>,
    done_at: Option<NaiveDateTime>,
    // Recorded chain datetimes in ascending order; visible readers only use
    // key presence/ordering, the bool value is ignored.
    chains: Option<BTreeMap<NaiveDateTime, bool>>,
    due_at: Option<NaiveDateTime>,
    defer: Option<Defer>,
    status: Option<Status>,
    // Project path components, e.g. ["path", "to", "project"].
    project: Option<Vec<String>>,
    contexts: Option<Vec<String>>,
    tags: Option<Vec<String>>,
    priority: i64,
    // Accumulated time spent — presumably seconds (fed to Timerange like
    // due_within.num_seconds()); TODO confirm at the parser.
    time: u64,
    // Set when the task is flagged.
    flag: bool,
    // File the task block was parsed from.
    source_file: Option<String>
}
impl Task {
    /// Fresh, empty task whose block starts (and, until extended by the
    /// parser, also ends) at the given source line.
    fn new(task_block_range_start: u64) -> Task {
        Task {
            task_block_range_start: task_block_range_start,
            task_block_range_end: task_block_range_start,

            /* props */
            title: None,
            note: None,
            created_at: None,
            done_at: None,
            chains: None,
            due_at: None,
            defer: None,
            status: None,
            project: None,
            contexts: None,
            tags: None,
            priority: 0,
            time: 0,
            flag: false,
            source_file: None
        }
    }

    /// True only when status is explicitly `Done`.
    fn is_done(&self) -> bool {
        // Expression form replaces the former early-return scaffolding.
        match self.status {
            Some(ref status) => {
                match *status {
                    Status::Done => true,
                    _ => false
                }
            },
            None => false
        }
    }

    /// True when at least one chain datetime has been recorded.
    fn has_chain(&self) -> bool {
        // Single match; the former `is_none()` pre-check made the inner
        // `None` arm unreachable.
        match self.chains {
            None => false,
            Some(ref tree) => tree.len() > 0
        }
    }

    /// Most recent chain datetime. Callers guard with `has_chain()`; the
    /// `None` arm is unreachable under that contract.
    fn get_chain(&self) -> NaiveDateTime {
        match self.chains {
            None => unsafe { debug_unreachable!() },
            Some(ref tree) => {
                // BTreeMap iterates in ascending key order, so the last
                // entry is the latest datetime.
                // see: http://stackoverflow.com/a/33699340/412627
                let (key, _) = tree.iter().next_back().unwrap();
                key.clone()
            }
        }
    }

    /// Human-readable source location: "on line N" for single-line blocks,
    /// otherwise "between lines N and M".
    fn debug_range_string(&self) -> String {
        if self.task_block_range_start == self.task_block_range_end {
            return format!("on line {}", self.task_block_range_start);
        }

        format!("between lines {} and {}",
            self.task_block_range_start,
            self.task_block_range_end
        )
    }
}
/// The journal: all parsed tasks plus the display-filter configuration
/// derived from CLI flags.
#[derive(Debug)]
struct GTD {
    /* debug */
    // the line of the last task block line parsed
    previous_task_block_line: u64,

    /* flag/switches */
    hide_flagged: bool,
    show_only_flagged: bool,
    show_done: bool,
    show_incubate: bool,
    show_deferred: bool,
    hide_overdue: bool,
    hide_nonproject_tasks: bool,
    hide_incomplete: bool,
    // --only-with-project paths, indexed as a component tree.
    project_only_filter: Tree,
    // --show-with-project paths, indexed as a component tree.
    project_whitelist: Tree,
    sort_overdue_by_priority: bool,
    filter_by_tags: bool,
    filter_by_contexts: bool,
    due_within: Duration,
    hide_tasks_by_default: bool,
    show_overdue: bool,
    show_incomplete: bool,
    show_flagged: bool,
    show_nonproject_tasks: bool,
    show_project_tasks: bool,

    /* data */
    base_root: String,

    // track files opened
    opened_files: HashSet<String>,

    // path to file -> vector of task ids
    files_with_completed_tasks: HashMap<String, Vec<i32>>,

    // days-ago (0..=7) -> ids of tasks completed that day
    pulse: HashMap<i64, Vec<i32>>,

    // active --filter-by-tag / --filter-by-context values
    tags: HashSet<String>,
    contexts: HashSet<String>,

    // lookup table for tasks
    tasks: HashMap<i32, Task>,

    // this contains any tasks that are overdue
    // timestamp difference -> task id
    overdue: BTreeMap<i64, Vec<i32>>,

    // this contains any tasks that are either due soon
    // timestamp difference -> task id
    // due_soon: BTreeMap<i64, Vec<i32>>,

    // inbox contain any tasks that do not have a project
    // priority -> vector of task ids ordered by recent appearance
    inbox: BTreeMap<i64, Vec<i32>>,

    // this contains any tasks that are inactive
    // priority -> vector of task ids ordered by recent appearance
    deferred: BTreeMap<i64, Vec<i32>>,

    // this contains any tasks that are completed
    // priority -> vector of task ids ordered by recent appearance
    done: BTreeMap<i64, Vec<i32>>
}
impl GTD {
/// Build an empty journal rooted at `base_root`, with the inbox, done and
/// deferred maps each pre-seeded with an empty priority-0 bucket.
fn new(base_root: String) -> GTD {
    // Seed each priority-keyed map with its priority-0 bucket.
    let mut inbox = BTreeMap::new();
    inbox.insert(0, Vec::new());

    let mut done = BTreeMap::new();
    done.insert(0, Vec::new());

    let mut deferred = BTreeMap::new();
    deferred.insert(0, Vec::new());

    GTD {
        /* error output */
        previous_task_block_line: 0,

        /* options */
        hide_flagged: false,
        show_only_flagged: false,
        show_done: false,
        show_incubate: false,
        show_deferred: false,
        hide_overdue: false,
        hide_nonproject_tasks: false,
        hide_incomplete: false,
        project_only_filter: HashMap::new(),
        project_whitelist: HashMap::new(),
        sort_overdue_by_priority: false,
        filter_by_tags: false,
        filter_by_contexts: false,
        due_within: Duration::seconds(0),
        hide_tasks_by_default: false,
        show_overdue: false,
        show_incomplete: false,
        show_flagged: false,
        show_nonproject_tasks: false,
        show_project_tasks: false,

        /* data */
        base_root: base_root,
        opened_files: HashSet::new(),
        files_with_completed_tasks: HashMap::new(),
        pulse: HashMap::new(),
        tags: HashSet::new(),
        contexts: HashSet::new(),
        tasks: HashMap::new(),
        inbox: inbox,
        done: done,
        deferred: deferred,
        overdue: BTreeMap::new()
    }
}
/// Record every given tag as an active `--filter-by-tag` value.
fn add_tag_filters(&mut self, tags: Vec<String>) {
    // HashSet::extend consumes the vector and inserts each tag.
    self.tags.extend(tags);
}
/// True when any of the task's tags matches an active tag filter.
fn have_tags(&mut self, tags: &Vec<String>) -> bool {
    tags.iter().any(|tag| self.tags.contains(tag))
}
/// Record every given context as an active `--filter-by-context` value.
fn add_context_filters(&mut self, contexts: Vec<String>) {
    // HashSet::extend consumes the vector and inserts each context.
    self.contexts.extend(contexts);
}
/// True when any of the task's contexts matches an active context filter.
fn have_contexts(&mut self, contexts: &Vec<String>) -> bool {
    contexts.iter().any(|context| self.contexts.contains(context))
}
/// Index one `--only-with-project` path into the only-filter tree.
fn add_project_only_filter(&mut self, path: &mut Vec<String>) {
    traverse(path, &mut self.project_only_filter);
}
/// Index one `--show-with-project` path into the whitelist tree.
fn add_project_whitelist(&mut self, path: &mut Vec<String>) {
    traverse(path, &mut self.project_whitelist);
}
/// True when at least one `--only-with-project` filter was given.
fn has_project_only_filters(&mut self) -> bool {
    // is_empty() is the idiomatic form of `len() > 0`.
    !self.project_only_filter.is_empty()
}
/// True when at least one `--show-with-project` filter was given.
fn has_project_whitelist(&mut self) -> bool {
    // is_empty() is the idiomatic form of `len() > 0`.
    !self.project_whitelist.is_empty()
}
/// True when this project path should be filtered out per the
/// `--only-with-project` tree (see `subpath_exists_in_tree`).
fn should_only_filter_project(&mut self, path: &Vec<String>) -> bool {
    subpath_exists_in_tree(&self.project_only_filter, path)
}
/// True when this project path should be shown per the
/// `--show-with-project` tree (see `subpath_exists_in_tree`).
fn should_whitelist_project(&mut self, path: &Vec<String>) -> bool {
    subpath_exists_in_tree(&self.project_whitelist, path)
}
/// Finalize a fully-parsed task block: close its line range, validate it,
/// assign it an id, route it into the display buckets, and store it in the
/// lookup table.
///
/// Exits the process (status 1) when the block has no title, or carries a
/// `done:` datetime without `status: done`.
fn add_task(&mut self, task: Task) {
    // TODO: is this the best placement for this?
    // Close the block's line range at the last parsed task-block line.
    let mut task = task;
    task.task_block_range_end = self.previous_task_block_line;
    let task = task;

    // validation
    if task.title.is_none() {
        println!("Missing task title (i.e. `task: <title>`) in task block found {}",
            task.debug_range_string()
        );
        process::exit(1);
    }

    let new_id = self.next_task_id();

    match task.done_at {
        None => {},
        Some(ref done_at) => {
            if !task.is_done() {
                // `done:` present but status is not Done: reject the file.
                println!("In file: {}", task.source_file.as_ref().unwrap());
                println!("Task is incorrectly given a `done` datetime found at {}",
                    task.debug_range_string()
                );
                println!("Mayhaps you forgot to add: 'status: done'");
                process::exit(1);
            } else {
                // Completed task contributes to the 7-day pulse chart.
                self.add_to_pulse(done_at, new_id);
            }
        }
    };

    // track completed task by its source file
    match task.status {
        None => {},
        Some(ref status) => {
            match status {
                &Status::Done => {
                    match task.source_file {
                        None => unsafe { debug_unreachable!() },
                        Some(ref source_file) => {
                            match self.files_with_completed_tasks.get_mut(source_file) {
                                None => unsafe { debug_unreachable!() },
                                Some(bucket) => {
                                    (*bucket).push(new_id);
                                }
                            }
                        }
                    }
                },
                _ => {}
            }
        }
    }

    // sort tasks into the proper data structure that shall be displayed
    // to the user
    // TODO: refactor eventually
    if self.hide_tasks_by_default {
        // hide task unless it satisfy [whitelist] filters
        self.add_task_default_hidden(&task, new_id);
    } else {
        // default behaviour
        self.add_task_default(&task, new_id);
    }

    // add task to look-up table
    self.tasks.insert(new_id, task);
}
/// Bucket a task under --hide-by-default semantics: a task is displayed
/// only when some `show-*` flag, attribute presence (tags / contexts /
/// project), or the project whitelist opts it back in.
fn add_task_default_hidden(&mut self, task: &Task, new_id: i32) {
    if self.should_hide_task(&task) {
        return;
    }

    // Baseline opt-in from task attributes and show-* flags.
    let mut shall_show: bool = task.tags.is_some() ||
        task.contexts.is_some() ||
        task.project.is_some() ||
        self.show_only_flagged && task.flag ||
        self.show_flagged && task.flag ||
        self.show_nonproject_tasks && task.project.is_none() ||
        self.show_project_tasks && task.project.is_some();

    // The project whitelist can additionally opt a task in.
    if self.has_project_whitelist() {
        let should_whitelist: bool = match task.project {
            Some(ref project_path) => {
                self.should_whitelist_project(project_path)
            },
            // TODO: need flag to control this
            None => true
        };
        if should_whitelist {
            shall_show = true;
        };
    };

    // sort task by status and priority
    match task.status {
        // No status behaves like NotDone below.
        None => {
            if self.hide_incomplete {
                // hide task
            } else if self.is_overdue(&task) {
                if self.show_overdue || task.due_at.is_some() || shall_show {
                    self.add_to_overdue(&task, new_id);
                }
            } else if !self.should_defer(&task) {
                if self.show_incomplete || shall_show {
                    // add task to inbox
                    self.add_to_inbox(task.priority, new_id);
                }
            } else {
                if self.show_deferred || shall_show {
                    self.add_to_deferred(task.priority, new_id);
                }
            }
        },
        Some(ref status) => {
            match status {
                &Status::NotDone => {
                    if self.hide_incomplete {
                        // hide task
                    } else if self.is_overdue(&task) {
                        if self.show_overdue || task.due_at.is_some() || shall_show {
                            self.add_to_overdue(&task, new_id);
                        }
                    } else if !self.should_defer(&task) {
                        if self.show_incomplete || shall_show {
                            // add task to inbox
                            self.add_to_inbox(task.priority, new_id);
                        }
                    } else {
                        if self.show_deferred || shall_show {
                            self.add_to_deferred(task.priority, new_id);
                        }
                    }
                },
                &Status::Incubate => {
                    // NOTE(review): identical handling to NotDone here; the
                    // non-hidden path (add_task_default) treats Incubate
                    // specially via show_incubate.
                    if self.hide_incomplete {
                        // hide task
                    } else if self.is_overdue(&task) {
                        if self.show_overdue || task.due_at.is_some() || shall_show {
                            self.add_to_overdue(&task, new_id);
                        }
                    } else if !self.should_defer(&task) {
                        if self.show_incomplete || shall_show {
                            // add task to inbox
                            self.add_to_inbox(task.priority, new_id);
                        }
                    } else {
                        if self.show_deferred || shall_show {
                            self.add_to_deferred(task.priority, new_id);
                        }
                    }
                },
                &Status::Done => {
                    if self.show_done || shall_show {
                        self.add_to_done(task.priority, new_id);
                    }
                }
            }
        }
    }
}
/// Bucket a task under default (show-by-default) semantics: everything is
/// displayed unless a hide-* flag or filter removes it; incubated tasks in
/// the inbox additionally require --show-incubate.
fn add_task_default(&mut self, task: &Task, new_id: i32) {
    if self.should_hide_task(&task) {
        return;
    }

    // sort task by status and priority
    match task.status {
        // No status behaves like NotDone below.
        None => {
            if self.hide_incomplete {
                // hide task
            } else if self.is_overdue(&task) {
                self.add_to_overdue(&task, new_id);
            } else if !self.should_defer(&task) {
                // add task to inbox
                self.add_to_inbox(task.priority, new_id);
            } else {
                self.add_to_deferred(task.priority, new_id);
            }
        },
        Some(ref status) => {
            match status {
                &Status::NotDone => {
                    if self.hide_incomplete {
                        // hide task
                    } else if self.is_overdue(&task) {
                        self.add_to_overdue(&task, new_id);
                    } else if !self.should_defer(&task) {
                        // add task to inbox
                        self.add_to_inbox(task.priority, new_id);
                    } else {
                        self.add_to_deferred(task.priority, new_id);
                    }
                },
                &Status::Incubate => {
                    if self.hide_incomplete {
                        // hide task
                    } else if self.is_overdue(&task) {
                        self.add_to_overdue(&task, new_id);
                    } else if !self.should_defer(&task) {
                        // Incubated inbox tasks only appear when requested.
                        if self.show_incubate {
                            // add task to inbox
                            self.add_to_inbox(task.priority, new_id);
                        }
                    } else {
                        self.add_to_deferred(task.priority, new_id);
                    }
                },
                &Status::Done => {
                    self.add_to_done(task.priority, new_id);
                }
            }
        }
    }
}
/// Global filters applied before any bucketing: returns true when the task
/// must not be displayed at all (non-project hiding, tag/context filters,
/// --only-with-project paths, and flag-related switches).
fn should_hide_task(&mut self, task: &Task) -> bool {
    if self.hide_nonproject_tasks && !task.project.is_some() {
        return true;
    }

    // Tag filter: a task with no tags, or none matching, is hidden.
    if self.filter_by_tags {
        match task.tags {
            None => {
                // TODO: need flag to control this
                return true;
            },
            Some(ref tags) => {
                if !self.have_tags(tags) {
                    return true;
                }
            }
        }
    }

    // Context filter: same shape as the tag filter.
    if self.filter_by_contexts {
        match task.contexts {
            None => {
                // TODO: need flag to control this
                return true;
            },
            Some(ref contexts) => {
                if !self.have_contexts(contexts) {
                    return true;
                }
            }
        }
    }

    // invariant: task belongs to a project
    // if necessary, apply any project path apply filters
    if self.has_project_only_filters() {
        let should_filter: bool = match task.project {
            Some(ref project_path) => {
                self.should_only_filter_project(project_path)
            },
            // TODO: need flag to control this
            None => true
        };
        if should_filter {
            return true;
        }
    }

    // Flag switches: show-only-flagged dominates, then show/hide-flagged.
    if self.show_only_flagged {
        return !task.flag;
    }

    if self.show_flagged && task.flag {
        return false;
    }

    if self.hide_flagged {
        return task.flag;
    }

    // TODO: redundant; remove
    // if self.show_project_tasks && task.project.is_some() {
    //     return false;
    // }

    return false;
}
fn should_defer(&mut self, task: &Task) -> bool {
// TODO: necessary??
// if self.show_deferred {
// return false;
// }
match task.defer {
None => {
return false;
},
Some(ref defer) => {
match defer {
&Defer::Forever => {
return true;
},
&Defer::Until(defer_till) => {
return defer_till.timestamp() > Local::now().naive_local().timestamp();
}
}
}
}
return false;
}
/// Track a completed task for the 7-day "pulse" chart, bucketed by the
/// whole number of days since completion. Completions older than 7 days
/// (or in the future) are ignored.
fn add_to_pulse(&mut self, done_at: &NaiveDateTime, task_id: i32) {
    let diff = Local::now().naive_local().timestamp() - done_at.timestamp();

    if !(0 <= diff && diff <= chrono::Duration::days(7).num_seconds()) {
        return;
    }

    let diff = diff as f64;
    let sec_per_minute: f64 = 60f64;
    let sec_per_hour: f64 = sec_per_minute * 60f64;
    let sec_per_day: f64 = sec_per_hour * 24f64;

    let days_ago = (diff / sec_per_day).floor() as i64;

    // entry() replaces the former contains_key()/insert()/get_mut()
    // sequence (one lookup instead of up to three); this also removes an
    // unreachable branch whose debug message wrongly named journal.overdue.
    self.pulse.entry(days_ago).or_insert_with(Vec::new).push(task_id);
}
fn is_overdue(&mut self, task: &Task) -> bool {
match task.due_at {
None => {
return false;
},
Some(ref due_at) => {
return (Local::now().naive_local().timestamp() + self.due_within.num_seconds()) >= due_at.timestamp();
}
}
false
}
/// Index `task` into the overdue map. The key is either the encoded
/// priority (when `sort_overdue_by_priority` is set) or the negated
/// time-until-due, so ascending key iteration yields the most-overdue
/// tasks first. Tasks without a due date are skipped.
fn add_to_overdue(&mut self, task: &Task, task_id: i32) {
    match task.due_at {
        None => {
            return;
        },
        Some(ref due_at) => {
            let rel_time = due_at.timestamp() - Local::now().naive_local().timestamp();
            let encoded_key = if self.sort_overdue_by_priority {
                // encode_priority already returns i64; old `as i64` was a no-op
                GTD::encode_priority(task.priority)
            } else {
                // largest negative numbers appear first
                -rel_time
            };
            // entry API: one lookup instead of contains_key + insert + get_mut
            self.overdue.entry(encoded_key).or_insert_with(Vec::new).push(task_id);
        }
    }
}
/// Append `task_id` to the inbox bucket for its (encoded) priority,
/// creating the bucket on first use.
fn add_to_inbox(&mut self, task_priority: i64, task_id: i32) {
    let task_priority: i64 = GTD::encode_priority(task_priority);
    // entry API replaces ensure_priority_inbox + get_mut + unsafe
    // debug_unreachable!: one lookup, no unreachable branch to assert
    self.inbox.entry(task_priority).or_insert_with(Vec::new).push(task_id);
}
/// Append `task_id` to the deferred bucket for its (encoded) priority,
/// creating the bucket on first use.
fn add_to_deferred(&mut self, task_priority: i64, task_id: i32) {
    let task_priority: i64 = GTD::encode_priority(task_priority);
    // entry API replaces ensure_priority_deferred + get_mut + unsafe
    // debug_unreachable!: one lookup, no unreachable branch to assert
    self.deferred.entry(task_priority).or_insert_with(Vec::new).push(task_id);
}
/// Append `task_id` to the done bucket for its (encoded) priority,
/// creating the bucket on first use.
fn add_to_done(&mut self, task_priority: i64, task_id: i32) {
    let task_priority: i64 = GTD::encode_priority(task_priority);
    // entry API replaces ensure_priority_done + get_mut + unsafe
    // debug_unreachable!: one lookup, no unreachable branch to assert
    self.done.entry(task_priority).or_insert_with(Vec::new).push(task_id);
}
/// Id for the next task to be added: a 1-based index into `self.tasks`.
fn next_task_id(&mut self) -> i32 {
    // `to_task_id` already returns i32; the old trailing `as i32` was a no-op
    to_task_id(self.tasks.len() + 1)
}
// Ensure a priority bucket exists in the corresponding map. Each uses
// the entry API so the (encoded) key is looked up exactly once,
// replacing the old contains_key + insert pair.
fn ensure_priority_inbox(&mut self, priority: i64) {
    self.inbox.entry(GTD::encode_priority(priority)).or_insert_with(Vec::new);
}
fn ensure_priority_deferred(&mut self, priority: i64) {
    self.deferred.entry(GTD::encode_priority(priority)).or_insert_with(Vec::new);
}
fn ensure_priority_done(&mut self, priority: i64) {
    self.done.entry(GTD::encode_priority(priority)).or_insert_with(Vec::new);
}
// TODO: refactor
// Priority keys are stored negated so that ascending map iteration
// yields the highest user-facing priority first.
// NOTE(review): inferred from how the encoded keys are used as map
// keys above — confirm against the rendering order.
fn encode_priority(priority: i64) -> i64 {
    -priority
}
// Inverse of `encode_priority` (negation is its own inverse).
fn decode_priority(priority: i64) -> i64 {
    -priority
}
}
/* gtdtxt file parser */
// Parse a gtdtxt file into `journal`, recursing into `include:`
// directives. `parent_file` is only used for error reporting.
// NOTE: exits the whole process (process::exit) on any error, and
// temporarily changes the current working directory so relative
// includes resolve against the including file.
fn parse_file(parent_file: Option<String>, path_to_file_str: String, journal: &mut GTD) {
    let path_to_file: &Path = Path::new(&path_to_file_str);
    if !path_to_file.is_file() {
        // TODO: return Err(...)
        match parent_file {
            None => {},
            Some(parent_file) => {
                println!("In file: {}",
                    parent_file
                );
            }
        };
        println!("Path is not a file: {}",
            path_to_file_str
        );
        process::exit(1);
    }
    // fetch path to file
    let tracked_path = match path_to_file.canonicalize() {
        Ok(resolved) => {
            let resolved: PathBuf = resolved;
            format!("{}", resolved.display())
        },
        Err(e) => {
            panic!("{:?}", e);
        }
    };
    // canonical paths are used for cycle detection across includes
    if journal.opened_files.contains(&tracked_path) {
        println!("Cyclic includes detected; file already opened: {}", tracked_path);
        process::exit(1);
    }
    let file: File = File::open(path_to_file).ok().expect("Failed to open file");
    // track this opened file to ensure we're not opening the same file twice
    journal.opened_files.insert(tracked_path.clone());
    // save current working directory
    let old_working_directory = format!("{}", env::current_dir().unwrap().display());
    // set new current working dir
    let parent_dir: String = {
        let parent_dir = Path::new(&tracked_path).parent().unwrap();
        format!("{}", parent_dir.display())
    };
    if !env::set_current_dir(&parent_dir).is_ok() {
        println!("Unable to change working directory to: {}", parent_dir);
        process::exit(1);
    }
    journal.files_with_completed_tasks.insert(tracked_path.clone(), Vec::new());
    let mut num_of_lines_parsed = 0;
    // parse gtdtxt file
    let mut input = Source::new(file);
    // directive switches
    let mut file_shall_not_contain_completed_tasks: bool = false;
    // initial state
    let mut previous_state: ParseState = ParseState::Start;
    loop {
        let mut n = Numbering::new(LineNumber::new(), line_token_parser);
        // If we could implement FnMut for Numbering then we would be good, but we need to wrap now:
        let mut m = |i| n.parse(i);
        match input.parse(m) {
            Ok((lines_parsed, line)) => {
                // amend behaviour of newline counting
                let lines_parsed = if lines_parsed == 0 {
                    1
                } else {
                    lines_parsed
                };
                num_of_lines_parsed += lines_parsed;
                match line {
                    LineToken::Task(task_block_line) => {
                        // mark this line as previous task block seen
                        journal.previous_task_block_line = num_of_lines_parsed;
                        // either extend the task being built, or start a
                        // fresh one if the previous line was not a task line
                        let current_task: &mut Task = match previous_state {
                            ParseState::Task(ref mut task) => {
                                task
                            },
                            _ => {
                                let mut new_task: Task = Task::new(num_of_lines_parsed);
                                new_task.source_file = Some(tracked_path.clone());
                                previous_state = ParseState::Task(new_task);
                                // TODO: possible to refactor this in a better way?
                                match previous_state {
                                    ParseState::Task(ref mut task) => {
                                        task
                                    },
                                    _ => unsafe { debug_unreachable!() }
                                }
                            }
                        };
                        // apply the parsed attribute onto the task being built
                        match task_block_line {
                            TaskBlock::Title(title) => {
                                current_task.title = Some(title);
                            },
                            TaskBlock::Note(note) => {
                                current_task.note = Some(note);
                            },
                            TaskBlock::Project(project) => {
                                if project.len() > 0 {
                                    current_task.project = Some(project);
                                } else {
                                    current_task.project = None;
                                }
                            },
                            TaskBlock::Created(created_at) => {
                                let created_at: NaiveDateTime = created_at;
                                current_task.created_at = Some(created_at);
                            },
                            TaskBlock::Done(done_at) => {
                                let done_at: NaiveDateTime = done_at;
                                current_task.done_at = Some(done_at);
                            },
                            TaskBlock::Chain(chain_at) => {
                                let chain_at: NaiveDateTime = chain_at;
                                match current_task.chains {
                                    None => {
                                        let mut tree = BTreeMap::new();
                                        tree.insert(chain_at, true);
                                        current_task.chains = Some(tree);
                                    },
                                    Some(ref mut tree) => {
                                        tree.insert(chain_at, true);
                                    }
                                };
                            },
                            TaskBlock::Status(status) => {
                                current_task.status = Some(status);
                            },
                            TaskBlock::Due(due_at) => {
                                let due_at: NaiveDateTime = due_at;
                                current_task.due_at = Some(due_at);
                            },
                            TaskBlock::Defer(defer) => {
                                current_task.defer = Some(defer);
                            },
                            TaskBlock::Contexts(contexts) => {
                                if contexts.len() > 0 {
                                    current_task.contexts = Some(contexts);
                                } else {
                                    current_task.contexts = None;
                                }
                            },
                            TaskBlock::Tags(tags) => {
                                if tags.len() > 0 {
                                    current_task.tags = Some(tags);
                                } else {
                                    current_task.tags = None;
                                }
                            },
                            TaskBlock::Time(time) => {
                                // time entries accumulate rather than replace
                                current_task.time += time;
                            },
                            TaskBlock::ID(id) => {
                                // println!("id: '{}'", id);
                                // TODO: complete
                            },
                            TaskBlock::Priority(priority) => {
                                current_task.priority = priority
                            },
                            TaskBlock::Flag(flag) => {
                                current_task.flag = flag;
                            }
                        };
                    },
                    LineToken::Directive(directive_line) => {
                        // a directive line terminates any task being built
                        match previous_state {
                            ParseState::Task(task) => {
                                journal.add_task(task);
                            },
                            _ => {}
                        };
                        previous_state = ParseState::Directive;
                        match directive_line {
                            Directive::Include(path_to_file) => {
                                parse_file(Some(tracked_path.clone()), path_to_file, journal);
                            },
                            Directive::ShouldNotContainCompletedTasks(result) => {
                                file_shall_not_contain_completed_tasks = result;
                            }
                        };
                    },
                    LineToken::PreBlock => {
                        // println!("preblock");
                        // filler (whitespace/comments) also terminates a task
                        match previous_state {
                            ParseState::Task(task) => {
                                journal.add_task(task);
                            },
                            _ => {}
                        };
                        previous_state = ParseState::PreBlock;
                    },
                    LineToken::TaskSeparator => {
                        // println!("TaskSeparator");
                        match previous_state {
                            ParseState::Task(task) => {
                                journal.add_task(task);
                            },
                            _ => {}
                        };
                        previous_state = ParseState::TaskSeparator;
                    }
                };
            },
            Err(StreamError::Retry) => {
                // Needed to refill buffer when necessary
            },
            Err(StreamError::EndOfInput) => {
                break;
            },
            Err(e) => {
                // println!("{:?}", e);
                println!("Error parsing starting at line {} in file: {}", num_of_lines_parsed + 1, tracked_path);
                process::exit(1);
            }
        }
    };
    // flush a task still being built at end of input
    match previous_state {
        ParseState::Task(task) => {
            journal.add_task(task);
        },
        _ => {}
    };
    // enforce the file_no_done_tasks directive, if it was set
    match journal.files_with_completed_tasks.get_mut(&tracked_path) {
        None => unsafe { debug_unreachable!() },
        Some(bucket) => {
            if (*bucket).len() > 0 && file_shall_not_contain_completed_tasks {
                println!("Found {} completed tasks that are not supposed to be in file: {}",
                    (*bucket).len(),
                    tracked_path);
                let task: &Task = journal.tasks.get((*bucket).first().unwrap()).unwrap();
                println!("Found a completed task at lines: {} to {}",
                    task.task_block_range_start,
                    task.task_block_range_end
                );
                process::exit(1);
            }
        }
    }
    journal.opened_files.remove(&tracked_path);
    // restore current working dir
    if !env::set_current_dir(&old_working_directory).is_ok() {
        println!("Unable to change working directory to: {}", old_working_directory);
        process::exit(1);
    }
}
/* parsers */
// state machine:
// Start = PreBlock | Task | Directive | TaskSeparator
// PreBlock = PreBlock | Task | Directive | TaskSeparator
// TaskSeparator = PreBlock | Task | Directive | TaskSeparator
// Task = Task | PreBlock | TaskSeparator
// Directive = Directive | PreBlock | TaskSeparator
#[derive(Debug)]
// Parser state machine node; `Task` carries the task currently being
// assembled across consecutive task-attribute lines.
enum ParseState {
    Start,
    PreBlock,
    Task(Task),
    Directive,
    TaskSeparator
}
#[derive(Debug)]
// Classification of a single input line produced by `line_token_parser`.
enum LineToken {
    Task(TaskBlock),
    Directive(Directive),
    PreBlock,
    TaskSeparator
}
// Top-level per-line parser. A line beginning with a non-whitespace
// character is tried as a task separator, then a task attribute, then
// a directive; anything else is consumed as pre-block filler
// (whitespace / comments).
fn line_token_parser(input: Input<u8>) -> U8Result<LineToken> {
    or(input,
        |i| parse!{i;
            // this line shall not begin with any whitespace
            look_ahead(|i| satisfy(i, |c| !is_whitespace(c)));
            let line: LineToken = task_seperators() <|>
                task_block() <|>
                directives();
            ret line
        },
        |i| pre_block(i)
    )
}
/* preblock */
// Consume filler between blocks: whitespace and /* */ comment blocks,
// up to a one-line comment or a line/input terminator.
fn pre_block(i: Input<u8>) -> U8Result<LineToken> {
    parse!{i;
        /*
        consume comment blocks or whitespace till
        one line comments or terminating
        */
        let line: Vec<()> = many_till(
            |i| or(i,
                |i| whitespace(i),
                |i| comments_block(i)
            ),
            |i| or(i,
                |i| comments_one_line(i),
                |i| terminating(i)
            )
        );
        ret LineToken::PreBlock;
    }
}
/* task block */
#[derive(Debug)]
// Defer semantics: hide forever, or hide until a point in time.
enum Defer {
    Forever,
    Until(NaiveDateTime)
}
// tokens from parser
#[derive(Debug)]
// One parsed `key: value` attribute line of a task block.
enum TaskBlock {
    Title(String),
    Created(NaiveDateTime),
    Done(NaiveDateTime),
    Chain(NaiveDateTime),
    Due(NaiveDateTime),
    Defer(Defer),
    Priority(i64),
    // accumulated time in seconds
    Time(u64),
    // project path components (split on '/')
    Project(Vec<String>),
    Status(Status),
    Contexts(Vec<String>),
    Tags(Vec<String>),
    Flag(bool),
    Note(String),
    // TODO: complete
    ID(String)
}
// Try each task attribute parser in turn; order matters since several
// keywords share prefixes (e.g. "task" vs "tag").
fn task_block(i: Input<u8>) -> U8Result<LineToken> {
    parse!{i;
        let line: TaskBlock = task_title() <|>
            task_note() <|>
            task_priority() <|>
            task_project() <|>
            task_flag() <|>
            task_created() <|>
            task_done() <|>
            task_chain() <|>
            task_status() <|>
            task_due() <|>
            task_defer() <|>
            task_tags() <|>
            task_contexts() <|>
            task_time() <|>
            task_id();
        ret LineToken::Task(line)
    }
}
// "task:"/"todo:"/"action:"/"item:" followed by a non-empty,
// whitespace-trimmed title.
fn task_title(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        // aliases
        string_ignore_case("task".as_bytes()) <|>
        string_ignore_case("todo".as_bytes()) <|>
        string_ignore_case("action".as_bytes()) <|>
        string_ignore_case("item".as_bytes());
        token(b':');
        let line = non_empty_line();
        ret {
            let title: String = format!("{}", String::from_utf8_lossy(line.as_slice()).trim());
            TaskBlock::Title(title)
        }
    }
}
// "note:"/"notes:"/"description:"/"desc:" — a possibly multi-line note.
// Continuation lines must be indented (start with space/tab); blank
// lines inside the note are preserved as newlines.
fn task_note(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        // aliases
        string_ignore_case("notes".as_bytes()) <|>
        string_ignore_case("note".as_bytes()) <|>
        string_ignore_case("description".as_bytes()) <|>
        string_ignore_case("desc".as_bytes());
        token(b':');
        skip_many(|i| space_or_tab(i));
        // first line may be empty (note text starting on the next line)
        let line = or(
            |i| non_empty_line(i),
            |i| parse!{i;
                terminating();
                ret {
                    let line: Vec<u8> = vec![];
                    line
                }
            }
        );
        let other_lines: Vec<String> = many(
            |i| or(i,
                |i| parse!{i;
                    // allow empty lines in note
                    let nothing: Vec<()> = many(|i| parse!{i;
                        let nothing: Vec<()> = many_till(|i| space_or_tab(i), |i| end_of_line(i));
                        ret ()
                    });
                    space_or_tab();
                    let line = non_empty_line();
                    ret {
                        // re-insert one '\n' per blank line that was skipped
                        let filler = String::from_utf8(vec![b'\n'; nothing.len()]).ok().unwrap();
                        let line: String = format!("{}{:>11} {}",
                            filler,
                            "",
                            String::from_utf8_lossy(line.as_slice()).trim()
                        );
                        line
                    }
                },
                |i| parse!{i;
                    space_or_tab();
                    let line = non_empty_line();
                    ret {
                        // 11-space pad presumably aligns continuation lines
                        // with the rendered "note:" label — TODO confirm
                        let line: String = format!("{:>11} {}",
                            "",
                            String::from_utf8_lossy(line.as_slice()).trim()
                        );
                        line
                    }
                }
            )
        );
        ret {
            let line: String = format!("{}", String::from_utf8_lossy(line.as_slice()).trim());
            let other_lines = other_lines.join("\n");
            let note = if other_lines.len() > 0 {
                if line.len() > 0 {
                    format!("{}\n{}", line, other_lines)
                } else {
                    format!("{}", other_lines.trim())
                }
            } else {
                format!("{}", line)
            };
            TaskBlock::Note(note)
        }
    }
}
// "time:" — one or more duration terms (e.g. "1h 30m"), summed to seconds.
fn task_time(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("time".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let time: u64 = multiple_time_range();
        let nothing: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Time(time)
    }
}
// "priority:" — a signed or unsigned decimal.
fn task_priority(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("priority".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let priority: i64 = signed_decimal() <|> decimal();
        let nothing: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Priority(priority)
    }
}
// "project:" — a '/'-separated path of project components.
fn task_project(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("project".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        let list = string_list(b'/');
        ret TaskBlock::Project(list)
    }
}
// "flag:" — yes/true/no/false.
fn task_flag(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("flag".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let input = bool_option_parser();
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Flag(input)
    }
}
// "created:"/"added:"/"date:" — creation timestamp (midnight if no time given).
fn task_created(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("created at".as_bytes()) <|>
        string_ignore_case("created".as_bytes()) <|>
        string_ignore_case("date".as_bytes()) <|>
        string_ignore_case("added at".as_bytes()) <|>
        string_ignore_case("added".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let created_at = parse_datetime(false);
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Created(created_at)
    }
}
// "done:"/"completed:" — completion timestamp.
fn task_done(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("done at".as_bytes()) <|>
        string_ignore_case("done".as_bytes()) <|>
        string_ignore_case("completed".as_bytes()) <|>
        string_ignore_case("complete".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let done_at = parse_datetime(false);
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Done(done_at)
    }
}
// "chain:" — one link in a do-date chain; multiple chain lines accumulate.
fn task_chain(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("chain".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let chain_at = parse_datetime(false);
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Chain(chain_at)
    }
}
// Map a status keyword (case-insensitive) onto the Status enum:
// done/complete/finished → Done; hide/incubate/someday → Incubate;
// active/pending/in progress → NotDone.
fn parse_status(input: Input<u8>) -> U8Result<Status> {
    or(input,
        |i| parse!{i;
            string_ignore_case("done".as_bytes()) <|>
            string_ignore_case("complete".as_bytes()) <|>
            string_ignore_case("finished".as_bytes()) <|>
            string_ignore_case("finish".as_bytes()) <|>
            string_ignore_case("fin".as_bytes());
            ret Status::Done
        },
        |i| or(i,
            |i| parse!{i;
                string_ignore_case("hide".as_bytes()) <|>
                string_ignore_case("hidden".as_bytes()) <|>
                string_ignore_case("incubate".as_bytes()) <|>
                string_ignore_case("later".as_bytes()) <|>
                string_ignore_case("someday".as_bytes()) <|>
                string_ignore_case("inactive".as_bytes()) <|>
                string_ignore_case("not active".as_bytes());
                ret Status::Incubate
            },
            |i| parse!{i;
                string_ignore_case("active".as_bytes()) <|>
                string_ignore_case("not done".as_bytes()) <|>
                string_ignore_case("progress".as_bytes()) <|>
                string_ignore_case("in progress".as_bytes()) <|>
                string_ignore_case("in-progress".as_bytes()) <|>
                string_ignore_case("pending".as_bytes()) <|>
                string_ignore_case("is active".as_bytes());
                ret Status::NotDone
            }
        )
    )
}
// "status:" — see parse_status for accepted keywords.
fn task_status(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("status".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let status = parse_status();
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Status(status)
    }
}
// "due:" — due timestamp; a bare date defaults to end of day (23:59),
// hence parse_datetime(true).
fn task_due(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("due".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let due_at = parse_datetime(true);
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Due(due_at)
    }
}
// "defer:"/"hide:" (and till/until variants) — either the literal
// "forever" or a datetime. Longer aliases are listed before their
// prefixes so e.g. "defer till" is not half-consumed by "defer".
fn task_defer(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("defer till".as_bytes()) <|>
        string_ignore_case("defer until".as_bytes()) <|>
        string_ignore_case("defer".as_bytes()) <|>
        string_ignore_case("hide until".as_bytes()) <|>
        string_ignore_case("hidden".as_bytes()) <|>
        string_ignore_case("hide till".as_bytes()) <|>
        string_ignore_case("hide".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        skip_many(|i| space_or_tab(i));
        let defer = or(
            |i| parse!{i;
                string_ignore_case("forever".as_bytes());
                ret Defer::Forever
            },
            |i| parse!{i;
                let defer_till = parse_datetime(false);
                ret Defer::Until(defer_till)
            }
        );
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret TaskBlock::Defer(defer)
    }
}
// "contexts:" — comma-separated list of context names.
fn task_contexts(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("contexts".as_bytes()) <|>
        string_ignore_case("context".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        let list = string_list(b',');
        ret TaskBlock::Contexts(list)
    }
}
// "tags:" — comma-separated list of tag names.
fn task_tags(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("tags".as_bytes()) <|>
        string_ignore_case("tag".as_bytes());
        token(b':');
        look_ahead(|i| non_empty_line(i));
        let list = string_list(b',');
        ret TaskBlock::Tags(list)
    }
}
// "id:" — free-form identifier, trimmed; currently parsed but unused
// (see the TODO at the TaskBlock::ID consumer).
fn task_id(input: Input<u8>) -> U8Result<TaskBlock> {
    parse!{input;
        string_ignore_case("id".as_bytes());
        token(b':');
        let line = non_empty_line();
        ret {
            let id: String = format!("{}", String::from_utf8_lossy(line.as_slice()).trim());
            TaskBlock::ID(id)
        }
    }
}
/* directives */
#[derive(Debug)]
// File-level directives: include another gtdtxt file, or assert that
// this file contains no completed tasks.
enum Directive {
    Include(String),
    ShouldNotContainCompletedTasks(bool)
}
// Try each directive parser in turn.
fn directives(input: Input<u8>) -> U8Result<LineToken> {
    parse!{input;
        let line: Directive = directive_include() <|>
            directive_not_contain_done_tasks();
        ret {
            LineToken::Directive(line)
        }
    }
}
// "include:" — path to another gtdtxt file, resolved relative to the
// including file's directory (parse_file changes cwd accordingly).
fn directive_include(input: Input<u8>) -> U8Result<Directive> {
    parse!{input;
        string_ignore_case("include".as_bytes());
        token(b':');
        skip_many(|i| space_or_tab(i));
        let line = non_empty_line();
        ret {
            let path_to_file: String = format!("{}", String::from_utf8_lossy(line.as_slice()).trim());
            Directive::Include(path_to_file)
        }
    }
}
// "file_no_done_tasks:" — boolean switch checked at end of parse_file.
fn directive_not_contain_done_tasks(input: Input<u8>) -> U8Result<Directive> {
    parse!{input;
        string_ignore_case("file_no_done_tasks".as_bytes());
        token(b':');
        skip_many(|i| space_or_tab(i));
        let input = bool_option_parser();
        let nothing: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret Directive::ShouldNotContainCompletedTasks(input)
    }
}
/* lines */
// A consumed line: empty, or its raw bytes (whitespace-led lines have
// their leading whitespace skipped by parse_line).
enum Line {
    Empty,
    NonEmpty(Vec<u8>)
}
// Parse a line and fail (incomplete) unless it carries content.
fn non_empty_line(i: Input<u8>) -> U8Result<Vec<u8>> {
    parse_line(i)
        .bind(parse_non_empty_line)
}
// TODO: bother moving as closure?
// TODO: bother moving as closure?
// Continuation of `non_empty_line`: reject empty lines by signalling
// `incomplete`, otherwise yield the line's bytes.
fn parse_non_empty_line(i: Input<u8>, above: Line) -> U8Result<Vec<u8>> {
    match above {
        Line::Empty => {
            // need at least one u8 token
            i.incomplete(1)
        },
        Line::NonEmpty(line) => {
            // is_empty() instead of the non-idiomatic `len() <= 0`
            // (len() is usize and can never be negative)
            if line.is_empty() {
                return i.incomplete(1);
            }
            i.ret(line)
        }
    }
}
// Consume one line up to a terminator. Leading whitespace is skipped,
// so a whitespace-only line comes back as NonEmpty with empty bytes
// (parse_non_empty_line then rejects it).
fn parse_line(i: Input<u8>) -> U8Result<Line> {
    // many_till(i, any, |i| terminating(i))
    or(i,
        |i| parse!{i;
            terminating();
            ret Line::Empty
        },
        |i| parse!{i;
            // lines with just whitespace are probably not interesting
            // TODO: consider space_or_tab?
            skip_many(|i| whitespace(i));
            let line: Vec<u8> = many_till(any, |i| terminating(i));
            ret Line::NonEmpty(line)
        }
    )
}
/* task separator */
// A separator line: at least four repeats of one of the characters
// below (e.g. "----"), optionally followed by more repeats and
// trailing whitespace. NOTE: fn name keeps the original (misspelled)
// public identifier for compatibility.
fn task_seperators(input: Input<u8>) -> U8Result<LineToken> {
    parse!{input;
        parse_task_separator("-".as_bytes()) <|>
        parse_task_separator("=".as_bytes()) <|>
        parse_task_separator("_".as_bytes()) <|>
        // TODO: necessary?
        parse_task_separator("#".as_bytes()) <|>
        parse_task_separator("/".as_bytes()) <|>
        parse_task_separator(":".as_bytes()) <|>
        parse_task_separator("~".as_bytes()) <|>
        parse_task_separator("*".as_bytes());
        ret {
            LineToken::TaskSeparator
        }
    }
}
// Match `token` exactly four times, then any further repeats, then
// whitespace up to the line terminator.
fn parse_task_separator<'a>(input: Input<'a, u8>, token: &[u8])
    -> SimpleResult<'a, u8, ()> {
    parse!{input;
        match_four_tokens(token);
        skip_many(|i| string(i, token));
        let line: Vec<()> = many_till(|i| space_or_tab(i), |i| terminating(i));
        ret ()
    }
}
/* comments */
// One-line comment: "//", "#" or ";" through to end of line.
fn comments_one_line(i: Input<u8>) -> U8Result<()> {
    parse!{i;
        or(
            |i| string(i, "//".as_bytes()),
            |i| or(i,
                |i| string(i, "#".as_bytes()),
                |i| string(i, ";".as_bytes())
            )
        );
        let line: Vec<u8> = many_till(|i| any(i), |i| terminating(i));
        ret ()
    }
}
// Block comment: "/*" through the next "*/" (no nesting).
fn comments_block(i: Input<u8>) -> U8Result<()> {
    parse!{i;
        string("/*".as_bytes());
        let line: Vec<u8> = many_till(|i| any(i), |i| string(i, "*/".as_bytes()));
        ret ()
    }
}
/* delimited list parser */
// Parse a `delim`-separated list on the rest of the line, trimming each
// element and dropping empties. The final element (after the last
// delimiter) is captured separately by the trailing many_till.
fn string_list(input: Input<u8>, delim: u8) -> U8Result<Vec<String>> {
    parse!{input;
        skip_many(|i| space_or_tab(i));
        // TODO: custom delimeter option
        let raw_list = delim_sep_list(delim);
        let last_item: Vec<u8> = many_till(|i| any(i), |i| terminating(i));
        ret {
            let mut new_list: Vec<String> = Vec::new();
            for item in &raw_list {
                let item: String = format!("{}", String::from_utf8_lossy(item.as_slice()).trim());
                if item.len() > 0 {
                    new_list.push(item);
                }
            }
            let last_item: String = format!("{}", String::from_utf8_lossy(last_item.as_slice()).trim());
            if last_item.len() > 0 {
                new_list.push(last_item);
            }
            new_list
        }
    }
}
// Zero or more delimiter-terminated items (the trailing, unterminated
// item is handled by the caller, string_list).
fn delim_sep_list(i: Input<u8>, delim: u8) -> U8Result<Vec<Vec<u8>>> {
    parse!{i;
        skip_many(|i| token(i, delim));
        let list: Vec<Vec<u8>> = many(|i| delim_sep_item(i, delim));
        ret list
    }
}
// One item: bytes up to the next delimiter, with surrounding runs of
// delimiters swallowed.
fn delim_sep_item(i: Input<u8>, delim: u8) -> U8Result<Vec<u8>> {
    parse!{i;
        skip_many(|i| token(i, delim));
        let item: Vec<u8> = many_till(|i| non_terminating(i), |i| token(i, delim));
        skip_many(|i| token(i, delim));
        ret item
    }
}
/* misc parsers */
// yes/true → true; no/false → false (case-insensitive).
fn bool_option_parser(i: Input<u8>) -> U8Result<bool> {
    or(i,
        |i| parse!{i;
            string_ignore_case("yes".as_bytes()) <|>
            string_ignore_case("true".as_bytes());
            ret true
        },
        |i| parse!{i;
            string_ignore_case("no".as_bytes()) <|>
            string_ignore_case("false".as_bytes());
            ret false
        }
    )
}
// Match `token` exactly four times in a row (separator minimum length).
fn match_four_tokens<'a>(input: Input<'a, u8>, token: &[u8])
    -> SimpleResult<'a, u8, ()> {
    parse!{input;
        string(token);
        string(token);
        string(token);
        string(token);
        ret ()
    }
}
// Any single whitespace byte (per the file's `is_whitespace` helper).
fn whitespace(i: Input<u8>) -> U8Result<()> {
    parse!{i;
        satisfy(|c| is_whitespace(c));
        ret ()
    }
}
// A single space or tab (horizontal whitespace only — no newlines).
fn space_or_tab(input: Input<u8>) -> U8Result<()> {
    parse!{input;
        or(
            |i| token(i, b' '),
            |i| token(i, b'\t')
        );
        ret ()
    }
}
// Yield the next byte unless it starts a line/input terminator, in
// which case fail with `incomplete` so many_till can stop cleanly.
fn non_terminating(i: Input<u8>) -> U8Result<u8> {
    or(i,
        |i| parse!{i;
            terminating();
            ret None
        },
        |i| parse!{i;
            let something = any();
            ret Some(something)
        }
    )
    .bind(|i, above: Option<u8>| {
        match above {
            None => {
                return i.incomplete(1);
            },
            Some(c) => {
                return i.ret(c);
            }
        }
    })
}
// match eof or various eol
// match eof or various eol
fn terminating(i: Input<u8>) -> U8Result<()> {
    or(i,
        |i| parse!{i;
            end_of_line();
            ret ()
        },
        // NOTE: eof should be matched last
        |i| eof(i)
    )
}
// Source: https://en.wikipedia.org/wiki/Newline#Unicode
// Source: https://en.wikipedia.org/wiki/Newline#Unicode
// Match any Unicode newline sequence; "\r\n" is tried before the lone
// "\r"/"\n" so CRLF is consumed as one terminator. Returns the matched
// byte sequence.
fn end_of_line(i: Input<u8>) -> U8Result<&[u8]> {
    // TODO: bother to refactor using parse! macro with <|> operator?
    or(i,
        |i| parse!{i;
            token(b'\r');
            token(b'\n');
            ret "\r\n".as_bytes()
        },
        |i| or(i,
            |i| parse!{i;
                token(b'\n');
                ret "\n".as_bytes()
            },
            |i| or(i,
                |i| parse!{i;
                    token(b'\r');
                    ret "\r".as_bytes()
                },
                |i| or(i,
                    |i| parse!{i;
                        // LINE SEPARATOR (UTF-8: 3 bytes)
                        string("\u{2028}".as_bytes());
                        ret "\u{2028}".as_bytes()
                    },
                    |i| or(i,
                        |i| parse!{i;
                            // PARAGRAPH SEPARATOR
                            string("\u{2029}".as_bytes());
                            ret "\u{2029}".as_bytes()
                        },
                        |i| or(i,
                            |i| parse!{i;
                                // VERTICAL TAB
                                string("\u{000B}".as_bytes());
                                ret "\u{000B}".as_bytes()
                            },
                            |i| or(i,
                                |i| parse!{i;
                                    // FORM FEED
                                    string("\u{000C}".as_bytes());
                                    ret "\u{000C}".as_bytes()
                                },
                                |i| parse!{i;
                                    // NEXT LINE (NEL)
                                    string("\u{0085}".as_bytes());
                                    ret "\u{0085}".as_bytes()
                                }
                            )
                        )
                    )
                )
            )
        )
    )
}
/* time range parsers */
/* time range parsers */
// One or more whitespace-separated duration terms ("2h 30m"), summed
// into total seconds.
fn multiple_time_range(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        let time: Vec<u64> = many1(|i| parse!{
            i;
            skip_many(|i| space_or_tab(i));
            let range = time_range();
            ret range
        });
        ret {
            // plain fold (the old closure mutated an accumulator and
            // returned it — same result, more ceremony)
            time.iter().fold(0u64, |sum, &val| sum + val)
        }
    }
}
// A single duration term: decimal count followed by a unit; yields
// seconds (count * unit multiplier).
fn time_range(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        let range: u64 = decimal();
        skip_many(|i| space_or_tab(i));
        // minutes/hours/days are tried before seconds so the bare "s"
        // in "secs" etc. cannot shadow longer unit names
        let multiplier = time_range_unit_minutes() <|>
            time_range_unit_hours() <|>
            time_range_unit_days() <|>
            time_range_unit_seconds();
        ret {
            range * multiplier
        }
    }
}
// Unit parsers: each matches its aliases (longest first) and yields
// the unit's length in seconds.
fn time_range_unit_seconds(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        string_ignore_case("seconds".as_bytes()) <|>
        string_ignore_case("second".as_bytes()) <|>
        string_ignore_case("secs".as_bytes()) <|>
        string_ignore_case("sec".as_bytes()) <|>
        string_ignore_case("s".as_bytes());
        ret 1
    }
}
fn time_range_unit_minutes(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        string_ignore_case("minutes".as_bytes()) <|>
        string_ignore_case("minute".as_bytes()) <|>
        string_ignore_case("mins".as_bytes()) <|>
        string_ignore_case("min".as_bytes()) <|>
        string_ignore_case("m".as_bytes());
        // 60 seconds in a minute
        ret 60
    }
}
fn time_range_unit_hours(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        string_ignore_case("hours".as_bytes()) <|>
        string_ignore_case("hour".as_bytes()) <|>
        string_ignore_case("hrs".as_bytes()) <|>
        string_ignore_case("hr".as_bytes()) <|>
        string_ignore_case("h".as_bytes());
        // 3600 seconds in an hour
        ret 3600
    }
}
fn time_range_unit_days(i: Input<u8>) -> U8Result<u64> {
    parse!{i;
        string_ignore_case("days".as_bytes()) <|>
        string_ignore_case("day".as_bytes()) <|>
        string_ignore_case("dys".as_bytes()) <|>
        string_ignore_case("dy".as_bytes()) <|>
        string_ignore_case("d".as_bytes());
        // 86400 seconds in a day
        ret 86400
    }
}
/* datetime parsers */
// AM/PM marker for 12-hour clock input.
enum Meridiem {
    AM,
    PM
}
struct Time {
    // 24-hour format.
    // range from 0 to 23
    hour: u32,
    minute: u32
}
// Raw calendar components before chrono validation.
struct ParsedDate {
    // between 1 and 31
    day: u32,
    // between 1 and 12
    month: u32,
    // at least 1
    year: i32
}
// Combined result of the date + time sub-parsers.
struct ParsedDateTime {
    time: Time,
    date: ParsedDate
}
// Parse "<time> <date>", "<date> <time>", or a bare "<date>".
// For a bare date, `end_of_day` picks 23:59 (used for due dates) vs
// midnight.
// NOTE(review): NaiveDate::from_ymd / NaiveTime::from_hms panic on
// out-of-range components (e.g. Feb 31); day/month are range-checked
// individually but not jointly — confirm intended failure mode.
fn parse_datetime(i: Input<u8>, end_of_day: bool) -> U8Result<NaiveDateTime> {
    or(i,
        |i| parse!{i;
            let time = parse_time();
            skip_many1(|i| space_or_tab(i));
            let date = parse_date();
            ret ParsedDateTime {
                time: time,
                date: date
            }
        },
        |i| or(i,
            |i| parse!{i;
                let date = parse_date();
                skip_many1(|i| space_or_tab(i));
                let time = parse_time();
                ret ParsedDateTime {
                    time: time,
                    date: date
                }
            },
            |i| parse!{i;
                let date = parse_date();
                ret {
                    if end_of_day {
                        ParsedDateTime {
                            date: date,
                            time: Time {
                                hour: 23,
                                minute: 59
                            }
                        }
                    } else {
                        ParsedDateTime {
                            date: date,
                            time: Time {
                                hour: 0,
                                minute: 0
                            }
                        }
                    }
                }
            }
        )
    )
    .bind(|i, above: ParsedDateTime| {
        let date = NaiveDate::from_ymd(above.date.year, above.date.month, above.date.day);
        let time = NaiveTime::from_hms(above.time.hour, above.time.minute, 0);
        let date_time = NaiveDateTime::new(date, time);
        i.ret(date_time)
    })
}
// Date in "month day[,] year" form, e.g. "jan 5, 2016" or "jan 5 2016".
fn parse_date(i: Input<u8>) -> U8Result<ParsedDate> {
    parse!{i;
        let month = parse_months();
        skip_many1(|i| space_or_tab(i));
        let day = parse_day();
        // optional comma between day and year
        or(
            |i| parse!{i;
                skip_many(|i| space_or_tab(i));
                token(b',');
                skip_many(|i| space_or_tab(i));
                ret ()
            },
            |i| parse!{i;
                skip_many1(|i| space_or_tab(i));
                ret ()
            }
        );
        let year = parse_year();
        ret ParsedDate {
            month: month,
            day: day,
            year: year
        }
    }
}
// 5pm
// 5:00pm
// 17:00
// Accepts "5pm", "5:00pm", "17:00" and military "1700"/"900" forms.
fn parse_time(i: Input<u8>) -> U8Result<Time> {
    parse!{i;
        let time = simple_time() <|>
            parse_12_hour_clock() <|>
            parse_24_hour_clock();
        ret time
    }
}
// "5pm" — hour plus meridiem, no minutes. 12am → 0, 12pm stays 12,
// 1–11pm get +12.
fn simple_time(i: Input<u8>) -> U8Result<Time> {
    parse!{i;
        let hour = parse_12_hour();
        skip_many(|i| space_or_tab(i));
        let ampm: Meridiem = parse_am_pm();
        ret {
            let mut hour: u32 = hour;
            match ampm {
                Meridiem::AM => {
                    if hour == 12 {
                        hour = 0;
                    }
                },
                Meridiem::PM => {
                    if hour != 12 {
                        // 1 to 11
                        hour = hour + 12;
                    }
                }
            };
            Time {
                hour: hour,
                minute: 0
            }
        }
    }
}
// "5:30pm" — 12-hour clock with minutes; same meridiem normalization
// as simple_time.
fn parse_12_hour_clock(i: Input<u8>) -> U8Result<Time> {
    parse!{i;
        let hour = parse_12_hour();
        token(b':');
        let minute = parse_minute();
        skip_many(|i| space_or_tab(i));
        let ampm: Meridiem = parse_am_pm();
        ret {
            let mut hour: u32 = hour;
            match ampm {
                Meridiem::AM => {
                    if hour == 12 {
                        hour = 0;
                    }
                },
                Meridiem::PM => {
                    if hour != 12 {
                        // 1 to 11
                        hour = hour + 12;
                    }
                }
            };
            Time {
                hour: hour,
                minute: minute
            }
        }
    }
}
// "am"/"pm", case-insensitive.
fn parse_am_pm(i: Input<u8>) -> U8Result<Meridiem> {
    or(i,
        |i| parse!{i;
            string_ignore_case("pm".as_bytes());
            ret Meridiem::PM;
        },
        |i| parse!{i;
            string_ignore_case("am".as_bytes());
            ret Meridiem::AM;
        }
    )
}
// "17:00" form, falling back to digit-only military time ("1700").
fn parse_24_hour_clock(i: Input<u8>) -> U8Result<Time> {
    or(i,
        |i| parse!{i;
            let hour: u32 = parse_24_hour();
            token(b':');
            let minute: u32 = parse_minute();
            ret Time {
                hour: hour,
                minute: minute
            }
        },
        |i| military_time(i)
    )
}
// Digit-only military time: 4 digits ("1700") or 3 digits ("900").
// Out-of-range hours/minutes are rejected via `incomplete`, matching
// the other time parsers' failure mode.
// TODO: refactor; haha...
fn military_time(i: Input<u8>) -> U8Result<Time> {
    or(i,
        |i| parse!{i;
            let hour_2: u8 = digit();
            let hour_1: u8 = digit();
            let min_2: u8 = digit();
            let min_1: u8 = digit();
            ret {
                // digits arrive as ASCII bytes; subtract b'0' (48)
                let hour_2: u32 = hour_2 as u32 - 48;
                let hour_1: u32 = hour_1 as u32 - 48;
                let hour = hour_2 * 10 + hour_1;
                let min_2: u32 = min_2 as u32 - 48;
                let min_1: u32 = min_1 as u32 - 48;
                let min = min_2 * 10 + min_1;
                Time {
                    hour: hour,
                    minute: min
                }
            }
        },
        |i| parse!{i;
            let hour_1: u8 = digit();
            let min_2: u8 = digit();
            let min_1: u8 = digit();
            ret {
                let hour_1: u32 = hour_1 as u32 - 48;
                let hour = hour_1;
                let min_2: u32 = min_2 as u32 - 48;
                let min_1: u32 = min_1 as u32 - 48;
                let min = min_2 * 10 + min_1;
                Time {
                    hour: hour,
                    minute: min
                }
            }
        }
    )
    .bind(|i, above: Time| {
        // hour/minute are u32, so the old `0 <= x` checks were always
        // true; only the upper bounds need testing
        if above.hour <= 23 && above.minute <= 59 {
            return i.ret(above);
        }
        // TODO: right usize?
        return i.incomplete(1);
    })
}
// One or two digits, constrained to a 24-hour clock hour (0..=23).
fn parse_24_hour(i: Input<u8>) -> U8Result<u32> {
    up_to_two_digits(i)
        .bind(|i, above: u32| {
            // `above` is unsigned; the old `0 <= above` was always true
            if above <= 23 {
                return i.ret(above);
            }
            // TODO: right usize?
            return i.incomplete(1);
        })
}
// One or two digits, constrained to a 12-hour clock hour (1..=12).
fn parse_12_hour(i: Input<u8>) -> U8Result<u32> {
    up_to_two_digits(i)
        .bind(|i, above:u32| {
            if 1 <= above && above <= 12 {
                return i.ret(above);
            }
            // TODO: right usize?
            return i.incomplete(1);
        })
}
// Exactly two digits, constrained to a minute value (0..=59).
fn parse_minute(i: Input<u8>) -> U8Result<u32> {
    two_digits(i)
        .bind(|i, above: u32| {
            // `above` is unsigned; the old `0 <= above` was always true
            if above <= 59 {
                return i.ret(above);
            }
            // TODO: right usize?
            return i.incomplete(1);
        })
}
// Decimal year, rejecting 0 (gtdtxt years start at 1).
// NOTE(review): `above as i32` can wrap for years > i32::MAX; harmless
// in practice but inherited from the original — confirm if it matters.
fn parse_year(i: Input<u8>) -> U8Result<i32> {
    decimal::<u32>(i)
        .bind(|i, above: u32| {
            // `above` is unsigned, so the old `<= 0` only ever caught 0
            if above == 0 {
                // TODO: right usize?
                return i.incomplete(1);
            }
            i.ret(above as i32)
        })
}
// One or two digits, constrained to a calendar day (1..=31).
fn parse_day(i: Input<u8>) -> U8Result<u32> {
    up_to_two_digits(i)
        .bind(|i, above: u32| {
            // `above` is unsigned: `== 0` replaces `<= 0`, and `> 31`
            // replaces `>= 32` — same accepted range (1..=31)
            if above == 0 || above > 31 {
                // TODO: right usize?
                return i.incomplete(1);
            }
            i.ret(above)
        })
}
// Month name (full or abbreviated, case-insensitive) → month number.
// Full names are listed before their abbreviations so e.g. "january"
// is not half-consumed by "jan".
fn parse_months(i: Input<u8>) -> U8Result<u32> {
    parse!{i;
        let month: u32 =
            resolve_month("january", 1) <|>
            resolve_month("jan", 1) <|>
            resolve_month("february", 2) <|>
            resolve_month("feb", 2) <|>
            resolve_month("march", 3) <|>
            resolve_month("mar", 3) <|>
            resolve_month("april", 4) <|>
            resolve_month("apr", 4) <|>
            resolve_month("may", 5) <|>
            resolve_month("june", 6) <|>
            resolve_month("jun", 6) <|>
            resolve_month("july", 7) <|>
            resolve_month("jul", 7) <|>
            resolve_month("august", 8) <|>
            resolve_month("aug", 8) <|>
            resolve_month("september", 9) <|>
            resolve_month("sept", 9) <|>
            resolve_month("sep", 9) <|>
            resolve_month("october", 10) <|>
            resolve_month("oct", 10) <|>
            resolve_month("november", 11) <|>
            resolve_month("nov", 11) <|>
            resolve_month("december", 12) <|>
            resolve_month("dec", 12);
        ret month;
    }
}
/// Matches `month` case-insensitively against the input and yields
/// `ret_val` (the month's 1-based number) on success.
fn resolve_month<'a>(i: Input<'a, u8>, month: &str, ret_val: u32) -> SimpleResult<'a, u8, u32> {
    parse!{i;
        string_ignore_case(month.as_bytes());
        ret ret_val
    }
}
/// Parses one or two ASCII digits into their numeric value.
/// The two-digit form is tried first; `or` falls back to a single digit.
fn up_to_two_digits(i: Input<u8>) -> U8Result<u32> {
    or(i,
        |i| parse!{i;
            let first_digit: u8 = digit();
            let second_digit: u8 = digit();
            ret {
                // ASCII '0' is 48, so subtracting 48 maps b'0'..b'9' to 0..9.
                let first_digit: u32 = first_digit as u32 - 48;
                let second_digit: u32 = second_digit as u32 - 48;
                let resolved: u32 = first_digit * 10 + second_digit;
                resolved
            }
        },
        |i| parse!{i;
            let first_digit: u8 = digit();
            ret {
                let resolved: u32 = first_digit as u32 - 48;
                resolved
            }
        }
    )
}
/// Parses exactly two ASCII digits into their numeric value (00-99).
fn two_digits(i: Input<u8>) -> U8Result<u32> {
    parse!{i;
        let first_digit: u8 = digit();
        let second_digit: u8 = digit();
        ret {
            // ASCII '0' is 48, so subtracting 48 maps b'0'..b'9' to 0..9.
            let first_digit: u32 = first_digit as u32 - 48;
            let second_digit: u32 = second_digit as u32 - 48;
            let resolved: u32 = first_digit * 10 + second_digit;
            resolved
        }
    }
}
/// Parses a decimal integer with a mandatory leading sign.
///
/// NOTE(review): a bare number without '+' or '-' does NOT match — the
/// sign alternation has no empty branch. Confirm callers always pass a
/// signed literal.
fn signed_decimal(input: Input<u8>) -> U8Result<i64> {
    parse!{input;
        // -1 for '-', +1 for '+'; multiplied into the magnitude below.
        let sign: i64 = or(
            |i| parse!{i;
                token(b'-');
                ret -1
            },
            |i| parse!{i;
                token(b'+');
                ret 1
            }
        );
        let num: i64 = decimal();
        ret {
            sign * num
        }
    }
}
fn string_ignore_case<'a>(i: Input<'a, u8>, s: &[u8])
-> SimpleResult<'a, u8, &'a [u8]> {
let b = i.buffer();
if s.len() > b.len() {
return i.incomplete(s.len() - b.len());
}
let d = &b[..s.len()];
for j in 0..s.len() {
if !(s[j]).eq_ignore_ascii_case(&(d[j])) {
return i.replace(&b[j..]).err(Error::expected(d[j]))
}
}
i.replace(&b[s.len()..]).ret(d)
}
/* helpers */
/// Derives a task id from a collection length.
/// NOTE(review): `as` silently truncates for lengths above i32::MAX —
/// assumed unreachable here; confirm if task counts can grow that large.
fn to_task_id(len: usize) -> i32 {
    len as i32
}
/// Relationship of a timestamp to a reference instant, carrying the
/// elapsed seconds and a pre-formatted human-readable description.
enum RelativeTime {
    Future(i64, String),
    Now(i64, String),
    Past(i64, String)
}
// src: http://stackoverflow.com/a/6109105/412627
/// Classifies `from` relative to `to` and renders the gap as
/// human-readable text ("… ago" / "… into the future"), keeping at most
/// two time units.
fn relative_time(from: i64, to: i64) -> RelativeTime {
    let delta = (to - from).abs() as u64;
    let pretty = Timerange::new(delta).print(2);
    let delta = delta as i64;

    if from < to {
        RelativeTime::Past(delta, format!("{} ago", pretty))
    } else if from > to {
        RelativeTime::Future(delta, format!("{} into the future", pretty))
    } else {
        RelativeTime::Now(delta, format!("{} ago", pretty))
    }
}
/// A span of time in whole seconds, pretty-printable as "N unit[s] …".
struct Timerange {
    range: u64
}

impl Timerange {
    /// Wraps a number of elapsed seconds.
    fn new(range: u64) -> Timerange {
        Timerange {
            range: range
        }
    }

    /// Reduces the span to its largest calendar unit.
    ///
    /// Returns `(elapsed, remainder, unit)`: `elapsed` whole `unit`s
    /// (unit string already pluralized) and the leftover seconds below
    /// one `unit`. Months are approximated as 30 days, years as 365.
    fn floor_time_unit(&self) -> (u64, u64, String) {
        let sec_per_minute: f64 = 60f64;
        let sec_per_hour: f64 = sec_per_minute * 60f64;
        let sec_per_day: f64 = sec_per_hour * 24f64;
        let sec_per_month: f64 = sec_per_day * 30f64;
        let sec_per_year: f64 = sec_per_day * 365f64;

        let mut elapsed = self.range as f64;
        let mut remainder: f64 = 0f64;
        let unit;

        if elapsed < sec_per_minute {
            unit = "second";
        } else if elapsed < sec_per_hour {
            remainder = elapsed % sec_per_minute;
            elapsed = (elapsed / sec_per_minute).floor();
            unit = "minute"
        } else if elapsed < sec_per_day {
            remainder = elapsed % sec_per_hour;
            elapsed = (elapsed / sec_per_hour).floor();
            unit = "hour"
        } else if elapsed < sec_per_month {
            remainder = elapsed % sec_per_day;
            elapsed = (elapsed / sec_per_day).floor();
            unit = "day"
        } else if elapsed < sec_per_year {
            remainder = elapsed % sec_per_month;
            elapsed = (elapsed / sec_per_month).floor();
            unit = "month"
        } else {
            remainder = elapsed % sec_per_year;
            elapsed = (elapsed / sec_per_year).floor();
            unit = "year"
        }

        // Pluralize; 0 and 1 stay singular ("0 second", "1 minute").
        let unit = if elapsed <= 1f64 {
            format!("{}", unit)
        } else {
            format!("{}s", unit)
        };

        let elapsed = elapsed as u64;
        let remainder = remainder as u64;
        (elapsed, remainder, unit)
    }

    /// Formats the span recursively down to `depth` units, e.g.
    /// `print(2)` on 90 seconds yields "1 minute and 30 seconds".
    fn print(&self, depth: u32) -> String {
        let (elapsed, remainder, unit) = self.floor_time_unit();

        // `remainder` is unsigned; the former `<= 0` comparison meant `== 0`.
        if remainder == 0 || depth <= 1 {
            return format!("{} {}", elapsed, unit);
        }

        let pretty_remainder = Timerange::new(remainder).print(depth - 1);

        // Join the final (sub-minute or depth-limited) component with "and".
        if remainder < 60 || depth <= 2 {
            return format!("{} {} and {}", elapsed, unit, pretty_remainder);
        }

        format!("{} {} {}", elapsed, unit, pretty_remainder)
    }
}
// TODO: refactor
/// Inserts `path` into `tree` as a chain of nested map nodes.
///
/// Each segment becomes a key. A segment stored as a `NodeType::Leaf`
/// that still has children in `path` is promoted to a `NodeType::Node`
/// holding the rest of the path.
///
/// NOTE(review): when the segment is already a `Node`, the remaining
/// path is NOT merged into the existing subtree — confirm intended.
fn traverse(path: &mut [String], tree: &mut Tree) {
    if path.len() <= 0 {
        return;
    }
    match path.split_first_mut() {
        // Unreachable: the length check above guarantees a first element.
        None => unsafe { debug_unreachable!() },
        Some((first, rest)) => {
            // Ensure the current segment exists; new segments start as leaves.
            if !tree.contains_key(first) {
                tree.insert(first.clone(), NodeType::Leaf);
            }
            if rest.len() <= 0 {
                return;
            } else {
                // Only a Leaf needs to be upgraded to hold `rest`.
                let should_replace: bool = match tree.get_mut(first) {
                    None => unsafe { debug_unreachable!("add_project_filter: NodeType not found") },
                    Some(node_type) => {
                        match node_type {
                            &mut NodeType::Leaf => {
                                true
                            },
                            &mut NodeType::Node(_) => {
                                false
                            }
                        }
                    }
                };
                if should_replace {
                    // Build the subtree for `rest`, then swap it in.
                    let mut new_tree: Tree = HashMap::new();
                    {
                        let _new_tree = &mut new_tree;
                        traverse(rest, _new_tree);
                    };
                    tree.insert(first.clone(), NodeType::Node(new_tree));
                }
            }
        }
    }
}
/// Walks `path` down `tree` level by level.
///
/// Returns `true` as soon as a segment is missing at the current level,
/// `false` when the walk hits a `Leaf` (i.e. `path` extends past a
/// stored path) or when every segment is consumed.
///
/// NOTE(review): despite the name, a fully-present path yields `false`
/// and a diverging one yields `true` — reads more like "path diverges
/// from tree"; verify against the call sites.
fn subpath_exists_in_tree(tree: &Tree, path: &Vec<String>) -> bool {
    let mut current = tree;
    for path_item in path {
        if !current.contains_key(path_item) {
            return true;
        }
        match current.get(path_item) {
            None => {
                return true;
            },
            Some(node_type) => {
                match node_type {
                    &NodeType::Leaf => {
                        // path is super path
                        return false;
                    },
                    &NodeType::Node(ref tree) => {
                        // Descend one level and keep walking.
                        current = tree;
                    }
                }
            }
        };
    }
    return false;
}
/*
Adapted from: https://gist.github.com/m4rw3r/1f43559dcd73bf46e845
Thanks to github.com/m4rw3r for wrapping parsers for line number tracking!
*/
/// Strategy for tracking a parser position over a token stream.
pub trait NumberingType {
    /// Token type consumed by the wrapped parser.
    type Token;
    /// Position value reported alongside parse results.
    type Position;
    /// Advances the position over a slice of consumed tokens.
    fn update(&mut self, &[Self::Token]);
    /// Current position.
    fn position(&self) -> Self::Position;
}
/// Zero-based line counter: tracks how many newline bytes have been
/// consumed so far.
#[derive(Debug)]
pub struct LineNumber(u64);

impl LineNumber {
    /// Starts counting from line zero.
    pub fn new() -> Self {
        LineNumber(0)
    }
}
impl NumberingType for LineNumber {
    type Token = u8;
    type Position = u64;

    /// Advances the counter by the number of b'\n' bytes in the
    /// consumed slice.
    fn update(&mut self, b: &[Self::Token]) {
        let newlines = b.iter().filter(|&&c| c == b'\n').count();
        self.0 += newlines as u64;
    }

    fn position(&self) -> Self::Position {
        self.0
    }
}
/// Wraps a chomp parser `P`, pairing every successful parse with the
/// position computed by the numbering strategy `T` (e.g. a line count).
#[derive(Debug)]
pub struct Numbering<'i, T, P, R, E>
  where T: NumberingType,
        P: FnMut(Input<'i, T::Token>) -> ParseResult<'i, T::Token, R, E>,
        R: 'i,
        E: 'i,
        <T as NumberingType>::Token: 'i {
    // The wrapped parser closure.
    parser: P,
    // Position-tracking state, advanced over whatever input is consumed.
    numbering: T,
    // Marks R and E as used without storing values of those types.
    _re: PhantomData<&'i (R, E)>,
}
impl<'i, N, P, R, E> Numbering<'i, N, P, R, E>
  where N: NumberingType,
        P: FnMut(Input<'i, N::Token>) -> ParseResult<'i, N::Token, R, E>,
        R: 'i,
        E: 'i,
        <N as NumberingType>::Position: std::fmt::Debug,
        <N as NumberingType>::Token: 'i {
    /// Pairs a numbering strategy with the parser it should track.
    pub fn new(n: N, p: P) -> Self {
        Numbering {
            parser: p,
            numbering: n,
            _re: PhantomData,
        }
    }

    /// Runs the wrapped parser, advances the numbering over exactly the
    /// input it consumed, and returns `(position, result)` on success.
    pub fn parse(&mut self, i: Input<'i, N::Token>) -> ParseResult<'i, N::Token, (N::Position, R), E> {
        use chomp::primitives::InputBuffer;
        use chomp::primitives::InputClone;
        use chomp::primitives::IntoInner;
        use chomp::primitives::State;

        // Keep the pre-parse input so the consumed prefix can be measured.
        let buf = i.clone();

        match (self.parser)(i.clone()).into_inner() {
            State::Data(remainder, t) => {
                // Consumed = original length minus what is left over.
                self.numbering.update(&buf.buffer()[..buf.buffer().len() - remainder.buffer().len()]);
                let pos = self.numbering.position();
                remainder.ret((pos, t))
            },
            State::Error(remainder, e) => {
                // Still advance over whatever was consumed before the error.
                self.numbering.update(&buf.buffer()[..buf.buffer().len() - remainder.len()]);
                buf.replace(remainder).err(e)
            },
            State::Incomplete(n) => buf.incomplete(n)
        }
    }
}
// Source: https://gist.github.com/dashed/9d18b7e4cc351a7feabc89897a58baff
/// Exercises `Numbering` + `LineNumber` over a buffered stream: each
/// two-byte parse reports the number of newlines consumed so far.
#[test]
fn line_numbering() {
    use chomp::take;
    use std::cell::Cell;
    use chomp::buffer::{IntoStream, Stream, StreamError};

    let mut data = b"abc\nc\n\ndef".into_stream();

    // Just some state to make sure we are called the correct number of times:
    let i = Cell::new(0);

    let p = |d| {
        i.set(i.get() + 1);
        take(d, 2)
    };

    let mut n = Numbering::new(LineNumber::new(), p);

    // If we could implement FnMut for Numbering then we would be good, but we need to wrap now:
    let mut m = |i| n.parse(i);

    // "ab": no newline consumed yet.
    assert_eq!(data.parse(&mut m), Ok((0, &b"ab"[..])));
    assert_eq!(i.get(), 1);
    // "c\n": first newline consumed.
    assert_eq!(data.parse(&mut m), Ok((1, &b"c\n"[..])));
    assert_eq!(i.get(), 2);
    assert_eq!(data.parse(&mut m), Ok((2, &b"c\n"[..])));
    assert_eq!(i.get(), 3);
    assert_eq!(data.parse(&mut m), Ok((3, &b"\nd"[..])));
    assert_eq!(i.get(), 4);
    assert_eq!(data.parse(&mut m), Ok((3, &b"ef"[..])));
    assert_eq!(i.get(), 5);
    // Stream exhausted.
    assert_eq!(data.parse(&mut m), Err(StreamError::EndOfInput));
    assert_eq!(i.get(), 5);
}
|
#![allow(dead_code)]
extern crate regex;
use regex::Regex;
use std::io;
use std::io::Write;
use std::collections::HashMap;
/// Lexical token produced by `parse_input`.
#[derive(Debug, PartialEq, Clone)]
pub enum Token {
    Unknown(String), // Invalid text (basically non-ascii)
    Literal(String), // Numeric literal number
    Func(Function), // Pre-defined function (like cos() )
    Const(Constant), // Constant like pi or e
    Var(String), // str arbitrary single char variable name
    Op(Operator), // Any of the operators (+ - * / ^ and unary '-')
    Open, // Open parens '('
    Close, // Closing parens ')'
    Comma, // Argument separator for two-argument functions
}
/// Built-in math functions recognized by the tokenizer
/// (see `map_string_to_func` for the name-to-variant mapping).
/// `Max` and `LogBase` take two arguments; the rest are unary.
#[derive(Debug, PartialEq, Clone)]
pub enum Function {
    Abs,
    Exp,
    Sqrt,
    Ln,
    Log,
    LogBase,
    Sin,
    Csc,
    Cos,
    Sec,
    Tan,
    Cot,
    Asin,
    Acsc,
    Acos,
    Asec,
    Atan,
    Acot,
    Sinh,
    Csch,
    Cosh,
    Sech,
    Tanh,
    Coth,
    Asinh,
    Acsch,
    Acosh,
    Asech,
    Atanh,
    Acoth,
    Max,
    Recip,
}
/// Named mathematical constants.
/// NOTE(review): `parse_input` only emits Literal/Func/Var tokens, so
/// `Const` is never produced by the lexer — confirm whether "pi"/"e"
/// were meant to lex to these variants.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Constant {
    Pi,
    E,
}
/// Infix operators. Discriminant values are declared but precedence is
/// handled structurally in `parse_input`'s shunting-yard match arms.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[repr(u32)]
pub enum Operator {
    Add = 0,
    Sub = 1,
    Div = 2,
    Mul = 3,
    Pow = 4, // Right-associative
    Negate = 5, // Right-associative (unary minus)
}
/// An ordered token sequence: infix during lexing, postfix (RPN) after
/// shunting-yard conversion.
#[derive(Debug, PartialEq, Clone)]
struct Expression {
    tokens: Vec<Token>,
}
/// Borrowing iterator over an `Expression`'s tokens.
struct ExpressionIter<'a> {
    expr: &'a Expression,
    count: usize, // index of the next token to yield
}
impl<'a> Iterator for ExpressionIter<'a> {
    type Item = &'a Token;

    /// Yields tokens front-to-back by index; `get` returns `None` once
    /// the index runs past the end.
    fn next(&mut self) -> Option<&'a Token> {
        let current = self.count;
        self.count += 1;
        self.expr.get_tokens().get(current)
    }
}
impl Expression {
    /// Wraps a token vector.
    fn new(tokens: Vec<Token>) -> Self {
        Expression { tokens: tokens }
    }

    /// Appends a token.
    fn push(&mut self, token: Token) {
        self.tokens.push(token);
    }

    /// Borrows the tokens as a slice.
    fn get_tokens(&self) -> &[Token] {
        self.tokens.as_slice()
    }

    /// Borrows the tokens as a mutable slice.
    fn get_mut_tokens(&mut self) -> &mut [Token] {
        self.tokens.as_mut_slice()
    }

    /// Borrows the token at `index`; panics when out of bounds.
    fn get_token(&self, index: usize) -> &Token {
        self.tokens.get(index).unwrap()
    }

    /// Mutably borrows the token at `index`; panics when out of bounds.
    fn get_mut_token(&mut self, index: usize) -> &mut Token {
        self.tokens.get_mut(index).unwrap()
    }

    /// Overwrites the token at `index` in place.
    /// (Was a remove + insert pair, which shifted the tail twice.)
    fn replace_token(&mut self, index: usize, new_token: Token) {
        self.tokens[index] = new_token;
    }

    /// Replaces the entire token sequence.
    fn replace_all_tokens(&mut self, new_token: Vec<Token>) {
        self.tokens = new_token
    }

    /// Index of the first occurrence of `token`, if any.
    fn find_first(&self, token: &Token) -> Option<usize> {
        self.tokens.iter().position(|t| t == token)
    }

    /// Index of the last occurrence of `token`, if any.
    fn find_last(&self, token: &Token) -> Option<usize> {
        self.tokens.iter().rposition(|t| t == token)
    }

    /// Splits the tokens at `index` (see `slice::split_at`).
    fn split_at(&self, index: usize) -> (&[Token], &[Token]) {
        self.tokens.split_at(index)
    }

    /// Number of tokens.
    fn len(&self) -> usize {
        self.tokens.len()
    }

    /// Borrowing iterator over the tokens.
    fn iter(&self) -> ExpressionIter {
        ExpressionIter {
            expr: self,
            count: 0,
        }
    }
}
/// Removes all whitespace by splitting on it and re-concatenating the
/// remaining fragments directly into a `String`.
fn strip_white_space(input: &String) -> String {
    input.split_whitespace().collect()
}
fn parse_input(input: &String,
numeric_regex: &Regex,
function_regex: &Regex)
-> (String, Result<Expression, String>) {
// 1. Replace everthing except letters/numbers with their enums
// 2. Then go through and replace things with Literals or functions
let mut variable: String = String::new();
let mut expr: Expression = Expression::new(Vec::new());
let mut builder: String = String::new();
for c in input.chars() {
match c {
'(' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Open);
}
')' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Close);
}
'+' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Op(Operator::Add));
}
'-' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
expr.push(Token::Op(Operator::Sub));
} else if builder.len() == 0 {
expr.push(Token::Op(Operator::Negate));
}
}
'*' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Op(Operator::Mul));
}
'/' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Op(Operator::Div));
}
'^' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Op(Operator::Pow));
}
',' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Comma);
}
'=' => {
if builder.len() > 0 {
variable = builder.clone();
builder = String::new();
}
}
_ => {
builder.push(c);
}
}
}
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
}
let mut op_stack: Vec<Token> = Vec::with_capacity(input.len());
let mut out_queue: Vec<Token> = Vec::with_capacity(input.len());
for i in 0..expr.len() {
let current_token = expr.get_token(i);
match current_token {
&Token::Literal(ref x) => out_queue.push(Token::Literal(x.clone())),
&Token::Func(ref x) => op_stack.push(Token::Func(x.clone())),
&Token::Comma => {
loop {
let stack_token = op_stack.pop();
if !stack_token.is_some() {
return (variable, Err("Malformed Expression, comma but no Parenthesis".to_owned()));
}
let stack_token = stack_token.unwrap();
match stack_token {
Token::Open => {
op_stack.push(stack_token);
break;
}
_ => out_queue.push(stack_token),
}
}
}
&Token::Op(ref o1) => {
loop {
if op_stack.len() < 1 {
break;
}
let o2 = op_stack.pop().unwrap(); // top of stack, must exist based off of previous if
match o1 {
&Operator::Negate => {
op_stack.push(o2);
break;
},
&Operator::Pow => {
match o2 {
Token::Op(Operator::Negate) => out_queue.push(o2),
_ => {
op_stack.push(o2);
break;
}
}
},
&Operator::Mul | &Operator::Div => {
match o2 {
Token::Op(Operator::Negate) |
Token::Op(Operator::Pow) |
Token::Op(Operator::Mul) |
Token::Op(Operator::Div) => out_queue.push(o2),
_ => {
op_stack.push(o2);
break;
}
}
}
&Operator::Add | &Operator::Sub => {
match o2 {
Token::Op(Operator::Negate) |
Token::Op(Operator::Pow) |
Token::Op(Operator::Mul) |
Token::Op(Operator::Div) |
Token::Op(Operator::Add) |
Token::Op(Operator::Sub) => out_queue.push(o2),
_ => {
op_stack.push(o2);
break;
},
}
}
}
}
op_stack.push(Token::Op(o1.clone()));
}
&Token::Open => op_stack.push(Token::Open),
&Token::Close => {
loop {
let stack_token = op_stack.pop();
if !stack_token.is_some() {
return (variable, Err("Malformed Expression, found a ) without (".to_owned()));
}
let stack_token = stack_token.unwrap();
match stack_token {
Token::Open => break,
_ => out_queue.push(stack_token),
}
}
if op_stack.len() > 0 {
let next_stack_token = op_stack.pop().unwrap(); // must exist based off of previous if
match next_stack_token {
Token::Func(ref x) => out_queue.push(Token::Func(x.clone())),
_ => op_stack.push(next_stack_token),
}
}
}
&Token::Var(ref x) => out_queue.push(Token::Var(x.clone())),
&Token::Unknown(ref x) => {
let mut message: String = "You either misspelled a function, or it is not yet \
implemented. The unknown string was: ".to_owned();
message.push_str(x);
return (variable, Err(message));
}
_ => break,
}
}
while op_stack.len() > 0 {
out_queue.push(op_stack.pop().unwrap()); // The item must exist
}
(variable, Ok(Expression::new(out_queue)))
}
/// Maps a lexed name (case-insensitive) to its built-in `Function`
/// token; anything unrecognized falls back to a `Var` token.
fn map_string_to_func(input: &String) -> Token {
    match &(input.to_lowercase())[..] {
        "abs" => Token::Func(Function::Abs),
        "exp" => Token::Func(Function::Exp),
        "sqrt" => Token::Func(Function::Sqrt),
        "ln" => Token::Func(Function::Ln),
        "log" => Token::Func(Function::Log),
        "logbase" => Token::Func(Function::LogBase),
        "sin" => Token::Func(Function::Sin),
        "csc" => Token::Func(Function::Csc),
        "cos" => Token::Func(Function::Cos),
        "sec" => Token::Func(Function::Sec),
        "tan" => Token::Func(Function::Tan),
        "cot" => Token::Func(Function::Cot),
        "asin" => Token::Func(Function::Asin),
        "acsc" => Token::Func(Function::Acsc),
        "acos" => Token::Func(Function::Acos),
        "asec" => Token::Func(Function::Asec),
        "atan" => Token::Func(Function::Atan),
        "acot" => Token::Func(Function::Acot),
        "sinh" => Token::Func(Function::Sinh),
        "csch" => Token::Func(Function::Csch),
        "cosh" => Token::Func(Function::Cosh),
        "sech" => Token::Func(Function::Sech),
        "tanh" => Token::Func(Function::Tanh),
        "coth" => Token::Func(Function::Coth),
        "asinh" => Token::Func(Function::Asinh),
        "acsch" => Token::Func(Function::Acsch),
        "acosh" => Token::Func(Function::Acosh),
        "asech" => Token::Func(Function::Asech),
        "atanh" => Token::Func(Function::Atanh),
        "acoth" => Token::Func(Function::Acoth),
        "max" => Token::Func(Function::Max),
        "recip" => Token::Func(Function::Recip),
        _ => Token::Var(input.clone()),
    }
}
/// Evaluates a postfix (RPN) token stream with a value stack.
///
/// `vars` supplies the value of every `Var` token. Panics when the
/// expression is malformed: an operator/function with too few operands,
/// an unparsable literal, or an unbound variable.
fn eval_postfix_expr(expr: &Expression, vars: &HashMap<String, f64>) -> f64 {
    let mut stack: Vec<f64> = Vec::with_capacity(expr.len() / 2);
    for token in expr.iter() {
        match token {
            &Token::Literal(ref x) => stack.push(x.parse::<f64>().unwrap()),
            &Token::Const(ref x) => match x {
                &Constant::Pi => stack.push(std::f64::consts::PI),
                &Constant::E => stack.push(std::f64::consts::E),
            },
            &Token::Op(ref x) => {
                // Every operator consumes at least the top of stack.
                let arg: f64 = stack.pop().unwrap();
                match x {
                    &Operator::Negate => stack.push(-1.0f64 * arg),
                    _ => {
                        // Binary operators: `arg` was pushed last, so it
                        // is the right-hand operand.
                        let arg2 = arg;
                        let arg1 = stack.pop().unwrap();
                        match x {
                            &Operator::Add => stack.push(arg1 + arg2),
                            &Operator::Sub => stack.push(arg1 - arg2),
                            &Operator::Div => stack.push(arg1 / arg2),
                            &Operator::Mul => stack.push(arg1 * arg2),
                            &Operator::Pow => stack.push(arg1.powf(arg2)),
                            _ => continue, // Should never hit here
                        }
                    }
                }
            },
            &Token::Func(ref x) => {
                // Unary functions consume one argument; Max and LogBase
                // pop a second one in their arms below.
                let arg: f64 = stack.pop().unwrap();
                match x {
                    &Function::Abs => stack.push(f64::abs(arg)),
                    &Function::Sqrt => stack.push(f64::sqrt(arg)),
                    &Function::Ln => stack.push(f64::ln(arg)),
                    &Function::Log => stack.push(f64::log10(arg)),
                    &Function::Exp => stack.push(f64::exp(arg)),
                    &Function::Sin => stack.push(f64::sin(arg)),
                    &Function::Csc => stack.push(f64::recip(f64::sin(arg))),
                    &Function::Cos => stack.push(f64::cos(arg)),
                    &Function::Sec => stack.push(f64::recip(f64::cos(arg))),
                    &Function::Tan => stack.push(f64::tan(arg)),
                    &Function::Cot => stack.push(f64::recip(f64::tan(arg))),
                    &Function::Asin => stack.push(f64::asin(arg)),
                    &Function::Acsc => stack.push(f64::recip(f64::asin(arg))),
                    &Function::Acos => stack.push(f64::acos(arg)),
                    &Function::Asec => stack.push(f64::recip(f64::acos(arg))),
                    &Function::Atan => stack.push(f64::atan(arg)),
                    &Function::Acot => stack.push(f64::recip(f64::atan(arg))),
                    &Function::Sinh => stack.push(f64::sinh(arg)),
                    &Function::Csch => stack.push(f64::recip(f64::sinh(arg))),
                    &Function::Cosh => stack.push(f64::cosh(arg)),
                    &Function::Sech => stack.push(f64::recip(f64::cosh(arg))),
                    &Function::Tanh => stack.push(f64::tanh(arg)),
                    &Function::Coth => stack.push(f64::recip(f64::tanh(arg))),
                    &Function::Asinh => stack.push(f64::asinh(arg)),
                    &Function::Acsch => stack.push(f64::recip(f64::asinh(arg))),
                    &Function::Acosh => stack.push(f64::acosh(arg)),
                    &Function::Asech => stack.push(f64::recip(f64::acosh(arg))),
                    &Function::Atanh => stack.push(f64::atanh(arg)),
                    &Function::Acoth => stack.push(f64::recip(f64::atanh(arg))),
                    &Function::Recip => stack.push(f64::recip(arg)),
                    &Function::Max => {
                        let arg2 = arg;
                        let arg1 = stack.pop().unwrap();
                        stack.push(arg1.max(arg2));
                    },
                    &Function::LogBase => {
                        let arg2 = arg;
                        let arg1 = stack.pop().unwrap();
                        stack.push(arg1.log(arg2)); // logbase(8,2) == 3
                    }
                }
            },
            // Unknown tokens consume their operand and are otherwise
            // skipped. (Was `ref x` with the binding never used.)
            &Token::Unknown(_) => {
                let _ = stack.pop().unwrap();
                continue;
            },
            &Token::Var(ref x) => {
                let value: f64 = *vars.get(x).unwrap();
                stack.push(value);
            },
            _ => continue
        }
    }
    // The final value left on the stack is the result.
    stack.pop().unwrap()
}
// test
fn main() {
let numeric_regex: Regex = Regex::new(r"\d+\.\d+|\d+").unwrap();
let function_regex: Regex = Regex::new(r"\w{2,}").unwrap();
println!("Welcome to Rust-Calculus!");
println!("To evaluate an expression, simply type one in and hit RETURN.");
println!("To set a variable, simply type VAR_NAME=EXPRESSION and hit RETURN.");
println!("Valid commands are: sym_int, int, sym_def, and def.");
println!("Type 'quit' to exit.");
let mut input = String::new();
let stdin = io::stdin();
let mut stdout = io::stdout();
let mut variables: HashMap<String, f64> = HashMap::new();
let mut var_expr: bool;
loop {
var_expr = false;
input.clear();
print!(">>>> ");
stdout.flush().ok();
if let Err(x) = stdin.read_line(&mut input) {
println!("There was a problem reading stdin: {:?}", x);
print!("Exiting...");;
break;
}
input = strip_white_space(&input).to_lowercase();
if input == "quit" {
println!("Exiting...");
break;
}
let (var, expr) = parse_input(&input, &numeric_regex, &function_regex);
if var.len() > 0 {
var_expr = true;
if !variables.contains_key(&var) {
variables.insert(var.clone(), 0.0);
}
} else {
var_expr = false;
}
if expr.is_ok() {
let my_expression = expr.unwrap();
println!("HASHMAP BEFORE: {:?}", &variables);
let result = eval_postfix_expr(&my_expression, &variables);
if var_expr {
variables.insert(var.clone(), result);
println!("{} = {}", &var, &result);
} else {
println!("{}", &result);
}
println!("HASHMAP AFTER: {:?}", &variables);
} else {
println!("Encountered an error while parsing: {:?}",
expr.unwrap_err());
println!("Try Again...(type 'quit' to exit)");
continue;
}
}
}
un-test
#![allow(dead_code)]
extern crate regex;
use regex::Regex;
use std::io;
use std::io::Write;
use std::collections::HashMap;
/// Lexical token produced by `parse_input`.
#[derive(Debug, PartialEq, Clone)]
pub enum Token {
    Unknown(String), // Invalid text (basically non-ascii)
    Literal(String), // Numeric literal number
    Func(Function), // Pre-defined function (like cos() )
    Const(Constant), // Constant like pi or e
    Var(String), // str arbitrary single char variable name
    Op(Operator), // Any of the operators (+ - * / ^ and unary '-')
    Open, // Open parens '('
    Close, // Closing parens ')'
    Comma, // Argument separator for two-argument functions
}
/// Built-in math functions recognized by the tokenizer
/// (see `map_string_to_func` for the name-to-variant mapping).
/// `Max` and `LogBase` take two arguments; the rest are unary.
#[derive(Debug, PartialEq, Clone)]
pub enum Function {
    Abs,
    Exp,
    Sqrt,
    Ln,
    Log,
    LogBase,
    Sin,
    Csc,
    Cos,
    Sec,
    Tan,
    Cot,
    Asin,
    Acsc,
    Acos,
    Asec,
    Atan,
    Acot,
    Sinh,
    Csch,
    Cosh,
    Sech,
    Tanh,
    Coth,
    Asinh,
    Acsch,
    Acosh,
    Asech,
    Atanh,
    Acoth,
    Max,
    Recip,
}
/// Named mathematical constants.
/// NOTE(review): `parse_input` only emits Literal/Func/Var tokens, so
/// `Const` is never produced by the lexer — confirm whether "pi"/"e"
/// were meant to lex to these variants.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Constant {
    Pi,
    E,
}
/// Infix operators. Discriminant values are declared but precedence is
/// handled structurally in `parse_input`'s shunting-yard match arms.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[repr(u32)]
pub enum Operator {
    Add = 0,
    Sub = 1,
    Div = 2,
    Mul = 3,
    Pow = 4, // Right-associative
    Negate = 5, // Right-associative (unary minus)
}
/// An ordered token sequence: infix during lexing, postfix (RPN) after
/// shunting-yard conversion.
#[derive(Debug, PartialEq, Clone)]
struct Expression {
    tokens: Vec<Token>,
}
/// Borrowing iterator over an `Expression`'s tokens.
struct ExpressionIter<'a> {
    expr: &'a Expression,
    count: usize, // index of the next token to yield
}
impl<'a> Iterator for ExpressionIter<'a> {
    type Item = &'a Token;

    /// Yields tokens front-to-back by index; `get` returns `None` once
    /// the index runs past the end.
    fn next(&mut self) -> Option<&'a Token> {
        let current = self.count;
        self.count += 1;
        self.expr.get_tokens().get(current)
    }
}
impl Expression {
    /// Wraps a token vector.
    fn new(tokens: Vec<Token>) -> Self {
        Expression { tokens: tokens }
    }

    /// Appends a token.
    fn push(&mut self, token: Token) {
        self.tokens.push(token);
    }

    /// Borrows the tokens as a slice.
    fn get_tokens(&self) -> &[Token] {
        self.tokens.as_slice()
    }

    /// Borrows the tokens as a mutable slice.
    fn get_mut_tokens(&mut self) -> &mut [Token] {
        self.tokens.as_mut_slice()
    }

    /// Borrows the token at `index`; panics when out of bounds.
    fn get_token(&self, index: usize) -> &Token {
        self.tokens.get(index).unwrap()
    }

    /// Mutably borrows the token at `index`; panics when out of bounds.
    fn get_mut_token(&mut self, index: usize) -> &mut Token {
        self.tokens.get_mut(index).unwrap()
    }

    /// Overwrites the token at `index` in place.
    /// (Was a remove + insert pair, which shifted the tail twice.)
    fn replace_token(&mut self, index: usize, new_token: Token) {
        self.tokens[index] = new_token;
    }

    /// Replaces the entire token sequence.
    fn replace_all_tokens(&mut self, new_token: Vec<Token>) {
        self.tokens = new_token
    }

    /// Index of the first occurrence of `token`, if any.
    fn find_first(&self, token: &Token) -> Option<usize> {
        self.tokens.iter().position(|t| t == token)
    }

    /// Index of the last occurrence of `token`, if any.
    fn find_last(&self, token: &Token) -> Option<usize> {
        self.tokens.iter().rposition(|t| t == token)
    }

    /// Splits the tokens at `index` (see `slice::split_at`).
    fn split_at(&self, index: usize) -> (&[Token], &[Token]) {
        self.tokens.split_at(index)
    }

    /// Number of tokens.
    fn len(&self) -> usize {
        self.tokens.len()
    }

    /// Borrowing iterator over the tokens.
    fn iter(&self) -> ExpressionIter {
        ExpressionIter {
            expr: self,
            count: 0,
        }
    }
}
/// Removes all whitespace by splitting on it and re-concatenating the
/// remaining fragments directly into a `String`.
fn strip_white_space(input: &String) -> String {
    input.split_whitespace().collect()
}
fn parse_input(input: &String,
numeric_regex: &Regex,
function_regex: &Regex)
-> (String, Result<Expression, String>) {
// 1. Replace everthing except letters/numbers with their enums
// 2. Then go through and replace things with Literals or functions
let mut variable: String = String::new();
let mut expr: Expression = Expression::new(Vec::new());
let mut builder: String = String::new();
for c in input.chars() {
match c {
'(' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Open);
}
')' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Close);
}
'+' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Op(Operator::Add));
}
'-' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
expr.push(Token::Op(Operator::Sub));
} else if builder.len() == 0 {
expr.push(Token::Op(Operator::Negate));
}
}
'*' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Op(Operator::Mul));
}
'/' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Op(Operator::Div));
}
'^' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Op(Operator::Pow));
}
',' => {
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
builder = String::new();
}
expr.push(Token::Comma);
}
'=' => {
if builder.len() > 0 {
variable = builder.clone();
builder = String::new();
}
}
_ => {
builder.push(c);
}
}
}
if builder.len() > 0 {
if numeric_regex.is_match(&builder[..]) {
expr.push(Token::Literal(builder.clone()));
} else if function_regex.is_match(&builder[..]) {
expr.push(map_string_to_func(&builder));
} else {
expr.push(Token::Var(builder.clone()));
}
}
let mut op_stack: Vec<Token> = Vec::with_capacity(input.len());
let mut out_queue: Vec<Token> = Vec::with_capacity(input.len());
for i in 0..expr.len() {
let current_token = expr.get_token(i);
match current_token {
&Token::Literal(ref x) => out_queue.push(Token::Literal(x.clone())),
&Token::Func(ref x) => op_stack.push(Token::Func(x.clone())),
&Token::Comma => {
loop {
let stack_token = op_stack.pop();
if !stack_token.is_some() {
return (variable, Err("Malformed Expression, comma but no Parenthesis".to_owned()));
}
let stack_token = stack_token.unwrap();
match stack_token {
Token::Open => {
op_stack.push(stack_token);
break;
}
_ => out_queue.push(stack_token),
}
}
}
&Token::Op(ref o1) => {
loop {
if op_stack.len() < 1 {
break;
}
let o2 = op_stack.pop().unwrap(); // top of stack, must exist based off of previous if
match o1 {
&Operator::Negate => {
op_stack.push(o2);
break;
},
&Operator::Pow => {
match o2 {
Token::Op(Operator::Negate) => out_queue.push(o2),
_ => {
op_stack.push(o2);
break;
}
}
},
&Operator::Mul | &Operator::Div => {
match o2 {
Token::Op(Operator::Negate) |
Token::Op(Operator::Pow) |
Token::Op(Operator::Mul) |
Token::Op(Operator::Div) => out_queue.push(o2),
_ => {
op_stack.push(o2);
break;
}
}
}
&Operator::Add | &Operator::Sub => {
match o2 {
Token::Op(Operator::Negate) |
Token::Op(Operator::Pow) |
Token::Op(Operator::Mul) |
Token::Op(Operator::Div) |
Token::Op(Operator::Add) |
Token::Op(Operator::Sub) => out_queue.push(o2),
_ => {
op_stack.push(o2);
break;
},
}
}
}
}
op_stack.push(Token::Op(o1.clone()));
}
&Token::Open => op_stack.push(Token::Open),
&Token::Close => {
loop {
let stack_token = op_stack.pop();
if !stack_token.is_some() {
return (variable, Err("Malformed Expression, found a ) without (".to_owned()));
}
let stack_token = stack_token.unwrap();
match stack_token {
Token::Open => break,
_ => out_queue.push(stack_token),
}
}
if op_stack.len() > 0 {
let next_stack_token = op_stack.pop().unwrap(); // must exist based off of previous if
match next_stack_token {
Token::Func(ref x) => out_queue.push(Token::Func(x.clone())),
_ => op_stack.push(next_stack_token),
}
}
}
&Token::Var(ref x) => out_queue.push(Token::Var(x.clone())),
&Token::Unknown(ref x) => {
let mut message: String = "You either misspelled a function, or it is not yet \
implemented. The unknown string was: ".to_owned();
message.push_str(x);
return (variable, Err(message));
}
_ => break,
}
}
while op_stack.len() > 0 {
out_queue.push(op_stack.pop().unwrap()); // The item must exist
}
(variable, Ok(Expression::new(out_queue)))
}
/// Maps a lexed name (case-insensitive) to its built-in `Function`
/// token; anything unrecognized falls back to a `Var` token.
fn map_string_to_func(input: &String) -> Token {
    match &(input.to_lowercase())[..] {
        "abs" => Token::Func(Function::Abs),
        "exp" => Token::Func(Function::Exp),
        "sqrt" => Token::Func(Function::Sqrt),
        "ln" => Token::Func(Function::Ln),
        "log" => Token::Func(Function::Log),
        "logbase" => Token::Func(Function::LogBase),
        "sin" => Token::Func(Function::Sin),
        "csc" => Token::Func(Function::Csc),
        "cos" => Token::Func(Function::Cos),
        "sec" => Token::Func(Function::Sec),
        "tan" => Token::Func(Function::Tan),
        "cot" => Token::Func(Function::Cot),
        "asin" => Token::Func(Function::Asin),
        "acsc" => Token::Func(Function::Acsc),
        "acos" => Token::Func(Function::Acos),
        "asec" => Token::Func(Function::Asec),
        "atan" => Token::Func(Function::Atan),
        "acot" => Token::Func(Function::Acot),
        "sinh" => Token::Func(Function::Sinh),
        "csch" => Token::Func(Function::Csch),
        "cosh" => Token::Func(Function::Cosh),
        "sech" => Token::Func(Function::Sech),
        "tanh" => Token::Func(Function::Tanh),
        "coth" => Token::Func(Function::Coth),
        "asinh" => Token::Func(Function::Asinh),
        "acsch" => Token::Func(Function::Acsch),
        "acosh" => Token::Func(Function::Acosh),
        "asech" => Token::Func(Function::Asech),
        "atanh" => Token::Func(Function::Atanh),
        "acoth" => Token::Func(Function::Acoth),
        "max" => Token::Func(Function::Max),
        "recip" => Token::Func(Function::Recip),
        _ => Token::Var(input.clone()),
    }
}
/// Evaluates a postfix (RPN) expression to a single `f64`.
///
/// `vars` supplies the current value of every `Token::Var` encountered.
/// Assumes the expression is well-formed (parsing already rejected
/// malformed input); an ill-formed expression panics on `unwrap`.
fn eval_postfix_expr(expr: &Expression, vars: &HashMap<String, f64>) -> f64 {
    let mut stack: Vec<f64> = Vec::with_capacity(expr.len() / 2);
    for token in expr.iter() {
        match token {
            &Token::Literal(ref x) => stack.push(x.parse::<f64>().unwrap()),
            &Token::Const(ref x) => match x {
                &Constant::Pi => stack.push(std::f64::consts::PI),
                &Constant::E => stack.push(std::f64::consts::E),
            },
            &Token::Op(ref x) => {
                let arg: f64 = stack.pop().unwrap();
                match x {
                    &Operator::Negate => stack.push(-1.0f64 * arg),
                    _ => {
                        // Binary operator: `arg` was pushed last, so it is
                        // the right-hand operand.
                        let arg2 = arg;
                        let arg1 = stack.pop().unwrap();
                        match x {
                            &Operator::Add => stack.push(arg1 + arg2),
                            &Operator::Sub => stack.push(arg1 - arg2),
                            &Operator::Div => stack.push(arg1 / arg2),
                            &Operator::Mul => stack.push(arg1 * arg2),
                            &Operator::Pow => stack.push(arg1.powf(arg2)),
                            _ => continue, // Should never hit here
                        }
                    }
                }
            },
            &Token::Func(ref x) => {
                let arg: f64 = stack.pop().unwrap();
                match x {
                    &Function::Abs => stack.push(f64::abs(arg)),
                    &Function::Sqrt => stack.push(f64::sqrt(arg)),
                    &Function::Ln => stack.push(f64::ln(arg)),
                    &Function::Log => stack.push(f64::log10(arg)),
                    &Function::Exp => stack.push(f64::exp(arg)),
                    // Reciprocal trig: csc = 1/sin, sec = 1/cos, cot = 1/tan.
                    &Function::Sin => stack.push(f64::sin(arg)),
                    &Function::Csc => stack.push(f64::recip(f64::sin(arg))),
                    &Function::Cos => stack.push(f64::cos(arg)),
                    &Function::Sec => stack.push(f64::recip(f64::cos(arg))),
                    &Function::Tan => stack.push(f64::tan(arg)),
                    &Function::Cot => stack.push(f64::recip(f64::tan(arg))),
                    // BUG FIX: the inverse of a reciprocal function applies
                    // the inverse to the reciprocal of the ARGUMENT:
                    // arccsc(x) = arcsin(1/x), arcsec(x) = arccos(1/x),
                    // arccot(x) = arctan(1/x) — the previous code computed
                    // 1/arcsin(x) etc., which is a different function.
                    &Function::Asin => stack.push(f64::asin(arg)),
                    &Function::Acsc => stack.push(f64::asin(f64::recip(arg))),
                    &Function::Acos => stack.push(f64::acos(arg)),
                    &Function::Asec => stack.push(f64::acos(f64::recip(arg))),
                    &Function::Atan => stack.push(f64::atan(arg)),
                    &Function::Acot => stack.push(f64::atan(f64::recip(arg))),
                    &Function::Sinh => stack.push(f64::sinh(arg)),
                    &Function::Csch => stack.push(f64::recip(f64::sinh(arg))),
                    &Function::Cosh => stack.push(f64::cosh(arg)),
                    &Function::Sech => stack.push(f64::recip(f64::cosh(arg))),
                    &Function::Tanh => stack.push(f64::tanh(arg)),
                    &Function::Coth => stack.push(f64::recip(f64::tanh(arg))),
                    // Same fix for the inverse hyperbolic reciprocals:
                    // arcsch(x) = arsinh(1/x), arsech(x) = arcosh(1/x),
                    // arcoth(x) = artanh(1/x).
                    &Function::Asinh => stack.push(f64::asinh(arg)),
                    &Function::Acsch => stack.push(f64::asinh(f64::recip(arg))),
                    &Function::Acosh => stack.push(f64::acosh(arg)),
                    &Function::Asech => stack.push(f64::acosh(f64::recip(arg))),
                    &Function::Atanh => stack.push(f64::atanh(arg)),
                    &Function::Acoth => stack.push(f64::atanh(f64::recip(arg))),
                    &Function::Recip => stack.push(f64::recip(arg)),
                    &Function::Max => {
                        // Two-argument function: second operand is on top.
                        let arg2 = arg;
                        let arg1 = stack.pop().unwrap();
                        stack.push(arg1.max(arg2));
                    },
                    &Function::LogBase => {
                        let arg2 = arg;
                        let arg1 = stack.pop().unwrap();
                        stack.push(arg1.log(arg2)); // logbase(8,2) == 3
                    }
                }
            },
            // Unknown tokens are rejected during parsing; if one slips
            // through, drop an operand to keep the stack balanced.
            &Token::Unknown(_) => {
                let _ = stack.pop().unwrap();
                continue;
            },
            &Token::Var(ref x) => {
                let value: f64 = *vars.get(x).unwrap();
                stack.push(value);
            },
            _ => continue
        }
    }
    stack.pop().unwrap()
}
fn main() {
let numeric_regex: Regex = Regex::new(r"\d+\.\d+|\d+").unwrap();
let function_regex: Regex = Regex::new(r"\w{2,}").unwrap();
println!("Welcome to Rust-Calculus!");
println!("To evaluate an expression, simply type one in and hit RETURN.");
println!("To set a variable, simply type VAR_NAME=EXPRESSION and hit RETURN.");
println!("Valid commands are: sym_int, int, sym_def, and def.");
println!("Type 'quit' to exit.");
let mut input = String::new();
let stdin = io::stdin();
let mut stdout = io::stdout();
let mut variables: HashMap<String, f64> = HashMap::new();
let mut var_expr: bool;
loop {
var_expr = false;
input.clear();
print!(">>>> ");
stdout.flush().ok();
if let Err(x) = stdin.read_line(&mut input) {
println!("There was a problem reading stdin: {:?}", x);
print!("Exiting...");;
break;
}
input = strip_white_space(&input).to_lowercase();
if input == "quit" {
println!("Exiting...");
break;
}
let (var, expr) = parse_input(&input, &numeric_regex, &function_regex);
if var.len() > 0 {
var_expr = true;
if !variables.contains_key(&var) {
variables.insert(var.clone(), 0.0);
}
} else {
var_expr = false;
}
if expr.is_ok() {
let my_expression = expr.unwrap();
println!("HASHMAP BEFORE: {:?}", &variables);
let result = eval_postfix_expr(&my_expression, &variables);
if var_expr {
variables.insert(var.clone(), result);
println!("{} = {}", &var, &result);
} else {
println!("{}", &result);
}
println!("HASHMAP AFTER: {:?}", &variables);
} else {
println!("Encountered an error while parsing: {:?}",
expr.unwrap_err());
println!("Try Again...(type 'quit' to exit)");
continue;
}
}
} |
extern crate core;
extern crate libc;
extern crate llvm_sys as llvm;
use core::str::FromStr;
use std::collections::HashMap;
// use lib::llvm::llvm;
use llvm::prelude::{LLVMBuilderRef, LLVMContextRef, LLVMModuleRef, LLVMPassManagerRef, LLVMTypeRef, LLVMValueRef};
use llvm::execution_engine::{LLVMExecutionEngineRef, LLVMGenericValueToFloat, LLVMRunFunction, LLVMGenericValueRef};
use llvm::analysis::{LLVMVerifyFunction, LLVMVerifierFailureAction};
use llvm::LLVMRealPredicate;
use std::char;
use std::ffi::CString;
use std::io::{self, Read, Write, BufReader};
use std::ptr;
use std::str;
use std::sync::mpsc::{self, Sender, Receiver};
use std::thread;
use std::vec;
use libc::{c_uint};
/// Lexer tokens for the Kaleidoscope language.
///
/// `PartialEq` is now derived — the previous hand-written impl was
/// token-for-token identical to what the derive generates (structural
/// comparison, `Number` compared by `f64` equality).
#[derive(Clone, PartialEq)]
enum Token {
    /// The `def` keyword (function definition).
    Def,
    /// The `extern` keyword (external prototype).
    Extern,
    /// An identifier: [a-zA-Z][a-zA-Z]*.
    Identifier(String),
    /// A numeric literal.
    Number(f64),
    /// Any other single character (operators, punctuation, ';').
    Char(char),
    /// The input stream ended.
    EndOfFile
}
// `Eq` cannot be derived because `f64` is only `PartialEq` (NaN != NaN).
// Asserted manually, as before: sound as long as `Number(NaN)` tokens are
// never relied on for reflexive equality.
impl Eq for Token {
}
/// An AST node that can emit LLVM IR for itself.
trait ExprAst {
    /// Emits IR through the parser's builder and returns the resulting
    /// LLVM value. Unsafe: calls straight into the LLVM C API.
    /// (Anonymous trait parameter is pre-2018 style; impls name it `parser`.)
    unsafe fn codegen(&self, &mut Parser) -> LLVMValueRef;
}
/// Numeric literal node.
struct NumberExprAst {
    val: f64
}
/// Reference to a named value (a function parameter, per PrototypeAst).
struct VariableExprAst {
    name: String
}
/// Binary operation; `op` is the operator token ('+', '-', '*', '<').
struct BinaryExprAst {
    op: Token,
    lhs: Box<ExprAst>,
    rhs: Box<ExprAst>,
}
/// Function call with argument expressions.
struct CallExprAst {
    callee: String,
    args: Vec<Box<ExprAst>>
}
/// Function signature: name plus parameter names (all doubles).
struct PrototypeAst {
    name: String,
    argNames: Vec<String>
}
/// A complete function definition: prototype plus a single body expression.
struct FunctionAst {
    proto: Box<PrototypeAst>,
    body: Box<ExprAst>
}
impl ExprAst for NumberExprAst {
    /// Emits the literal as an LLVM `double` constant.
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        let ty = llvm::core::LLVMDoubleTypeInContext(parser.contextRef);
        return llvm::core::LLVMConstReal(ty, self.val);
    }
}
impl ExprAst for VariableExprAst {
    /// Resolves the name in the parser's symbol table (populated from
    /// function parameters in PrototypeAst::codegen). Panics on unknown
    /// names rather than reporting a recoverable error.
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        return match parser.namedValues.get(&self.name) {
            Some(v) => *v,
            None => panic!("Unknown variable name {}", self.name)
        };
    }
}
impl ExprAst for BinaryExprAst {
    /// Emits both operands, then the instruction for `op`.
    /// NOTE(review): `CString::new(..).into_raw()` hands ownership of each
    /// name buffer to a raw pointer that is never reclaimed — every emitted
    /// instruction leaks a small allocation. Binding the CString and passing
    /// `.as_ptr()` would avoid the leak (LLVM copies the name).
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        let lhsValue = self.lhs.codegen(parser);
        let rhsValue = self.rhs.codegen(parser);
        match self.op {
            Token::Char('+') =>
                return llvm::core::LLVMBuildFAdd(parser.builderRef, lhsValue, rhsValue, CString::new("addtmp").unwrap().into_raw()),
            Token::Char('-') =>
                return llvm::core::LLVMBuildFSub(parser.builderRef, lhsValue, rhsValue, CString::new("subtmp").unwrap().into_raw()),
            Token::Char('*') =>
                return llvm::core::LLVMBuildFMul(parser.builderRef, lhsValue, rhsValue, CString::new("multmp").unwrap().into_raw()),
            Token::Char('<') => {
                // Compare (unordered less-than), then widen the i1 result
                // back to double 0.0/1.0, since everything is a double.
                let cmpValue = llvm::core::LLVMBuildFCmp(parser.builderRef, LLVMRealPredicate::LLVMRealULT, lhsValue, rhsValue, CString::new("cmptmp").unwrap().into_raw());
                let ty = llvm::core::LLVMDoubleTypeInContext(parser.contextRef);
                return llvm::core::LLVMBuildUIToFP(parser.builderRef, cmpValue, ty, CString::new("booltmp").unwrap().into_raw());
            }
            _ => {
                // NOTE(review): '/' gets a precedence in getTokenPrecedence
                // but has no arm here, so division panics at codegen time.
                panic!("llvm code gen failed, invalid binary operation");
            }
        }
    }
}
impl ExprAst for CallExprAst {
    /// Emits a call to `callee`, declaring it on the fly (as a
    /// (double, ...) -> double function) if it does not exist yet.
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        let funType : LLVMTypeRef = parser.getDoubleFunType(self.args.len());
        let calleeF = parser.getOrInsertFunction(self.callee.clone(), funType);
        // TODO check arg size
        let mut argsV : Vec<LLVMValueRef> = Vec::new();
        for arg in self.args.iter() {
            argsV.push(arg.codegen(parser));
        }
        return llvm::core::LLVMBuildCall(parser.builderRef, calleeF, argsV.as_mut_ptr(), argsV.len() as c_uint, CString::new("calltmp").unwrap().into_raw());
    }
}
impl PrototypeAst {
    /// Declares the function (or reuses an existing declaration), names its
    /// parameters, and registers each parameter in `parser.namedValues` so
    /// the body can reference them.
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        let funType = parser.getDoubleFunType(self.argNames.len());
        let fun = parser.getOrInsertFunction(self.name.clone(), funType);
        // A function that already has basic blocks was already defined.
        if llvm::core::LLVMCountBasicBlocks(fun) != 0 {
            panic!("Redefinition of function");
        }
        // NOTE(review): `nArgs != 0 &&` lets an existing zero-param
        // declaration pass even when this prototype has parameters —
        // confirm that asymmetry is intended.
        let nArgs = llvm::core::LLVMCountParams(fun) as usize;
        if nArgs != 0 && nArgs != self.argNames.len() {
            panic!("Redefinition of function with different argument count");
        }
        for (i, argName) in self.argNames.iter().enumerate() {
            let llarg = llvm::core::LLVMGetParam(fun, i as c_uint);
            llvm::core::LLVMSetValueName(llarg, CString::new(argName.to_string()).unwrap().into_raw());
            parser.namedValues.insert(argName.clone(), llarg);
        }
        return fun;
    }
}
impl FunctionAst {
    /// Emits a full function: prototype, an "entry" block, the body
    /// expression, and a `ret` of the body's value, then verifies it.
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        // Fresh symbol table per function; only this function's params live in it.
        parser.namedValues.clear();
        let fun = self.proto.codegen(parser);
        let basicBlock = llvm::core::LLVMAppendBasicBlockInContext(parser.contextRef, fun, CString::new("entry").unwrap().into_raw());
        llvm::core::LLVMPositionBuilderAtEnd(parser.builderRef, basicBlock);
        let body = self.body.codegen(parser);
        llvm::core::LLVMBuildRet(parser.builderRef, body);
        // Verification failure is only logged; the bad function is still returned.
        if LLVMVerifyFunction(fun, LLVMVerifierFailureAction::LLVMPrintMessageAction) != 0 {
            println!("Function verify failed");
        }
        // llvm::core::LLVMRunFunctionPassManager(parser.functionPassManagerRef, fun);
        return fun;
    }
}
/// Pulls tokens from the lexer thread and drives parsing + LLVM codegen.
struct Parser {
    // Channel from the lexer thread (readTokens).
    tokenReceiver: Receiver<Token>,
    // One-token lookahead; primed with a space before the first read.
    currentToken: Token,
    moduleRef: LLVMModuleRef,
    builderRef: LLVMBuilderRef,
    contextRef: LLVMContextRef,
    // MCJIT engine used to run top-level expressions.
    executionEngineRef: LLVMExecutionEngineRef,
    // NOTE(review): only referenced by the commented-out pass-manager call.
    functionPassManagerRef: LLVMPassManagerRef,
    // Symbol table for the function currently being generated.
    namedValues: HashMap<String, LLVMValueRef>,
    // NOTE(review): never read or written in the visible impl — dead field?
    functions: HashMap<String, LLVMValueRef>
}
/// Parse results carry static error strings on failure.
type ParseResult<T> = Result<T, &'static str>;
impl Parser {
    /// Builds the parser plus all LLVM machinery it drives: native-target
    /// init, context, module, function pass manager, IR builder, and an
    /// MCJIT execution engine for the module.
    fn new(tokenReceiver: Receiver<Token>) -> Parser {
        // NOTE(review): all three init failures share one (copy-pasted) message.
        unsafe {
            if llvm::target::LLVM_InitializeNativeTarget() != 0 {
                panic!("initializing native target");
            }
            if llvm::target::LLVM_InitializeNativeAsmPrinter() != 0 {
                panic!("initializing native target");
            }
            if llvm::target::LLVM_InitializeNativeAsmParser() != 0 {
                panic!("initializing native target");
            }
        }
        let llcx = unsafe {
            llvm::core::LLVMContextCreate()
        };
        let llmod = unsafe {
            llvm::core::LLVMModuleCreateWithNameInContext(CString::new("kaleidoscope").unwrap().into_raw(), llcx)
        };
        let llfpm = unsafe {
            llvm::core::LLVMCreateFunctionPassManagerForModule(llmod)
        };
        // Standard optimization pipeline (currently unused: the run call in
        // FunctionAst::codegen is commented out).
        unsafe {
            llvm::transforms::scalar::LLVMAddBasicAliasAnalysisPass(llfpm);
            llvm::transforms::scalar::LLVMAddInstructionCombiningPass(llfpm);
            llvm::transforms::scalar::LLVMAddReassociatePass(llfpm);
            llvm::transforms::scalar::LLVMAddGVNPass(llfpm);
            llvm::transforms::scalar::LLVMAddCFGSimplificationPass(llfpm);
            llvm::core::LLVMInitializeFunctionPassManager(llfpm);
        }
        let llbuilder = unsafe {
            llvm::core::LLVMCreateBuilderInContext(llcx)
        };
        let mut llee = unsafe {
            // initialize vars to NULL
            llvm::execution_engine::LLVMLinkInMCJIT();
            let mut llee: LLVMExecutionEngineRef = 0 as LLVMExecutionEngineRef;
            let mut err: *mut i8 = 0 as *mut i8;
            // On failure LLVM hands us ownership of `err`; CString::from_raw
            // reclaims it before the panic formats the message.
            if llvm::execution_engine::LLVMCreateExecutionEngineForModule(&mut llee, llmod, &mut err) != 0 {
                panic!("ceefm err {}", CString::from_raw(err).into_string().unwrap());
            }
            println!("ee: {:?}", llee);
            llee
        };
        return Parser {
            tokenReceiver: tokenReceiver,
            currentToken: Token::Char(' '),
            moduleRef: llmod,
            builderRef: llbuilder,
            contextRef: llcx,
            executionEngineRef: llee,
            functionPassManagerRef: llfpm,
            namedValues: HashMap::new(),
            functions: HashMap::new()
        };
    }
    /// Returns the module's existing declaration of `funName`, or adds a new
    /// one with `funType`. NOTE(review): `into_raw` leaks the name buffer.
    unsafe fn getOrInsertFunction(&mut self, funName: String, funType: LLVMTypeRef) -> LLVMValueRef {
        let nameRawPtr = CString::new(funName.clone()).unwrap().into_raw();
        let existingFun = llvm::core::LLVMGetNamedFunction(self.moduleRef, nameRawPtr);
        if existingFun != ptr::null_mut() {
            println!("Function already existed");
            return existingFun;
        }
        let fun = llvm::core::LLVMAddFunction(self.moduleRef, nameRawPtr, funType);
        return fun;
    }
    /// Builds the type (double x argc) -> double; last arg 0 = not variadic.
    unsafe fn getDoubleFunType(&mut self, argc: usize) -> LLVMTypeRef {
        let ty = llvm::core::LLVMDoubleTypeInContext(self.contextRef);
        let mut doubles: Vec<LLVMTypeRef> = (0..argc).map(|_| ty).collect();
        return llvm::core::LLVMFunctionType(ty, doubles.as_mut_ptr(), argc as c_uint, 0);
    }
    /// Advances the one-token lookahead. Blocks on the lexer channel and
    /// panics (unwrap) if the lexer thread has hung up.
    fn getNextToken(&mut self) {
        self.currentToken = self.tokenReceiver.recv().unwrap();
    }
    /// numberexpr ::= number
    fn parseNumberExpr(&mut self) -> ParseResult<Box<ExprAst>> {
        let val = match self.currentToken {
            Token::Number(val) => val,
            _ => return Err("token not a number")
        };
        let expr = Box::new(NumberExprAst{val: val});
        self.getNextToken();
        return Ok(expr);
    }
    /// parenexpr ::= '(' expression ')'
    fn parseParenExpr(&mut self) -> ParseResult<Box<ExprAst>> {
        self.getNextToken();
        let expr = match self.parseExpression() {
            Ok(expr) => expr,
            err => return err
        };
        match self.currentToken {
            Token::Char(')') => {},
            _ => return Err("expected ')'")
        }
        self.getNextToken();
        return Ok(expr);
    }
    /// identifierexpr ::= identifier | identifier '(' expression* ')'
    /// A bare identifier is a variable reference; with parens it is a call.
    fn parseIdentifierExpr(&mut self) -> ParseResult<Box<ExprAst>> {
        let idName = match self.currentToken {
            Token::Identifier(ref name) => name.clone(),
            _ => return Err("token not an identifier")
        };
        self.getNextToken();
        match self.currentToken {
            Token::Char('(') => {},
            _ => return Ok(Box::new(VariableExprAst{name: idName}))
        }
        self.getNextToken();
        let mut args: Vec<Box<ExprAst>> = Vec::new();
        if self.currentToken != Token::Char(')') {
            loop {
                let arg = self.parseExpression();
                match arg {
                    Ok(arg) => args.push(arg),
                    err => return err
                }
                if self.currentToken == Token::Char(')') {
                    break;
                }
                if self.currentToken != Token::Char(',') {
                    return Err("Expected ')' or ',' in argument list");
                }
                self.getNextToken();
            }
        }
        self.getNextToken();
        return Ok(Box::new(CallExprAst {callee: idName, args: args}));
    }
    /// primary ::= identifierexpr | numberexpr | parenexpr
    fn parsePrimary(&mut self) -> ParseResult<Box<ExprAst>> {
        match self.currentToken {
            Token::Identifier(_) => return self.parseIdentifierExpr(),
            Token::Number(_) => return self.parseNumberExpr(),
            Token::Char('(') => return self.parseParenExpr(),
            _ => return Err("unknown token when expecting an expression")
        }
    }
    /// expression ::= primary binoprhs
    fn parseExpression(&mut self) -> ParseResult<Box<ExprAst>> {
        let lhs: Box<ExprAst> = match self.parsePrimary() {
            Ok(lhs) => lhs,
            err => return err
        };
        return self.parseBinOpRhs(0, lhs);
    }
    /// Operator-precedence climbing: folds `op primary` pairs onto `lhs`
    /// while the next operator binds at least as tightly as `exprPrec`.
    fn parseBinOpRhs(&mut self, exprPrec: i32, startLhs: Box<ExprAst>) -> ParseResult<Box<ExprAst>> {
        let mut lhs = startLhs;
        loop {
            let tokenPrec = self.getTokenPrecedence();
            // Non-operators report precedence -1, so this also ends the expression.
            if tokenPrec < exprPrec {
                return Ok(lhs);
            }
            let binOp = self.currentToken.clone();
            self.getNextToken();
            let mut rhs = match self.parsePrimary() {
                Ok(rhs) => rhs,
                err => return err
            };
            // If the next operator binds tighter, it owns `rhs` — recurse.
            let nextPrec = self.getTokenPrecedence();
            if tokenPrec < nextPrec {
                rhs = match self.parseBinOpRhs(tokenPrec+1, rhs) {
                    Ok(rhs) => rhs,
                    err => return err
                };
            }
            lhs = Box::new(BinaryExprAst {op: binOp, lhs: lhs, rhs: rhs});
        }
    }
    /// prototype ::= identifier '(' identifier* ')'
    fn parsePrototype(&mut self) -> ParseResult<Box<PrototypeAst>> { // possibly need sep. of Prototype and Expr
        let fnName: String = match self.currentToken {
            Token::Identifier(ref name) => name.clone(),
            _ => return Err("Expected function name in prototype")
        };
        self.getNextToken();
        if self.currentToken != Token::Char('(') {
            return Err("Expected '(' in prototype");
        }
        let mut argNames: Vec<String> = Vec::new();
        loop {
            self.getNextToken();
            match self.currentToken {
                Token::Identifier(ref name) => argNames.push(name.clone()),
                _ => break
            }
        }
        if self.currentToken != Token::Char(')') {
            return Err("Expected ')' in prototype");
        }
        self.getNextToken();
        return Ok(Box::new(PrototypeAst {name: fnName, argNames: argNames}));
    }
    /// definition ::= 'def' prototype expression
    fn parseDefinition(&mut self) -> ParseResult<Box<FunctionAst>> {
        self.getNextToken();
        let proto = match self.parsePrototype() {
            Ok(proto) => proto,
            Err(err) => return Err(err)
        };
        let expr = match self.parseExpression() {
            Ok(expr) => expr,
            Err(err) => return Err(err)
        };
        return Ok(Box::new(FunctionAst{proto: proto, body: expr}));
    }
    /// external ::= 'extern' prototype
    fn parseExtern(&mut self) -> ParseResult<Box<PrototypeAst>> {
        self.getNextToken(); // consume the "extern" keyword
        return self.parsePrototype();
    }
    /// Wraps a bare top-level expression in an anonymous zero-arg function
    /// so it can be JIT-compiled and executed.
    fn parseTopLevelExpr(&mut self) -> ParseResult<Box<FunctionAst>> {
        let expr = match self.parseExpression() {
            Ok(expr) => expr,
            Err(err) => return Err(err)
        };
        let proto = Box::new(PrototypeAst {name: "".to_string(), argNames: Vec::new()});
        return Ok(Box::new(FunctionAst{proto: proto, body: expr}));
    }
    /// Binary-operator precedence; -1 marks "not an operator".
    /// NOTE(review): '/' has a precedence here but no codegen arm in
    /// BinaryExprAst::codegen, so division panics during codegen.
    fn getTokenPrecedence(&mut self) -> i32 {
        match self.currentToken {
            Token::Char(t) => match t {
                '<' => return 10,
                '+' => return 20,
                '-' => return 20,
                '*' => return 40,
                '/' => return 40,
                _ => return -1
            },
            _ => return -1
        }
    }
    /// REPL driver: dispatches on the current token forever.
    /// NOTE(review): flush() results are ignored, and getNextToken panics
    /// once stdin closes — that panic is the only way this loop ends.
    fn run(&mut self) {
        print!("ready> ");
        io::stdout().flush();
        self.getNextToken();
        loop {
            match self.currentToken {
                Token::Def => self.handleDefinition(),
                Token::Extern => self.handleExtern(),
                Token::Char(';') => {
                    self.getNextToken();
                    continue;
                },
                _ => self.handleTopLevelExpression()
            }
            print!("ready> ");
            io::stdout().flush();
        }
    }
    /// Parses a 'def', generates its IR, and dumps it to stderr.
    /// On error: report and skip one token for recovery.
    fn handleDefinition(&mut self) {
        let def = self.parseDefinition();
        match def {
            Ok(def) => {
                println!("Parsed a function definition");
                unsafe {
                    let fun = def.codegen(self);
                    llvm::core::LLVMDumpValue(fun);
                }
            }
            Err(why) => {
                println!("Error: {}", why);
                self.getNextToken();
            }
        }
    }
    /// Parses an 'extern' prototype and dumps its declaration.
    fn handleExtern(&mut self) {
        let ext = self.parseExtern();
        match ext {
            Ok(ext) => {
                println!("Parsed an extern");
                unsafe {
                    let extLL = ext.codegen(self);
                    llvm::core::LLVMDumpValue(extLL);
                }
            },
            Err(why) => {
                println!("Error parsing extern: {}", why);
                self.getNextToken();
            }
        }
    }
    /// Parses a bare expression, JITs the anonymous wrapper function, runs
    /// it through the execution engine, and prints the double it returns.
    fn handleTopLevelExpression(&mut self) {
        let tle = self.parseTopLevelExpr();
        match tle {
            Ok(tle) => {
                unsafe {
                    let tleFun = tle.codegen(self);
                    // we have a 0 arg function, call it using the executionEngineRef
                    let mut argsV: Vec<LLVMGenericValueRef> = Vec::new();
                    let retValue = LLVMRunFunction(self.executionEngineRef, tleFun, argsV.len() as c_uint, argsV.as_mut_ptr());
                    let doubleTy = llvm::core::LLVMDoubleTypeInContext(self.contextRef);
                    let fl = LLVMGenericValueToFloat(doubleTy, retValue);
                    println!("Returned {}", fl);
                }
            },
            Err(why) => {
                println!("Error parsing tle: {}", why);
                self.getNextToken();
            }
        }
    }
}
/// Pumps bytes from stdin into `charSender`, one `char` per byte.
/// Returns on EOF, on a read error, or when the receiver hangs up.
/// Bytes are cast directly to `char`, so only single-byte input is
/// decoded correctly (no UTF-8 support, as before).
fn readChars(charSender: Sender<char>) {
    let stdin = io::stdin();
    let stdin_lock = stdin.lock();
    let mut reader = BufReader::new(stdin_lock);
    let mut buf = [0];
    loop {
        match reader.read(&mut buf) {
            // BUG FIX: Read::read returns Ok(0) at EOF. The old Ok(_) arm
            // matched it too, so after EOF this spun forever flooding the
            // channel with NUL characters instead of terminating.
            Ok(0) => return,
            Ok(_) => {
                // A send error means the lexer side shut down; exit quietly
                // instead of panicking on unwrap.
                if charSender.send(buf[0] as char).is_err() {
                    return;
                }
            }
            Err(_) => return
        }
    }
}
/// Lexer: consumes characters from `chars` and emits Tokens on
/// `tokenSender`. Keeps a one-character lookahead in `lastChr`.
/// NOTE(review): most send() results are ignored; and when the char
/// channel disconnects while lastChr is whitespace, the inner `break`
/// only exits the skip-loop, so this falls through and keeps emitting
/// Token::Char(' ') in a busy loop — confirm shutdown behavior.
fn readTokens(chars: Receiver<char>, tokenSender: Sender<Token>) {
    let mut lastChr = ' ';
    loop {
        // Skip whitespace between tokens.
        while lastChr == ' ' || lastChr == '\r' || lastChr == '\n' || lastChr == '\t' {
            lastChr = match chars.recv() {
                Ok(chr) => chr,
                Err(_) => break
            };
        }
        if lastChr.is_alphabetic() { // identifier [a-zA-Z][a-zA-Z0-9]*
            let mut identifier = String::new();
            identifier.push(lastChr);
            loop {
                match chars.recv() {
                    Ok(chr) => {
                        // NOTE(review): only is_alphabetic — digits end an
                        // identifier despite the [a-zA-Z0-9]* comment above.
                        if chr.is_alphabetic() {
                            identifier.push(chr);
                        } else {
                            // First non-letter becomes the new lookahead.
                            lastChr = chr;
                            break;
                        }
                    },
                    Err(_) => {
                        tokenSender.send(Token::EndOfFile);
                        return;
                    }
                }
            }
            // Keywords first, otherwise a plain identifier.
            if identifier == "def" {
                tokenSender.send(Token::Def);
            } else if identifier == "extern" {
                tokenSender.send(Token::Extern);
            } else {
                tokenSender.send(Token::Identifier(identifier));
            }
            continue;
        }
        if char::is_digit(lastChr, 10) || lastChr == '.' { // number: [0-9.]+
            let mut numStr = String::new();
            numStr.push(lastChr);
            loop {
                match chars.recv() {
                    Ok(chr) => {
                        if char::is_digit(chr, 10) || chr == '.' {
                            numStr.push(chr);
                        } else {
                            lastChr = chr;
                            break;
                        }
                    },
                    Err(_) => {
                        tokenSender.send(Token::EndOfFile);
                        return;
                    }
                }
            }
            // Malformed numbers (e.g. "1.2.3") are reported and dropped.
            tokenSender.send(Token::Number(match f64::from_str(&numStr) {
                Ok(val) => val,
                Err(_) => {
                    println!("Malformed number");
                    continue;
                }
            }));
            continue;
        }
        // '#' starts a comment that runs to end of line.
        if lastChr == '#' {
            loop {
                match chars.recv() {
                    Ok(chr) => {
                        if chr == '\r' || chr == '\n' {
                            lastChr = ' ';
                            break;
                        }
                    },
                    Err(_) => {
                        tokenSender.send(Token::EndOfFile);
                        return;
                    }
                }
            }
            continue;
        }
        // Anything else (operators, punctuation) passes through verbatim.
        tokenSender.send(Token::Char(lastChr));
        // consume lastChr
        lastChr = ' ';
    }
}
/// Wires up the pipeline: stdin bytes -> chars -> tokens -> parser/JIT.
/// Each stage runs on its own thread, connected by mpsc channels.
fn main() {
    let (char_tx, char_rx) = mpsc::channel();
    let (token_tx, token_rx) = mpsc::channel();
    // Byte reader feeds the lexer; the lexer feeds the parser.
    thread::spawn(move || readChars(char_tx));
    thread::spawn(move || readTokens(char_rx, token_tx));
    // The parser drives the REPL on the main thread.
    Parser::new(token_rx).run();
}
Clean up code slightly
extern crate core;
extern crate libc;
extern crate llvm_sys as llvm;
use core::str::FromStr;
use std::collections::HashMap;
// use lib::llvm::llvm;
use llvm::prelude::{LLVMBuilderRef, LLVMContextRef, LLVMModuleRef, LLVMPassManagerRef, LLVMTypeRef, LLVMValueRef};
use llvm::execution_engine::{LLVMExecutionEngineRef, LLVMGenericValueToFloat, LLVMRunFunction, LLVMGenericValueRef};
use llvm::analysis::{LLVMVerifyFunction, LLVMVerifierFailureAction};
use llvm::LLVMRealPredicate;
use std::char;
use std::ffi::CString;
use std::io::{self, Read, Write, BufReader};
use std::ptr;
use std::str;
use std::sync::mpsc::{self, Sender, Receiver};
use std::thread;
use std::vec;
use libc::{c_uint};
/// Lexer tokens for the Kaleidoscope language.
///
/// `PartialEq` is now derived — the previous hand-written impl was
/// token-for-token identical to what the derive generates (structural
/// comparison, `Number` compared by `f64` equality).
#[derive(Clone, PartialEq)]
enum Token {
    /// The `def` keyword (function definition).
    Def,
    /// The `extern` keyword (external prototype).
    Extern,
    /// An identifier: [a-zA-Z][a-zA-Z]*.
    Identifier(String),
    /// A numeric literal.
    Number(f64),
    /// Any other single character (operators, punctuation, ';').
    Char(char),
    /// The input stream ended.
    EndOfFile
}
// `Eq` cannot be derived because `f64` is only `PartialEq` (NaN != NaN).
// Asserted manually, as before: sound as long as `Number(NaN)` tokens are
// never relied on for reflexive equality.
impl Eq for Token {
}
/// An AST node that can emit LLVM IR for itself.
trait ExprAst {
    /// Emits IR through the parser's builder and returns the resulting
    /// LLVM value. Unsafe: calls straight into the LLVM C API.
    /// (Anonymous trait parameter is pre-2018 style; impls name it `parser`.)
    unsafe fn codegen(&self, &mut Parser) -> LLVMValueRef;
}
/// Numeric literal node.
struct NumberExprAst {
    val: f64
}
/// Reference to a named value (a function parameter, per PrototypeAst).
struct VariableExprAst {
    name: String
}
/// Binary operation; `op` is the operator token ('+', '-', '*', '<').
struct BinaryExprAst {
    op: Token,
    lhs: Box<ExprAst>,
    rhs: Box<ExprAst>,
}
/// Function call with argument expressions.
struct CallExprAst {
    callee: String,
    args: Vec<Box<ExprAst>>
}
/// Function signature: name plus parameter names (all doubles).
struct PrototypeAst {
    name: String,
    argNames: Vec<String>
}
/// A complete function definition: prototype plus a single body expression.
struct FunctionAst {
    proto: Box<PrototypeAst>,
    body: Box<ExprAst>
}
impl ExprAst for NumberExprAst {
    /// Emits the literal as an LLVM `double` constant.
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        let ty = llvm::core::LLVMDoubleTypeInContext(parser.contextRef);
        return llvm::core::LLVMConstReal(ty, self.val);
    }
}
impl ExprAst for VariableExprAst {
    /// Resolves the name in the parser's symbol table (populated from
    /// function parameters in PrototypeAst::codegen); panics on unknowns.
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        return match parser.namedValues.get(&self.name) {
            Some(v) => *v,
            None => panic!("Unknown variable name {}", self.name)
        };
    }
}
impl ExprAst for BinaryExprAst {
    /// Emits both operands, then the instruction for `op`.
    /// NOTE(review): each `CString::new(..).into_raw()` leaks its buffer;
    /// LLVM copies the name, so a bound CString + `.as_ptr()` would do.
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        let lhsValue = self.lhs.codegen(parser);
        let rhsValue = self.rhs.codegen(parser);
        match self.op {
            Token::Char('+') =>
                return llvm::core::LLVMBuildFAdd(parser.builderRef, lhsValue, rhsValue, CString::new("addtmp").unwrap().into_raw()),
            Token::Char('-') =>
                return llvm::core::LLVMBuildFSub(parser.builderRef, lhsValue, rhsValue, CString::new("subtmp").unwrap().into_raw()),
            Token::Char('*') =>
                return llvm::core::LLVMBuildFMul(parser.builderRef, lhsValue, rhsValue, CString::new("multmp").unwrap().into_raw()),
            Token::Char('<') => {
                // Compare (unordered less-than), then widen the i1 result
                // back to double 0.0/1.0, since everything is a double.
                let cmpValue = llvm::core::LLVMBuildFCmp(parser.builderRef, LLVMRealPredicate::LLVMRealULT, lhsValue, rhsValue, CString::new("cmptmp").unwrap().into_raw());
                let ty = llvm::core::LLVMDoubleTypeInContext(parser.contextRef);
                return llvm::core::LLVMBuildUIToFP(parser.builderRef, cmpValue, ty, CString::new("booltmp").unwrap().into_raw());
            }
            _ => {
                // NOTE(review): '/' gets a precedence in getTokenPrecedence
                // but has no arm here, so division panics at codegen time.
                panic!("llvm code gen failed, invalid binary operation");
            }
        }
    }
}
impl ExprAst for CallExprAst {
    /// Emits a call to `callee`, declaring it on the fly (as a
    /// (double, ...) -> double function) if it does not exist yet.
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        let funType : LLVMTypeRef = parser.getDoubleFunType(self.args.len());
        let calleeF = parser.getOrInsertFunction(self.callee.clone(), funType);
        // TODO check arg size
        let mut argsV : Vec<LLVMValueRef> = Vec::new();
        for arg in self.args.iter() {
            argsV.push(arg.codegen(parser));
        }
        return llvm::core::LLVMBuildCall(parser.builderRef, calleeF, argsV.as_mut_ptr(), argsV.len() as c_uint, CString::new("calltmp").unwrap().into_raw());
    }
}
impl PrototypeAst {
    /// Declares the function (or reuses an existing declaration), names its
    /// parameters, and registers them in `parser.namedValues` for the body.
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        let funType = parser.getDoubleFunType(self.argNames.len());
        let fun = parser.getOrInsertFunction(self.name.clone(), funType);
        // A function that already has basic blocks was already defined.
        if llvm::core::LLVMCountBasicBlocks(fun) != 0 {
            panic!("Redefinition of function");
        }
        // NOTE(review): `nArgs != 0 &&` lets an existing zero-param
        // declaration pass even when this prototype has parameters.
        let nArgs = llvm::core::LLVMCountParams(fun) as usize;
        if nArgs != 0 && nArgs != self.argNames.len() {
            panic!("Redefinition of function with different argument count");
        }
        for (i, argName) in self.argNames.iter().enumerate() {
            let llarg = llvm::core::LLVMGetParam(fun, i as c_uint);
            llvm::core::LLVMSetValueName(llarg, CString::new(argName.to_string()).unwrap().into_raw());
            parser.namedValues.insert(argName.clone(), llarg);
        }
        return fun;
    }
}
impl FunctionAst {
    /// Emits a full function: prototype, "entry" block, body expression,
    /// and a `ret` of the body's value, then verifies it (failure is only
    /// logged; the bad function is still returned).
    unsafe fn codegen(&self, parser: &mut Parser) -> LLVMValueRef {
        // Fresh symbol table per function; only its params live in it.
        parser.namedValues.clear();
        let fun = self.proto.codegen(parser);
        let basicBlock = llvm::core::LLVMAppendBasicBlockInContext(parser.contextRef, fun, CString::new("entry").unwrap().into_raw());
        llvm::core::LLVMPositionBuilderAtEnd(parser.builderRef, basicBlock);
        let body = self.body.codegen(parser);
        llvm::core::LLVMBuildRet(parser.builderRef, body);
        if LLVMVerifyFunction(fun, LLVMVerifierFailureAction::LLVMPrintMessageAction) != 0 {
            println!("Function verify failed");
        }
        // llvm::core::LLVMRunFunctionPassManager(parser.functionPassManagerRef, fun);
        return fun;
    }
}
/// Pulls tokens from the lexer thread and drives parsing + LLVM codegen.
struct Parser {
    // Channel from the lexer thread (readTokens).
    tokenReceiver: Receiver<Token>,
    // One-token lookahead; primed with a space before the first read.
    currentToken: Token,
    moduleRef: LLVMModuleRef,
    builderRef: LLVMBuilderRef,
    contextRef: LLVMContextRef,
    // MCJIT engine used to run top-level expressions.
    executionEngineRef: LLVMExecutionEngineRef,
    // NOTE(review): only referenced by the commented-out pass-manager call.
    functionPassManagerRef: LLVMPassManagerRef,
    // Symbol table for the function currently being generated.
    namedValues: HashMap<String, LLVMValueRef>
}
/// Parse results carry static error strings on failure.
type ParseResult<T> = Result<T, &'static str>;
impl Parser {
fn new(tokenReceiver: Receiver<Token>) -> Parser {
unsafe {
if llvm::target::LLVM_InitializeNativeTarget() != 0 {
panic!("initializing native target");
}
if llvm::target::LLVM_InitializeNativeAsmPrinter() != 0 {
panic!("initializing native target");
}
if llvm::target::LLVM_InitializeNativeAsmParser() != 0 {
panic!("initializing native target");
}
}
let llcx = unsafe {
llvm::core::LLVMContextCreate()
};
let llmod = unsafe {
llvm::core::LLVMModuleCreateWithNameInContext(CString::new("kaleidoscope").unwrap().into_raw(), llcx)
};
let llfpm = unsafe {
llvm::core::LLVMCreateFunctionPassManagerForModule(llmod)
};
unsafe {
llvm::transforms::scalar::LLVMAddBasicAliasAnalysisPass(llfpm);
llvm::transforms::scalar::LLVMAddInstructionCombiningPass(llfpm);
llvm::transforms::scalar::LLVMAddReassociatePass(llfpm);
llvm::transforms::scalar::LLVMAddGVNPass(llfpm);
llvm::transforms::scalar::LLVMAddCFGSimplificationPass(llfpm);
llvm::core::LLVMInitializeFunctionPassManager(llfpm);
}
let llbuilder = unsafe {
llvm::core::LLVMCreateBuilderInContext(llcx)
};
let mut llee = unsafe {
// initialize vars to NULL
llvm::execution_engine::LLVMLinkInMCJIT();
let mut llee: LLVMExecutionEngineRef = 0 as LLVMExecutionEngineRef;
let mut err: *mut i8 = 0 as *mut i8;
if llvm::execution_engine::LLVMCreateExecutionEngineForModule(&mut llee, llmod, &mut err) != 0 {
panic!("Error in LLVMCreateExecutionEngineForModule: {}",
CString::from_raw(err).into_string().unwrap());
}
llee
};
return Parser {
tokenReceiver: tokenReceiver,
currentToken: Token::Char(' '),
moduleRef: llmod,
builderRef: llbuilder,
contextRef: llcx,
executionEngineRef: llee,
functionPassManagerRef: llfpm,
namedValues: HashMap::new()
};
}
unsafe fn getOrInsertFunction(&mut self, funName: String, funType: LLVMTypeRef) -> LLVMValueRef {
let nameRawPtr = CString::new(funName.clone()).unwrap().into_raw();
let existingFun = llvm::core::LLVMGetNamedFunction(self.moduleRef, nameRawPtr);
if existingFun != ptr::null_mut() {
println!("Function already existed");
return existingFun;
}
let fun = llvm::core::LLVMAddFunction(self.moduleRef, nameRawPtr, funType);
return fun;
}
unsafe fn getDoubleFunType(&mut self, argc: usize) -> LLVMTypeRef {
let ty = llvm::core::LLVMDoubleTypeInContext(self.contextRef);
let mut doubles: Vec<LLVMTypeRef> = (0..argc).map(|_| ty).collect();
return llvm::core::LLVMFunctionType(ty, doubles.as_mut_ptr(), argc as c_uint, 0);
}
fn getNextToken(&mut self) {
self.currentToken = self.tokenReceiver.recv().unwrap();
}
fn parseNumberExpr(&mut self) -> ParseResult<Box<ExprAst>> {
let val = match self.currentToken {
Token::Number(val) => val,
_ => return Err("token not a number")
};
let expr = Box::new(NumberExprAst{val: val});
self.getNextToken();
return Ok(expr);
}
fn parseParenExpr(&mut self) -> ParseResult<Box<ExprAst>> {
self.getNextToken();
let expr = match self.parseExpression() {
Ok(expr) => expr,
err => return err
};
match self.currentToken {
Token::Char(')') => {},
_ => return Err("expected ')'")
}
self.getNextToken();
return Ok(expr);
}
fn parseIdentifierExpr(&mut self) -> ParseResult<Box<ExprAst>> {
let idName = match self.currentToken {
Token::Identifier(ref name) => name.clone(),
_ => return Err("token not an identifier")
};
self.getNextToken();
match self.currentToken {
Token::Char('(') => {},
_ => return Ok(Box::new(VariableExprAst{name: idName}))
}
self.getNextToken();
let mut args: Vec<Box<ExprAst>> = Vec::new();
if self.currentToken != Token::Char(')') {
loop {
let arg = self.parseExpression();
match arg {
Ok(arg) => args.push(arg),
err => return err
}
if self.currentToken == Token::Char(')') {
break;
}
if self.currentToken != Token::Char(',') {
return Err("Expected ')' or ',' in argument list");
}
self.getNextToken();
}
}
self.getNextToken();
return Ok(Box::new(CallExprAst {callee: idName, args: args}));
}
fn parsePrimary(&mut self) -> ParseResult<Box<ExprAst>> {
match self.currentToken {
Token::Identifier(_) => return self.parseIdentifierExpr(),
Token::Number(_) => return self.parseNumberExpr(),
Token::Char('(') => return self.parseParenExpr(),
_ => return Err("unknown token when expecting an expression")
}
}
fn parseExpression(&mut self) -> ParseResult<Box<ExprAst>> {
let lhs: Box<ExprAst> = match self.parsePrimary() {
Ok(lhs) => lhs,
err => return err
};
return self.parseBinOpRhs(0, lhs);
}
fn parseBinOpRhs(&mut self, exprPrec: i32, startLhs: Box<ExprAst>) -> ParseResult<Box<ExprAst>> {
let mut lhs = startLhs;
loop {
let tokenPrec = self.getTokenPrecedence();
if tokenPrec < exprPrec {
return Ok(lhs);
}
let binOp = self.currentToken.clone();
self.getNextToken();
let mut rhs = match self.parsePrimary() {
Ok(rhs) => rhs,
err => return err
};
let nextPrec = self.getTokenPrecedence();
if tokenPrec < nextPrec {
rhs = match self.parseBinOpRhs(tokenPrec+1, rhs) {
Ok(rhs) => rhs,
err => return err
};
}
lhs = Box::new(BinaryExprAst {op: binOp, lhs: lhs, rhs: rhs});
}
}
fn parsePrototype(&mut self) -> ParseResult<Box<PrototypeAst>> { // possibly need sep. of Prototype and Expr
let fnName: String = match self.currentToken {
Token::Identifier(ref name) => name.clone(),
_ => return Err("Expected function name in prototype")
};
self.getNextToken();
if self.currentToken != Token::Char('(') {
return Err("Expected '(' in prototype");
}
let mut argNames: Vec<String> = Vec::new();
loop {
self.getNextToken();
match self.currentToken {
Token::Identifier(ref name) => argNames.push(name.clone()),
_ => break
}
}
if self.currentToken != Token::Char(')') {
return Err("Expected ')' in prototype");
}
self.getNextToken();
return Ok(Box::new(PrototypeAst {name: fnName, argNames: argNames}));
}
fn parseDefinition(&mut self) -> ParseResult<Box<FunctionAst>> {
self.getNextToken();
let proto = match self.parsePrototype() {
Ok(proto) => proto,
Err(err) => return Err(err)
};
let expr = match self.parseExpression() {
Ok(expr) => expr,
Err(err) => return Err(err)
};
return Ok(Box::new(FunctionAst{proto: proto, body: expr}));
}
fn parseExtern(&mut self) -> ParseResult<Box<PrototypeAst>> {
self.getNextToken(); // consume "expr"
return self.parsePrototype();
}
fn parseTopLevelExpr(&mut self) -> ParseResult<Box<FunctionAst>> {
let expr = match self.parseExpression() {
Ok(expr) => expr,
Err(err) => return Err(err)
};
let proto = Box::new(PrototypeAst {name: "".to_string(), argNames: Vec::new()});
return Ok(Box::new(FunctionAst{proto: proto, body: expr}));
}
// Binary-operator precedence table for the current token.
// Returns -1 for anything that is not a known binary operator.
fn getTokenPrecedence(&mut self) -> i32 {
    match self.currentToken {
        Token::Char('<') => 10,
        Token::Char('+') | Token::Char('-') => 20,
        Token::Char('*') | Token::Char('/') => 40,
        _ => -1,
    }
}
// REPL driver: print a prompt, then dispatch on the current token until
// the lexer signals end of input.
fn run(&mut self) {
    print!("ready> ");
    // print! does not flush; force the prompt out. The Result was
    // previously ignored (unused_must_use).
    io::stdout().flush().unwrap();
    self.getNextToken();
    loop {
        match self.currentToken {
            Token::Def => self.handleDefinition(),
            Token::Extern => self.handleExtern(),
            Token::Char(';') => {
                // Ignore top-level semicolons.
                self.getNextToken();
                continue;
            },
            // Stop on EOF; previously this fell through to
            // handleTopLevelExpression and looped forever after the
            // input channel closed.
            Token::EndOfFile => return,
            _ => self.handleTopLevelExpression()
        }
        print!("ready> ");
        io::stdout().flush().unwrap();
    }
}
// Parses a `def`, emits LLVM IR for it and dumps the IR.
// On a parse error: report it and skip one token for error recovery.
fn handleDefinition(&mut self) {
    let def = self.parseDefinition();
    match def {
        Ok(def) => {
            println!("Parsed a function definition");
            // Raw LLVM C API; assumes codegen returns a valid
            // LLVMValueRef — TODO confirm in the codegen impl.
            unsafe {
                let fun = def.codegen(self);
                llvm::core::LLVMDumpValue(fun);
            }
        }
        Err(why) => {
            println!("Error: {}", why);
            self.getNextToken(); // skip offending token
        }
    }
}
// Parses an `extern` prototype, emits its LLVM declaration and dumps it.
// On a parse error: report it and skip one token for error recovery.
fn handleExtern(&mut self) {
    let ext = self.parseExtern();
    match ext {
        Ok(ext) => {
            println!("Parsed an extern");
            // Raw LLVM C API; assumes codegen returns a valid
            // LLVMValueRef — TODO confirm in the codegen impl.
            unsafe {
                let extLL = ext.codegen(self);
                llvm::core::LLVMDumpValue(extLL);
            }
        },
        Err(why) => {
            println!("Error parsing extern: {}", why);
            self.getNextToken(); // skip offending token
        }
    }
}
// Parses a bare expression wrapped as an anonymous nullary function,
// JIT-executes it through the execution engine and prints the result.
fn handleTopLevelExpression(&mut self) {
    let tle = self.parseTopLevelExpr();
    match tle {
        Ok(tle) => {
            unsafe {
                let tleFun = tle.codegen(self);
                // we have a 0 arg function, call it using the executionEngineRef
                let mut argsV: Vec<LLVMGenericValueRef> = Vec::new();
                let retValue = LLVMRunFunction(self.executionEngineRef, tleFun, argsV.len() as c_uint, argsV.as_mut_ptr());
                // Interpret the generic return value as a double —
                // assumes every expression evaluates to f64 (TODO confirm
                // against the codegen type rules).
                let doubleTy = llvm::core::LLVMDoubleTypeInContext(self.contextRef);
                let fl = LLVMGenericValueToFloat(doubleTy, retValue);
                println!("Returned {}", fl);
            }
        },
        Err(why) => {
            println!("Error parsing tle: {}", why);
            self.getNextToken(); // skip offending token
        }
    }
}
}
// Reads raw bytes from stdin and forwards each one as a `char` over
// `charSender`. Returns on EOF or on a read error.
fn readChars(charSender: Sender<char>) {
    let stdin = io::stdin();
    // StdinLock already implements BufRead; the original wrapped it in a
    // redundant BufReader (double buffering).
    let mut reader = stdin.lock();
    let mut buf = [0u8];
    loop {
        match reader.read(&mut buf) {
            // Ok(0) means EOF. The original matched it under Ok(_) and
            // kept re-sending the stale byte in `buf` forever.
            Ok(0) | Err(_) => return,
            Ok(_) => charSender.send(buf[0] as char).unwrap(), // No utf8 support for now
        }
    }
}
// Lexer: consumes chars from `chars` and emits `Token`s on
// `tokenSender`. Emits `Token::EndOfFile` and returns once the char
// channel closes.
fn readTokens(chars: Receiver<char>, tokenSender: Sender<Token>) {
    let mut lastChr = ' ';
    loop {
        // Skip whitespace. If the channel closes here, signal EOF and
        // stop; the original `break` only left the inner while-loop
        // with lastChr == ' ', so the bottom of the outer loop emitted
        // Token::Char(' ') in an infinite busy loop.
        while lastChr == ' ' || lastChr == '\r' || lastChr == '\n' || lastChr == '\t' {
            lastChr = match chars.recv() {
                Ok(chr) => chr,
                Err(_) => {
                    tokenSender.send(Token::EndOfFile);
                    return;
                }
            };
        }
        if lastChr.is_alphabetic() { // identifier [a-zA-Z][a-zA-Z0-9]*
            let mut identifier = String::new();
            identifier.push(lastChr);
            loop {
                match chars.recv() {
                    Ok(chr) => {
                        // Also accept digits after the first char so the
                        // lexeme matches the documented regex; the
                        // original split "x1" into `x` and `1`.
                        if chr.is_alphabetic() || chr.is_digit(10) {
                            identifier.push(chr);
                        } else {
                            lastChr = chr;
                            break;
                        }
                    },
                    Err(_) => {
                        tokenSender.send(Token::EndOfFile);
                        return;
                    }
                }
            }
            // Keywords win over plain identifiers.
            if identifier == "def" {
                tokenSender.send(Token::Def);
            } else if identifier == "extern" {
                tokenSender.send(Token::Extern);
            } else {
                tokenSender.send(Token::Identifier(identifier));
            }
            continue;
        }
        if char::is_digit(lastChr, 10) || lastChr == '.' { // number: [0-9.]+
            let mut numStr = String::new();
            numStr.push(lastChr);
            loop {
                match chars.recv() {
                    Ok(chr) => {
                        if char::is_digit(chr, 10) || chr == '.' {
                            numStr.push(chr);
                        } else {
                            lastChr = chr;
                            break;
                        }
                    },
                    Err(_) => {
                        tokenSender.send(Token::EndOfFile);
                        return;
                    }
                }
            }
            tokenSender.send(Token::Number(match f64::from_str(&numStr) {
                Ok(val) => val,
                Err(_) => {
                    // e.g. "1.2.3": report and restart tokenizing.
                    println!("Malformed number");
                    continue;
                }
            }));
            continue;
        }
        if lastChr == '#' { // comment: skip until end of line
            loop {
                match chars.recv() {
                    Ok(chr) => {
                        if chr == '\r' || chr == '\n' {
                            lastChr = ' ';
                            break;
                        }
                    },
                    Err(_) => {
                        tokenSender.send(Token::EndOfFile);
                        return;
                    }
                }
            }
            continue;
        }
        // Anything else (operators, parens, ...) is a single-char token.
        tokenSender.send(Token::Char(lastChr));
        // consume lastChr
        lastChr = ' ';
    }
}
// Pipeline: stdin bytes -> chars -> tokens -> parser/REPL, each lexing
// stage running on its own thread connected by channels.
fn main() {
    let (charSender, charReceiver) = mpsc::channel();
    let (tokenSender, tokenReceiver) = mpsc::channel();
    thread::spawn(move || readChars(charSender));
    thread::spawn(move || readTokens(charReceiver, tokenSender));
    let mut parser = Parser::new(tokenReceiver);
    parser.run();
}
|
use std::io::Read;
use std::cmp::Ordering;
use utils::owning_iterator::SeekingIterator;
use storage::compression::{vbyte_encode, VByteDecoder};
use storage::{ByteDecodable, ByteEncodable, DecodeResult, DecodeError};
// For each term-document pair the doc_id and the
// positions of the term inside the document is stored
#[derive(Debug, Eq)]
pub struct Posting(pub DocId, pub Positions);

/// Identifier of a document in the index.
pub type DocId = u64;
/// Term positions inside a single document.
pub type Positions = Vec<u32>;
/// All postings of one term.
pub type Listing = Vec<Posting>;

impl Posting {
    /// Creates a posting from a document id and the term positions in it.
    pub fn new(doc_id: DocId, positions: Positions) -> Self {
        Posting(doc_id, positions)
    }
    // TODO: Does it have an impact if we declare the
    // #[inline]-attribute on these kinds of functions?
    /// The document this posting refers to.
    pub fn doc_id(&self) -> &DocId {
        &self.0
    }
    /// Positions of the term inside the document.
    pub fn positions(&self) -> &Positions {
        &self.1
    }
}
/// Streaming decoder turning a vbyte-compressed byte source into
/// `Posting`s (delta-decoded doc_ids and positions).
pub struct PostingDecoder<R: Read> {
    decoder: VByteDecoder<R>,
    // Last decoded doc_id; doc_ids are stored as deltas in the stream.
    last_doc_id: u64,
    // Number of postings the stream was created for.
    len: usize
}
impl<R: Read> PostingDecoder<R> {
    /// Creates a decoder over `read` that will yield `len` postings.
    pub fn new(read: R, len: usize) -> Self {
        PostingDecoder{
            decoder: VByteDecoder::new(read),
            last_doc_id: 0,
            len: len
        }
    }

    /// Number of postings this decoder was constructed for.
    pub fn len(&self) -> usize {
        self.len
    }

    /// True if the decoder holds no postings. Added alongside `len`
    /// (clippy: `len_without_is_empty`).
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
}
impl<R: Read> Iterator for PostingDecoder<R> {
    type Item = Posting;

    // Decodes one posting: delta-encoded doc_id, position count, then
    // gap-encoded positions. Returns None when the vbyte stream ends.
    // NOTE(review): `self.len` is not consulted here, so a truncated or
    // over-long stream is not detected — confirm callers treat `len`
    // as a hint only.
    fn next(&mut self) -> Option<Self::Item> {
        let delta_doc_id = try_option!(self.decoder.next()) as u64;
        let positions_len = try_option!(self.decoder.next());
        let mut positions = Vec::with_capacity(positions_len as usize);
        let mut last_position = 0;
        for _ in 0..positions_len {
            // Positions are stored as gaps; rebuild absolute values.
            last_position += try_option!(self.decoder.next());
            positions.push(last_position as u32);
        }
        self.last_doc_id += delta_doc_id;
        Some(Posting::new(self.last_doc_id, positions))
    }
}

impl<R: Read> SeekingIterator for PostingDecoder<R> {
    type Item = Posting;

    // Advances until a posting >= `other` (compared by doc_id only) is
    // found. Linear scan: vbyte decoding cannot skip ahead.
    fn next_seek(&mut self, other: &Self::Item) -> Option<Self::Item> {
        loop {
            let v = try_option!(self.next());
            if v >= *other {
                return Some(v);
            }
        }
    }
}
// When we compare postings, we usually only care about doc_ids.
// For comparisons that consider positions have a look at
// `index::boolean_index::query_result_iterator::nary_query_iterator::positional_intersect` ...
impl Ord for Posting {
    // Total order by doc_id only; positions are ignored.
    fn cmp(&self, other: &Self) -> Ordering {
        self.doc_id().cmp(other.doc_id())
    }
}

impl PartialEq for Posting {
    // Consistent with Ord: equality is doc_id equality only.
    fn eq(&self, other: &Self) -> bool {
        self.doc_id().eq(other.doc_id())
    }
}
impl PartialOrd for Posting {
    // Delegates to the total order on doc_ids; u64 comparison never
    // fails, so this always returns `Some`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.doc_id().cmp(other.doc_id()))
    }
}
impl ByteEncodable for Listing {
    // Layout: [#postings] then per posting [doc_id][#positions][gaps...],
    // all vbyte-coded. Positions are gap-encoded; doc_ids are written
    // verbatim.
    // NOTE(review): PostingDecoder delta-decodes doc_ids, so this
    // encoding pairs with the ByteDecodable impl below rather than with
    // PostingDecoder — confirm the intended pairing.
    fn encode(&self) -> Vec<u8> {
        let mut bytes: Vec<u8> = Vec::new();
        bytes.append(&mut vbyte_encode(self.len()));
        for posting in self {
            bytes.append(&mut vbyte_encode(*posting.doc_id() as usize));
            bytes.append(&mut vbyte_encode(posting.positions().len() as usize));
            let mut last_position = 0;
            for position in &posting.1 {
                // Store the gap to the previous position.
                bytes.append(&mut vbyte_encode((*position - last_position) as usize));
                last_position = *position;
            }
        }
        bytes
    }
}
impl ByteDecodable for Vec<Posting> {
    // Inverse of `Listing::encode`: posting count, then per posting a
    // verbatim doc_id, a position count and gap-encoded positions. Any
    // premature end of input yields DecodeError::MalformedInput.
    fn decode<R: Read>(read: &mut R) -> DecodeResult<Self> {
        let mut decoder = VByteDecoder::new(read);
        if let Some(postings_len) = decoder.next() {
            let mut postings = Vec::with_capacity(postings_len);
            for _ in 0..postings_len {
                let doc_id = try!(decoder.next().ok_or(DecodeError::MalformedInput));
                let positions_len = try!(decoder.next().ok_or(DecodeError::MalformedInput));
                let mut positions = Vec::with_capacity(positions_len as usize);
                let mut last_position = 0;
                for _ in 0..positions_len {
                    // Positions are gap-encoded; rebuild absolute values.
                    last_position += try!(decoder.next().ok_or(DecodeError::MalformedInput));
                    positions.push(last_position as u32);
                }
                postings.push(Posting::new(doc_id as u64, positions));
            }
            Ok(postings)
        } else {
            Err(DecodeError::MalformedInput)
        }
    }
}
posting.rs cleanup
use std::io::Read;
use std::cmp::Ordering;
use utils::owning_iterator::SeekingIterator;
use storage::compression::VByteDecoder;
// For each term-document pair the doc_id and the
// positions of the term inside the document is stored
#[derive(Debug, Eq)]
pub struct Posting(pub DocId, pub Positions);

/// Identifier of a document in the index.
pub type DocId = u64;
/// Term positions inside a single document.
pub type Positions = Vec<u32>;
/// All postings of one term.
pub type Listing = Vec<Posting>;

impl Posting {
    /// Creates a posting from a document id and the term positions in it.
    pub fn new(doc_id: DocId, positions: Positions) -> Self {
        Posting(doc_id, positions)
    }
    // TODO: Does it have an impact if we declare the
    // #[inline]-attribute on these kinds of functions?
    /// The document this posting refers to.
    pub fn doc_id(&self) -> &DocId {
        &self.0
    }
    // TODO: Decode positions lazily
    /// Positions of the term inside the document.
    pub fn positions(&self) -> &Positions {
        &self.1
    }
}
/// Streaming decoder turning a vbyte-compressed byte source into
/// `Posting`s (delta-decoded doc_ids and positions).
pub struct PostingDecoder<R: Read> {
    decoder: VByteDecoder<R>,
    // Last decoded doc_id; doc_ids are stored as deltas in the stream.
    last_doc_id: u64,
    // Number of postings the stream was created for.
    len: usize,
}
impl<R: Read> PostingDecoder<R> {
    /// Creates a decoder over `read` that will yield `len` postings.
    pub fn new(read: R, len: usize) -> Self {
        PostingDecoder {
            decoder: VByteDecoder::new(read),
            last_doc_id: 0,
            len: len,
        }
    }

    /// Number of postings this decoder was constructed for.
    pub fn len(&self) -> usize {
        self.len
    }

    /// True if the decoder holds no postings. Added alongside `len`
    /// (clippy: `len_without_is_empty`).
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
}
impl<R: Read> Iterator for PostingDecoder<R> {
    type Item = Posting;

    // Decodes one posting: delta-encoded doc_id, position count, then
    // gap-encoded positions. Returns None when the vbyte stream ends.
    // NOTE(review): `self.len` is not consulted here, so a truncated or
    // over-long stream is not detected — confirm callers treat `len`
    // as a hint only.
    fn next(&mut self) -> Option<Self::Item> {
        let delta_doc_id = try_option!(self.decoder.next()) as u64;
        let positions_len = try_option!(self.decoder.next());
        let mut positions = Vec::with_capacity(positions_len as usize);
        let mut last_position = 0;
        for _ in 0..positions_len {
            // Positions are stored as gaps; rebuild absolute values.
            last_position += try_option!(self.decoder.next());
            positions.push(last_position as u32);
        }
        self.last_doc_id += delta_doc_id;
        Some(Posting::new(self.last_doc_id, positions))
    }
}

impl<R: Read> SeekingIterator for PostingDecoder<R> {
    type Item = Posting;

    // Advances until a posting >= `other` (compared by doc_id only) is
    // found. Linear scan: vbyte decoding cannot skip ahead.
    fn next_seek(&mut self, other: &Self::Item) -> Option<Self::Item> {
        loop {
            let v = try_option!(self.next());
            if v >= *other {
                return Some(v);
            }
        }
    }
}
// When we compare postings, we usually only care about doc_ids.
// For comparisons that consider positions have a look at
// `index::boolean_index::query_result_iterator::nary_query_iterator::positional_intersect` ...
impl Ord for Posting {
    // Total order by doc_id only; positions are ignored.
    fn cmp(&self, other: &Self) -> Ordering {
        self.doc_id().cmp(other.doc_id())
    }
}

impl PartialEq for Posting {
    // Consistent with Ord: equality is doc_id equality only.
    fn eq(&self, other: &Self) -> bool {
        self.doc_id().eq(other.doc_id())
    }
}
impl PartialOrd for Posting {
    // Delegates to the total order on doc_ids; u64 comparison never
    // fails, so this always returns `Some`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.doc_id().cmp(other.doc_id()))
    }
}
|
use rustc::lint::*;
use syntax::ptr::P;
use rustc_front::hir::*;
use reexport::*;
use rustc_front::util::{is_comparison_binop, binop_to_string};
use syntax::codemap::{Span, Spanned};
use rustc_front::intravisit::FnKind;
use rustc::middle::ty;
use rustc::middle::const_eval::ConstVal::Float;
use rustc::middle::const_eval::eval_const_expr_partial;
use rustc::middle::const_eval::EvalHint::ExprTypeChecked;
use utils::{get_item_name, match_path, snippet, span_lint, walk_ptrs_ty, is_integer_literal};
use utils::span_help_and_lint;
/// **What it does:** This lint checks for function arguments and let bindings denoted as `ref`. It is `Warn` by default.
///
/// **Why is this bad?** The `ref` declaration makes the function take an owned value, but turns the argument into a reference (which means that the value is destroyed when exiting the function). This adds not much value: either take a reference type, or take an owned value and create references in the body.
///
/// For let bindings, `let x = &foo;` is preferred over `let ref x = foo`. The type of `x` is more obvious with the former.
///
/// **Known problems:** If the argument is dereferenced within the function, removing the `ref` will lead to errors. This can be fixed by removing the dereferences, e.g. changing `*x` to `x` within the function.
///
/// **Example:** `fn foo(ref x: u8) -> bool { .. }`
declare_lint!(pub TOPLEVEL_REF_ARG, Warn,
              "An entire binding was declared as `ref`, in a function argument (`fn foo(ref x: Bar)`), \
               or a `let` statement (`let ref x = foo()`). In such cases, it is preferred to take \
               references with `&`.");

// Unit struct registered as a lint pass; Copy is deliberately not derived.
#[allow(missing_copy_implementations)]
pub struct TopLevelRefPass;

impl LintPass for TopLevelRefPass {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(TOPLEVEL_REF_ARG)
    }
}
impl LateLintPass for TopLevelRefPass {
    /// Flags `ref` bindings on function arguments.
    fn check_fn(&mut self, cx: &LateContext, k: FnKind, decl: &FnDecl, _: &Block, _: Span, _: NodeId) {
        if let FnKind::Closure = k {
            // Does not apply to closures
            return
        }
        // `for ref arg in &decl.inputs` only added a needless extra
        // reference (ironically, the pattern this very lint discourages);
        // iterating the borrow directly is equivalent via auto-deref.
        for arg in &decl.inputs {
            if let PatIdent(BindByRef(_), _, _) = arg.pat.node {
                span_lint(cx,
                    TOPLEVEL_REF_ARG,
                    arg.pat.span,
                    "`ref` directly on a function argument is ignored. Consider using a reference type instead."
                );
            }
        }
    }
    /// Flags `let ref x = ...` and suggests `let x = &...` instead.
    fn check_stmt(&mut self, cx: &LateContext, s: &Stmt) {
        if_let_chain! {
            [
                let StmtDecl(ref d, _) = s.node,
                let DeclLocal(ref l) = d.node,
                let PatIdent(BindByRef(_), i, None) = l.pat.node,
                let Some(ref init) = l.init
            ], {
                // Carry an explicit type annotation over into the suggestion.
                let tyopt = if let Some(ref ty) = l.ty {
                    format!(": {:?} ", ty)
                } else {
                    "".to_owned()
                };
                span_help_and_lint(cx,
                    TOPLEVEL_REF_ARG,
                    l.pat.span,
                    "`ref` on an entire `let` pattern is discouraged, take a reference with & instead",
                    &format!("try `let {} {}= &{};`", snippet(cx, i.span, "_"),
                             tyopt, snippet(cx, init.span, "_"))
                );
            }
        };
    }
}
/// **What it does:** This lint checks for comparisons to NAN. It is `Deny` by default.
///
/// **Why is this bad?** NAN does not compare meaningfully to anything – not even itself – so those comparisons are simply wrong.
///
/// **Known problems:** None
///
/// **Example:** `x == NAN`
declare_lint!(pub CMP_NAN, Deny,
              "comparisons to NAN (which will always return false, which is probably not intended)");

#[derive(Copy,Clone)]
pub struct CmpNan;

impl LintPass for CmpNan {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(CMP_NAN)
    }
}

impl LateLintPass for CmpNan {
    // Inspect both operands of every comparison operator for a path
    // whose last segment is `NAN`.
    fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
        if let ExprBinary(ref cmp, ref left, ref right) = expr.node {
            if is_comparison_binop(cmp.node) {
                if let ExprPath(_, ref path) = left.node {
                    check_nan(cx, path, expr.span);
                }
                if let ExprPath(_, ref path) = right.node {
                    check_nan(cx, path, expr.span);
                }
            }
        }
    }
}
// Emits CMP_NAN if the path's last segment is the constant `NAN`.
// Rewritten from `Option::map` used purely for its side effect to the
// idiomatic `if let` (clippy: `option_map_unit_fn`).
fn check_nan(cx: &LateContext, path: &Path, span: Span) {
    if let Some(seg) = path.segments.last() {
        if seg.identifier.name.as_str() == "NAN" {
            span_lint(cx, CMP_NAN, span,
                      "doomed comparison with NAN, use `std::{f32,f64}::is_nan()` instead");
        }
    }
}
/// **What it does:** This lint checks for (in-)equality comparisons on floating-point values (apart from zero), except in functions called `*eq*` (which probably implement equality for a type involving floats). It is `Warn` by default.
///
/// **Why is this bad?** Floating point calculations are usually imprecise, so asking if two values are *exactly* equal is asking for trouble. For a good guide on what to do, see [the floating point guide](http://www.floating-point-gui.de/errors/comparison).
///
/// **Known problems:** None
///
/// **Example:** `y == 1.23f64`
declare_lint!(pub FLOAT_CMP, Warn,
              "using `==` or `!=` on float values (as floating-point operations \
               usually involve rounding errors, it is always better to check for approximate \
               equality within small bounds)");

#[derive(Copy,Clone)]
pub struct FloatCmp;

impl LintPass for FloatCmp {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(FLOAT_CMP)
    }
}

impl LateLintPass for FloatCmp {
    // Flags `==`/`!=` where either side is a float, unless one side is a
    // constant 0.0 or ±infinity, or the enclosing item looks like an
    // equality implementation (`eq`, `ne`, `is_nan`, `eq_*`, `*_eq`).
    fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
        if let ExprBinary(ref cmp, ref left, ref right) = expr.node {
            let op = cmp.node;
            if (op == BiEq || op == BiNe) && (is_float(cx, left) || is_float(cx, right)) {
                if is_allowed(cx, left) || is_allowed(cx, right) { return; }
                if let Some(name) = get_item_name(cx, expr) {
                    let name = name.as_str();
                    if name == "eq" || name == "ne" || name == "is_nan" ||
                       name.starts_with("eq_") ||
                       name.ends_with("_eq") {
                        return;
                    }
                }
                span_lint(cx, FLOAT_CMP, expr.span, &format!(
                    "{}-comparison of f32 or f64 detected. Consider changing this to \
                     `abs({} - {}) < epsilon` for some suitable value of epsilon",
                    binop_to_string(op), snippet(cx, left.span, ".."),
                    snippet(cx, right.span, "..")));
            }
        }
    }
}

// A float operand is allowed if it constant-folds to exactly 0.0 or
// ±infinity — comparing against those exact values is meaningful.
fn is_allowed(cx: &LateContext, expr: &Expr) -> bool {
    let res = eval_const_expr_partial(cx.tcx, expr, ExprTypeChecked, None);
    if let Ok(Float(val)) = res {
        val == 0.0 || val == ::std::f64::INFINITY || val == ::std::f64::NEG_INFINITY
    } else { false }
}
// True iff the expression's type, after stripping references/pointers,
// is a floating-point type.
fn is_float(cx: &LateContext, expr: &Expr) -> bool {
    match walk_ptrs_ty(cx.tcx.expr_ty(expr)).sty {
        ty::TyFloat(_) => true,
        _ => false,
    }
}
/// **What it does:** This lint checks for conversions to owned values just for the sake of a comparison. It is `Warn` by default.
///
/// **Why is this bad?** The comparison can operate on a reference, so creating an owned value effectively throws it away directly afterwards, which is needlessly consuming code and heap space.
///
/// **Known problems:** None
///
/// **Example:** `x.to_owned() == y`
declare_lint!(pub CMP_OWNED, Warn,
              "creating owned instances for comparing with others, e.g. `x == \"foo\".to_string()`");

#[derive(Copy,Clone)]
pub struct CmpOwned;

impl LintPass for CmpOwned {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(CMP_OWNED)
    }
}

impl LateLintPass for CmpOwned {
    // Check both operands of any comparison; `left` tells the helper
    // which side the candidate expression is on so the suggestion keeps
    // operand order.
    fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
        if let ExprBinary(ref cmp, ref left, ref right) = expr.node {
            if is_comparison_binop(cmp.node) {
                check_to_owned(cx, left, right.span, true, cmp.span);
                check_to_owned(cx, right, left.span, false, cmp.span)
            }
        }
    }
}
// Detects an operand that allocates an owned value just for comparison
// (`to_string`, `to_owned` on str, `String::from_str`, `String::from`)
// and suggests comparing the underlying value instead.
fn check_to_owned(cx: &LateContext, expr: &Expr, other_span: Span, left: bool, op: Span) {
    let snip = match expr.node {
        ExprMethodCall(Spanned{node: ref name, ..}, _, ref args) if args.len() == 1 => {
            // NOTE(review): `&&` binds tighter than `||`, so `to_string`
            // matches regardless of the receiver's type while `to_owned`
            // additionally requires a str receiver — confirm intended.
            if name.as_str() == "to_string" ||
                name.as_str() == "to_owned" && is_str_arg(cx, args) {
                snippet(cx, args[0].span, "..")
            } else {
                return
            }
        }
        ExprCall(ref path, ref v) if v.len() == 1 => {
            if let ExprPath(None, ref path) = path.node {
                if match_path(path, &["String", "from_str"]) ||
                    match_path(path, &["String", "from"]) {
                    snippet(cx, v[0].span, "..")
                } else {
                    return
                }
            } else {
                return
            }
        }
        _ => return
    };
    // Two branches so the suggestion preserves the operand order of the
    // original comparison.
    if left {
        span_lint(cx, CMP_OWNED, expr.span, &format!(
            "this creates an owned instance just for comparison. Consider using \
             `{} {} {}` to compare without allocation", snip,
            snippet(cx, op, "=="), snippet(cx, other_span, "..")));
    } else {
        span_lint(cx, CMP_OWNED, expr.span, &format!(
            "this creates an owned instance just for comparison. Consider using \
             `{} {} {}` to compare without allocation",
            snippet(cx, other_span, ".."), snippet(cx, op, "=="), snip));
    }
}
// True iff there is exactly one argument and its type, after stripping
// references/pointers, is `str`.
fn is_str_arg(cx: &LateContext, args: &[P<Expr>]) -> bool {
    if args.len() != 1 {
        return false;
    }
    match walk_ptrs_ty(cx.tcx.expr_ty(&args[0])).sty {
        ty::TyStr => true,
        _ => false,
    }
}
/// **What it does:** This lint checks for getting the remainder of a division by one. It is `Warn` by default.
///
/// **Why is this bad?** The result can only ever be zero. No one will write such code deliberately, unless trying to win an Underhanded Rust Contest. Even for that contest, it's probably a bad idea. Use something more underhanded.
///
/// **Known problems:** None
///
/// **Example:** `x % 1`
declare_lint!(pub MODULO_ONE, Warn, "taking a number modulo 1, which always returns 0");

#[derive(Copy,Clone)]
pub struct ModuloOne;

impl LintPass for ModuloOne {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(MODULO_ONE)
    }
}

impl LateLintPass for ModuloOne {
    // Flags `<anything> % 1` (integer literal 1 on the right).
    fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
        if let ExprBinary(ref cmp, _, ref right) = expr.node {
            if let Spanned {node: BinOp_::BiRem, ..} = *cmp {
                if is_integer_literal(right, 1) {
                    cx.span_lint(MODULO_ONE, expr.span, "any number modulo 1 will be 0");
                }
            }
        }
    }
}

/// **What it does:** This lint checks for patterns in the form `name @ _`.
///
/// **Why is this bad?** It's almost always more readable to just use direct bindings.
///
/// **Known problems:** None
///
/// **Example**:
/// ```
/// match v {
///     Some(x) => (),
///     y @ _   => (), // easier written as `y`,
/// }
/// ```
declare_lint!(pub REDUNDANT_PATTERN, Warn, "using `name @ _` in a pattern");

#[derive(Copy,Clone)]
pub struct PatternPass;

impl LintPass for PatternPass {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(REDUNDANT_PATTERN)
    }
}

impl LateLintPass for PatternPass {
    // Flags `ident @ _`, which binds exactly like the plain `ident`.
    fn check_pat(&mut self, cx: &LateContext, pat: &Pat) {
        if let PatIdent(_, ref ident, Some(ref right)) = pat.node {
            if right.node == PatWild {
                cx.span_lint(REDUNDANT_PATTERN, pat.span, &format!(
                    "the `{} @ _` pattern can be written as just `{}`",
                    ident.node.name, ident.node.name));
            }
        }
    }
}
/// **What it does:** This lint checks for the use of bindings with a single leading underscore
///
/// **Why is this bad?** A single leading underscore is usually used to indicate that a binding
/// will not be used. Using such a binding breaks this expectation.
///
/// **Known problems:** This lint's idea of a "used" variable is not quite the same as in the
/// built-in `unused_variables` lint. For example, in the following code
/// ```
/// fn foo(y: u32) -> u32) {
///     let _x = 1;
///     _x +=1;
///     y
/// }
/// ```
/// _x will trigger both the `unused_variables` lint and the `used_underscore_binding` lint.
///
/// **Example**:
/// ```
/// let _x = 0;
/// let y = _x + 1; // Here we are using `_x`, even though it has a leading underscore.
///                 // We should rename `_x` to `x`
/// ```
declare_lint!(pub USED_UNDERSCORE_BINDING, Warn,
              "using a binding which is prefixed with an underscore");

#[derive(Copy, Clone)]
pub struct UsedUnderscoreBinding;

impl LintPass for UsedUnderscoreBinding {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(USED_UNDERSCORE_BINDING)
    }
}
impl LateLintPass for UsedUnderscoreBinding {
    /// Lints uses of `_`-prefixed (but not `__`-prefixed) local bindings
    /// and field accesses.
    fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
        let needs_lint = match expr.node {
            ExprPath(_, ref path) => {
                let ident = path.segments.last()
                    .expect("path should always have at least one segment")
                    .identifier;
                ident.name.as_str().chars().next() == Some('_') //starts with '_'
                && ident.name.as_str().chars().skip(1).next() != Some('_') //doesn't start with "__"
                && ident.name != ident.unhygienic_name //not in macro
                && cx.tcx.def_map.borrow().contains_key(&expr.id) //local variable
            },
            ExprField(_, spanned) => {
                let name = spanned.node.as_str();
                name.chars().next() == Some('_')
                && name.chars().skip(1).next() != Some('_')
            },
            _ => false
        };
        if needs_lint {
            // A trailing `\` in a string literal strips the newline AND
            // the following leading whitespace, so a space is required
            // before the `\`; without it the message rendered as
            // "underscoresignals".
            cx.span_lint(USED_UNDERSCORE_BINDING, expr.span,
                         "used binding which is prefixed with an underscore. A leading underscore \
                          signals that a binding will not be used.");
        }
    }
}
Remove local variable check
use rustc::lint::*;
use syntax::ptr::P;
use rustc_front::hir::*;
use reexport::*;
use rustc_front::util::{is_comparison_binop, binop_to_string};
use syntax::codemap::{Span, Spanned};
use rustc_front::intravisit::FnKind;
use rustc::middle::ty;
use rustc::middle::const_eval::ConstVal::Float;
use rustc::middle::const_eval::eval_const_expr_partial;
use rustc::middle::const_eval::EvalHint::ExprTypeChecked;
use utils::{get_item_name, match_path, snippet, span_lint, walk_ptrs_ty, is_integer_literal};
use utils::span_help_and_lint;
/// **What it does:** This lint checks for function arguments and let bindings denoted as `ref`. It is `Warn` by default.
///
/// **Why is this bad?** The `ref` declaration makes the function take an owned value, but turns the argument into a reference (which means that the value is destroyed when exiting the function). This adds not much value: either take a reference type, or take an owned value and create references in the body.
///
/// For let bindings, `let x = &foo;` is preferred over `let ref x = foo`. The type of `x` is more obvious with the former.
///
/// **Known problems:** If the argument is dereferenced within the function, removing the `ref` will lead to errors. This can be fixed by removing the dereferences, e.g. changing `*x` to `x` within the function.
///
/// **Example:** `fn foo(ref x: u8) -> bool { .. }`
declare_lint!(pub TOPLEVEL_REF_ARG, Warn,
              "An entire binding was declared as `ref`, in a function argument (`fn foo(ref x: Bar)`), \
               or a `let` statement (`let ref x = foo()`). In such cases, it is preferred to take \
               references with `&`.");

// Unit struct registered as a lint pass; Copy is deliberately not derived.
#[allow(missing_copy_implementations)]
pub struct TopLevelRefPass;

impl LintPass for TopLevelRefPass {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(TOPLEVEL_REF_ARG)
    }
}
impl LateLintPass for TopLevelRefPass {
    /// Flags `ref` bindings on function arguments.
    fn check_fn(&mut self, cx: &LateContext, k: FnKind, decl: &FnDecl, _: &Block, _: Span, _: NodeId) {
        if let FnKind::Closure = k {
            // Does not apply to closures
            return
        }
        // `for ref arg in &decl.inputs` only added a needless extra
        // reference (ironically, the pattern this very lint discourages);
        // iterating the borrow directly is equivalent via auto-deref.
        for arg in &decl.inputs {
            if let PatIdent(BindByRef(_), _, _) = arg.pat.node {
                span_lint(cx,
                    TOPLEVEL_REF_ARG,
                    arg.pat.span,
                    "`ref` directly on a function argument is ignored. Consider using a reference type instead."
                );
            }
        }
    }
    /// Flags `let ref x = ...` and suggests `let x = &...` instead.
    fn check_stmt(&mut self, cx: &LateContext, s: &Stmt) {
        if_let_chain! {
            [
                let StmtDecl(ref d, _) = s.node,
                let DeclLocal(ref l) = d.node,
                let PatIdent(BindByRef(_), i, None) = l.pat.node,
                let Some(ref init) = l.init
            ], {
                // Carry an explicit type annotation over into the suggestion.
                let tyopt = if let Some(ref ty) = l.ty {
                    format!(": {:?} ", ty)
                } else {
                    "".to_owned()
                };
                span_help_and_lint(cx,
                    TOPLEVEL_REF_ARG,
                    l.pat.span,
                    "`ref` on an entire `let` pattern is discouraged, take a reference with & instead",
                    &format!("try `let {} {}= &{};`", snippet(cx, i.span, "_"),
                             tyopt, snippet(cx, init.span, "_"))
                );
            }
        };
    }
}
/// **What it does:** This lint checks for comparisons to NAN. It is `Deny` by default.
///
/// **Why is this bad?** NAN does not compare meaningfully to anything – not even itself – so those comparisons are simply wrong.
///
/// **Known problems:** None
///
/// **Example:** `x == NAN`
declare_lint!(pub CMP_NAN, Deny,
              "comparisons to NAN (which will always return false, which is probably not intended)");

#[derive(Copy,Clone)]
pub struct CmpNan;

impl LintPass for CmpNan {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(CMP_NAN)
    }
}

impl LateLintPass for CmpNan {
    // Inspect both operands of every comparison operator for a path
    // whose last segment is `NAN`.
    fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
        if let ExprBinary(ref cmp, ref left, ref right) = expr.node {
            if is_comparison_binop(cmp.node) {
                if let ExprPath(_, ref path) = left.node {
                    check_nan(cx, path, expr.span);
                }
                if let ExprPath(_, ref path) = right.node {
                    check_nan(cx, path, expr.span);
                }
            }
        }
    }
}
// Emits CMP_NAN if the path's last segment is the constant `NAN`.
// Rewritten from `Option::map` used purely for its side effect to the
// idiomatic `if let` (clippy: `option_map_unit_fn`).
fn check_nan(cx: &LateContext, path: &Path, span: Span) {
    if let Some(seg) = path.segments.last() {
        if seg.identifier.name.as_str() == "NAN" {
            span_lint(cx, CMP_NAN, span,
                      "doomed comparison with NAN, use `std::{f32,f64}::is_nan()` instead");
        }
    }
}
/// **What it does:** This lint checks for (in-)equality comparisons on floating-point values (apart from zero), except in functions called `*eq*` (which probably implement equality for a type involving floats). It is `Warn` by default.
///
/// **Why is this bad?** Floating point calculations are usually imprecise, so asking if two values are *exactly* equal is asking for trouble. For a good guide on what to do, see [the floating point guide](http://www.floating-point-gui.de/errors/comparison).
///
/// **Known problems:** None
///
/// **Example:** `y == 1.23f64`
declare_lint!(pub FLOAT_CMP, Warn,
              "using `==` or `!=` on float values (as floating-point operations \
               usually involve rounding errors, it is always better to check for approximate \
               equality within small bounds)");

#[derive(Copy,Clone)]
pub struct FloatCmp;

impl LintPass for FloatCmp {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(FLOAT_CMP)
    }
}

impl LateLintPass for FloatCmp {
    // Flags `==`/`!=` where either side is a float, unless one side is a
    // constant 0.0 or ±infinity, or the enclosing item looks like an
    // equality implementation (`eq`, `ne`, `is_nan`, `eq_*`, `*_eq`).
    fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
        if let ExprBinary(ref cmp, ref left, ref right) = expr.node {
            let op = cmp.node;
            if (op == BiEq || op == BiNe) && (is_float(cx, left) || is_float(cx, right)) {
                if is_allowed(cx, left) || is_allowed(cx, right) { return; }
                if let Some(name) = get_item_name(cx, expr) {
                    let name = name.as_str();
                    if name == "eq" || name == "ne" || name == "is_nan" ||
                       name.starts_with("eq_") ||
                       name.ends_with("_eq") {
                        return;
                    }
                }
                span_lint(cx, FLOAT_CMP, expr.span, &format!(
                    "{}-comparison of f32 or f64 detected. Consider changing this to \
                     `abs({} - {}) < epsilon` for some suitable value of epsilon",
                    binop_to_string(op), snippet(cx, left.span, ".."),
                    snippet(cx, right.span, "..")));
            }
        }
    }
}

// A float operand is allowed if it constant-folds to exactly 0.0 or
// ±infinity — comparing against those exact values is meaningful.
fn is_allowed(cx: &LateContext, expr: &Expr) -> bool {
    let res = eval_const_expr_partial(cx.tcx, expr, ExprTypeChecked, None);
    if let Ok(Float(val)) = res {
        val == 0.0 || val == ::std::f64::INFINITY || val == ::std::f64::NEG_INFINITY
    } else { false }
}
// True iff the expression's type, after stripping references/pointers,
// is a floating-point type.
fn is_float(cx: &LateContext, expr: &Expr) -> bool {
    match walk_ptrs_ty(cx.tcx.expr_ty(expr)).sty {
        ty::TyFloat(_) => true,
        _ => false,
    }
}
/// **What it does:** This lint checks for conversions to owned values just for the sake of a comparison. It is `Warn` by default.
///
/// **Why is this bad?** The comparison can operate on a reference, so creating an owned value effectively throws it away directly afterwards, which is needlessly consuming code and heap space.
///
/// **Known problems:** None
///
/// **Example:** `x.to_owned() == y`
declare_lint!(pub CMP_OWNED, Warn,
              "creating owned instances for comparing with others, e.g. `x == \"foo\".to_string()`");

#[derive(Copy,Clone)]
pub struct CmpOwned;

impl LintPass for CmpOwned {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(CMP_OWNED)
    }
}

impl LateLintPass for CmpOwned {
    // Check both operands of any comparison; `left` tells the helper
    // which side the candidate expression is on so the suggestion keeps
    // operand order.
    fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
        if let ExprBinary(ref cmp, ref left, ref right) = expr.node {
            if is_comparison_binop(cmp.node) {
                check_to_owned(cx, left, right.span, true, cmp.span);
                check_to_owned(cx, right, left.span, false, cmp.span)
            }
        }
    }
}
// Detects an operand that allocates an owned value just for comparison
// (`to_string`, `to_owned` on str, `String::from_str`, `String::from`)
// and suggests comparing the underlying value instead.
fn check_to_owned(cx: &LateContext, expr: &Expr, other_span: Span, left: bool, op: Span) {
    let snip = match expr.node {
        ExprMethodCall(Spanned{node: ref name, ..}, _, ref args) if args.len() == 1 => {
            // NOTE(review): `&&` binds tighter than `||`, so `to_string`
            // matches regardless of the receiver's type while `to_owned`
            // additionally requires a str receiver — confirm intended.
            if name.as_str() == "to_string" ||
                name.as_str() == "to_owned" && is_str_arg(cx, args) {
                snippet(cx, args[0].span, "..")
            } else {
                return
            }
        }
        ExprCall(ref path, ref v) if v.len() == 1 => {
            if let ExprPath(None, ref path) = path.node {
                if match_path(path, &["String", "from_str"]) ||
                    match_path(path, &["String", "from"]) {
                    snippet(cx, v[0].span, "..")
                } else {
                    return
                }
            } else {
                return
            }
        }
        _ => return
    };
    // Two branches so the suggestion preserves the operand order of the
    // original comparison.
    if left {
        span_lint(cx, CMP_OWNED, expr.span, &format!(
            "this creates an owned instance just for comparison. Consider using \
             `{} {} {}` to compare without allocation", snip,
            snippet(cx, op, "=="), snippet(cx, other_span, "..")));
    } else {
        span_lint(cx, CMP_OWNED, expr.span, &format!(
            "this creates an owned instance just for comparison. Consider using \
             `{} {} {}` to compare without allocation",
            snippet(cx, other_span, ".."), snippet(cx, op, "=="), snip));
    }
}
// True iff there is exactly one argument and its type, after stripping
// references/pointers, is `str`.
fn is_str_arg(cx: &LateContext, args: &[P<Expr>]) -> bool {
    if args.len() != 1 {
        return false;
    }
    match walk_ptrs_ty(cx.tcx.expr_ty(&args[0])).sty {
        ty::TyStr => true,
        _ => false,
    }
}
/// **What it does:** This lint checks for getting the remainder of a division by one. It is `Warn` by default.
///
/// **Why is this bad?** The result can only ever be zero. No one will write such code deliberately, unless trying to win an Underhanded Rust Contest. Even for that contest, it's probably a bad idea. Use something more underhanded.
///
/// **Known problems:** None
///
/// **Example:** `x % 1`
declare_lint!(pub MODULO_ONE, Warn, "taking a number modulo 1, which always returns 0");

#[derive(Copy,Clone)]
pub struct ModuloOne;

impl LintPass for ModuloOne {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(MODULO_ONE)
    }
}

impl LateLintPass for ModuloOne {
    // Flags `<anything> % 1` (integer literal 1 on the right).
    fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
        if let ExprBinary(ref cmp, _, ref right) = expr.node {
            if let Spanned {node: BinOp_::BiRem, ..} = *cmp {
                if is_integer_literal(right, 1) {
                    cx.span_lint(MODULO_ONE, expr.span, "any number modulo 1 will be 0");
                }
            }
        }
    }
}

/// **What it does:** This lint checks for patterns in the form `name @ _`.
///
/// **Why is this bad?** It's almost always more readable to just use direct bindings.
///
/// **Known problems:** None
///
/// **Example**:
/// ```
/// match v {
///     Some(x) => (),
///     y @ _   => (), // easier written as `y`,
/// }
/// ```
declare_lint!(pub REDUNDANT_PATTERN, Warn, "using `name @ _` in a pattern");

#[derive(Copy,Clone)]
pub struct PatternPass;

impl LintPass for PatternPass {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(REDUNDANT_PATTERN)
    }
}

impl LateLintPass for PatternPass {
    // Flags `ident @ _`, which binds exactly like the plain `ident`.
    fn check_pat(&mut self, cx: &LateContext, pat: &Pat) {
        if let PatIdent(_, ref ident, Some(ref right)) = pat.node {
            if right.node == PatWild {
                cx.span_lint(REDUNDANT_PATTERN, pat.span, &format!(
                    "the `{} @ _` pattern can be written as just `{}`",
                    ident.node.name, ident.node.name));
            }
        }
    }
}
/// **What it does:** This lint checks for the use of bindings with a single leading underscore
///
/// **Why is this bad?** A single leading underscore is usually used to indicate that a binding
/// will not be used. Using such a binding breaks this expectation.
///
/// **Known problems:** This lint's idea of a "used" variable is not quite the same as in the
/// built-in `unused_variables` lint. For example, in the following code
/// ```
/// fn foo(y: u32) -> u32) {
///     let _x = 1;
///     _x +=1;
///     y
/// }
/// ```
/// _x will trigger both the `unused_variables` lint and the `used_underscore_binding` lint.
///
/// **Example**:
/// ```
/// let _x = 0;
/// let y = _x + 1; // Here we are using `_x`, even though it has a leading underscore.
///                 // We should rename `_x` to `x`
/// ```
declare_lint!(pub USED_UNDERSCORE_BINDING, Warn,
              "using a binding which is prefixed with an underscore");

#[derive(Copy, Clone)]
pub struct UsedUnderscoreBinding;

impl LintPass for UsedUnderscoreBinding {
    // Registers the lints this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(USED_UNDERSCORE_BINDING)
    }
}
impl LateLintPass for UsedUnderscoreBinding {
fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
let needs_lint = match expr.node {
ExprPath(_, ref path) => {
let ident = path.segments.last()
.expect("path should always have at least one segment")
.identifier;
ident.name.as_str().chars().next() == Some('_') //starts with '_'
&& ident.name.as_str().chars().skip(1).next() != Some('_') //doesn't start with "__"
&& ident.name != ident.unhygienic_name //not in macro
},
ExprField(_, spanned) => {
let name = spanned.node.as_str();
name.chars().next() == Some('_')
&& name.chars().skip(1).next() != Some('_')
},
_ => false
};
if needs_lint {
cx.span_lint(USED_UNDERSCORE_BINDING, expr.span,
"used binding which is prefixed with an underscore. A leading underscore\
signals that a binding will not be used.");
}
}
}
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::Abi;
use common::*;
use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::DropInPlaceFnLangItem;
use rustc::traits;
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::ty::{self, Ty, TyCtxt};
use syntax::codemap::DUMMY_SP;
pub use rustc::ty::Instance;
/// Builds the `Instance` of the `FnOnce::call_once` adapter shim for the
/// closure `closure_did`, used when a `Fn`/`FnMut` closure is invoked through
/// the `FnOnce` trait.
fn fn_once_adapter_instance<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    closure_did: DefId,
    substs: ty::ClosureSubsts<'tcx>,
) -> Instance<'tcx> {
    debug!("fn_once_adapter_shim({:?}, {:?})",
           closure_did,
           substs);
    let fn_once = tcx.lang_items().fn_once_trait().unwrap();
    // `call_once` is the single method of the `FnOnce` lang trait.
    let call_once = tcx.associated_items(fn_once)
        .find(|it| it.kind == ty::AssociatedKind::Method)
        .unwrap().def_id;
    let def = ty::InstanceDef::ClosureOnceShim { call_once };
    let self_ty = tcx.mk_closure_from_closure_substs(
        closure_did, substs);
    let sig = tcx.fn_sig(closure_did).subst(tcx, substs.substs);
    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
    // Closure calls take a single (tupled) argument.
    assert_eq!(sig.inputs().len(), 1);
    // The shim is instantiated with `[self_ty, tupled_args_ty]`.
    let substs = tcx.mk_substs([
        Kind::from(self_ty),
        Kind::from(sig.inputs()[0]),
    ].iter().cloned());
    debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
    Instance { def, substs }
}
/// Decides whether calling a closure of kind `actual_closure_kind` through the
/// trait of kind `trait_closure_kind` requires the `call_once` adapter shim.
/// Returns `Err(())` for combinations that are not valid closure-kind
/// coercions (e.g. `FnOnce` called as `Fn`).
fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind,
                              trait_closure_kind: ty::ClosureKind)
                              -> Result<bool, ()>
{
    match (actual_closure_kind, trait_closure_kind) {
        (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
        (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
        (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => {
            // No adapter needed.
            Ok(false)
        }
        (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
            // The closure fn `llfn` is a `fn(&self, ...)`. We want a
            // `fn(&mut self, ...)`. In fact, at trans time, these are
            // basically the same thing, so we can just return llfn.
            Ok(false)
        }
        (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
        (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
            // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
            // self, ...)`. We want a `fn(self, ...)`. We can produce
            // this by doing something like:
            //
            //     fn call_once(self, ...) { call_mut(&self, ...) }
            //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
            //
            // These are both the same at trans time.
            Ok(true)
        }
        _ => Err(()),
    }
}
/// Resolves a closure to the instance used for `requested_kind`: either the
/// closure item itself, or its `call_once` adapter shim when a `Fn`/`FnMut`
/// closure is invoked through `FnOnce`.
pub fn resolve_closure<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    def_id: DefId,
    substs: ty::ClosureSubsts<'tcx>,
    requested_kind: ty::ClosureKind)
    -> Instance<'tcx>
{
    let actual_kind = tcx.closure_kind(def_id);
    // Only `Ok(true)` selects the adapter shim; `Ok(false)` and `Err(..)`
    // both fall back to the plain closure item.
    if let Ok(true) = needs_fn_once_adapter_shim(actual_kind, requested_kind) {
        fn_once_adapter_instance(tcx, def_id, substs)
    } else {
        Instance::new(def_id, substs.substs)
    }
}
/// Resolves the trait item `trait_item` of trait `trait_id`, instantiated with
/// `rcvr_substs`, to the concrete instance that implements it, by fulfilling
/// the trait obligation and dispatching on the resulting vtable.
fn resolve_associated_item<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    trait_item: &ty::AssociatedItem,
    trait_id: DefId,
    rcvr_substs: &'tcx Substs<'tcx>
) -> Instance<'tcx> {
    let def_id = trait_item.def_id;
    debug!("resolve_associated_item(trait_item={:?}, \
            trait_id={:?}, \
            rcvr_substs={:?})",
           def_id, trait_id, rcvr_substs);
    let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
    // Wrapped to stay under the 100-column tidy limit.
    let vtbl = tcx.trans_fulfill_obligation(
        DUMMY_SP, ty::ParamEnv::empty(traits::Reveal::All), ty::Binder(trait_ref));
    // Now that we know which impl is being used, we can dispatch to
    // the actual function:
    match vtbl {
        traits::VtableImpl(impl_data) => {
            // Plain impl: find the impl's own item and its substitutions.
            let (def_id, substs) = traits::find_associated_item(
                tcx, trait_item, rcvr_substs, &impl_data);
            let substs = tcx.erase_regions(&substs);
            ty::Instance::new(def_id, substs)
        }
        traits::VtableGenerator(closure_data) => {
            Instance {
                def: ty::InstanceDef::Item(closure_data.closure_def_id),
                substs: closure_data.substs.substs
            }
        }
        traits::VtableClosure(closure_data) => {
            // A closure implementing one of the `Fn*` traits.
            let trait_closure_kind = tcx.lang_items().fn_trait_kind(trait_id).unwrap();
            resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs,
                            trait_closure_kind)
        }
        traits::VtableFnPointer(ref data) => {
            // Function pointer called through a `Fn*` trait: needs a shim.
            Instance {
                def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty),
                substs: rcvr_substs
            }
        }
        traits::VtableObject(ref data) => {
            // Trait-object dispatch: a virtual call through the vtable slot.
            let index = tcx.get_vtable_index_of_object_method(data, def_id);
            Instance {
                def: ty::InstanceDef::Virtual(def_id, index),
                substs: rcvr_substs
            }
        }
        traits::VtableBuiltin(..) if Some(trait_id) == tcx.lang_items().clone_trait() => {
            // Builtin `Clone` impl: compiler-generated clone shim.
            Instance {
                def: ty::InstanceDef::CloneShim(def_id, trait_ref.self_ty()),
                substs: rcvr_substs
            }
        }
        _ => {
            bug!("static call to invalid vtable: {:?}", vtbl)
        }
    }
}
/// The point where linking happens. Resolve a (def_id, substs)
/// pair to an instance.
pub fn resolve<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    def_id: DefId,
    substs: &'tcx Substs<'tcx>
) -> Instance<'tcx> {
    debug!("resolve(def_id={:?}, substs={:?})",
           def_id, substs);
    let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) {
        // Trait items are resolved through the impl that satisfies them.
        debug!(" => associated item, attempting to find impl");
        let item = tcx.associated_item(def_id);
        resolve_associated_item(tcx, &item, trait_def_id, substs)
    } else {
        let item_type = def_ty(tcx, def_id, substs);
        let def = match item_type.sty {
            // Intrinsics get their own InstanceDef so they can be lowered
            // specially.
            ty::TyFnDef(..) if {
                     let f = item_type.fn_sig(tcx);
                     f.abi() == Abi::RustIntrinsic ||
                     f.abi() == Abi::PlatformIntrinsic
                 } =>
            {
                debug!(" => intrinsic");
                ty::InstanceDef::Intrinsic(def_id)
            }
            _ => {
                // `drop_in_place` gets drop glue; whether it is trivial
                // depends on the dropped type.
                if Some(def_id) == tcx.lang_items().drop_in_place_fn() {
                    let ty = substs.type_at(0);
                    if type_needs_drop(tcx, ty) {
                        debug!(" => nontrivial drop glue");
                        ty::InstanceDef::DropGlue(def_id, Some(ty))
                    } else {
                        debug!(" => trivial drop glue");
                        ty::InstanceDef::DropGlue(def_id, None)
                    }
                } else {
                    debug!(" => free item");
                    ty::InstanceDef::Item(def_id)
                }
            }
        };
        Instance { def, substs }
    };
    debug!("resolve(def_id={:?}, substs={:?}) = {}",
           def_id, substs, result);
    result
}
/// Resolves the `drop_in_place` lang item monomorphized for type `ty`.
pub fn resolve_drop_in_place<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    ty: Ty<'tcx>)
    -> ty::Instance<'tcx>
{
    let substs = tcx.intern_substs(&[Kind::from(ty)]);
    resolve(tcx, tcx.require_lang_item(DropInPlaceFnLangItem), substs)
}
/// Determines the kind of custom `CoerceUnsized` coercion from `source_ty` to
/// `target_ty` by fulfilling the `CoerceUnsized` obligation and reading the
/// implementing impl's coercion info.
pub fn custom_coerce_unsize_info<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                           source_ty: Ty<'tcx>,
                                           target_ty: Ty<'tcx>)
                                           -> CustomCoerceUnsized {
    let trait_ref = ty::Binder(ty::TraitRef {
        def_id: tcx.lang_items().coerce_unsized_trait().unwrap(),
        substs: tcx.mk_substs_trait(source_ty, &[target_ty])
    });
    // Wrapped to stay under the 100-column tidy limit.
    match tcx.trans_fulfill_obligation(
        DUMMY_SP, ty::ParamEnv::empty(traits::Reveal::All), trait_ref) {
        traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
            tcx.coerce_unsized_info(impl_def_id).custom_kind.unwrap()
        }
        vtable => {
            bug!("invalid CoerceUnsized vtable: {:?}", vtable);
        }
    }
}
/// Returns the normalized type of a struct field
pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                          param_substs: &Substs<'tcx>,
                          f: &'tcx ty::FieldDef)
                          -> Ty<'tcx>
{
    // Instantiate the field's declared type, then normalize away any
    // associated-type projections.
    let instantiated = f.ty(tcx, param_substs);
    tcx.normalize_associated_type(&instantiated)
}
// fix tidy errors
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::Abi;
use common::*;
use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::DropInPlaceFnLangItem;
use rustc::traits;
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::ty::{self, Ty, TyCtxt};
use syntax::codemap::DUMMY_SP;
pub use rustc::ty::Instance;
/// Builds the `Instance` of the `FnOnce::call_once` adapter shim for the
/// closure `closure_did`, used when a `Fn`/`FnMut` closure is invoked through
/// the `FnOnce` trait.
fn fn_once_adapter_instance<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    closure_did: DefId,
    substs: ty::ClosureSubsts<'tcx>,
) -> Instance<'tcx> {
    debug!("fn_once_adapter_shim({:?}, {:?})",
           closure_did,
           substs);
    // The shim is `FnOnce::call_once`, the sole method of the `FnOnce` trait.
    let fn_once_trait = tcx.lang_items().fn_once_trait().unwrap();
    let call_once_did = tcx.associated_items(fn_once_trait)
        .find(|item| item.kind == ty::AssociatedKind::Method)
        .unwrap()
        .def_id;
    let def = ty::InstanceDef::ClosureOnceShim { call_once: call_once_did };
    let self_ty = tcx.mk_closure_from_closure_substs(closure_did, substs);
    let sig = tcx.fn_sig(closure_did).subst(tcx, substs.substs);
    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
    // Closure calls take a single (tupled) argument.
    assert_eq!(sig.inputs().len(), 1);
    // Instantiate the shim with `[self_ty, tupled_args_ty]`.
    let shim_substs = tcx.mk_substs(
        [Kind::from(self_ty), Kind::from(sig.inputs()[0])].iter().cloned());
    debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
    Instance { def, substs: shim_substs }
}
/// Decides whether calling a closure of kind `actual_closure_kind` through the
/// trait of kind `trait_closure_kind` requires the `call_once` adapter shim.
/// Returns `Err(())` for invalid combinations (e.g. `FnOnce` called as `Fn`).
fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind,
                              trait_closure_kind: ty::ClosureKind)
                              -> Result<bool, ()>
{
    if actual_closure_kind == trait_closure_kind {
        // No adapter needed.
        return Ok(false);
    }
    match (actual_closure_kind, trait_closure_kind) {
        (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
            // The closure fn `llfn` is a `fn(&self, ...)`. We want a
            // `fn(&mut self, ...)`. In fact, at trans time, these are
            // basically the same thing, so we can just return llfn.
            Ok(false)
        }
        (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
        (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
            // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
            // self, ...)`. We want a `fn(self, ...)`. We can produce
            // this by doing something like:
            //
            //     fn call_once(self, ...) { call_mut(&self, ...) }
            //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
            //
            // These are both the same at trans time.
            Ok(true)
        }
        _ => Err(()),
    }
}
/// Resolves a closure to the instance used for `requested_kind`: either the
/// closure item itself, or its `call_once` adapter shim when a `Fn`/`FnMut`
/// closure is invoked through `FnOnce`.
pub fn resolve_closure<'a, 'tcx> (
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    def_id: DefId,
    substs: ty::ClosureSubsts<'tcx>,
    requested_kind: ty::ClosureKind)
    -> Instance<'tcx>
{
    let actual_kind = tcx.closure_kind(def_id);
    match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
        Ok(true) => fn_once_adapter_instance(tcx, def_id, substs),
        // `Ok(false)` and `Err(..)` both fall back to the plain closure item.
        _ => Instance::new(def_id, substs.substs)
    }
}
/// Resolves the trait item `trait_item` of trait `trait_id`, instantiated with
/// `rcvr_substs`, to the concrete instance that implements it, by fulfilling
/// the trait obligation and dispatching on the resulting vtable.
fn resolve_associated_item<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    trait_item: &ty::AssociatedItem,
    trait_id: DefId,
    rcvr_substs: &'tcx Substs<'tcx>
) -> Instance<'tcx> {
    let def_id = trait_item.def_id;
    debug!("resolve_associated_item(trait_item={:?}, \
            trait_id={:?}, \
            rcvr_substs={:?})",
           def_id, trait_id, rcvr_substs);
    let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
    let vtbl = tcx.trans_fulfill_obligation(
        DUMMY_SP, ty::ParamEnv::empty(traits::Reveal::All), ty::Binder(trait_ref));
    // Now that we know which impl is being used, we can dispatch to
    // the actual function:
    match vtbl {
        traits::VtableImpl(impl_data) => {
            // Plain impl: find the impl's own item and its substitutions.
            let (def_id, substs) = traits::find_associated_item(
                tcx, trait_item, rcvr_substs, &impl_data);
            let substs = tcx.erase_regions(&substs);
            ty::Instance::new(def_id, substs)
        }
        traits::VtableGenerator(closure_data) => {
            Instance {
                def: ty::InstanceDef::Item(closure_data.closure_def_id),
                substs: closure_data.substs.substs
            }
        }
        traits::VtableClosure(closure_data) => {
            // A closure implementing one of the `Fn*` traits.
            let trait_closure_kind = tcx.lang_items().fn_trait_kind(trait_id).unwrap();
            resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs,
                            trait_closure_kind)
        }
        traits::VtableFnPointer(ref data) => {
            // Function pointer called through a `Fn*` trait: needs a shim.
            Instance {
                def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty),
                substs: rcvr_substs
            }
        }
        traits::VtableObject(ref data) => {
            // Trait-object dispatch: a virtual call through the vtable slot.
            let index = tcx.get_vtable_index_of_object_method(data, def_id);
            Instance {
                def: ty::InstanceDef::Virtual(def_id, index),
                substs: rcvr_substs
            }
        }
        traits::VtableBuiltin(..) if Some(trait_id) == tcx.lang_items().clone_trait() => {
            // Builtin `Clone` impl: compiler-generated clone shim.
            Instance {
                def: ty::InstanceDef::CloneShim(def_id, trait_ref.self_ty()),
                substs: rcvr_substs
            }
        }
        _ => {
            bug!("static call to invalid vtable: {:?}", vtbl)
        }
    }
}
/// The point where linking happens. Resolve a (def_id, substs)
/// pair to an instance.
pub fn resolve<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    def_id: DefId,
    substs: &'tcx Substs<'tcx>
) -> Instance<'tcx> {
    debug!("resolve(def_id={:?}, substs={:?})",
           def_id, substs);
    let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) {
        // Trait items are resolved through the impl that satisfies them.
        debug!(" => associated item, attempting to find impl");
        let item = tcx.associated_item(def_id);
        resolve_associated_item(tcx, &item, trait_def_id, substs)
    } else {
        let item_type = def_ty(tcx, def_id, substs);
        // Hoist the intrinsic check out of a pattern guard for readability.
        let is_intrinsic = match item_type.sty {
            ty::TyFnDef(..) => {
                let sig = item_type.fn_sig(tcx);
                sig.abi() == Abi::RustIntrinsic || sig.abi() == Abi::PlatformIntrinsic
            }
            _ => false,
        };
        let def = if is_intrinsic {
            debug!(" => intrinsic");
            ty::InstanceDef::Intrinsic(def_id)
        } else if Some(def_id) == tcx.lang_items().drop_in_place_fn() {
            // `drop_in_place` gets drop glue; whether it is trivial depends
            // on the dropped type.
            let dropped_ty = substs.type_at(0);
            if type_needs_drop(tcx, dropped_ty) {
                debug!(" => nontrivial drop glue");
                ty::InstanceDef::DropGlue(def_id, Some(dropped_ty))
            } else {
                debug!(" => trivial drop glue");
                ty::InstanceDef::DropGlue(def_id, None)
            }
        } else {
            debug!(" => free item");
            ty::InstanceDef::Item(def_id)
        };
        Instance { def, substs }
    };
    debug!("resolve(def_id={:?}, substs={:?}) = {}",
           def_id, substs, result);
    result
}
/// Resolves the `drop_in_place` lang item monomorphized for type `ty`.
pub fn resolve_drop_in_place<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    ty: Ty<'tcx>)
    -> ty::Instance<'tcx>
{
    let def_id = tcx.require_lang_item(DropInPlaceFnLangItem);
    let substs = tcx.intern_substs(&[Kind::from(ty)]);
    resolve(tcx, def_id, substs)
}
pub fn custom_coerce_unsize_info<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
source_ty: Ty<'tcx>,
target_ty: Ty<'tcx>)
-> CustomCoerceUnsized {
let trait_ref = ty::Binder(ty::TraitRef {
def_id: tcx.lang_items().coerce_unsized_trait().unwrap(),
substs: tcx.mk_substs_trait(source_ty, &[target_ty])
});
match tcx.trans_fulfill_obligation(
DUMMY_SP, ty::ParamEnv::empty(traits::Reveal::All), trait_ref) {
traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
tcx.coerce_unsized_info(impl_def_id).custom_kind.unwrap()
}
vtable => {
bug!("invalid CoerceUnsized vtable: {:?}", vtable);
}
}
}
/// Returns the normalized type of a struct field
pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_substs: &Substs<'tcx>,
f: &'tcx ty::FieldDef)
-> Ty<'tcx>
{
tcx.normalize_associated_type(&f.ty(tcx, param_substs))
}
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Partitioning Codegen Units for Incremental Compilation
//! ======================================================
//!
//! The task of this module is to take the complete set of translation items of
//! a crate and produce a set of codegen units from it, where a codegen unit
//! is a named set of (translation-item, linkage) pairs. That is, this module
//! decides which translation item appears in which codegen units with which
//! linkage. The following paragraphs describe some of the background on the
//! partitioning scheme.
//!
//! The most important opportunity for saving on compilation time with
//! incremental compilation is to avoid re-translating and re-optimizing code.
//! Since the unit of translation and optimization for LLVM is "modules" or, how
//! we call them "codegen units", the particulars of how much time can be saved
//! by incremental compilation are tightly linked to how the output program is
//! partitioned into these codegen units prior to passing it to LLVM --
//! especially because we have to treat codegen units as opaque entities once
//! they are created: There is no way for us to incrementally update an existing
//! LLVM module and so we have to build any such module from scratch if it was
//! affected by some change in the source code.
//!
//! From that point of view it would make sense to maximize the number of
//! codegen units by, for example, putting each function into its own module.
//! That way only those modules would have to be re-compiled that were actually
//! affected by some change, minimizing the number of functions that could have
//! been re-used but just happened to be located in a module that is
//! re-compiled.
//!
//! However, since LLVM optimization does not work across module boundaries,
//! using such a highly granular partitioning would lead to very slow runtime
//! code since it would effectively prohibit inlining and other inter-procedure
//! optimizations. We want to avoid that as much as possible.
//!
//! Thus we end up with a trade-off: The bigger the codegen units, the better
//! LLVM's optimizer can do its work, but also the smaller the compilation time
//! reduction we get from incremental compilation.
//!
//! Ideally, we would create a partitioning such that there are few big codegen
//! units with few interdependencies between them. For now though, we use the
//! following heuristic to determine the partitioning:
//!
//! - There are two codegen units for every source-level module:
//! - One for "stable", that is non-generic, code
//! - One for more "volatile" code, i.e. monomorphized instances of functions
//! defined in that module
//!
//! In order to see why this heuristic makes sense, let's take a look at when a
//! codegen unit can get invalidated:
//!
//! 1. The most straightforward case is when the BODY of a function or global
//! changes. Then any codegen unit containing the code for that item has to be
//! re-compiled. Note that this includes all codegen units where the function
//! has been inlined.
//!
//! 2. The next case is when the SIGNATURE of a function or global changes. In
//! this case, all codegen units containing a REFERENCE to that item have to be
//! re-compiled. This is a superset of case 1.
//!
//! 3. The final and most subtle case is when a REFERENCE to a generic function
//! is added or removed somewhere. Even though the definition of the function
//! might be unchanged, a new REFERENCE might introduce a new monomorphized
//! instance of this function which has to be placed and compiled somewhere.
//! Conversely, when removing a REFERENCE, it might have been the last one with
//! that particular set of generic arguments and thus we have to remove it.
//!
//! From the above we see that just using one codegen unit per source-level
//! module is not such a good idea, since just adding a REFERENCE to some
//! generic item somewhere else would invalidate everything within the module
//! containing the generic item. The heuristic above reduces this detrimental
//! side-effect of references a little by at least not touching the non-generic
//! code of the module.
//!
//! A Note on Inlining
//! ------------------
//! As briefly mentioned above, in order for LLVM to be able to inline a
//! function call, the body of the function has to be available in the LLVM
//! module where the call is made. This has a few consequences for partitioning:
//!
//! - The partitioning algorithm has to take care of placing functions into all
//! codegen units where they should be available for inlining. It also has to
//! decide on the correct linkage for these functions.
//!
//! - The partitioning algorithm has to know which functions are likely to get
//! inlined, so it can distribute function instantiations accordingly. Since
//! there is no way of knowing for sure which functions LLVM will decide to
//! inline in the end, we apply a heuristic here: Only functions marked with
//! #[inline] are considered for inlining by the partitioner. The current
//! implementation will not try to determine if a function is likely to be
//! inlined by looking at the functions definition.
//!
//! Note though that as a side-effect of creating a codegen units per
//! source-level module, functions from the same module will be available for
//! inlining, even when they are not marked #[inline].
use back::symbol_export::ExportedSymbols;
use collector::InliningMap;
use common;
use context::SharedCrateContext;
use llvm;
use rustc::dep_graph::{DepNode, WorkProductId};
use rustc::hir::def_id::DefId;
use rustc::hir::map::DefPathData;
use rustc::session::config::NUMBERED_CODEGEN_UNIT_MARKER;
use rustc::ty::{self, TyCtxt, InstanceDef};
use rustc::ty::item_path::characteristic_def_id_of_type;
use rustc::util::nodemap::{FxHashMap, FxHashSet};
use rustc_incremental::IchHasher;
use std::collections::hash_map::Entry;
use std::hash::Hash;
use syntax::ast::NodeId;
use syntax::symbol::{Symbol, InternedString};
use trans_item::{TransItem, InstantiationMode};
/// Strategy used to decide how many codegen units the crate is split into.
pub enum PartitioningStrategy {
    /// Generate one codegen unit per source-level module.
    PerModule,
    /// Partition the whole crate into a fixed number of codegen units.
    FixedUnitCount(usize)
}
/// A named set of (translation-item, linkage, visibility) triples that will be
/// compiled as one LLVM module.
pub struct CodegenUnit<'tcx> {
    /// A name for this CGU. Incremental compilation requires that
    /// name be unique amongst **all** crates. Therefore, it should
    /// contain something unique to this crate (e.g., a module path)
    /// as well as the crate name and disambiguator.
    name: InternedString,
    /// The items of this unit together with the linkage/visibility each one
    /// will be emitted with.
    items: FxHashMap<TransItem<'tcx>, (llvm::Linkage, llvm::Visibility)>,
}
impl<'tcx> CodegenUnit<'tcx> {
pub fn new(name: InternedString,
items: FxHashMap<TransItem<'tcx>, (llvm::Linkage, llvm::Visibility)>)
-> Self {
CodegenUnit {
name,
items,
}
}
pub fn empty(name: InternedString) -> Self {
Self::new(name, FxHashMap())
}
pub fn contains_item(&self, item: &TransItem<'tcx>) -> bool {
self.items.contains_key(item)
}
pub fn name(&self) -> &str {
&self.name
}
pub fn items(&self) -> &FxHashMap<TransItem<'tcx>, (llvm::Linkage, llvm::Visibility)> {
&self.items
}
pub fn work_product_id(&self) -> WorkProductId {
WorkProductId::from_cgu_name(self.name())
}
pub fn work_product_dep_node(&self) -> DepNode {
self.work_product_id().to_dep_node()
}
pub fn compute_symbol_name_hash<'a>(&self,
scx: &SharedCrateContext<'a, 'tcx>,
exported_symbols: &ExportedSymbols)
-> u64 {
let mut state = IchHasher::new();
let exported_symbols = exported_symbols.local_exports();
let all_items = self.items_in_deterministic_order(scx.tcx());
for (item, _) in all_items {
let symbol_name = item.symbol_name(scx.tcx());
symbol_name.len().hash(&mut state);
symbol_name.hash(&mut state);
let exported = match item {
TransItem::Fn(ref instance) => {
let node_id =
scx.tcx().hir.as_local_node_id(instance.def_id());
node_id.map(|node_id| exported_symbols.contains(&node_id))
.unwrap_or(false)
}
TransItem::Static(node_id) => {
exported_symbols.contains(&node_id)
}
TransItem::GlobalAsm(..) => true,
};
exported.hash(&mut state);
}
state.finish().to_smaller_hash()
}
pub fn items_in_deterministic_order<'a>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>)
-> Vec<(TransItem<'tcx>,
(llvm::Linkage, llvm::Visibility))> {
// The codegen tests rely on items being process in the same order as
// they appear in the file, so for local items, we sort by node_id first
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct ItemSortKey(Option<NodeId>, ty::SymbolName);
fn item_sort_key<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
item: TransItem<'tcx>) -> ItemSortKey {
ItemSortKey(match item {
TransItem::Fn(instance) => {
tcx.hir.as_local_node_id(instance.def_id())
}
TransItem::Static(node_id) | TransItem::GlobalAsm(node_id) => {
Some(node_id)
}
}, item.symbol_name(tcx))
}
let items: Vec<_> = self.items.iter().map(|(&i, &l)| (i, l)).collect();
let mut items : Vec<_> = items.iter()
.map(|il| (il, item_sort_key(tcx, il.0))).collect();
items.sort_by(|&(_, ref key1), &(_, ref key2)| key1.cmp(key2));
items.into_iter().map(|(&item_linkage, _)| item_linkage).collect()
}
}
// Anything we can't find a proper codegen unit for goes into this.
// (Used when a trans item's characteristic def-id cannot be determined.)
const FALLBACK_CODEGEN_UNIT: &'static str = "__rustc_fallback_codegen_unit";
/// Partitions the crate's translation items into codegen units.
///
/// Roots are placed into their home units first, the unit count is optionally
/// reduced by merging, inlined items are copied into every unit that needs
/// them, and finally as many symbols as possible are internalized.
pub fn partition<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>,
                              trans_items: I,
                              strategy: PartitioningStrategy,
                              inlining_map: &InliningMap<'tcx>,
                              exported_symbols: &ExportedSymbols)
                              -> Vec<CodegenUnit<'tcx>>
    where I: Iterator<Item = TransItem<'tcx>>
{
    let tcx = scx.tcx();
    // In the first step, we place all regular translation items into their
    // respective 'home' codegen unit. Regular translation items are all
    // functions and statics defined in the local crate.
    let mut initial_partitioning = place_root_translation_items(scx,
                                                                exported_symbols,
                                                                trans_items);
    // (label typo "PARTITONING" fixed)
    debug_dump(tcx, "INITIAL PARTITIONING:", initial_partitioning.codegen_units.iter());
    // If the partitioning should produce a fixed count of codegen units, merge
    // until that count is reached.
    if let PartitioningStrategy::FixedUnitCount(count) = strategy {
        merge_codegen_units(&mut initial_partitioning, count, &tcx.crate_name.as_str());
        debug_dump(tcx, "POST MERGING:", initial_partitioning.codegen_units.iter());
    }
    // In the next step, we use the inlining map to determine which additional
    // translation items have to go into each codegen unit. These additional
    // translation items can be drop-glue, functions from external crates, and
    // local functions the definition of which is marked with #[inline].
    let mut post_inlining = place_inlined_translation_items(initial_partitioning,
                                                            inlining_map);
    debug_dump(tcx, "POST INLINING:", post_inlining.codegen_units.iter());
    // Next we try to make as many symbols "internal" as possible, so LLVM has
    // more freedom to optimize.
    internalize_symbols(tcx, &mut post_inlining, inlining_map);
    // Finally, sort by codegen unit name, so that we get deterministic results
    let PostInliningPartitioning {
        codegen_units: mut result,
        trans_item_placements: _,
        internalization_candidates: _,
    } = post_inlining;
    result.sort_by(|cgu1, cgu2| {
        (&cgu1.name[..]).cmp(&cgu2.name[..])
    });
    if scx.sess().opts.enable_dep_node_debug_strs() {
        for cgu in &result {
            let dep_node = cgu.work_product_dep_node();
            scx.tcx().dep_graph.register_dep_node_debug_str(dep_node,
                                                            || cgu.name().to_string());
        }
    }
    result
}
/// Result of the initial (pre-inlining) partitioning step.
struct PreInliningPartitioning<'tcx> {
    codegen_units: Vec<CodegenUnit<'tcx>>,
    /// Items placed as roots (`GloballyShared` instantiation mode).
    roots: FxHashSet<TransItem<'tcx>>,
    /// Items whose visibility may later be downgraded to internal.
    internalization_candidates: FxHashSet<TransItem<'tcx>>,
}
/// For symbol internalization, we need to know whether a symbol/trans-item is
/// accessed from outside the codegen unit it is defined in. This type is used
/// to keep track of that.
#[derive(Clone, PartialEq, Eq, Debug)]
enum TransItemPlacement {
    /// The item ended up in exactly one codegen unit.
    SingleCgu { cgu_name: InternedString },
    /// The item was copied into more than one codegen unit.
    MultipleCgus,
}
/// Result of the partitioning step that copies inlined items into the units
/// that reference them.
struct PostInliningPartitioning<'tcx> {
    codegen_units: Vec<CodegenUnit<'tcx>>,
    /// For each item, whether it ended up in one or several codegen units.
    trans_item_placements: FxHashMap<TransItem<'tcx>, TransItemPlacement>,
    /// Items whose visibility may later be downgraded to internal.
    internalization_candidates: FxHashSet<TransItem<'tcx>>,
}
fn place_root_translation_items<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>,
exported_symbols: &ExportedSymbols,
trans_items: I)
-> PreInliningPartitioning<'tcx>
where I: Iterator<Item = TransItem<'tcx>>
{
let tcx = scx.tcx();
let exported_symbols = exported_symbols.local_exports();
let mut roots = FxHashSet();
let mut codegen_units = FxHashMap();
let is_incremental_build = tcx.sess.opts.incremental.is_some();
let mut internalization_candidates = FxHashSet();
for trans_item in trans_items {
let is_root = trans_item.instantiation_mode(tcx) == InstantiationMode::GloballyShared;
if is_root {
let characteristic_def_id = characteristic_def_id_of_trans_item(scx, trans_item);
let is_volatile = is_incremental_build &&
trans_item.is_generic_fn();
let codegen_unit_name = match characteristic_def_id {
Some(def_id) => compute_codegen_unit_name(tcx, def_id, is_volatile),
None => Symbol::intern(FALLBACK_CODEGEN_UNIT).as_str(),
};
let make_codegen_unit = || {
CodegenUnit::empty(codegen_unit_name.clone())
};
let mut codegen_unit = codegen_units.entry(codegen_unit_name.clone())
.or_insert_with(make_codegen_unit);
let (linkage, visibility) = match trans_item.explicit_linkage(tcx) {
Some(explicit_linkage) => (explicit_linkage, llvm::Visibility::Default),
None => {
match trans_item {
TransItem::Fn(ref instance) => {
let visibility = match instance.def {
InstanceDef::Item(def_id) => {
if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
if exported_symbols.contains(&node_id) {
llvm::Visibility::Default
} else {
internalization_candidates.insert(trans_item);
llvm::Visibility::Hidden
}
} else {
internalization_candidates.insert(trans_item);
llvm::Visibility::Hidden
}
}
InstanceDef::FnPtrShim(..) |
InstanceDef::Virtual(..) |
InstanceDef::Intrinsic(..) |
InstanceDef::ClosureOnceShim { .. } |
InstanceDef::DropGlue(..) => {
bug!("partitioning: Encountered unexpected
root translation item: {:?}",
trans_item)
}
};
(llvm::ExternalLinkage, visibility)
}
TransItem::Static(node_id) |
TransItem::GlobalAsm(node_id) => {
let visibility = if exported_symbols.contains(&node_id) {
llvm::Visibility::Default
} else {
internalization_candidates.insert(trans_item);
llvm::Visibility::Hidden
};
(llvm::ExternalLinkage, visibility)
}
}
}
};
codegen_unit.items.insert(trans_item, (linkage, visibility));
roots.insert(trans_item);
}
}
// always ensure we have at least one CGU; otherwise, if we have a
// crate with just types (for example), we could wind up with no CGU
if codegen_units.is_empty() {
let codegen_unit_name = Symbol::intern(FALLBACK_CODEGEN_UNIT).as_str();
codegen_units.insert(codegen_unit_name.clone(),
CodegenUnit::empty(codegen_unit_name.clone()));
}
PreInliningPartitioning {
codegen_units: codegen_units.into_iter()
.map(|(_, codegen_unit)| codegen_unit)
.collect(),
roots,
internalization_candidates,
}
}
/// Merges the smallest codegen units until exactly `target_cgu_count` remain
/// (padding with empty units if there were fewer to begin with), then renames
/// every unit to the crate-wide numbered naming scheme.
fn merge_codegen_units<'tcx>(initial_partitioning: &mut PreInliningPartitioning<'tcx>,
                             target_cgu_count: usize,
                             crate_name: &str) {
    assert!(target_cgu_count >= 1);
    let codegen_units = &mut initial_partitioning.codegen_units;
    // Merge the two smallest codegen units until the target size is reached.
    // Note that "size" is estimated here rather inaccurately as the number of
    // translation items in a given unit. This could be improved on.
    while codegen_units.len() > target_cgu_count {
        // Sort small cgus to the back
        codegen_units.sort_by_key(|cgu| -(cgu.items.len() as i64));
        let smallest = codegen_units.pop().unwrap();
        let second_smallest = codegen_units.last_mut().unwrap();
        for (k, v) in smallest.items.into_iter() {
            second_smallest.items.insert(k, v);
        }
    }
    // After merging, the original module-based names no longer describe the
    // contents, so switch to numbered names.
    for (index, cgu) in codegen_units.iter_mut().enumerate() {
        cgu.name = numbered_codegen_unit_name(crate_name, index);
    }
    // If the initial partitioning contained less than target_cgu_count to begin
    // with, we won't have enough codegen units here, so add empty units until
    // we reach the target count
    while codegen_units.len() < target_cgu_count {
        let index = codegen_units.len();
        codegen_units.push(
            CodegenUnit::empty(numbered_codegen_unit_name(crate_name, index)));
    }
}
/// Second partitioning step: extends every codegen unit with cgu-private
/// copies of all translation items reachable from its roots through the
/// inlining map, and records in which unit(s) each item ended up.
fn place_inlined_translation_items<'tcx>(initial_partitioning: PreInliningPartitioning<'tcx>,
                                         inlining_map: &InliningMap<'tcx>)
                                         -> PostInliningPartitioning<'tcx> {
    let mut new_partitioning = Vec::new();
    // Records the unit(s) each trans-item was placed in; only populated when
    // there is more than one codegen unit (see `single_codegen_unit` below).
    let mut trans_item_placements = FxHashMap();
    let PreInliningPartitioning {
        codegen_units: initial_cgus,
        roots,
        internalization_candidates,
    } = initial_partitioning;
    let single_codegen_unit = initial_cgus.len() == 1;
    for old_codegen_unit in initial_cgus {
        // Collect all items that need to be available in this codegen unit.
        let mut reachable = FxHashSet();
        for root in old_codegen_unit.items.keys() {
            follow_inlining(*root, inlining_map, &mut reachable);
        }
        let mut new_codegen_unit = CodegenUnit {
            name: old_codegen_unit.name,
            items: FxHashMap(),
        };
        // Add all translation items that are not already there.
        for trans_item in reachable {
            if let Some(linkage) = old_codegen_unit.items.get(&trans_item) {
                // This is a root, just copy it over (keeping its linkage).
                new_codegen_unit.items.insert(trans_item, *linkage);
            } else {
                // A root must never be pulled into a unit other than its home
                // unit via inlining.
                if roots.contains(&trans_item) {
                    bug!("GloballyShared trans-item inlined into other CGU: \
                          {:?}", trans_item);
                }
                // This is a cgu-private copy.
                new_codegen_unit.items.insert(trans_item,
                                              (llvm::InternalLinkage, llvm::Visibility::Default));
            }
            if !single_codegen_unit {
                // If there is more than one codegen unit, we need to keep track
                // in which codegen units each translation item is placed:
                match trans_item_placements.entry(trans_item) {
                    Entry::Occupied(e) => {
                        let placement = e.into_mut();
                        // An entry can only pre-exist if the item was already
                        // placed in some *other* unit earlier in this loop.
                        debug_assert!(match *placement {
                            TransItemPlacement::SingleCgu { ref cgu_name } => {
                                *cgu_name != new_codegen_unit.name
                            }
                            TransItemPlacement::MultipleCgus => true,
                        });
                        *placement = TransItemPlacement::MultipleCgus;
                    }
                    Entry::Vacant(e) => {
                        e.insert(TransItemPlacement::SingleCgu {
                            cgu_name: new_codegen_unit.name.clone()
                        });
                    }
                }
            }
        }
        new_partitioning.push(new_codegen_unit);
    }
    return PostInliningPartitioning {
        codegen_units: new_partitioning,
        trans_item_placements,
        internalization_candidates,
    };
    // Depth-first walk over the inlining candidates of `trans_item`,
    // accumulating every item that must be instantiated alongside it.
    fn follow_inlining<'tcx>(trans_item: TransItem<'tcx>,
                             inlining_map: &InliningMap<'tcx>,
                             visited: &mut FxHashSet<TransItem<'tcx>>) {
        if !visited.insert(trans_item) {
            // Already seen — stop so cycles and shared subgraphs terminate.
            return;
        }
        inlining_map.with_inlining_candidates(trans_item, |target| {
            follow_inlining(target, inlining_map, visited);
        });
    }
}
/// Marks as many symbols "internal" as possible so that LLVM has more freedom
/// to optimize. A candidate is internalized unless it is accessed from some
/// other codegen unit.
fn internalize_symbols<'a, 'tcx>(_tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                 partitioning: &mut PostInliningPartitioning<'tcx>,
                                 inlining_map: &InliningMap<'tcx>) {
    if partitioning.codegen_units.len() == 1 {
        // Fast path for when there is only one codegen unit. In this case we
        // can internalize all candidates, since there is nowhere else they
        // could be accessed from.
        for cgu in &mut partitioning.codegen_units {
            for candidate in &partitioning.internalization_candidates {
                cgu.items.insert(*candidate, (llvm::InternalLinkage,
                                              llvm::Visibility::Default));
            }
        }
        return;
    }

    // Build a map from every translation item to all the translation items that
    // reference it.
    let mut accessor_map: FxHashMap<TransItem<'tcx>, Vec<TransItem<'tcx>>> = FxHashMap();
    inlining_map.iter_accesses(|accessor, accessees| {
        for accessee in accessees {
            accessor_map.entry(*accessee)
                        .or_insert(Vec::new())
                        .push(accessor);
        }
    });

    let trans_item_placements = &partitioning.trans_item_placements;

    // For each internalization candidate in each codegen unit, check if it is
    // accessed from outside its defining codegen unit.
    for cgu in &mut partitioning.codegen_units {
        let home_cgu = TransItemPlacement::SingleCgu {
            cgu_name: cgu.name.clone()
        };
        for (accessee, linkage_and_visibility) in &mut cgu.items {
            if !partitioning.internalization_candidates.contains(accessee) {
                // This item is no candidate for internalizing, so skip it.
                continue
            }
            // Candidates are placed in exactly one unit — their home unit.
            debug_assert_eq!(trans_item_placements[accessee], home_cgu);
            if let Some(accessors) = accessor_map.get(accessee) {
                if accessors.iter()
                            .filter_map(|accessor| {
                                // Some accessors might not have been
                                // instantiated. We can safely ignore those.
                                trans_item_placements.get(accessor)
                            })
                            .any(|placement| *placement != home_cgu) {
                    // Found an accessor from another CGU, so skip to the next
                    // item without marking this one as internal.
                    continue
                }
            }
            // If we got here, we did not find any accesses from other CGUs,
            // so it's fine to make this translation item internal.
            //
            // FIX: replace the whole (linkage, visibility) pair instead of
            // only the linkage. Previously the visibility assigned during
            // root placement (often `Hidden`) survived internalization, but
            // an internal symbol must carry default visibility.
            *linkage_and_visibility = (llvm::InternalLinkage, llvm::Visibility::Default);
        }
    }
}
/// Tries to find the DefId that "characterizes" a trans-item, i.e. the item
/// the codegen-unit name should be derived from. Returns `None` for shims,
/// glue and other compiler-generated instances that have no natural home.
fn characteristic_def_id_of_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
                                                 trans_item: TransItem<'tcx>)
                                                 -> Option<DefId> {
    let tcx = scx.tcx();
    match trans_item {
        TransItem::Fn(instance) => {
            let def_id = match instance.def {
                ty::InstanceDef::Item(def_id) => def_id,
                // Compiler-generated shims and glue carry no usable DefId.
                ty::InstanceDef::FnPtrShim(..) |
                ty::InstanceDef::ClosureOnceShim { .. } |
                ty::InstanceDef::Intrinsic(..) |
                ty::InstanceDef::DropGlue(..) |
                ty::InstanceDef::Virtual(..) => return None
            };
            // If this is a method, we want to put it into the same module as
            // its self-type. If the self-type does not provide a characteristic
            // DefId, we use the location of the impl after all.
            if tcx.trait_of_item(def_id).is_some() {
                let self_ty = instance.substs.type_at(0);
                // This is an implementation of a trait method.
                return characteristic_def_id_of_type(self_ty).or(Some(def_id));
            }
            if let Some(impl_def_id) = tcx.impl_of_method(def_id) {
                // This is a method within an inherent impl, find out what the
                // self-type is:
                let impl_self_ty = common::def_ty(scx, impl_def_id, instance.substs);
                if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) {
                    return Some(def_id);
                }
            }
            Some(def_id)
        }
        // Statics and global asm are tied directly to their HIR node.
        TransItem::Static(node_id) |
        TransItem::GlobalAsm(node_id) => Some(tcx.hir.local_def_id(node_id)),
    }
}
/// Computes the name of the codegen unit that `def_id` belongs to: the crate
/// name followed by the enclosing module path, with a ".volatile" suffix for
/// units holding monomorphizations in incremental builds.
fn compute_codegen_unit_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                       def_id: DefId,
                                       volatile: bool)
                                       -> InternedString {
    // Unfortunately we cannot just use the `ty::item_path` infrastructure here
    // because we need paths to modules and the DefIds of those are not
    // available anymore for external items.
    let mut mod_path = String::with_capacity(64);
    let def_path = tcx.def_path(def_id);
    mod_path.push_str(&tcx.crate_name(def_path.krate).as_str());
    // FIX: reuse the DefPath computed above instead of calling
    // `tcx.def_path(def_id)` a second time for the loop below.
    for part in def_path.data
                        .iter()
                        .take_while(|part| {
                            // Only the leading chain of enclosing modules
                            // contributes to the unit name.
                            match part.data {
                                DefPathData::Module(..) => true,
                                _ => false,
                            }
                        }) {
        mod_path.push_str("-");
        mod_path.push_str(&part.data.as_interned_str());
    }
    if volatile {
        mod_path.push_str(".volatile");
    }
    Symbol::intern(&mod_path[..]).as_str()
}
/// Builds the name of the `index`-th codegen unit for `crate_name`, used
/// after merging down to a fixed unit count: crate name + marker + index.
fn numbered_codegen_unit_name(crate_name: &str, index: usize) -> InternedString {
    Symbol::intern(&format!("{}{}{}", crate_name, NUMBERED_CODEGEN_UNIT_MARKER, index)).as_str()
}
/// Dumps a labeled overview of the given codegen units (name, items, linkage
/// and trailing symbol hash) to the `debug!` log. Does nothing in compiler
/// builds without debug assertions.
fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                               label: &str,
                               cgus: I)
    where I: Iterator<Item=&'b CodegenUnit<'tcx>>,
          'tcx: 'a + 'b
{
    if !cfg!(debug_assertions) {
        return;
    }
    debug!("{}", label);
    for cgu in cgus {
        debug!("CodegenUnit {}:", cgu.name);
        for (item, linkage) in &cgu.items {
            let name = item.symbol_name(tcx);
            // Show only the trailing hash of the mangled name, starting at
            // the last 'h', to keep the log readable.
            let hash = match name.rfind('h') {
                Some(pos) => &name[pos ..],
                None => "<no hash>",
            };
            debug!(" - {} [{:?}] [{}]",
                   item.to_string(tcx),
                   linkage,
                   hash);
        }
        debug!("");
    }
}
partitioning: Fix visibility of internalized symbols.
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Partitioning Codegen Units for Incremental Compilation
//! ======================================================
//!
//! The task of this module is to take the complete set of translation items of
//! a crate and produce a set of codegen units from it, where a codegen unit
//! is a named set of (translation-item, linkage) pairs. That is, this module
//! decides which translation item appears in which codegen units with which
//! linkage. The following paragraphs describe some of the background on the
//! partitioning scheme.
//!
//! The most important opportunity for saving on compilation time with
//! incremental compilation is to avoid re-translating and re-optimizing code.
//! Since the unit of translation and optimization for LLVM is "modules" or, how
//! we call them "codegen units", the particulars of how much time can be saved
//! by incremental compilation are tightly linked to how the output program is
//! partitioned into these codegen units prior to passing it to LLVM --
//! especially because we have to treat codegen units as opaque entities once
//! they are created: There is no way for us to incrementally update an existing
//! LLVM module and so we have to build any such module from scratch if it was
//! affected by some change in the source code.
//!
//! From that point of view it would make sense to maximize the number of
//! codegen units by, for example, putting each function into its own module.
//! That way only those modules would have to be re-compiled that were actually
//! affected by some change, minimizing the number of functions that could have
//! been re-used but just happened to be located in a module that is
//! re-compiled.
//!
//! However, since LLVM optimization does not work across module boundaries,
//! using such a highly granular partitioning would lead to very slow runtime
//! code since it would effectively prohibit inlining and other inter-procedure
//! optimizations. We want to avoid that as much as possible.
//!
//! Thus we end up with a trade-off: The bigger the codegen units, the better
//! LLVM's optimizer can do its work, but also the smaller the compilation time
//! reduction we get from incremental compilation.
//!
//! Ideally, we would create a partitioning such that there are few big codegen
//! units with few interdependencies between them. For now though, we use the
//! following heuristic to determine the partitioning:
//!
//! - There are two codegen units for every source-level module:
//! - One for "stable", that is non-generic, code
//! - One for more "volatile" code, i.e. monomorphized instances of functions
//! defined in that module
//!
//! In order to see why this heuristic makes sense, let's take a look at when a
//! codegen unit can get invalidated:
//!
//! 1. The most straightforward case is when the BODY of a function or global
//! changes. Then any codegen unit containing the code for that item has to be
//! re-compiled. Note that this includes all codegen units where the function
//! has been inlined.
//!
//! 2. The next case is when the SIGNATURE of a function or global changes. In
//! this case, all codegen units containing a REFERENCE to that item have to be
//! re-compiled. This is a superset of case 1.
//!
//! 3. The final and most subtle case is when a REFERENCE to a generic function
//! is added or removed somewhere. Even though the definition of the function
//! might be unchanged, a new REFERENCE might introduce a new monomorphized
//! instance of this function which has to be placed and compiled somewhere.
//! Conversely, when removing a REFERENCE, it might have been the last one with
//! that particular set of generic arguments and thus we have to remove it.
//!
//! From the above we see that just using one codegen unit per source-level
//! module is not such a good idea, since just adding a REFERENCE to some
//! generic item somewhere else would invalidate everything within the module
//! containing the generic item. The heuristic above reduces this detrimental
//! side-effect of references a little by at least not touching the non-generic
//! code of the module.
//!
//! A Note on Inlining
//! ------------------
//! As briefly mentioned above, in order for LLVM to be able to inline a
//! function call, the body of the function has to be available in the LLVM
//! module where the call is made. This has a few consequences for partitioning:
//!
//! - The partitioning algorithm has to take care of placing functions into all
//! codegen units where they should be available for inlining. It also has to
//! decide on the correct linkage for these functions.
//!
//! - The partitioning algorithm has to know which functions are likely to get
//! inlined, so it can distribute function instantiations accordingly. Since
//! there is no way of knowing for sure which functions LLVM will decide to
//! inline in the end, we apply a heuristic here: Only functions marked with
//! #[inline] are considered for inlining by the partitioner. The current
//! implementation will not try to determine if a function is likely to be
//! inlined by looking at the function's definition.
//!
//! Note though that as a side-effect of creating one codegen unit per
//! source-level module, functions from the same module will be available for
//! inlining, even when they are not marked #[inline].
use back::symbol_export::ExportedSymbols;
use collector::InliningMap;
use common;
use context::SharedCrateContext;
use llvm;
use rustc::dep_graph::{DepNode, WorkProductId};
use rustc::hir::def_id::DefId;
use rustc::hir::map::DefPathData;
use rustc::session::config::NUMBERED_CODEGEN_UNIT_MARKER;
use rustc::ty::{self, TyCtxt, InstanceDef};
use rustc::ty::item_path::characteristic_def_id_of_type;
use rustc::util::nodemap::{FxHashMap, FxHashSet};
use rustc_incremental::IchHasher;
use std::collections::hash_map::Entry;
use std::hash::Hash;
use syntax::ast::NodeId;
use syntax::symbol::{Symbol, InternedString};
use trans_item::{TransItem, InstantiationMode};
/// Strategy used by `partition` to decide how many codegen units to produce.
pub enum PartitioningStrategy {
    /// Generate one codegen unit per source-level module.
    PerModule,
    /// Partition the whole crate into a fixed number of codegen units.
    FixedUnitCount(usize)
}
/// A named set of (translation-item, linkage, visibility) triples that will
/// be compiled as one LLVM module.
pub struct CodegenUnit<'tcx> {
    /// A name for this CGU. Incremental compilation requires that
    /// name be unique amongst **all** crates. Therefore, it should
    /// contain something unique to this crate (e.g., a module path)
    /// as well as the crate name and disambiguator.
    name: InternedString,
    /// The items of this unit together with the linkage and visibility each
    /// of them will be emitted with.
    items: FxHashMap<TransItem<'tcx>, (llvm::Linkage, llvm::Visibility)>,
}
impl<'tcx> CodegenUnit<'tcx> {
    /// Creates a codegen unit with the given name and item set.
    pub fn new(name: InternedString,
               items: FxHashMap<TransItem<'tcx>, (llvm::Linkage, llvm::Visibility)>)
               -> Self {
        CodegenUnit {
            name,
            items,
        }
    }

    /// Creates a codegen unit with no items.
    pub fn empty(name: InternedString) -> Self {
        Self::new(name, FxHashMap())
    }

    /// Returns true if `item` has been placed in this unit.
    pub fn contains_item(&self, item: &TransItem<'tcx>) -> bool {
        self.items.contains_key(item)
    }

    /// The unique name of this codegen unit.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// All items of this unit with their linkage and visibility.
    pub fn items(&self) -> &FxHashMap<TransItem<'tcx>, (llvm::Linkage, llvm::Visibility)> {
        &self.items
    }

    /// The id under which this unit's compiled object is tracked for
    /// incremental compilation (derived from the CGU name).
    pub fn work_product_id(&self) -> WorkProductId {
        WorkProductId::from_cgu_name(self.name())
    }

    /// The dep-graph node corresponding to this unit's work product.
    pub fn work_product_dep_node(&self) -> DepNode {
        self.work_product_id().to_dep_node()
    }

    /// Hashes the symbol names of all items in this unit together with their
    /// export status. Items are visited in deterministic order (see
    /// `items_in_deterministic_order`), so the result is stable across runs.
    pub fn compute_symbol_name_hash<'a>(&self,
                                        scx: &SharedCrateContext<'a, 'tcx>,
                                        exported_symbols: &ExportedSymbols)
                                        -> u64 {
        let mut state = IchHasher::new();
        let exported_symbols = exported_symbols.local_exports();
        let all_items = self.items_in_deterministic_order(scx.tcx());
        for (item, _) in all_items {
            let symbol_name = item.symbol_name(scx.tcx());
            // The length is hashed before the name so differently-split
            // concatenations of names cannot collide.
            symbol_name.len().hash(&mut state);
            symbol_name.hash(&mut state);
            // Whether the symbol is exported from the crate feeds into the
            // hash as well.
            let exported = match item {
                TransItem::Fn(ref instance) => {
                    let node_id =
                        scx.tcx().hir.as_local_node_id(instance.def_id());
                    node_id.map(|node_id| exported_symbols.contains(&node_id))
                           .unwrap_or(false)
                }
                TransItem::Static(node_id) => {
                    exported_symbols.contains(&node_id)
                }
                TransItem::GlobalAsm(..) => true,
            };
            exported.hash(&mut state);
        }
        state.finish().to_smaller_hash()
    }

    /// Returns this unit's items sorted deterministically: by local NodeId
    /// first (so local items keep their source order), with the symbol name
    /// as the secondary key.
    pub fn items_in_deterministic_order<'a>(&self,
                                            tcx: TyCtxt<'a, 'tcx, 'tcx>)
                                            -> Vec<(TransItem<'tcx>,
                                                    (llvm::Linkage, llvm::Visibility))> {
        // The codegen tests rely on items being process in the same order as
        // they appear in the file, so for local items, we sort by node_id first
        #[derive(PartialEq, Eq, PartialOrd, Ord)]
        pub struct ItemSortKey(Option<NodeId>, ty::SymbolName);

        // Computes the sort key for one item: its local NodeId (if any) plus
        // its symbol name.
        fn item_sort_key<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                   item: TransItem<'tcx>) -> ItemSortKey {
            ItemSortKey(match item {
                TransItem::Fn(instance) => {
                    tcx.hir.as_local_node_id(instance.def_id())
                }
                TransItem::Static(node_id) | TransItem::GlobalAsm(node_id) => {
                    Some(node_id)
                }
            }, item.symbol_name(tcx))
        }

        let items: Vec<_> = self.items.iter().map(|(&i, &l)| (i, l)).collect();
        let mut items : Vec<_> = items.iter()
            .map(|il| (il, item_sort_key(tcx, il.0))).collect();
        items.sort_by(|&(_, ref key1), &(_, ref key2)| key1.cmp(key2));
        items.into_iter().map(|(&item_linkage, _)| item_linkage).collect()
    }
}
// Anything we can't find a proper codegen unit for (i.e. items without a
// characteristic DefId) goes into this fallback unit.
const FALLBACK_CODEGEN_UNIT: &'static str = "__rustc_fallback_codegen_unit";
/// Top-level entry point for partitioning: places root items into home units,
/// optionally merges down to a fixed unit count, distributes inlined copies,
/// internalizes symbols where possible, and returns the codegen units sorted
/// by name for deterministic output.
pub fn partition<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>,
                              trans_items: I,
                              strategy: PartitioningStrategy,
                              inlining_map: &InliningMap<'tcx>,
                              exported_symbols: &ExportedSymbols)
                              -> Vec<CodegenUnit<'tcx>>
    where I: Iterator<Item = TransItem<'tcx>>
{
    let tcx = scx.tcx();

    // In the first step, we place all regular translation items into their
    // respective 'home' codegen unit. Regular translation items are all
    // functions and statics defined in the local crate.
    let mut initial_partitioning = place_root_translation_items(scx,
                                                                exported_symbols,
                                                                trans_items);

    // FIX: debug label previously read "INITIAL PARTITONING:".
    debug_dump(tcx, "INITIAL PARTITIONING:", initial_partitioning.codegen_units.iter());

    // If the partitioning should produce a fixed count of codegen units, merge
    // until that count is reached.
    if let PartitioningStrategy::FixedUnitCount(count) = strategy {
        merge_codegen_units(&mut initial_partitioning, count, &tcx.crate_name.as_str());
        debug_dump(tcx, "POST MERGING:", initial_partitioning.codegen_units.iter());
    }

    // In the next step, we use the inlining map to determine which additional
    // translation items have to go into each codegen unit. These additional
    // translation items can be drop-glue, functions from external crates, and
    // local functions the definition of which is marked with #[inline].
    let mut post_inlining = place_inlined_translation_items(initial_partitioning,
                                                            inlining_map);

    debug_dump(tcx, "POST INLINING:", post_inlining.codegen_units.iter());

    // Next we try to make as many symbols "internal" as possible, so LLVM has
    // more freedom to optimize.
    internalize_symbols(tcx, &mut post_inlining, inlining_map);

    // Finally, sort by codegen unit name, so that we get deterministic results.
    let PostInliningPartitioning {
        codegen_units: mut result,
        trans_item_placements: _,
        internalization_candidates: _,
    } = post_inlining;

    result.sort_by(|cgu1, cgu2| {
        (&cgu1.name[..]).cmp(&cgu2.name[..])
    });

    if scx.sess().opts.enable_dep_node_debug_strs() {
        for cgu in &result {
            let dep_node = cgu.work_product_dep_node();
            scx.tcx().dep_graph.register_dep_node_debug_str(dep_node,
                                                            || cgu.name().to_string());
        }
    }

    result
}
/// Result of the first partitioning phase: the initial codegen units with all
/// root items placed, plus bookkeeping consumed by the later phases.
struct PreInliningPartitioning<'tcx> {
    codegen_units: Vec<CodegenUnit<'tcx>>,
    // All GloballyShared items, i.e. items that were assigned a home unit.
    roots: FxHashSet<TransItem<'tcx>>,
    // Items whose symbols may later be internalized if nothing outside their
    // home unit accesses them.
    internalization_candidates: FxHashSet<TransItem<'tcx>>,
}
/// For symbol internalization, we need to know whether a symbol/trans-item is
/// accessed from outside the codegen unit it is defined in. This type is used
/// to keep track of that.
#[derive(Clone, PartialEq, Eq, Debug)]
enum TransItemPlacement {
    /// The item was instantiated in exactly one codegen unit.
    SingleCgu { cgu_name: InternedString },
    /// The item has copies in more than one codegen unit.
    MultipleCgus,
}
/// Result of the inlining phase: the final codegen units plus, for every
/// item, a record of where it was placed (used during internalization).
struct PostInliningPartitioning<'tcx> {
    codegen_units: Vec<CodegenUnit<'tcx>>,
    trans_item_placements: FxHashMap<TransItem<'tcx>, TransItemPlacement>,
    internalization_candidates: FxHashSet<TransItem<'tcx>>,
}
/// First partitioning step: gives every "root" translation item (an item with
/// `InstantiationMode::GloballyShared`) a home codegen unit derived from its
/// characteristic DefId, and computes its linkage and visibility. Items that
/// need not be visible outside their unit are remembered as internalization
/// candidates for the later `internalize_symbols` pass.
fn place_root_translation_items<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>,
                                             exported_symbols: &ExportedSymbols,
                                             trans_items: I)
                                             -> PreInliningPartitioning<'tcx>
    where I: Iterator<Item = TransItem<'tcx>>
{
    let tcx = scx.tcx();
    let exported_symbols = exported_symbols.local_exports();
    let mut roots = FxHashSet();
    let mut codegen_units = FxHashMap();
    let is_incremental_build = tcx.sess.opts.incremental.is_some();
    let mut internalization_candidates = FxHashSet();
    for trans_item in trans_items {
        // Non-root items are not placed here; they are distributed later by
        // `place_inlined_translation_items`.
        let is_root = trans_item.instantiation_mode(tcx) == InstantiationMode::GloballyShared;
        if is_root {
            let characteristic_def_id = characteristic_def_id_of_trans_item(scx, trans_item);
            // In incremental builds, generic instantiations go into a
            // separate ".volatile" unit (see `compute_codegen_unit_name`).
            let is_volatile = is_incremental_build &&
                              trans_item.is_generic_fn();
            let codegen_unit_name = match characteristic_def_id {
                Some(def_id) => compute_codegen_unit_name(tcx, def_id, is_volatile),
                None => Symbol::intern(FALLBACK_CODEGEN_UNIT).as_str(),
            };
            let make_codegen_unit = || {
                CodegenUnit::empty(codegen_unit_name.clone())
            };
            let mut codegen_unit = codegen_units.entry(codegen_unit_name.clone())
                                                .or_insert_with(make_codegen_unit);
            // Explicitly requested linkage wins; otherwise derive linkage and
            // visibility from whether the symbol is exported.
            let (linkage, visibility) = match trans_item.explicit_linkage(tcx) {
                Some(explicit_linkage) => (explicit_linkage, llvm::Visibility::Default),
                None => {
                    match trans_item {
                        TransItem::Fn(ref instance) => {
                            let visibility = match instance.def {
                                InstanceDef::Item(def_id) => {
                                    // Local, non-exported functions can be
                                    // hidden and are internalization
                                    // candidates; so are non-local instances.
                                    if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
                                        if exported_symbols.contains(&node_id) {
                                            llvm::Visibility::Default
                                        } else {
                                            internalization_candidates.insert(trans_item);
                                            llvm::Visibility::Hidden
                                        }
                                    } else {
                                        internalization_candidates.insert(trans_item);
                                        llvm::Visibility::Hidden
                                    }
                                }
                                // Shims and glue are never GloballyShared
                                // roots, so reaching here is a compiler bug.
                                InstanceDef::FnPtrShim(..) |
                                InstanceDef::Virtual(..) |
                                InstanceDef::Intrinsic(..) |
                                InstanceDef::ClosureOnceShim { .. } |
                                InstanceDef::DropGlue(..) => {
                                    bug!("partitioning: Encountered unexpected
root translation item: {:?}",
                                         trans_item)
                                }
                            };
                            (llvm::ExternalLinkage, visibility)
                        }
                        TransItem::Static(node_id) |
                        TransItem::GlobalAsm(node_id) => {
                            let visibility = if exported_symbols.contains(&node_id) {
                                llvm::Visibility::Default
                            } else {
                                internalization_candidates.insert(trans_item);
                                llvm::Visibility::Hidden
                            };
                            (llvm::ExternalLinkage, visibility)
                        }
                    }
                }
            };
            codegen_unit.items.insert(trans_item, (linkage, visibility));
            roots.insert(trans_item);
        }
    }
    // always ensure we have at least one CGU; otherwise, if we have a
    // crate with just types (for example), we could wind up with no CGU
    if codegen_units.is_empty() {
        let codegen_unit_name = Symbol::intern(FALLBACK_CODEGEN_UNIT).as_str();
        codegen_units.insert(codegen_unit_name.clone(),
                             CodegenUnit::empty(codegen_unit_name.clone()));
    }
    PreInliningPartitioning {
        codegen_units: codegen_units.into_iter()
                                    .map(|(_, codegen_unit)| codegen_unit)
                                    .collect(),
        roots,
        internalization_candidates,
    }
}
/// Reduces the number of codegen units to at most `target_cgu_count` by
/// repeatedly folding the smallest unit into the second-smallest one, then
/// renames all units to a numbered scheme and pads with empty units if the
/// initial partitioning had fewer than `target_cgu_count` units.
fn merge_codegen_units<'tcx>(initial_partitioning: &mut PreInliningPartitioning<'tcx>,
                             target_cgu_count: usize,
                             crate_name: &str) {
    assert!(target_cgu_count >= 1);
    let cgus = &mut initial_partitioning.codegen_units;

    // "Size" is approximated (rather inaccurately) by the number of
    // translation items a unit holds; this could be improved on.
    while cgus.len() > target_cgu_count {
        // Sort descending by item count so the smallest unit ends up last.
        cgus.sort_by_key(|unit| -(unit.items.len() as i64));
        let donor = cgus.pop().unwrap();
        let receiver = cgus.last_mut().unwrap();
        for (item, linkage) in donor.items.into_iter() {
            receiver.items.insert(item, linkage);
        }
    }

    // Give every remaining unit a deterministic, numbered name.
    for (index, unit) in cgus.iter_mut().enumerate() {
        unit.name = numbered_codegen_unit_name(crate_name, index);
    }

    // If we started below the requested count, pad with empty units until we
    // reach it.
    while cgus.len() < target_cgu_count {
        let next_index = cgus.len();
        cgus.push(CodegenUnit::empty(numbered_codegen_unit_name(crate_name, next_index)));
    }
}
/// Second partitioning step: extends every codegen unit with cgu-private
/// copies of all translation items reachable from its roots through the
/// inlining map, and records in which unit(s) each item ended up.
fn place_inlined_translation_items<'tcx>(initial_partitioning: PreInliningPartitioning<'tcx>,
                                         inlining_map: &InliningMap<'tcx>)
                                         -> PostInliningPartitioning<'tcx> {
    let mut new_partitioning = Vec::new();
    // Records the unit(s) each trans-item was placed in; only populated when
    // there is more than one codegen unit (see `single_codegen_unit` below).
    let mut trans_item_placements = FxHashMap();
    let PreInliningPartitioning {
        codegen_units: initial_cgus,
        roots,
        internalization_candidates,
    } = initial_partitioning;
    let single_codegen_unit = initial_cgus.len() == 1;
    for old_codegen_unit in initial_cgus {
        // Collect all items that need to be available in this codegen unit.
        let mut reachable = FxHashSet();
        for root in old_codegen_unit.items.keys() {
            follow_inlining(*root, inlining_map, &mut reachable);
        }
        let mut new_codegen_unit = CodegenUnit {
            name: old_codegen_unit.name,
            items: FxHashMap(),
        };
        // Add all translation items that are not already there.
        for trans_item in reachable {
            if let Some(linkage) = old_codegen_unit.items.get(&trans_item) {
                // This is a root, just copy it over (keeping its linkage).
                new_codegen_unit.items.insert(trans_item, *linkage);
            } else {
                // A root must never be pulled into a unit other than its home
                // unit via inlining.
                if roots.contains(&trans_item) {
                    bug!("GloballyShared trans-item inlined into other CGU: \
                          {:?}", trans_item);
                }
                // This is a cgu-private copy.
                new_codegen_unit.items.insert(trans_item,
                                              (llvm::InternalLinkage, llvm::Visibility::Default));
            }
            if !single_codegen_unit {
                // If there is more than one codegen unit, we need to keep track
                // in which codegen units each translation item is placed:
                match trans_item_placements.entry(trans_item) {
                    Entry::Occupied(e) => {
                        let placement = e.into_mut();
                        // An entry can only pre-exist if the item was already
                        // placed in some *other* unit earlier in this loop.
                        debug_assert!(match *placement {
                            TransItemPlacement::SingleCgu { ref cgu_name } => {
                                *cgu_name != new_codegen_unit.name
                            }
                            TransItemPlacement::MultipleCgus => true,
                        });
                        *placement = TransItemPlacement::MultipleCgus;
                    }
                    Entry::Vacant(e) => {
                        e.insert(TransItemPlacement::SingleCgu {
                            cgu_name: new_codegen_unit.name.clone()
                        });
                    }
                }
            }
        }
        new_partitioning.push(new_codegen_unit);
    }
    return PostInliningPartitioning {
        codegen_units: new_partitioning,
        trans_item_placements,
        internalization_candidates,
    };
    // Depth-first walk over the inlining candidates of `trans_item`,
    // accumulating every item that must be instantiated alongside it.
    fn follow_inlining<'tcx>(trans_item: TransItem<'tcx>,
                             inlining_map: &InliningMap<'tcx>,
                             visited: &mut FxHashSet<TransItem<'tcx>>) {
        if !visited.insert(trans_item) {
            // Already seen — stop so cycles and shared subgraphs terminate.
            return;
        }
        inlining_map.with_inlining_candidates(trans_item, |target| {
            follow_inlining(target, inlining_map, visited);
        });
    }
}
/// Marks as many symbols "internal" as possible so that LLVM has more freedom
/// to optimize. A candidate is internalized unless it is accessed from some
/// other codegen unit.
fn internalize_symbols<'a, 'tcx>(_tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                 partitioning: &mut PostInliningPartitioning<'tcx>,
                                 inlining_map: &InliningMap<'tcx>) {
    if partitioning.codegen_units.len() == 1 {
        // Fast path for when there is only one codegen unit. In this case we
        // can internalize all candidates, since there is nowhere else they
        // could be accessed from.
        for cgu in &mut partitioning.codegen_units {
            for candidate in &partitioning.internalization_candidates {
                cgu.items.insert(*candidate, (llvm::InternalLinkage,
                                              llvm::Visibility::Default));
            }
        }
        return;
    }
    // Build a map from every translation item to all the translation items that
    // reference it.
    let mut accessor_map: FxHashMap<TransItem<'tcx>, Vec<TransItem<'tcx>>> = FxHashMap();
    inlining_map.iter_accesses(|accessor, accessees| {
        for accessee in accessees {
            accessor_map.entry(*accessee)
                        .or_insert(Vec::new())
                        .push(accessor);
        }
    });
    let trans_item_placements = &partitioning.trans_item_placements;
    // For each internalization candidate in each codegen unit, check if it is
    // accessed from outside its defining codegen unit.
    for cgu in &mut partitioning.codegen_units {
        let home_cgu = TransItemPlacement::SingleCgu {
            cgu_name: cgu.name.clone()
        };
        for (accessee, linkage_and_visibility) in &mut cgu.items {
            if !partitioning.internalization_candidates.contains(accessee) {
                // This item is no candidate for internalizing, so skip it.
                continue
            }
            // Candidates are placed in exactly one unit — their home unit.
            debug_assert_eq!(trans_item_placements[accessee], home_cgu);
            if let Some(accessors) = accessor_map.get(accessee) {
                if accessors.iter()
                            .filter_map(|accessor| {
                                // Some accessors might not have been
                                // instantiated. We can safely ignore those.
                                trans_item_placements.get(accessor)
                            })
                            .any(|placement| *placement != home_cgu) {
                    // Found an accessor from another CGU, so skip to the next
                    // item without marking this one as internal.
                    continue
                }
            }
            // If we got here, we did not find any accesses from other CGUs,
            // so it's fine to make this translation item internal. Note that
            // both linkage and visibility are replaced, so a previously
            // `Hidden` root becomes internal with default visibility.
            *linkage_and_visibility = (llvm::InternalLinkage, llvm::Visibility::Default);
        }
    }
}
/// Tries to find the DefId that "characterizes" a trans-item, i.e. the item
/// the codegen-unit name should be derived from. Returns `None` for shims,
/// glue and other compiler-generated instances that have no natural home.
fn characteristic_def_id_of_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
                                                 trans_item: TransItem<'tcx>)
                                                 -> Option<DefId> {
    let tcx = scx.tcx();

    // Statics and global asm are tied directly to their HIR node.
    let instance = match trans_item {
        TransItem::Static(node_id) |
        TransItem::GlobalAsm(node_id) => return Some(tcx.hir.local_def_id(node_id)),
        TransItem::Fn(instance) => instance,
    };

    // Only proper item instances carry a usable DefId; compiler-generated
    // shims and glue do not get a characteristic DefId.
    let def_id = match instance.def {
        ty::InstanceDef::Item(def_id) => def_id,
        ty::InstanceDef::FnPtrShim(..) |
        ty::InstanceDef::ClosureOnceShim { .. } |
        ty::InstanceDef::Intrinsic(..) |
        ty::InstanceDef::DropGlue(..) |
        ty::InstanceDef::Virtual(..) => return None,
    };

    // Trait method: co-locate it with its self-type when that type provides a
    // characteristic DefId, falling back to the method's own DefId.
    if tcx.trait_of_item(def_id).is_some() {
        let self_ty = instance.substs.type_at(0);
        return characteristic_def_id_of_type(self_ty).or(Some(def_id));
    }

    // Method in an inherent impl: prefer the impl's self-type.
    if let Some(impl_def_id) = tcx.impl_of_method(def_id) {
        let impl_self_ty = common::def_ty(scx, impl_def_id, instance.substs);
        if let Some(ty_def_id) = characteristic_def_id_of_type(impl_self_ty) {
            return Some(ty_def_id);
        }
    }

    Some(def_id)
}
/// Computes the name of the codegen unit that `def_id` belongs to: the crate
/// name followed by the enclosing module path, with a ".volatile" suffix for
/// units holding monomorphizations in incremental builds.
fn compute_codegen_unit_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                       def_id: DefId,
                                       volatile: bool)
                                       -> InternedString {
    // Unfortunately we cannot just use the `ty::item_path` infrastructure here
    // because we need paths to modules and the DefIds of those are not
    // available anymore for external items.
    let mut mod_path = String::with_capacity(64);
    let def_path = tcx.def_path(def_id);
    mod_path.push_str(&tcx.crate_name(def_path.krate).as_str());
    // FIX: reuse the DefPath computed above instead of calling
    // `tcx.def_path(def_id)` a second time for the loop below.
    for part in def_path.data
                        .iter()
                        .take_while(|part| {
                            // Only the leading chain of enclosing modules
                            // contributes to the unit name.
                            match part.data {
                                DefPathData::Module(..) => true,
                                _ => false,
                            }
                        }) {
        mod_path.push_str("-");
        mod_path.push_str(&part.data.as_interned_str());
    }
    if volatile {
        mod_path.push_str(".volatile");
    }
    Symbol::intern(&mod_path[..]).as_str()
}
/// Builds the name of the `index`-th codegen unit for `crate_name`, used
/// after merging down to a fixed unit count: crate name + marker + index.
fn numbered_codegen_unit_name(crate_name: &str, index: usize) -> InternedString {
    let unit_name = format!("{}{}{}", crate_name, NUMBERED_CODEGEN_UNIT_MARKER, index);
    Symbol::intern(&unit_name).as_str()
}
/// Dumps a labeled overview of the given codegen units (name, items, linkage
/// and trailing symbol hash) to the `debug!` log. Does nothing in compiler
/// builds without debug assertions.
fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                               label: &str,
                               cgus: I)
    where I: Iterator<Item=&'b CodegenUnit<'tcx>>,
          'tcx: 'a + 'b
{
    if cfg!(debug_assertions) {
        debug!("{}", label);
        for cgu in cgus {
            debug!("CodegenUnit {}:", cgu.name);
            for (trans_item, linkage) in &cgu.items {
                let symbol_name = trans_item.symbol_name(tcx);
                // Show only the trailing hash of the mangled name (from the
                // last 'h' on) to keep the log readable.
                let symbol_hash_start = symbol_name.rfind('h');
                let symbol_hash = symbol_hash_start.map(|i| &symbol_name[i ..])
                                                   .unwrap_or("<no hash>");
                debug!(" - {} [{:?}] [{}]",
                       trans_item.to_string(tcx),
                       linkage,
                       symbol_hash);
            }
            debug!("");
        }
    }
}
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::hir;
use rustc::traits::{self, auto_trait as auto};
use rustc::ty::{self, ToPredicate, TypeFoldable};
use rustc::ty::subst::Subst;
use rustc::infer::InferOk;
use std::fmt::Debug;
use syntax_pos::DUMMY_SP;
use core::DocAccessLevels;
use super::*;
/// Rustdoc-side wrapper around the compiler's `auto_trait::AutoTraitFinder`,
/// used to synthesize auto-trait impl items for documentation output.
pub struct AutoTraitFinder<'a, 'tcx: 'a, 'rcx: 'a> {
    // The rustdoc context this finder operates in.
    pub cx: &'a core::DocContext<'a, 'tcx, 'rcx>,
    // The underlying compiler-side finder doing the actual trait solving.
    pub f: auto::AutoTraitFinder<'a, 'tcx>,
}
impl<'a, 'tcx, 'rcx> AutoTraitFinder<'a, 'tcx, 'rcx> {
    /// Creates a new finder for the given doc context, wrapping the
    /// compiler-side `auto_trait::AutoTraitFinder`.
    pub fn new(cx: &'a core::DocContext<'a, 'tcx, 'rcx>) -> Self {
        let f = auto::AutoTraitFinder::new(&cx.tcx);
        AutoTraitFinder { cx, f }
    }
    /// Computes the synthesized auto-trait impls for the item behind
    /// `def_id`. Handles ADTs (struct/enum/union) and built-in primitive
    /// types; returns an empty vector for any other kind of type.
    pub fn get_with_def_id(&self, def_id: DefId) -> Vec<Item> {
        let ty = self.cx.tcx.type_of(def_id);
        // Pick the `Def` constructor matching the kind of the type so the
        // generated impl can refer back to the right definition.
        let def_ctor: fn(DefId) -> Def = match ty.sty {
            ty::TyAdt(adt, _) => match adt.adt_kind() {
                AdtKind::Struct => Def::Struct,
                AdtKind::Enum => Def::Enum,
                AdtKind::Union => Def::Union,
            }
            // Primitives have no per-type constructor taking a DefId, so the
            // ctor is supplied as a closure that ignores its argument and
            // returns the matching `Def::PrimTy`.
            ty::TyInt(_) |
            ty::TyUint(_) |
            ty::TyFloat(_) |
            ty::TyStr |
            ty::TyBool |
            ty::TyChar => return self.get_auto_trait_impls(def_id, &move |_: DefId| {
                match ty.sty {
                    ty::TyInt(x) => Def::PrimTy(hir::TyInt(x)),
                    ty::TyUint(x) => Def::PrimTy(hir::TyUint(x)),
                    ty::TyFloat(x) => Def::PrimTy(hir::TyFloat(x)),
                    ty::TyStr => Def::PrimTy(hir::TyStr),
                    ty::TyBool => Def::PrimTy(hir::TyBool),
                    ty::TyChar => Def::PrimTy(hir::TyChar),
                    // Unreachable: the outer match already restricted ty.sty
                    // to the primitive variants above.
                    _ => unreachable!(),
                }
            }, None),
            _ => {
                debug!("Unexpected type {:?}", def_id);
                return Vec::new()
            }
        };
        self.get_auto_trait_impls(def_id, &def_ctor, None)
    }
/// Computes synthetic auto-trait impls for the HIR item with node id `id`,
/// displayed under `name`. Panics for item kinds other than
/// struct/union/enum.
pub fn get_with_node_id(&self, id: ast::NodeId, name: String) -> Vec<Item> {
    let node = &self.cx.tcx.hir.expect_item(id).node;
    let did = self.cx.tcx.hir.local_def_id(id);
    let ctor = match *node {
        hir::ItemKind::Enum(_, _) => Def::Enum,
        hir::ItemKind::Struct(_, _) => Def::Struct,
        hir::ItemKind::Union(_, _) => Def::Union,
        _ => panic!("Unexpected type {:?} {:?}", node, id),
    };
    self.get_auto_trait_impls(did, &ctor, Some(name))
}
/// Builds an `hir::Ty` naming the type at `def_id`, substituting
/// `real_name` (when given) for the path's final segment and attaching
/// `generics` as that segment's path parameters.
fn get_real_ty<F>(&self,
                  def_id: DefId,
                  def_ctor: &F,
                  real_name: &Option<Ident>,
                  generics: &ty::Generics,
) -> hir::Ty
    where F: Fn(DefId) -> Def {
    let path = get_path_for_type(self.cx.tcx, def_id, def_ctor);
    let mut segs = path.segments.into_vec();
    let tail = segs.pop().unwrap();
    // Rebuild the last segment with the display name and generic args.
    let rebuilt_tail = hir::PathSegment::new(
        real_name.unwrap_or(tail.ident),
        self.generics_to_path_params(generics.clone()),
        false,
    );
    segs.push(rebuilt_tail);
    hir::Ty {
        id: ast::DUMMY_NODE_ID,
        node: hir::TyKind::Path(hir::QPath::Resolved(None, P(hir::Path {
            span: path.span,
            def: path.def,
            segments: HirVec::from_vec(segs),
        }))),
        span: DUMMY_SP,
        hir_id: hir::DUMMY_HIR_ID,
    }
}
/// Computes the synthetic impls shown on the docs page for the item at
/// `def_id`: blanket-style generated impls found by scanning all public
/// traits, plus the auto-trait (`Send`/`Sync`) impls.
///
/// Returns an empty vec for items marked `#[doc(hidden)]`.
pub fn get_auto_trait_impls<F>(
    &self,
    def_id: DefId,
    def_ctor: &F,
    name: Option<String>,
) -> Vec<Item>
    where F: Fn(DefId) -> Def {
    if self.cx
        .tcx
        .get_attrs(def_id)
        .lists("doc")
        .has_word("hidden")
    {
        debug!(
            "get_auto_trait_impls(def_id={:?}, def_ctor=...): item has doc('hidden'), \
             aborting",
            def_id
        );
        return Vec::new();
    }
    let tcx = self.cx.tcx;
    let generics = self.cx.tcx.generics_of(def_id);
    let ty = self.cx.tcx.type_of(def_id);
    let mut traits = Vec::new();
    // Skip the (expensive) all-traits scan when documenting `core` itself,
    // or when the item isn't publicly reachable anyway.
    if self.cx.crate_name != Some("core".to_string()) &&
       self.cx.access_levels.borrow().is_doc_reachable(def_id) {
        if let ty::TyAdt(_adt, _) = ty.sty {
            let real_name = name.clone().map(|name| Ident::from_str(&name));
            let param_env = self.cx.tcx.param_env(def_id);
            for &trait_def_id in self.cx.all_traits.iter() {
                // Skip non-reachable traits and (type, trait) pairs that
                // already had a synthetic impl generated.
                if !self.cx.access_levels.borrow().is_doc_reachable(trait_def_id) ||
                   self.cx.generated_synthetics
                       .borrow_mut()
                       .get(&(def_id, trait_def_id))
                       .is_some() {
                    continue
                }
                self.cx.tcx.for_each_relevant_impl(trait_def_id, ty, |impl_def_id| {
                    self.cx.tcx.infer_ctxt().enter(|infcx| {
                        let t_generics = infcx.tcx.generics_of(impl_def_id);
                        let trait_ref = infcx.tcx.impl_trait_ref(impl_def_id).unwrap();
                        // Only consider impls whose self type is a bare type
                        // parameter (blanket impls like `impl<T> Trait for T`).
                        match infcx.tcx.type_of(impl_def_id).sty {
                            ::rustc::ty::TypeVariants::TyParam(_) => {},
                            _ => return,
                        }
                        // Instantiate both the type and the impl with fresh
                        // inference variables before unifying them.
                        let substs = infcx.fresh_substs_for_item(DUMMY_SP, def_id);
                        let ty = ty.subst(infcx.tcx, substs);
                        let param_env = param_env.subst(infcx.tcx, substs);
                        let impl_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl_def_id);
                        let trait_ref = trait_ref.subst(infcx.tcx, impl_substs);
                        // Require the type the impl is implemented on to match
                        // our type, and ignore the impl if there was a mismatch.
                        let cause = traits::ObligationCause::dummy();
                        let eq_result = infcx.at(&cause, param_env)
                                             .eq(trait_ref.self_ty(), ty);
                        if let Ok(InferOk { value: (), obligations }) = eq_result {
                            // FIXME(eddyb) ignoring `obligations` might cause false positives.
                            drop(obligations);
                            let may_apply = infcx.predicate_may_hold(&traits::Obligation::new(
                                cause.clone(),
                                param_env,
                                trait_ref.to_predicate(),
                            ));
                            if !may_apply {
                                return
                            }
                            self.cx.generated_synthetics.borrow_mut()
                                .insert((def_id, trait_def_id));
                            let trait_ = hir::TraitRef {
                                path: get_path_for_type(infcx.tcx,
                                                        trait_def_id,
                                                        hir::def::Def::Trait),
                                ref_id: ast::DUMMY_NODE_ID,
                            };
                            let provided_trait_methods =
                                infcx.tcx.provided_trait_methods(trait_def_id)
                                         .into_iter()
                                         .map(|meth| meth.ident.to_string())
                                         .collect();
                            let ty = self.get_real_ty(def_id, def_ctor, &real_name, generics);
                            let predicates = infcx.tcx.predicates_of(def_id);
                            traits.push(Item {
                                source: infcx.tcx.def_span(impl_def_id).clean(self.cx),
                                name: None,
                                attrs: Default::default(),
                                visibility: None,
                                // Synthetic impls get fresh fake ids; see
                                // `next_def_id`.
                                def_id: self.next_def_id(impl_def_id.krate),
                                stability: None,
                                deprecation: None,
                                inner: ImplItem(Impl {
                                    unsafety: hir::Unsafety::Normal,
                                    generics: (t_generics, &predicates).clean(self.cx),
                                    provided_trait_methods,
                                    trait_: Some(trait_.clean(self.cx)),
                                    for_: ty.clean(self.cx),
                                    items: infcx.tcx.associated_items(impl_def_id)
                                                    .collect::<Vec<_>>()
                                                    .clean(self.cx),
                                    polarity: None,
                                    synthetic: true,
                                }),
                            });
                            debug!("{:?} => {}", trait_ref, may_apply);
                        }
                    });
                });
            }
        }
    }
    debug!(
        "get_auto_trait_impls(def_id={:?}, def_ctor=..., generics={:?}",
        def_id, generics
    );
    // `send_trait` is an `Option`, so `Send` may be absent; `Sync` is
    // always fetched through its lang item.
    let auto_traits: Vec<_> =
        self.cx.send_trait
            .and_then(|send_trait| {
                self.get_auto_trait_impl_for(
                    def_id,
                    name.clone(),
                    generics.clone(),
                    def_ctor,
                    send_trait,
                )
            }).into_iter()
            .chain(self.get_auto_trait_impl_for(
                def_id,
                name.clone(),
                generics.clone(),
                def_ctor,
                tcx.require_lang_item(lang_items::SyncTraitLangItem),
            ).into_iter())
            .chain(traits.into_iter())
            .collect();
    debug!(
        "get_auto_traits: type {:?} auto_traits {:?}",
        def_id, auto_traits
    );
    auto_traits
}
/// Synthesizes a single auto-trait impl (`trait_def_id`, e.g. `Send`) for
/// the type at `def_id`. Returns `None` when this pair was already
/// generated or when auto-trait resolution found an explicit impl.
fn get_auto_trait_impl_for<F>(
    &self,
    def_id: DefId,
    name: Option<String>,
    generics: ty::Generics,
    def_ctor: &F,
    trait_def_id: DefId,
) -> Option<Item>
    where F: Fn(DefId) -> Def {
    // `insert` returning false means this pair was processed before.
    if !self.cx
        .generated_synthetics
        .borrow_mut()
        .insert((def_id, trait_def_id))
    {
        debug!(
            "get_auto_trait_impl_for(def_id={:?}, generics={:?}, def_ctor=..., \
             trait_def_id={:?}): already generated, aborting",
            def_id, generics, trait_def_id
        );
        return None;
    }
    let result = self.find_auto_trait_generics(def_id, trait_def_id, &generics);
    if result.is_auto() {
        let trait_ = hir::TraitRef {
            path: get_path_for_type(self.cx.tcx, trait_def_id, hir::def::Def::Trait),
            ref_id: ast::DUMMY_NODE_ID,
        };
        let polarity;
        let new_generics = match result {
            AutoTraitResult::PositiveImpl(new_generics) => {
                polarity = None;
                new_generics
            }
            AutoTraitResult::NegativeImpl => {
                polarity = Some(ImplPolarity::Negative);
                // For negative impls, we use the generic params, but *not* the predicates,
                // from the original type. Otherwise, the displayed impl appears to be a
                // conditional negative impl, when it's really unconditional.
                //
                // For example, consider the struct Foo<T: Copy>(*mut T). Using
                // the original predicates in our impl would cause us to generate
                // `impl !Send for Foo<T: Copy>`, which makes it appear that Foo
                // implements Send where T is not copy.
                //
                // Instead, we generate `impl !Send for Foo<T>`, which better
                // expresses the fact that `Foo<T>` never implements `Send`,
                // regardless of the choice of `T`.
                let real_generics = (&generics, &Default::default());
                // Clean the generics, but ignore the '?Sized' bounds generated
                // by the `Clean` impl
                let clean_generics = real_generics.clean(self.cx);
                Generics {
                    params: clean_generics.params,
                    where_predicates: Vec::new(),
                }
            }
            // Unreachable as long as `is_auto()` excludes the remaining
            // variants (e.g. `ExplicitImpl`).
            _ => unreachable!(),
        };
        let real_name = name.map(|name| Ident::from_str(&name));
        let ty = self.get_real_ty(def_id, def_ctor, &real_name, &generics);
        return Some(Item {
            source: Span::empty(),
            name: None,
            attrs: Default::default(),
            visibility: None,
            def_id: self.next_def_id(def_id.krate),
            stability: None,
            deprecation: None,
            inner: ImplItem(Impl {
                unsafety: hir::Unsafety::Normal,
                generics: new_generics,
                provided_trait_methods: FxHashSet(),
                trait_: Some(trait_.clean(self.cx)),
                for_: ty.clean(self.cx),
                items: Vec::new(),
                polarity,
                synthetic: true,
            }),
        });
    }
    None
}
/// Converts `ty::Generics` into the `hir::GenericArgs` needed to mention
/// those parameters in a path, in declaration order.
fn generics_to_path_params(&self, generics: ty::Generics) -> hir::GenericArgs {
    let args: Vec<_> = generics.params.iter().map(|param| match param.kind {
        ty::GenericParamDefKind::Lifetime => {
            // An unnamed region is rendered as `'static`.
            let name = if param.name == "" {
                hir::ParamName::Plain(keywords::StaticLifetime.ident())
            } else {
                hir::ParamName::Plain(ast::Ident::from_interned_str(param.name))
            };
            hir::GenericArg::Lifetime(hir::Lifetime {
                id: ast::DUMMY_NODE_ID,
                span: DUMMY_SP,
                name: hir::LifetimeName::Param(name),
            })
        }
        ty::GenericParamDefKind::Type {..} => {
            hir::GenericArg::Type(self.ty_param_to_ty(param.clone()))
        }
    }).collect();
    hir::GenericArgs {
        args: HirVec::from_vec(args),
        bindings: HirVec::new(),
        parenthesized: false,
    }
}
/// Builds an `hir::Ty` that is a bare path referring to the type
/// parameter `param`.
fn ty_param_to_ty(&self, param: ty::GenericParamDef) -> hir::Ty {
    debug!("ty_param_to_ty({:?}) {:?}", param, param.def_id);
    let segment = hir::PathSegment::from_ident(Ident::from_interned_str(param.name));
    let path = hir::Path {
        span: DUMMY_SP,
        def: Def::TyParam(param.def_id),
        segments: HirVec::from_vec(vec![segment]),
    };
    hir::Ty {
        id: ast::DUMMY_NODE_ID,
        node: hir::TyKind::Path(hir::QPath::Resolved(None, P(path))),
        span: DUMMY_SP,
        hir_id: hir::DUMMY_HIR_ID,
    }
}
/// Runs the compiler's auto-trait finder for `did`/`trait_did` and cleans
/// the resulting parameter environment into rustdoc `Generics`.
///
/// Fix: line `self.handle_lifetimes(®ion_data, ...)` contained the
/// mis-encoded character `®` where `&r` of `&region_data` belongs; the
/// corrupted form does not compile, so the reference is restored.
fn find_auto_trait_generics(
    &self,
    did: DefId,
    trait_did: DefId,
    generics: &ty::Generics,
) -> AutoTraitResult {
    match self.f.find_auto_trait_generics(did, trait_did, generics,
            |infcx, mut info| {
                let region_data = info.region_data;
                // Key each clean `Lifetime` by its source name.
                let names_map =
                    info.names_map
                        .drain()
                        .map(|name| (name.clone(), Lifetime(name)))
                        .collect();
                let lifetime_predicates =
                    self.handle_lifetimes(&region_data, &names_map);
                let new_generics = self.param_env_to_generics(
                    infcx.tcx,
                    did,
                    info.full_user_env,
                    generics.clone(),
                    lifetime_predicates,
                    info.vid_to_region,
                );
                debug!(
                    "find_auto_trait_generics(did={:?}, trait_did={:?}, generics={:?}): \
                     finished with {:?}",
                    did, trait_did, generics, new_generics
                );
                new_generics
            }) {
        // Translate the compiler-side result enum into rustdoc's copy.
        auto::AutoTraitResult::ExplicitImpl => AutoTraitResult::ExplicitImpl,
        auto::AutoTraitResult::NegativeImpl => AutoTraitResult::NegativeImpl,
        auto::AutoTraitResult::PositiveImpl(res) => AutoTraitResult::PositiveImpl(res),
    }
}
/// Looks up the clean `Lifetime` for `region` in `names_map`; regions
/// without a name resolve to `'static`. Panics if a named region is
/// missing from the map.
fn get_lifetime(&self, region: Region, names_map: &FxHashMap<String, Lifetime>) -> Lifetime {
    match self.region_name(region) {
        Some(name) => names_map
            .get(&name)
            .unwrap_or_else(|| panic!("Missing lifetime with name {:?} for {:?}", name, region))
            .clone(),
        None => Lifetime::statik(),
    }
}
/// Returns the source name of an early-bound `region`; every other region
/// kind yields `None`.
fn region_name(&self, region: Region) -> Option<String> {
    if let &ty::ReEarlyBound(r) = region {
        Some(r.name.to_string())
    } else {
        None
    }
}
// This method calculates two things: Lifetime constraints of the form 'a: 'b,
// and region constraints of the form ReVar: 'a
//
// This is essentially a simplified version of lexical_region_resolve. However,
// handle_lifetimes determines what *needs be* true in order for an impl to hold.
// lexical_region_resolve, along with much of the rest of the compiler, is concerned
// with determining if a given set of constraints/predicates *are* met, given some
// starting conditions (e.g. user-provided code). For this reason, it's easier
// to perform the calculations we need on our own, rather than trying to make
// existing inference/solver code do what we want.
fn handle_lifetimes<'cx>(
    &self,
    regions: &RegionConstraintData<'cx>,
    names_map: &FxHashMap<String, Lifetime>,
) -> Vec<WherePredicate> {
    // Our goal is to 'flatten' the list of constraints by eliminating
    // all intermediate RegionVids. At the end, all constraints should
    // be between Regions (aka region variables). This gives us the information
    // we need to create the Generics.
    let mut finished = FxHashMap();
    let mut vid_map: FxHashMap<RegionTarget, RegionDeps> = FxHashMap();
    // Flattening is done in two parts. First, we insert all of the constraints
    // into a map. Each RegionTarget (either a RegionVid or a Region) maps
    // to its smaller and larger regions. Note that 'larger' regions correspond
    // to sub-regions in Rust code (e.g. in 'a: 'b, 'a is the larger region).
    for constraint in regions.constraints.keys() {
        match constraint {
            &Constraint::VarSubVar(r1, r2) => {
                // Inner scope ends the first mutable borrow of `vid_map`
                // before the second entry lookup.
                {
                    let deps1 = vid_map
                        .entry(RegionTarget::RegionVid(r1))
                        .or_insert_with(|| Default::default());
                    deps1.larger.insert(RegionTarget::RegionVid(r2));
                }
                let deps2 = vid_map
                    .entry(RegionTarget::RegionVid(r2))
                    .or_insert_with(|| Default::default());
                deps2.smaller.insert(RegionTarget::RegionVid(r1));
            }
            &Constraint::RegSubVar(region, vid) => {
                let deps = vid_map
                    .entry(RegionTarget::RegionVid(vid))
                    .or_insert_with(|| Default::default());
                deps.smaller.insert(RegionTarget::Region(region));
            }
            &Constraint::VarSubReg(vid, region) => {
                let deps = vid_map
                    .entry(RegionTarget::RegionVid(vid))
                    .or_insert_with(|| Default::default());
                deps.larger.insert(RegionTarget::Region(region));
            }
            &Constraint::RegSubReg(r1, r2) => {
                // The constraint is already in the form that we want, so we're done with it
                // Desired order is 'larger, smaller', so flip them
                if self.region_name(r1) != self.region_name(r2) {
                    finished
                        .entry(self.region_name(r2).unwrap())
                        .or_insert_with(|| Vec::new())
                        .push(r1);
                }
            }
        }
    }
    // Here, we 'flatten' the map one element at a time.
    // All of the element's sub and super regions are connected
    // to each other. For example, if we have a graph that looks like this:
    //
    // (A, B) - C - (D, E)
    // Where (A, B) are subregions, and (D,E) are super-regions
    //
    // then after deleting 'C', the graph will look like this:
    // ... - A - (D, E ...)
    // ... - B - (D, E, ...)
    // (A, B, ...) - D - ...
    // (A, B, ...) - E - ...
    //
    // where '...' signifies the existing sub and super regions of an entry
    // When two adjacent ty::Regions are encountered, we've computed a final
    // constraint, and add it to our list. Since we make sure to never re-add
    // deleted items, this process will always finish.
    while !vid_map.is_empty() {
        let target = vid_map.keys().next().expect("Keys somehow empty").clone();
        let deps = vid_map.remove(&target).expect("Entry somehow missing");
        for smaller in deps.smaller.iter() {
            for larger in deps.larger.iter() {
                match (smaller, larger) {
                    (&RegionTarget::Region(r1), &RegionTarget::Region(r2)) => {
                        if self.region_name(r1) != self.region_name(r2) {
                            finished
                                .entry(self.region_name(r2).unwrap())
                                .or_insert_with(|| Vec::new())
                                .push(r1) // Larger, smaller
                        }
                    }
                    (&RegionTarget::RegionVid(_), &RegionTarget::Region(_)) => {
                        if let Entry::Occupied(v) = vid_map.entry(*smaller) {
                            let smaller_deps = v.into_mut();
                            smaller_deps.larger.insert(*larger);
                            smaller_deps.larger.remove(&target);
                        }
                    }
                    (&RegionTarget::Region(_), &RegionTarget::RegionVid(_)) => {
                        if let Entry::Occupied(v) = vid_map.entry(*larger) {
                            let deps = v.into_mut();
                            deps.smaller.insert(*smaller);
                            deps.smaller.remove(&target);
                        }
                    }
                    (&RegionTarget::RegionVid(_), &RegionTarget::RegionVid(_)) => {
                        if let Entry::Occupied(v) = vid_map.entry(*smaller) {
                            let smaller_deps = v.into_mut();
                            smaller_deps.larger.insert(*larger);
                            smaller_deps.larger.remove(&target);
                        }
                        if let Entry::Occupied(v) = vid_map.entry(*larger) {
                            let larger_deps = v.into_mut();
                            larger_deps.smaller.insert(*smaller);
                            larger_deps.smaller.remove(&target);
                        }
                    }
                }
            }
        }
    }
    // Turn each named lifetime's accumulated outlives set into a predicate;
    // lifetimes with no bounds produce no predicate.
    let lifetime_predicates = names_map
        .iter()
        .flat_map(|(name, lifetime)| {
            let empty = Vec::new();
            let bounds: FxHashSet<GenericBound> = finished.get(name).unwrap_or(&empty).iter()
                .map(|region| GenericBound::Outlives(self.get_lifetime(region, names_map)))
                .collect();
            if bounds.is_empty() {
                return None;
            }
            Some(WherePredicate::RegionPredicate {
                lifetime: lifetime.clone(),
                bounds: bounds.into_iter().collect(),
            })
        })
        .collect();
    lifetime_predicates
}
/// Collects the named late-bound regions mentioned anywhere in `pred`'s
/// types, as lifetime parameters for a `for<...>` section. Panics on
/// unexpected region kinds.
fn extract_for_generics<'b, 'c, 'd>(
    &self,
    tcx: TyCtxt<'b, 'c, 'd>,
    pred: ty::Predicate<'d>,
) -> FxHashSet<GenericParamDef> {
    pred.walk_tys()
        .flat_map(|t| {
            let mut regions = FxHashSet();
            tcx.collect_regions(&t, &mut regions);
            regions.into_iter().flat_map(|r| {
                match r {
                    // We only care about late bound regions, as we need to add them
                    // to the 'for<>' section
                    &ty::ReLateBound(_, ty::BoundRegion::BrNamed(_, name)) => {
                        Some(GenericParamDef {
                            name: name.to_string(),
                            kind: GenericParamDefKind::Lifetime,
                        })
                    }
                    &ty::ReVar(_) | &ty::ReEarlyBound(_) => None,
                    _ => panic!("Unexpected region type {:?}", r),
                }
            })
        })
        .collect()
}
/// Merges the per-type and per-lifetime bound maps into the final list of
/// where-predicates, rewriting `Fn`-family trait bounds together with
/// their recorded `Output` type into the `Fn(...) -> Out` sugar.
fn make_final_bounds<'b, 'c, 'cx>(
    &self,
    ty_to_bounds: FxHashMap<Type, FxHashSet<GenericBound>>,
    ty_to_fn: FxHashMap<Type, (Option<PolyTrait>, Option<Type>)>,
    lifetime_to_bounds: FxHashMap<Lifetime, FxHashSet<GenericBound>>,
) -> Vec<WherePredicate> {
    ty_to_bounds
        .into_iter()
        .flat_map(|(ty, mut bounds)| {
            if let Some(data) = ty_to_fn.get(&ty) {
                let (poly_trait, output) =
                    (data.0.as_ref().unwrap().clone(), data.1.as_ref().cloned());
                let new_ty = match &poly_trait.trait_ {
                    &Type::ResolvedPath {
                        ref path,
                        ref typarams,
                        ref did,
                        ref is_generic,
                    } => {
                        let mut new_path = path.clone();
                        let last_segment = new_path.segments.pop().unwrap();
                        let (old_input, old_output) = match last_segment.args {
                            GenericArgs::AngleBracketed { types, .. } => (types, None),
                            GenericArgs::Parenthesized { inputs, output, .. } => {
                                (inputs, output)
                            }
                        };
                        // An `Output` already on the path must agree with the
                        // one recorded from the equality predicate.
                        if old_output.is_some() && old_output != output {
                            panic!(
                                "Output mismatch for {:?} {:?} {:?}",
                                ty, old_output, data.1
                            );
                        }
                        // Re-render the bound in parenthesized `Fn(...)` form.
                        let new_params = GenericArgs::Parenthesized {
                            inputs: old_input,
                            output,
                        };
                        new_path.segments.push(PathSegment {
                            name: last_segment.name,
                            args: new_params,
                        });
                        Type::ResolvedPath {
                            path: new_path,
                            typarams: typarams.clone(),
                            did: did.clone(),
                            is_generic: *is_generic,
                        }
                    }
                    _ => panic!("Unexpected data: {:?}, {:?}", ty, data),
                };
                bounds.insert(GenericBound::TraitBound(
                    PolyTrait {
                        trait_: new_ty,
                        generic_params: poly_trait.generic_params,
                    },
                    hir::TraitBoundModifier::None,
                ));
            }
            if bounds.is_empty() {
                return None;
            }
            let mut bounds_vec = bounds.into_iter().collect();
            self.sort_where_bounds(&mut bounds_vec);
            Some(WherePredicate::BoundPredicate {
                ty,
                bounds: bounds_vec,
            })
        })
        .chain(
            lifetime_to_bounds
                .into_iter()
                .filter(|&(_, ref bounds)| !bounds.is_empty())
                .map(|(lifetime, bounds)| {
                    let mut bounds_vec = bounds.into_iter().collect();
                    self.sort_where_bounds(&mut bounds_vec);
                    WherePredicate::RegionPredicate {
                        lifetime,
                        bounds: bounds_vec,
                    }
                }),
        )
        .collect()
}
// Converts the calculated ParamEnv and lifetime information to a clean::Generics, suitable for
// display on the docs page. Cleaning the Predicates produces sub-optimal WherePredicate's,
// so we fix them up:
//
// * Multiple bounds for the same type are coalesced into one: e.g. 'T: Copy', 'T: Debug'
// becomes 'T: Copy + Debug'
// * Fn bounds are handled specially - instead of leaving it as 'T: Fn(), <T as Fn::Output> =
// K', we use the dedicated syntax 'T: Fn() -> K'
// * We explicitly add a '?Sized' bound if we didn't find any 'Sized' predicates for a type
fn param_env_to_generics<'b, 'c, 'cx>(
    &self,
    tcx: TyCtxt<'b, 'c, 'cx>,
    did: DefId,
    param_env: ty::ParamEnv<'cx>,
    type_generics: ty::Generics,
    mut existing_predicates: Vec<WherePredicate>,
    vid_to_region: FxHashMap<ty::RegionVid, ty::Region<'cx>>,
) -> Generics {
    debug!(
        "param_env_to_generics(did={:?}, param_env={:?}, type_generics={:?}, \
         existing_predicates={:?})",
        did, param_env, type_generics, existing_predicates
    );
    // The `Sized` trait must be handled specially, since we only display it when
    // it is *not* required (i.e. '?Sized')
    let sized_trait = self.cx
        .tcx
        .require_lang_item(lang_items::SizedTraitLangItem);
    let mut replacer = RegionReplacer {
        vid_to_region: &vid_to_region,
        tcx,
    };
    let orig_bounds: FxHashSet<_> = self.cx.tcx.param_env(did).caller_bounds.iter().collect();
    // Keep only the bounds the auto-trait search added (plus `Sized`, which
    // must be tracked so the '?Sized' decision below can be made), with
    // region inference variables replaced via `RegionReplacer`.
    let clean_where_predicates = param_env
        .caller_bounds
        .iter()
        .filter(|p| {
            !orig_bounds.contains(p) || match p {
                &&ty::Predicate::Trait(pred) => pred.def_id() == sized_trait,
                _ => false,
            }
        })
        .map(|p| {
            let replaced = p.fold_with(&mut replacer);
            (replaced.clone(), replaced.clean(self.cx))
        });
    let full_generics = (&type_generics, &tcx.predicates_of(did));
    let Generics {
        params: mut generic_params,
        ..
    } = full_generics.clean(self.cx);
    // Accumulators keyed by the bounded type / lifetime.
    let mut has_sized = FxHashSet();
    let mut ty_to_bounds = FxHashMap();
    let mut lifetime_to_bounds = FxHashMap();
    let mut ty_to_traits: FxHashMap<Type, FxHashSet<Type>> = FxHashMap();
    let mut ty_to_fn: FxHashMap<Type, (Option<PolyTrait>, Option<Type>)> = FxHashMap();
    for (orig_p, p) in clean_where_predicates {
        match p {
            WherePredicate::BoundPredicate { ty, mut bounds } => {
                // Writing a projection trait bound of the form
                // <T as Trait>::Name : ?Sized
                // is illegal, because ?Sized bounds can only
                // be written in the (here, nonexistent) definition
                // of the type.
                // Therefore, we make sure that we never add a ?Sized
                // bound for projections
                match &ty {
                    &Type::QPath { .. } => {
                        has_sized.insert(ty.clone());
                    }
                    _ => {}
                }
                if bounds.is_empty() {
                    continue;
                }
                let mut for_generics = self.extract_for_generics(tcx, orig_p.clone());
                assert!(bounds.len() == 1);
                let mut b = bounds.pop().unwrap();
                if b.is_sized_bound(self.cx) {
                    has_sized.insert(ty.clone());
                } else if !b.get_trait_type()
                    .and_then(|t| {
                        ty_to_traits
                            .get(&ty)
                            .map(|bounds| bounds.contains(&strip_type(t.clone())))
                    })
                    .unwrap_or(false)
                {
                    // If we've already added a projection bound for the same type, don't add
                    // this, as it would be a duplicate
                    // Handle any 'Fn/FnOnce/FnMut' bounds specially,
                    // as we want to combine them with any 'Output' qpaths
                    // later
                    let is_fn = match &mut b {
                        &mut GenericBound::TraitBound(ref mut p, _) => {
                            // Insert regions into the for_generics hash map first, to ensure
                            // that we don't end up with duplicate bounds (e.g. for<'b, 'b>)
                            for_generics.extend(p.generic_params.clone());
                            p.generic_params = for_generics.into_iter().collect();
                            self.is_fn_ty(&tcx, &p.trait_)
                        }
                        _ => false,
                    };
                    let poly_trait = b.get_poly_trait().unwrap();
                    if is_fn {
                        // Record the Fn-family trait; the `Output` half is
                        // filled in by the EqPredicate arm below.
                        ty_to_fn
                            .entry(ty.clone())
                            .and_modify(|e| *e = (Some(poly_trait.clone()), e.1.clone()))
                            .or_insert(((Some(poly_trait.clone())), None));
                        ty_to_bounds
                            .entry(ty.clone())
                            .or_insert_with(|| FxHashSet());
                    } else {
                        ty_to_bounds
                            .entry(ty.clone())
                            .or_insert_with(|| FxHashSet())
                            .insert(b.clone());
                    }
                }
            }
            WherePredicate::RegionPredicate { lifetime, bounds } => {
                lifetime_to_bounds
                    .entry(lifetime)
                    .or_insert_with(|| FxHashSet())
                    .extend(bounds);
            }
            WherePredicate::EqPredicate { lhs, rhs } => {
                match &lhs {
                    &Type::QPath {
                        name: ref left_name,
                        ref self_type,
                        ref trait_,
                    } => {
                        let ty = &*self_type;
                        match **trait_ {
                            Type::ResolvedPath {
                                path: ref trait_path,
                                ref typarams,
                                ref did,
                                ref is_generic,
                            } => {
                                let mut new_trait_path = trait_path.clone();
                                // `<T as Fn<..>>::Output = K` is stored in
                                // `ty_to_fn` for the `Fn() -> K` rendering.
                                if self.is_fn_ty(&tcx, trait_) && left_name == FN_OUTPUT_NAME {
                                    ty_to_fn
                                        .entry(*ty.clone())
                                        .and_modify(|e| *e = (e.0.clone(), Some(rhs.clone())))
                                        .or_insert((None, Some(rhs)));
                                    continue;
                                }
                                // FIXME: Remove this scope when NLL lands
                                {
                                    let args =
                                        &mut new_trait_path.segments.last_mut().unwrap().args;
                                    match args {
                                        // Convert something like '<T as Iterator::Item> = u8'
                                        // to 'T: Iterator<Item=u8>'
                                        &mut GenericArgs::AngleBracketed {
                                            ref mut bindings,
                                            ..
                                        } => {
                                            bindings.push(TypeBinding {
                                                name: left_name.clone(),
                                                ty: rhs,
                                            });
                                        }
                                        &mut GenericArgs::Parenthesized { .. } => {
                                            existing_predicates.push(
                                                WherePredicate::EqPredicate {
                                                    lhs: lhs.clone(),
                                                    rhs,
                                                },
                                            );
                                            continue; // If something other than a Fn ends up
                                                      // with parenthesis, leave it alone
                                        }
                                    }
                                }
                                let bounds = ty_to_bounds
                                    .entry(*ty.clone())
                                    .or_insert_with(|| FxHashSet());
                                bounds.insert(GenericBound::TraitBound(
                                    PolyTrait {
                                        trait_: Type::ResolvedPath {
                                            path: new_trait_path,
                                            typarams: typarams.clone(),
                                            did: did.clone(),
                                            is_generic: *is_generic,
                                        },
                                        generic_params: Vec::new(),
                                    },
                                    hir::TraitBoundModifier::None,
                                ));
                                // Remove any existing 'plain' bound (e.g. 'T: Iterator`) so
                                // that we don't see a
                                // duplicate bound like `T: Iterator + Iterator<Item=u8>`
                                // on the docs page.
                                bounds.remove(&GenericBound::TraitBound(
                                    PolyTrait {
                                        trait_: *trait_.clone(),
                                        generic_params: Vec::new(),
                                    },
                                    hir::TraitBoundModifier::None,
                                ));
                                // Avoid creating any new duplicate bounds later in the outer
                                // loop
                                ty_to_traits
                                    .entry(*ty.clone())
                                    .or_insert_with(|| FxHashSet())
                                    .insert(*trait_.clone());
                            }
                            _ => panic!("Unexpected trait {:?} for {:?}", trait_, did),
                        }
                    }
                    _ => panic!("Unexpected LHS {:?} for {:?}", lhs, did),
                }
            }
        };
    }
    let final_bounds = self.make_final_bounds(ty_to_bounds, ty_to_fn, lifetime_to_bounds);
    existing_predicates.extend(final_bounds);
    // Finally, strip defaults (`impl<T=Foo>` is never wanted) and add
    // `?Sized` to every type parameter with no recorded `Sized` bound.
    for param in generic_params.iter_mut() {
        match param.kind {
            GenericParamDefKind::Type { ref mut default, ref mut bounds, .. } => {
                // We never want something like `impl<T=Foo>`.
                default.take();
                let generic_ty = Type::Generic(param.name.clone());
                if !has_sized.contains(&generic_ty) {
                    bounds.insert(0, GenericBound::maybe_sized(self.cx));
                }
            }
            GenericParamDefKind::Lifetime => {}
        }
    }
    self.sort_where_predicates(&mut existing_predicates);
    Generics {
        params: generic_params,
        where_predicates: existing_predicates,
    }
}
// Ensure that the predicates are in a consistent order. The precise
// ordering doesn't actually matter, but it's important that
// a given set of predicates always appears in the same order -
// both for visual consistency between 'rustdoc' runs, and to
// make writing tests much easier
//
// Fix: the parameter was bound `mut predicates` and re-borrowed as
// `&mut predicates` (a `&mut &mut Vec` relying on deref coercion); the
// redundant `mut` binding is dropped and the reference passed through.
#[inline]
fn sort_where_predicates(&self, predicates: &mut Vec<WherePredicate>) {
    // We should never have identical bounds - and if we do,
    // they're visually identical as well. Therefore, using
    // an unstable sort is fine.
    self.unstable_debug_sort(predicates);
}
// Ensure that the bounds are in a consistent order. The precise
// ordering doesn't actually matter, but it's important that
// a given set of bounds always appears in the same order -
// both for visual consistency between 'rustdoc' runs, and to
// make writing tests much easier
//
// Fix: same as `sort_where_predicates` — the redundant `mut` binding and
// `&mut`-of-`&mut` re-borrow are removed; the reference is passed through.
#[inline]
fn sort_where_bounds(&self, bounds: &mut Vec<GenericBound>) {
    // We should never have identical bounds - and if we do,
    // they're visually identical as well. Therefore, using
    // an unstable sort is fine.
    self.unstable_debug_sort(bounds);
}
// Sorting by the `Debug` string looks hacky, but is deliberate: the
// intermediate FxHashMaps used while computing where-predicates iterate in
// an unspecified (and platform-dependent — FxHasher differs between 32-bit
// and 64-bit targets) order, which would make rendered docs and tests vary
// between runs and platforms. We only need *some* deterministic order, not
// a meaningful one, and sorting on the Debug representation solves the
// problem for both WherePredicates and GenericBounds at once without
// writing comparison logic for every type involved. The item counts are
// tiny (impls rarely have more than a few bounds), so the formatting cost
// is irrelevant in practice.
fn unstable_debug_sort<T: Debug>(&self, vec: &mut Vec<T>) {
    vec.sort_by_cached_key(|item| format!("{:?}", item))
}
/// Returns true when `ty` resolves to one of the `Fn`/`FnMut`/`FnOnce`
/// lang-item traits.
fn is_fn_ty(&self, tcx: &TyCtxt, ty: &Type) -> bool {
    if let &Type::ResolvedPath { ref did, .. } = ty {
        *did == tcx.require_lang_item(lang_items::FnTraitLangItem)
            || *did == tcx.require_lang_item(lang_items::FnMutTraitLangItem)
            || *did == tcx.require_lang_item(lang_items::FnOnceTraitLangItem)
    } else {
        false
    }
}
// This is an ugly hack, but it's the simplest way to handle synthetic impls without greatly
// refactoring either librustdoc or librustc. In particular, allowing new DefIds to be
// registered after the AST is constructed would require storing the defid mapping in a
// RefCell, decreasing the performance for normal compilation for very little gain.
//
// Instead, we construct 'fake' def ids, which start immediately after the last DefId in
// DefIndexAddressSpace::Low. In the Debug impl for clean::Item, we explicitly check for fake
// def ids, as we'll end up with a panic if we use the DefId Debug impl for fake DefIds
/// Returns a fresh fake `DefId` for a synthetic impl in `crate_num`, and
/// advances the per-crate counter for the next call.
fn next_def_id(&self, crate_num: CrateNum) -> DefId {
    let start_def_id = {
        // The first fake index is the one just past the crate's real defs.
        let next_id = if crate_num == LOCAL_CRATE {
            self.cx
                .tcx
                .hir
                .definitions()
                .def_path_table()
                .next_id(DefIndexAddressSpace::Low)
        } else {
            self.cx
                .cstore
                .def_path_table(crate_num)
                .next_id(DefIndexAddressSpace::Low)
        };
        DefId {
            krate: crate_num,
            index: next_id,
        }
    };
    let mut fake_ids = self.cx.fake_def_ids.borrow_mut();
    // Hand out the current counter value, then store its successor back.
    let def_id = fake_ids.entry(crate_num).or_insert(start_def_id).clone();
    fake_ids.insert(
        crate_num,
        DefId {
            krate: crate_num,
            index: DefIndex::from_array_index(
                def_id.index.as_array_index() + 1,
                def_id.index.address_space(),
            ),
        },
    );
    // Remember the first fake id per crate so fake DefIds can be detected.
    MAX_DEF_ID.with(|m| {
        m.borrow_mut()
            .entry(def_id.krate.clone())
            .or_insert(start_def_id);
    });
    self.cx.all_fake_def_ids.borrow_mut().insert(def_id);
    def_id.clone()
}
}
// Replaces all ReVars in a type with ty::Region's, using the provided map
/// A `TypeFolder` that rewrites region inference variables (`ReVar`) into
/// the concrete regions recorded in `vid_to_region`.
struct RegionReplacer<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> {
    /// Mapping from region inference variable ids to replacement regions.
    vid_to_region: &'a FxHashMap<ty::RegionVid, ty::Region<'tcx>>,
    tcx: TyCtxt<'a, 'gcx, 'tcx>,
}
impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> {
        self.tcx
    }
    /// Substitutes mapped inference variables; any other region (or an
    /// unmapped variable) is folded normally.
    fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
        if let &ty::ReVar(vid) = r {
            if let Some(mapped) = self.vid_to_region.get(&vid).cloned() {
                return mapped;
            }
        }
        r.super_fold_with(self)
    }
}
// Remove generic-impl rendering filter
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::hir;
use rustc::traits::{self, auto_trait as auto};
use rustc::ty::{self, ToPredicate, TypeFoldable};
use rustc::ty::subst::Subst;
use rustc::infer::InferOk;
use std::fmt::Debug;
use syntax_pos::DUMMY_SP;
use core::DocAccessLevels;
use super::*;
/// Synthesizes auto-trait (e.g. `Send`/`Sync`) impl items for rustdoc pages.
pub struct AutoTraitFinder<'a, 'tcx: 'a, 'rcx: 'a> {
    /// Rustdoc context for the crate being documented.
    pub cx: &'a core::DocContext<'a, 'tcx, 'rcx>,
    /// Compiler-side auto-trait finder that performs the actual trait solving.
    pub f: auto::AutoTraitFinder<'a, 'tcx>,
}
impl<'a, 'tcx, 'rcx> AutoTraitFinder<'a, 'tcx, 'rcx> {
/// Creates a finder for `cx`, wiring up the compiler's auto-trait engine.
pub fn new(cx: &'a core::DocContext<'a, 'tcx, 'rcx>) -> Self {
    AutoTraitFinder {
        cx,
        f: auto::AutoTraitFinder::new(&cx.tcx),
    }
}
/// Computes synthetic auto-trait impls for the item at `def_id`.
///
/// Selects a `Def` constructor matching the item's type — the plain enum
/// constructor for ADTs, or a closure building `Def::PrimTy` for the
/// supported primitives — and forwards to `get_auto_trait_impls`.
/// All other type kinds yield an empty vec.
pub fn get_with_def_id(&self, def_id: DefId) -> Vec<Item> {
    let ty = self.cx.tcx.type_of(def_id);
    let def_ctor: fn(DefId) -> Def = match ty.sty {
        ty::TyAdt(adt, _) => match adt.adt_kind() {
            AdtKind::Struct => Def::Struct,
            AdtKind::Enum => Def::Enum,
            AdtKind::Union => Def::Union,
        }
        ty::TyInt(_) |
        ty::TyUint(_) |
        ty::TyFloat(_) |
        ty::TyStr |
        ty::TyBool |
        // Primitives have no `fn(DefId) -> Def` constructor, so a closure
        // capturing `ty` builds the `Def` from the type itself instead.
        ty::TyChar => return self.get_auto_trait_impls(def_id, &move |_: DefId| {
            match ty.sty {
                ty::TyInt(x) => Def::PrimTy(hir::TyInt(x)),
                ty::TyUint(x) => Def::PrimTy(hir::TyUint(x)),
                ty::TyFloat(x) => Def::PrimTy(hir::TyFloat(x)),
                ty::TyStr => Def::PrimTy(hir::TyStr),
                ty::TyBool => Def::PrimTy(hir::TyBool),
                ty::TyChar => Def::PrimTy(hir::TyChar),
                // The outer match already restricted `ty.sty` to the
                // primitive variants handled above.
                _ => unreachable!(),
            }
        }, None),
        _ => {
            debug!("Unexpected type {:?}", def_id);
            return Vec::new()
        }
    };
    self.get_auto_trait_impls(def_id, &def_ctor, None)
}
/// Computes synthetic auto-trait impls for the HIR item with node id `id`,
/// displayed under `name`. Panics for item kinds other than
/// struct/union/enum.
pub fn get_with_node_id(&self, id: ast::NodeId, name: String) -> Vec<Item> {
    let node = &self.cx.tcx.hir.expect_item(id).node;
    let did = self.cx.tcx.hir.local_def_id(id);
    let ctor = match *node {
        hir::ItemKind::Enum(_, _) => Def::Enum,
        hir::ItemKind::Struct(_, _) => Def::Struct,
        hir::ItemKind::Union(_, _) => Def::Union,
        _ => panic!("Unexpected type {:?} {:?}", node, id),
    };
    self.get_auto_trait_impls(did, &ctor, Some(name))
}
/// Builds an `hir::Ty` naming the type `def_id`, using `real_name` (when
/// supplied) for the final path segment and re-attaching the type's own
/// generic parameters as path arguments (e.g. `Foo` -> `Foo<'a, T>`).
fn get_real_ty<F>(&self,
                  def_id: DefId,
                  def_ctor: &F,
                  real_name: &Option<Ident>,
                  generics: &ty::Generics,
) -> hir::Ty
where F: Fn(DefId) -> Def {
    let path = get_path_for_type(self.cx.tcx, def_id, def_ctor);
    let mut segments = path.segments.into_vec();
    // Swap out the last segment so it carries the display name and the
    // generic arguments.
    let last = segments.pop().unwrap();

    segments.push(hir::PathSegment::new(
        real_name.unwrap_or(last.ident),
        self.generics_to_path_params(generics.clone()),
        false,
    ));

    let new_path = hir::Path {
        span: path.span,
        def: path.def,
        segments: HirVec::from_vec(segments),
    };

    // Wrap the path in a fresh (dummy-spanned) type node; rustdoc only
    // needs it for rendering, not for type checking.
    hir::Ty {
        id: ast::DUMMY_NODE_ID,
        node: hir::TyKind::Path(hir::QPath::Resolved(None, P(new_path))),
        span: DUMMY_SP,
        hir_id: hir::DUMMY_HIR_ID,
    }
}
/// Collects every synthetic impl to show for `def_id`: blanket impls of
/// doc-reachable traits whose impls are written over a type parameter, plus
/// the `Send` (when the lang item is known) and `Sync` auto trait impls.
/// Items marked `#[doc(hidden)]` get none.
pub fn get_auto_trait_impls<F>(
    &self,
    def_id: DefId,
    def_ctor: &F,
    name: Option<String>,
) -> Vec<Item>
where F: Fn(DefId) -> Def {
    if self.cx
        .tcx
        .get_attrs(def_id)
        .lists("doc")
        .has_word("hidden")
    {
        debug!(
            "get_auto_trait_impls(def_id={:?}, def_ctor=...): item has doc('hidden'), \
             aborting",
            def_id
        );
        return Vec::new();
    }

    let tcx = self.cx.tcx;
    let generics = self.cx.tcx.generics_of(def_id);
    let ty = self.cx.tcx.type_of(def_id);
    let mut traits = Vec::new();
    // Blanket impls: skipped for `core` itself and for items that aren't
    // doc-reachable.
    if self.cx.crate_name != Some("core".to_string()) &&
       self.cx.access_levels.borrow().is_doc_reachable(def_id) {
        let real_name = name.clone().map(|name| Ident::from_str(&name));
        let param_env = self.cx.tcx.param_env(def_id);
        for &trait_def_id in self.cx.all_traits.iter() {
            // Skip unreachable traits and (def_id, trait) pairs already
            // synthesized in an earlier call.
            if !self.cx.access_levels.borrow().is_doc_reachable(trait_def_id) ||
               self.cx.generated_synthetics
                      .borrow_mut()
                      .get(&(def_id, trait_def_id))
                      .is_some() {
                continue
            }
            self.cx.tcx.for_each_relevant_impl(trait_def_id, ty, |impl_def_id| {
                self.cx.tcx.infer_ctxt().enter(|infcx| {
                    let t_generics = infcx.tcx.generics_of(impl_def_id);
                    let trait_ref = infcx.tcx.impl_trait_ref(impl_def_id).unwrap();

                    // Only consider impls written over a bare type
                    // parameter (i.e. blanket impls like `impl<T> Trait
                    // for T`).
                    match infcx.tcx.type_of(impl_def_id).sty {
                        ::rustc::ty::TypeVariants::TyParam(_) => {},
                        _ => return,
                    }

                    // Instantiate both the target type and the impl with
                    // fresh inference variables before unifying them.
                    let substs = infcx.fresh_substs_for_item(DUMMY_SP, def_id);
                    let ty = ty.subst(infcx.tcx, substs);
                    let param_env = param_env.subst(infcx.tcx, substs);

                    let impl_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl_def_id);
                    let trait_ref = trait_ref.subst(infcx.tcx, impl_substs);

                    // Require the type the impl is implemented on to match
                    // our type, and ignore the impl if there was a mismatch.
                    let cause = traits::ObligationCause::dummy();
                    let eq_result = infcx.at(&cause, param_env)
                                         .eq(trait_ref.self_ty(), ty);
                    if let Ok(InferOk { value: (), obligations }) = eq_result {
                        // FIXME(eddyb) ignoring `obligations` might cause false positives.
                        drop(obligations);
                        // Check the impl's own predicates before
                        // committing to rendering it.
                        let may_apply = infcx.predicate_may_hold(&traits::Obligation::new(
                            cause.clone(),
                            param_env,
                            trait_ref.to_predicate(),
                        ));
                        if !may_apply {
                            return
                        }
                        self.cx.generated_synthetics.borrow_mut()
                                                    .insert((def_id, trait_def_id));

                        let trait_ = hir::TraitRef {
                            path: get_path_for_type(infcx.tcx,
                                                    trait_def_id,
                                                    hir::def::Def::Trait),
                            ref_id: ast::DUMMY_NODE_ID,
                        };
                        let provided_trait_methods =
                            infcx.tcx.provided_trait_methods(trait_def_id)
                                     .into_iter()
                                     .map(|meth| meth.ident.to_string())
                                     .collect();

                        let ty = self.get_real_ty(def_id, def_ctor, &real_name, generics);
                        let predicates = infcx.tcx.predicates_of(def_id);

                        traits.push(Item {
                            source: infcx.tcx.def_span(impl_def_id).clean(self.cx),
                            name: None,
                            attrs: Default::default(),
                            visibility: None,
                            // Synthetic items need a fake DefId; see
                            // `next_def_id`.
                            def_id: self.next_def_id(impl_def_id.krate),
                            stability: None,
                            deprecation: None,
                            inner: ImplItem(Impl {
                                unsafety: hir::Unsafety::Normal,
                                generics: (t_generics, &predicates).clean(self.cx),
                                provided_trait_methods,
                                trait_: Some(trait_.clean(self.cx)),
                                for_: ty.clean(self.cx),
                                items: infcx.tcx.associated_items(impl_def_id)
                                                .collect::<Vec<_>>()
                                                .clean(self.cx),
                                polarity: None,
                                synthetic: true,
                            }),
                        });
                        debug!("{:?} => {}", trait_ref, may_apply);
                    }
                });
            });
        }
    }

    debug!(
        "get_auto_trait_impls(def_id={:?}, def_ctor=..., generics={:?}",
        def_id, generics
    );

    // Chain the `Send`/`Sync` auto trait impls onto the blanket impls
    // collected above.
    let auto_traits: Vec<_> =
        self.cx.send_trait
            .and_then(|send_trait| {
                self.get_auto_trait_impl_for(
                    def_id,
                    name.clone(),
                    generics.clone(),
                    def_ctor,
                    send_trait,
                )
            }).into_iter()
            .chain(self.get_auto_trait_impl_for(
                def_id,
                name.clone(),
                generics.clone(),
                def_ctor,
                tcx.require_lang_item(lang_items::SyncTraitLangItem),
            ).into_iter())
            .chain(traits.into_iter())
            .collect();

    debug!(
        "get_auto_traits: type {:?} auto_traits {:?}",
        def_id, auto_traits
    );

    auto_traits
}
/// Synthesizes a single auto trait impl (`trait_def_id`, e.g. `Send`) for
/// `def_id`. Returns `None` when this pair was already generated, or when
/// `find_auto_trait_generics` reports an explicit (user-written) impl, in
/// which case nothing synthetic should be shown.
fn get_auto_trait_impl_for<F>(
    &self,
    def_id: DefId,
    name: Option<String>,
    generics: ty::Generics,
    def_ctor: &F,
    trait_def_id: DefId,
) -> Option<Item>
where F: Fn(DefId) -> Def {
    // `insert` returning false means the pair was already present —
    // i.e. already generated.
    if !self.cx
        .generated_synthetics
        .borrow_mut()
        .insert((def_id, trait_def_id))
    {
        debug!(
            "get_auto_trait_impl_for(def_id={:?}, generics={:?}, def_ctor=..., \
             trait_def_id={:?}): already generated, aborting",
            def_id, generics, trait_def_id
        );
        return None;
    }

    let result = self.find_auto_trait_generics(def_id, trait_def_id, &generics);

    if result.is_auto() {
        let trait_ = hir::TraitRef {
            path: get_path_for_type(self.cx.tcx, trait_def_id, hir::def::Def::Trait),
            ref_id: ast::DUMMY_NODE_ID,
        };

        let polarity;

        let new_generics = match result {
            AutoTraitResult::PositiveImpl(new_generics) => {
                polarity = None;
                new_generics
            }
            AutoTraitResult::NegativeImpl => {
                polarity = Some(ImplPolarity::Negative);

                // For negative impls, we use the generic params, but *not* the predicates,
                // from the original type. Otherwise, the displayed impl appears to be a
                // conditional negative impl, when it's really unconditional.
                //
                // For example, consider the struct Foo<T: Copy>(*mut T). Using
                // the original predicates in our impl would cause us to generate
                // `impl !Send for Foo<T: Copy>`, which makes it appear that Foo
                // implements Send where T is not copy.
                //
                // Instead, we generate `impl !Send for Foo<T>`, which better
                // expresses the fact that `Foo<T>` never implements `Send`,
                // regardless of the choice of `T`.
                let real_generics = (&generics, &Default::default());

                // Clean the generics, but ignore the '?Sized' bounds generated
                // by the `Clean` impl
                let clean_generics = real_generics.clean(self.cx);

                Generics {
                    params: clean_generics.params,
                    where_predicates: Vec::new(),
                }
            }
            _ => unreachable!(),
        };

        let real_name = name.map(|name| Ident::from_str(&name));
        let ty = self.get_real_ty(def_id, def_ctor, &real_name, &generics);

        return Some(Item {
            source: Span::empty(),
            name: None,
            attrs: Default::default(),
            visibility: None,
            // Synthetic items get fake DefIds; see `next_def_id`.
            def_id: self.next_def_id(def_id.krate),
            stability: None,
            deprecation: None,
            inner: ImplItem(Impl {
                unsafety: hir::Unsafety::Normal,
                generics: new_generics,
                provided_trait_methods: FxHashSet(),
                trait_: Some(trait_.clean(self.cx)),
                for_: ty.clean(self.cx),
                items: Vec::new(),
                polarity,
                synthetic: true,
            }),
        });
    }
    None
}
/// Converts `ty::Generics` parameters into `hir::GenericArgs` suitable for
/// a path segment: lifetime params become `hir::Lifetime`s (an empty name
/// maps to `'static`) and type params become path types.
fn generics_to_path_params(&self, generics: ty::Generics) -> hir::GenericArgs {
    let mut args = vec![];

    for param in generics.params.iter() {
        match param.kind {
            ty::GenericParamDefKind::Lifetime => {
                let name = if param.name == "" {
                    // An unnamed lifetime is rendered as `'static`.
                    hir::ParamName::Plain(keywords::StaticLifetime.ident())
                } else {
                    hir::ParamName::Plain(ast::Ident::from_interned_str(param.name))
                };

                args.push(hir::GenericArg::Lifetime(hir::Lifetime {
                    id: ast::DUMMY_NODE_ID,
                    span: DUMMY_SP,
                    name: hir::LifetimeName::Param(name),
                }));
            }
            ty::GenericParamDefKind::Type {..} => {
                args.push(hir::GenericArg::Type(self.ty_param_to_ty(param.clone())));
            }
        }
    }

    hir::GenericArgs {
        args: HirVec::from_vec(args),
        bindings: HirVec::new(),
        parenthesized: false,
    }
}
/// Builds an `hir::Ty` path referring to the type parameter `param`,
/// resolved as `Def::TyParam` with dummy spans/ids (rustdoc only renders
/// it; it is never type-checked).
fn ty_param_to_ty(&self, param: ty::GenericParamDef) -> hir::Ty {
    debug!("ty_param_to_ty({:?}) {:?}", param, param.def_id);
    hir::Ty {
        id: ast::DUMMY_NODE_ID,
        node: hir::TyKind::Path(hir::QPath::Resolved(
            None,
            P(hir::Path {
                span: DUMMY_SP,
                def: Def::TyParam(param.def_id),
                segments: HirVec::from_vec(vec![
                    hir::PathSegment::from_ident(Ident::from_interned_str(param.name))
                ]),
            }),
        )),
        span: DUMMY_SP,
        hir_id: hir::DUMMY_HIR_ID,
    }
}
/// Runs the compiler's auto trait solver for (`did`, `trait_did`) and maps
/// its verdict to rustdoc's `AutoTraitResult`, converting the inference
/// output (region constraints + user env) into a clean `Generics` for a
/// positive impl.
fn find_auto_trait_generics(
    &self,
    did: DefId,
    trait_did: DefId,
    generics: &ty::Generics,
) -> AutoTraitResult {
    match self.f.find_auto_trait_generics(did, trait_did, generics,
            |infcx, mut info| {
                let region_data = info.region_data;
                let names_map =
                    info.names_map
                        .drain()
                        .map(|name| (name.clone(), Lifetime(name)))
                        .collect();
                // Fixed: the original source was corrupted by an HTML-entity
                // decode (`&reg` -> `®`), turning `&region_data` into
                // `®ion_data`.
                let lifetime_predicates =
                    self.handle_lifetimes(&region_data, &names_map);
                let new_generics = self.param_env_to_generics(
                    infcx.tcx,
                    did,
                    info.full_user_env,
                    generics.clone(),
                    lifetime_predicates,
                    info.vid_to_region,
                );

                debug!(
                    "find_auto_trait_generics(did={:?}, trait_did={:?}, generics={:?}): \
                     finished with {:?}",
                    did, trait_did, generics, new_generics
                );

                new_generics
            }) {
        auto::AutoTraitResult::ExplicitImpl => AutoTraitResult::ExplicitImpl,
        auto::AutoTraitResult::NegativeImpl => AutoTraitResult::NegativeImpl,
        auto::AutoTraitResult::PositiveImpl(res) => AutoTraitResult::PositiveImpl(res),
    }
}
/// Looks up the `Lifetime` registered for a named region in `names_map`.
/// Unnamed regions fall back to `'static`.
///
/// # Panics
/// Panics if the region is named but missing from `names_map`, since every
/// named region should have been registered beforehand.
fn get_lifetime(&self, region: Region, names_map: &FxHashMap<String, Lifetime>) -> Lifetime {
    self.region_name(region)
        .map(|name| {
            names_map
                .get(&name)
                .unwrap_or_else(|| {
                    panic!("Missing lifetime with name {:?} for {:?}", name, region)
                })
                // Clone inside the closure so the named path never touches
                // the fallback value.
                .clone()
        })
        // Build the `'static` fallback lazily instead of eagerly
        // constructing (and borrowing) a temporary with
        // `unwrap_or(&Lifetime::statik())`.
        .unwrap_or_else(Lifetime::statik)
}
/// Returns the name of an early-bound region; every other region kind has
/// no usable name and yields `None`.
fn region_name(&self, region: Region) -> Option<String> {
    if let &ty::ReEarlyBound(r) = region {
        Some(r.name.to_string())
    } else {
        None
    }
}
// This method calculates two things: Lifetime constraints of the form 'a: 'b,
// and region constraints of the form ReVar: 'a
//
// This is essentially a simplified version of lexical_region_resolve. However,
// handle_lifetimes determines what *needs to be* true in order for an impl to hold.
// lexical_region_resolve, along with much of the rest of the compiler, is concerned
// with determining if a given set of constraints/predicates *are* met, given some
// starting conditions (e.g. user-provided code). For this reason, it's easier
// to perform the calculations we need on our own, rather than trying to make
// existing inference/solver code do what we want.
fn handle_lifetimes<'cx>(
    &self,
    regions: &RegionConstraintData<'cx>,
    names_map: &FxHashMap<String, Lifetime>,
) -> Vec<WherePredicate> {
    // Our goal is to 'flatten' the list of constraints by eliminating
    // all intermediate RegionVids. At the end, all constraints should
    // be between Regions (aka region variables). This gives us the information
    // we need to create the Generics.
    let mut finished = FxHashMap();

    let mut vid_map: FxHashMap<RegionTarget, RegionDeps> = FxHashMap();

    // Flattening is done in two parts. First, we insert all of the constraints
    // into a map. Each RegionTarget (either a RegionVid or a Region) maps
    // to its smaller and larger regions. Note that 'larger' regions correspond
    // to sub-regions in Rust code (e.g. in 'a: 'b, 'a is the larger region).
    for constraint in regions.constraints.keys() {
        match constraint {
            &Constraint::VarSubVar(r1, r2) => {
                // Inner scope so the first mutable borrow of vid_map ends
                // before the second entry() call.
                {
                    let deps1 = vid_map
                        .entry(RegionTarget::RegionVid(r1))
                        .or_insert_with(|| Default::default());
                    deps1.larger.insert(RegionTarget::RegionVid(r2));
                }

                let deps2 = vid_map
                    .entry(RegionTarget::RegionVid(r2))
                    .or_insert_with(|| Default::default());
                deps2.smaller.insert(RegionTarget::RegionVid(r1));
            }
            &Constraint::RegSubVar(region, vid) => {
                let deps = vid_map
                    .entry(RegionTarget::RegionVid(vid))
                    .or_insert_with(|| Default::default());
                deps.smaller.insert(RegionTarget::Region(region));
            }
            &Constraint::VarSubReg(vid, region) => {
                let deps = vid_map
                    .entry(RegionTarget::RegionVid(vid))
                    .or_insert_with(|| Default::default());
                deps.larger.insert(RegionTarget::Region(region));
            }
            &Constraint::RegSubReg(r1, r2) => {
                // The constraint is already in the form that we want, so we're done with it
                // Desired order is 'larger, smaller', so flip them
                if self.region_name(r1) != self.region_name(r2) {
                    finished
                        .entry(self.region_name(r2).unwrap())
                        .or_insert_with(|| Vec::new())
                        .push(r1);
                }
            }
        }
    }

    // Here, we 'flatten' the map one element at a time.
    // All of the element's sub and super regions are connected
    // to each other. For example, if we have a graph that looks like this:
    //
    // (A, B) - C - (D, E)
    // Where (A, B) are subregions, and (D,E) are super-regions
    //
    // then after deleting 'C', the graph will look like this:
    //  ... - A - (D, E ...)
    //  ... - B - (D, E, ...)
    //  (A, B, ...) - D - ...
    //  (A, B, ...) - E - ...
    //
    // where '...' signifies the existing sub and super regions of an entry
    // When two adjacent ty::Regions are encountered, we've computed a final
    // constraint, and add it to our list. Since we make sure to never re-add
    // deleted items, this process will always finish.
    while !vid_map.is_empty() {
        let target = vid_map.keys().next().expect("Keys somehow empty").clone();
        let deps = vid_map.remove(&target).expect("Entry somehow missing");

        for smaller in deps.smaller.iter() {
            for larger in deps.larger.iter() {
                match (smaller, larger) {
                    (&RegionTarget::Region(r1), &RegionTarget::Region(r2)) => {
                        if self.region_name(r1) != self.region_name(r2) {
                            finished
                                .entry(self.region_name(r2).unwrap())
                                .or_insert_with(|| Vec::new())
                                .push(r1) // Larger, smaller
                        }
                    }
                    (&RegionTarget::RegionVid(_), &RegionTarget::Region(_)) => {
                        if let Entry::Occupied(v) = vid_map.entry(*smaller) {
                            let smaller_deps = v.into_mut();
                            smaller_deps.larger.insert(*larger);
                            smaller_deps.larger.remove(&target);
                        }
                    }
                    (&RegionTarget::Region(_), &RegionTarget::RegionVid(_)) => {
                        if let Entry::Occupied(v) = vid_map.entry(*larger) {
                            let deps = v.into_mut();
                            deps.smaller.insert(*smaller);
                            deps.smaller.remove(&target);
                        }
                    }
                    (&RegionTarget::RegionVid(_), &RegionTarget::RegionVid(_)) => {
                        if let Entry::Occupied(v) = vid_map.entry(*smaller) {
                            let smaller_deps = v.into_mut();
                            smaller_deps.larger.insert(*larger);
                            smaller_deps.larger.remove(&target);
                        }

                        if let Entry::Occupied(v) = vid_map.entry(*larger) {
                            let larger_deps = v.into_mut();
                            larger_deps.smaller.insert(*smaller);
                            larger_deps.smaller.remove(&target);
                        }
                    }
                }
            }
        }
    }

    // Turn the flattened region relations into `'a: 'b + 'c`-style
    // where-predicates, one per named lifetime that ended up with bounds.
    let lifetime_predicates = names_map
        .iter()
        .flat_map(|(name, lifetime)| {
            let empty = Vec::new();
            let bounds: FxHashSet<GenericBound> = finished.get(name).unwrap_or(&empty).iter()
                .map(|region| GenericBound::Outlives(self.get_lifetime(region, names_map)))
                .collect();

            if bounds.is_empty() {
                return None;
            }
            Some(WherePredicate::RegionPredicate {
                lifetime: lifetime.clone(),
                bounds: bounds.into_iter().collect(),
            })
        })
        .collect();

    lifetime_predicates
}
/// Collects the late-bound regions appearing in `pred`'s types, turning
/// each into a lifetime `GenericParamDef` destined for a `for<...>` clause.
/// Inference and early-bound regions are ignored; anything else panics.
fn extract_for_generics<'b, 'c, 'd>(
    &self,
    tcx: TyCtxt<'b, 'c, 'd>,
    pred: ty::Predicate<'d>,
) -> FxHashSet<GenericParamDef> {
    pred.walk_tys()
        .flat_map(|t| {
            let mut regions = FxHashSet();
            tcx.collect_regions(&t, &mut regions);

            regions.into_iter().flat_map(|r| {
                match r {
                    // We only care about late bound regions, as we need to add them
                    // to the 'for<>' section
                    &ty::ReLateBound(_, ty::BoundRegion::BrNamed(_, name)) => {
                        Some(GenericParamDef {
                            name: name.to_string(),
                            kind: GenericParamDefKind::Lifetime,
                        })
                    }
                    &ty::ReVar(_) | &ty::ReEarlyBound(_) => None,
                    _ => panic!("Unexpected region type {:?}", r),
                }
            })
        })
        .collect()
}
/// Merges the accumulated per-type and per-lifetime bound sets into the
/// final list of where-predicates. `Fn`-family bounds recorded in
/// `ty_to_fn` are rewritten into parenthesized sugar (`T: Fn(A) -> B`)
/// before being attached to their type.
fn make_final_bounds<'b, 'c, 'cx>(
    &self,
    ty_to_bounds: FxHashMap<Type, FxHashSet<GenericBound>>,
    ty_to_fn: FxHashMap<Type, (Option<PolyTrait>, Option<Type>)>,
    lifetime_to_bounds: FxHashMap<Lifetime, FxHashSet<GenericBound>>,
) -> Vec<WherePredicate> {
    ty_to_bounds
        .into_iter()
        .flat_map(|(ty, mut bounds)| {
            if let Some(data) = ty_to_fn.get(&ty) {
                // Rebuild the Fn-trait bound with parenthesized args and
                // the recorded `Output` type.
                let (poly_trait, output) =
                    (data.0.as_ref().unwrap().clone(), data.1.as_ref().cloned());
                let new_ty = match &poly_trait.trait_ {
                    &Type::ResolvedPath {
                        ref path,
                        ref typarams,
                        ref did,
                        ref is_generic,
                    } => {
                        let mut new_path = path.clone();
                        let last_segment = new_path.segments.pop().unwrap();

                        let (old_input, old_output) = match last_segment.args {
                            GenericArgs::AngleBracketed { types, .. } => (types, None),
                            GenericArgs::Parenthesized { inputs, output, .. } => {
                                (inputs, output)
                            }
                        };

                        // Two different outputs for the same Fn type is a
                        // broken invariant.
                        if old_output.is_some() && old_output != output {
                            panic!(
                                "Output mismatch for {:?} {:?} {:?}",
                                ty, old_output, data.1
                            );
                        }

                        let new_params = GenericArgs::Parenthesized {
                            inputs: old_input,
                            output,
                        };

                        new_path.segments.push(PathSegment {
                            name: last_segment.name,
                            args: new_params,
                        });

                        Type::ResolvedPath {
                            path: new_path,
                            typarams: typarams.clone(),
                            did: did.clone(),
                            is_generic: *is_generic,
                        }
                    }
                    _ => panic!("Unexpected data: {:?}, {:?}", ty, data),
                };

                bounds.insert(GenericBound::TraitBound(
                    PolyTrait {
                        trait_: new_ty,
                        generic_params: poly_trait.generic_params,
                    },
                    hir::TraitBoundModifier::None,
                ));
            }

            if bounds.is_empty() {
                return None;
            }

            // Sort for deterministic output across runs.
            let mut bounds_vec = bounds.into_iter().collect();
            self.sort_where_bounds(&mut bounds_vec);

            Some(WherePredicate::BoundPredicate {
                ty,
                bounds: bounds_vec,
            })
        })
        .chain(
            lifetime_to_bounds
                .into_iter()
                .filter(|&(_, ref bounds)| !bounds.is_empty())
                .map(|(lifetime, bounds)| {
                    let mut bounds_vec = bounds.into_iter().collect();
                    self.sort_where_bounds(&mut bounds_vec);
                    WherePredicate::RegionPredicate {
                        lifetime,
                        bounds: bounds_vec,
                    }
                }),
        )
        .collect()
}
// Converts the calculated ParamEnv and lifetime information to a clean::Generics, suitable for
// display on the docs page. Cleaning the Predicates produces sub-optimal WherePredicate's,
// so we fix them up:
//
// * Multiple bounds for the same type are coalesced into one: e.g. 'T: Copy', 'T: Debug'
// becomes 'T: Copy + Debug'
// * Fn bounds are handled specially - instead of leaving it as 'T: Fn(), <T as Fn::Output> =
// K', we use the dedicated syntax 'T: Fn() -> K'
// * We explicitly add a '?Sized' bound if we didn't find any 'Sized' predicates for a type
fn param_env_to_generics<'b, 'c, 'cx>(
    &self,
    tcx: TyCtxt<'b, 'c, 'cx>,
    did: DefId,
    param_env: ty::ParamEnv<'cx>,
    type_generics: ty::Generics,
    mut existing_predicates: Vec<WherePredicate>,
    vid_to_region: FxHashMap<ty::RegionVid, ty::Region<'cx>>,
) -> Generics {
    debug!(
        "param_env_to_generics(did={:?}, param_env={:?}, type_generics={:?}, \
         existing_predicates={:?})",
        did, param_env, type_generics, existing_predicates
    );

    // The `Sized` trait must be handled specially, since we only display it when
    // it is *not* required (i.e. '?Sized')
    let sized_trait = self.cx
        .tcx
        .require_lang_item(lang_items::SizedTraitLangItem);

    let mut replacer = RegionReplacer {
        vid_to_region: &vid_to_region,
        tcx,
    };

    // Bounds the type already carries on its own; filtering against these
    // leaves only what the synthesized impl additionally requires
    // (`Sized` predicates are kept so '?Sized' handling below sees them).
    let orig_bounds: FxHashSet<_> = self.cx.tcx.param_env(did).caller_bounds.iter().collect();
    let clean_where_predicates = param_env
        .caller_bounds
        .iter()
        .filter(|p| {
            !orig_bounds.contains(p) || match p {
                &&ty::Predicate::Trait(pred) => pred.def_id() == sized_trait,
                _ => false,
            }
        })
        .map(|p| {
            let replaced = p.fold_with(&mut replacer);
            (replaced.clone(), replaced.clean(self.cx))
        });

    let full_generics = (&type_generics, &tcx.predicates_of(did));
    let Generics {
        params: mut generic_params,
        ..
    } = full_generics.clean(self.cx);

    // Accumulators keyed by type/lifetime; merged into final predicates by
    // `make_final_bounds` after the loop.
    let mut has_sized = FxHashSet();
    let mut ty_to_bounds = FxHashMap();
    let mut lifetime_to_bounds = FxHashMap();
    let mut ty_to_traits: FxHashMap<Type, FxHashSet<Type>> = FxHashMap();

    let mut ty_to_fn: FxHashMap<Type, (Option<PolyTrait>, Option<Type>)> = FxHashMap();

    for (orig_p, p) in clean_where_predicates {
        match p {
            WherePredicate::BoundPredicate { ty, mut bounds } => {
                // Writing a projection trait bound of the form
                // <T as Trait>::Name : ?Sized
                // is illegal, because ?Sized bounds can only
                // be written in the (here, nonexistent) definition
                // of the type.
                // Therefore, we make sure that we never add a ?Sized
                // bound for projections
                match &ty {
                    &Type::QPath { .. } => {
                        has_sized.insert(ty.clone());
                    }
                    _ => {}
                }

                if bounds.is_empty() {
                    continue;
                }

                let mut for_generics = self.extract_for_generics(tcx, orig_p.clone());

                assert!(bounds.len() == 1);
                let mut b = bounds.pop().unwrap();

                if b.is_sized_bound(self.cx) {
                    has_sized.insert(ty.clone());
                } else if !b.get_trait_type()
                    .and_then(|t| {
                        ty_to_traits
                            .get(&ty)
                            .map(|bounds| bounds.contains(&strip_type(t.clone())))
                    })
                    .unwrap_or(false)
                {
                    // If we've already added a projection bound for the same type, don't add
                    // this, as it would be a duplicate

                    // Handle any 'Fn/FnOnce/FnMut' bounds specially,
                    // as we want to combine them with any 'Output' qpaths
                    // later
                    let is_fn = match &mut b {
                        &mut GenericBound::TraitBound(ref mut p, _) => {
                            // Insert regions into the for_generics hash map first, to ensure
                            // that we don't end up with duplicate bounds (e.g. for<'b, 'b>)
                            for_generics.extend(p.generic_params.clone());
                            p.generic_params = for_generics.into_iter().collect();
                            self.is_fn_ty(&tcx, &p.trait_)
                        }
                        _ => false,
                    };

                    let poly_trait = b.get_poly_trait().unwrap();

                    if is_fn {
                        // Record the Fn trait; its bound entry stays empty
                        // until `make_final_bounds` builds the sugared form.
                        ty_to_fn
                            .entry(ty.clone())
                            .and_modify(|e| *e = (Some(poly_trait.clone()), e.1.clone()))
                            .or_insert(((Some(poly_trait.clone())), None));

                        ty_to_bounds
                            .entry(ty.clone())
                            .or_insert_with(|| FxHashSet());
                    } else {
                        ty_to_bounds
                            .entry(ty.clone())
                            .or_insert_with(|| FxHashSet())
                            .insert(b.clone());
                    }
                }
            }
            WherePredicate::RegionPredicate { lifetime, bounds } => {
                lifetime_to_bounds
                    .entry(lifetime)
                    .or_insert_with(|| FxHashSet())
                    .extend(bounds);
            }
            WherePredicate::EqPredicate { lhs, rhs } => {
                match &lhs {
                    &Type::QPath {
                        name: ref left_name,
                        ref self_type,
                        ref trait_,
                    } => {
                        let ty = &*self_type;
                        match **trait_ {
                            Type::ResolvedPath {
                                path: ref trait_path,
                                ref typarams,
                                ref did,
                                ref is_generic,
                            } => {
                                let mut new_trait_path = trait_path.clone();

                                // `<T as Fn>::Output = K` is merged into
                                // the Fn entry so it renders as
                                // `T: Fn() -> K`.
                                if self.is_fn_ty(&tcx, trait_) && left_name == FN_OUTPUT_NAME {
                                    ty_to_fn
                                        .entry(*ty.clone())
                                        .and_modify(|e| *e = (e.0.clone(), Some(rhs.clone())))
                                        .or_insert((None, Some(rhs)));
                                    continue;
                                }

                                // FIXME: Remove this scope when NLL lands
                                {
                                    let args =
                                        &mut new_trait_path.segments.last_mut().unwrap().args;

                                    match args {
                                        // Convert something like '<T as Iterator::Item> = u8'
                                        // to 'T: Iterator<Item=u8>'
                                        &mut GenericArgs::AngleBracketed {
                                            ref mut bindings,
                                            ..
                                        } => {
                                            bindings.push(TypeBinding {
                                                name: left_name.clone(),
                                                ty: rhs,
                                            });
                                        }
                                        &mut GenericArgs::Parenthesized { .. } => {
                                            existing_predicates.push(
                                                WherePredicate::EqPredicate {
                                                    lhs: lhs.clone(),
                                                    rhs,
                                                },
                                            );
                                            continue; // If something other than a Fn ends up
                                                      // with parenthesis, leave it alone
                                        }
                                    }
                                }

                                let bounds = ty_to_bounds
                                    .entry(*ty.clone())
                                    .or_insert_with(|| FxHashSet());

                                bounds.insert(GenericBound::TraitBound(
                                    PolyTrait {
                                        trait_: Type::ResolvedPath {
                                            path: new_trait_path,
                                            typarams: typarams.clone(),
                                            did: did.clone(),
                                            is_generic: *is_generic,
                                        },
                                        generic_params: Vec::new(),
                                    },
                                    hir::TraitBoundModifier::None,
                                ));

                                // Remove any existing 'plain' bound (e.g. `T: Iterator`) so
                                // that we don't see a
                                // duplicate bound like `T: Iterator + Iterator<Item=u8>`
                                // on the docs page.
                                bounds.remove(&GenericBound::TraitBound(
                                    PolyTrait {
                                        trait_: *trait_.clone(),
                                        generic_params: Vec::new(),
                                    },
                                    hir::TraitBoundModifier::None,
                                ));

                                // Avoid creating any new duplicate bounds later in the outer
                                // loop
                                ty_to_traits
                                    .entry(*ty.clone())
                                    .or_insert_with(|| FxHashSet())
                                    .insert(*trait_.clone());
                            }
                            _ => panic!("Unexpected trait {:?} for {:?}", trait_, did),
                        }
                    }
                    _ => panic!("Unexpected LHS {:?} for {:?}", lhs, did),
                }
            }
        };
    }

    let final_bounds = self.make_final_bounds(ty_to_bounds, ty_to_fn, lifetime_to_bounds);

    existing_predicates.extend(final_bounds);

    for param in generic_params.iter_mut() {
        match param.kind {
            GenericParamDefKind::Type { ref mut default, ref mut bounds, .. } => {
                // We never want something like `impl<T=Foo>`.
                default.take();
                let generic_ty = Type::Generic(param.name.clone());
                if !has_sized.contains(&generic_ty) {
                    bounds.insert(0, GenericBound::maybe_sized(self.cx));
                }
            }
            GenericParamDefKind::Lifetime => {}
        }
    }

    self.sort_where_predicates(&mut existing_predicates);

    Generics {
        params: generic_params,
        where_predicates: existing_predicates,
    }
}
// Ensure that the predicates are in a consistent order. The precise
// ordering doesn't actually matter, but it's important that
// a given set of predicates always appears in the same order -
// both for visual consistency between 'rustdoc' runs, and to
// make writing tests much easier
#[inline]
fn sort_where_predicates(&self, predicates: &mut Vec<WherePredicate>) {
    // We should never have identical bounds - and if we do,
    // they're visually identical as well. Therefore, using
    // an unstable sort is fine.
    //
    // `predicates` is already a mutable reference, so pass it through
    // directly instead of binding it `mut` and re-borrowing with
    // `&mut predicates` (which produced a needless `&mut &mut Vec<_>`
    // that only worked via deref coercion).
    self.unstable_debug_sort(predicates);
}
// Ensure that the bounds are in a consistent order. The precise
// ordering doesn't actually matter, but it's important that
// a given set of bounds always appears in the same order -
// both for visual consistency between 'rustdoc' runs, and to
// make writing tests much easier
#[inline]
fn sort_where_bounds(&self, bounds: &mut Vec<GenericBound>) {
    // We should never have identical bounds - and if we do,
    // they're visually identical as well. Therefore, using
    // an unstable sort is fine.
    //
    // `bounds` is already a mutable reference, so pass it through
    // directly instead of binding it `mut` and re-borrowing with
    // `&mut bounds` (a needless `&mut &mut Vec<_>`).
    self.unstable_debug_sort(bounds);
}
// This might look horrendously hacky, but it's actually not that bad.
//
// For performance reasons, we use several different FxHashMaps
// in the process of computing the final set of where predicates.
// However, the iteration order of a HashMap is completely unspecified.
// In fact, the iteration of an FxHashMap can even vary between platforms,
// since FxHasher has different behavior for 32-bit and 64-bit platforms.
//
// Obviously, it's extremely undesirable for documentation rendering
// to be dependent on the platform it's run on. Apart from being confusing
// to end users, it makes writing tests much more difficult, as predicates
// can appear in any order in the final result.
//
// To solve this problem, we sort WherePredicates and GenericBounds
// by their Debug string. The thing to keep in mind is that we don't really
// care what the final order is - we're synthesizing an impl or bound
// ourselves, so any order can be considered equally valid. By sorting the
// predicates and bounds, however, we ensure that for a given codebase, all
// auto-trait impls always render in exactly the same way.
//
// Using the Debug implementation for sorting prevents us from needing to
// write quite a bit of almost entirely useless code (e.g. how should two
// Types be sorted relative to each other). It also allows us to solve the
// problem for both WherePredicates and GenericBounds at the same time. This
// approach is probably somewhat slower, but the small number of items
// involved (impls rarely have more than a few bounds) means that it
// shouldn't matter in practice.
fn unstable_debug_sort<T: Debug>(&self, vec: &mut Vec<T>) {
    // `sort_by_cached_key` formats each element only once.
    vec.sort_by_cached_key(|x| format!("{:?}", x))
}
/// Returns true when `ty` resolves to one of the `Fn`, `FnMut`, or
/// `FnOnce` lang items.
fn is_fn_ty(&self, tcx: &TyCtxt, ty: &Type) -> bool {
    if let Type::ResolvedPath { ref did, .. } = *ty {
        let fn_traits = [
            lang_items::FnTraitLangItem,
            lang_items::FnMutTraitLangItem,
            lang_items::FnOnceTraitLangItem,
        ];
        fn_traits
            .iter()
            .any(|&item| *did == tcx.require_lang_item(item))
    } else {
        false
    }
}
// This is an ugly hack, but it's the simplest way to handle synthetic impls without greatly
// refactoring either librustdoc or librustc. In particular, allowing new DefIds to be
// registered after the AST is constructed would require storing the defid mapping in a
// RefCell, decreasing the performance for normal compilation for very little gain.
//
// Instead, we construct 'fake' def ids, which start immediately after the last DefId in
// DefIndexAddressSpace::Low. In the Debug impl for clean::Item, we explicitly check for fake
// def ids, as we'll end up with a panic if we use the DefId Debug impl for fake DefIds
fn next_def_id(&self, crate_num: CrateNum) -> DefId {
    // First fake id for this crate: one past its real def-path table.
    let start_def_id = {
        let next_id = if crate_num == LOCAL_CRATE {
            self.cx
                .tcx
                .hir
                .definitions()
                .def_path_table()
                .next_id(DefIndexAddressSpace::Low)
        } else {
            self.cx
                .cstore
                .def_path_table(crate_num)
                .next_id(DefIndexAddressSpace::Low)
        };

        DefId {
            krate: crate_num,
            index: next_id,
        }
    };

    let mut fake_ids = self.cx.fake_def_ids.borrow_mut();

    // Return the current counter for this crate, then advance it by one so
    // the next call hands out a fresh index.
    let def_id = fake_ids.entry(crate_num).or_insert(start_def_id).clone();
    fake_ids.insert(
        crate_num,
        DefId {
            krate: crate_num,
            index: DefIndex::from_array_index(
                def_id.index.as_array_index() + 1,
                def_id.index.address_space(),
            ),
        },
    );

    // Record where the fake range starts so other code can recognize fake
    // ids for this crate.
    MAX_DEF_ID.with(|m| {
        m.borrow_mut()
            .entry(def_id.krate.clone())
            .or_insert(start_def_id);
    });

    self.cx.all_fake_def_ids.borrow_mut().insert(def_id);

    def_id.clone()
}
}
/// Replaces all `ReVar` regions in a type with concrete `ty::Region`s,
/// using the provided map; regions without a mapping are folded
/// recursively (see the `TypeFolder` impl below).
struct RegionReplacer<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> {
    /// Resolved values for inference region variables.
    vid_to_region: &'a FxHashMap<ty::RegionVid, ty::Region<'tcx>>,
    tcx: TyCtxt<'a, 'gcx, 'tcx>,
}
impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> {
        self.tcx
    }

    /// Substitutes a mapped region for each `ReVar`; any other (or
    /// unmapped) region is folded structurally.
    fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
        let mapped = if let &ty::ReVar(vid) = r {
            self.vid_to_region.get(&vid).cloned()
        } else {
            None
        };
        mapped.unwrap_or_else(|| r.super_fold_with(self))
    }
}
|
use super::Pass;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::main_body_opts;
use core::ops::Range;
use pulldown_cmark::{Event, Parser, Tag};
use std::iter::Peekable;
use std::str::CharIndices;
/// Rustdoc pass that lints doc comments for malformed HTML
/// (unclosed/unopened tags).
crate const CHECK_INVALID_HTML_TAGS: Pass = Pass {
    name: "check-invalid-html-tags",
    run: check_invalid_html_tags,
    description: "detects invalid HTML tags in doc comments",
};
/// `DocFolder` that walks each item's rendered docs looking for invalid
/// HTML tags and emits lints through the doc context.
struct InvalidHtmlTagsLinter<'a, 'tcx> {
    cx: &'a mut DocContext<'tcx>,
}
/// Entry point for the pass: runs the linter over the crate, but only on
/// nightly builds (on other channels the crate passes through untouched).
crate fn check_invalid_html_tags(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
    if cx.tcx.sess.is_nightly_build() {
        let mut linter = InvalidHtmlTagsLinter { cx };
        linter.fold_crate(krate)
    } else {
        krate
    }
}
/// HTML "void elements": tags allowed to appear without a closing tag.
const ALLOWED_UNCLOSED: &[&str] = &[
    "area", "base", "br", "col", "embed", "hr", "img", "input", "keygen", "link", "meta", "param",
    "source", "track", "wbr",
];

/// Closes the most recent occurrence of `tag_name` in `tags`, reporting via
/// `f` any still-open tags nested inside it — unless they are void elements
/// or the whole region sits inside a `<script>`/`<style>` tag. If
/// `tag_name` was never opened, it is reported as unopened at `range`.
fn drop_tag(
    tags: &mut Vec<(String, Range<usize>)>,
    tag_name: String,
    range: Range<usize>,
    f: &impl Fn(&str, &Range<usize>),
) {
    let tag_name_low = tag_name.to_lowercase();
    let pos = match tags.iter().rposition(|(t, _)| t.to_lowercase() == tag_name_low) {
        Some(pos) => pos,
        None => {
            // It can happen for example in this case: `<h2></script></h2>` (the `h2` tag
            // isn't required but it helps for the visualization).
            f(&format!("unopened HTML tag `{}`", tag_name), &range);
            return;
        }
    };

    // If the tag is nested inside a "<script>" or a "<style>" tag, no warning should
    // be emitted.
    let suppress = tags[..=pos].iter().any(|(open, _)| {
        let open = open.to_lowercase();
        open == "script" || open == "style"
    });

    // `tags` is used as a queue: everything after `pos` was opened inside
    // the tag being closed, so e.g. `<h2><h3></h2>` leaves `h3` unclosed.
    for (inner_name, inner_span) in tags.drain(pos + 1..) {
        if suppress {
            continue;
        }
        let inner_low = inner_name.to_lowercase();
        if ALLOWED_UNCLOSED.contains(&inner_low.as_str()) {
            continue;
        }
        f(&format!("unclosed HTML tag `{}`", inner_name), &inner_span);
    }

    // Remove the `tag_name` that was originally closed.
    tags.pop();
}
/// Parses one HTML tag starting at `start_pos` (the `<`), consuming from
/// `iter`. Opening tags are pushed onto `tags`; closing tags are resolved
/// via `drop_tag`. Reported spans are offset by `range.start` so they index
/// into the original doc string.
fn extract_html_tag(
    tags: &mut Vec<(String, Range<usize>)>,
    text: &str,
    range: &Range<usize>,
    start_pos: usize,
    iter: &mut Peekable<CharIndices<'_>>,
    f: &impl Fn(&str, &Range<usize>),
) {
    let mut tag_name = String::new();
    let mut is_closing = false;
    let mut prev_pos = start_pos;
    loop {
        let (pos, c) = match iter.peek() {
            Some((pos, c)) => (*pos, *c),
            // In case we reached the end of the doc comment, we want to check that it's an
            // unclosed HTML tag. For example "/// <h3".
            None => (prev_pos, '\0'),
        };
        prev_pos = pos;
        // Checking if this is a closing tag (like `</a>` for `<a>`).
        if c == '/' && tag_name.is_empty() {
            is_closing = true;
        } else if c.is_ascii_alphanumeric() {
            tag_name.push(c);
        } else {
            if !tag_name.is_empty() {
                let mut r = Range { start: range.start + start_pos, end: range.start + pos };
                if c == '>' {
                    // In case we have a tag without attribute, we can consider the span to
                    // refer to it fully.
                    r.end += 1;
                }
                if is_closing {
                    // In case we have "</div >" or even "</div\n>".
                    if c != '>' {
                        if !c.is_whitespace() {
                            // It seems like it's not a valid HTML tag.
                            break;
                        }
                        let mut found = false;
                        for (new_pos, c) in text[pos..].char_indices() {
                            if !c.is_whitespace() {
                                if c == '>' {
                                    // BUG FIX: `new_pos` is an offset into
                                    // `text[pos..]`, so the absolute
                                    // position of `>` is `pos + new_pos`;
                                    // the original omitted `pos`, producing
                                    // spans that ended too early (possibly
                                    // before `r.start`).
                                    r.end = range.start + pos + new_pos + 1;
                                    found = true;
                                }
                                break;
                            }
                        }
                        if !found {
                            break;
                        }
                    }
                    drop_tag(tags, tag_name, r, f);
                } else {
                    tags.push((tag_name, r));
                }
            }
            break;
        }
        iter.next();
    }
}
/// Scans `text` (one Markdown event's worth of doc text) for HTML tags and comments.
///
/// Tag openings/closings are accumulated in `tags` (closings handled through
/// `extract_html_tag`). `is_in_comment` carries HTML-comment state across calls,
/// holding the span of the most recent `<!--`. `range` locates `text` within the
/// whole doc comment so reported spans are absolute; `f` is the diagnostic sink.
fn extract_tags(
    tags: &mut Vec<(String, Range<usize>)>,
    text: &str,
    range: Range<usize>,
    is_in_comment: &mut Option<Range<usize>>,
    f: &impl Fn(&str, &Range<usize>),
) {
    let mut iter = text.char_indices().peekable();

    while let Some((start_pos, c)) = iter.next() {
        if is_in_comment.is_some() {
            // Inside an HTML comment only the terminator "-->" is significant.
            if text[start_pos..].starts_with("-->") {
                *is_in_comment = None;
            }
        } else if c == '<' {
            if text[start_pos..].starts_with("<!--") {
                // We skip the "!--" part. (Once `advance_by` is stable, might be nice to use it!)
                iter.next();
                iter.next();
                iter.next();
                // Remember where the comment opened; if it is never closed, the
                // caller reports this span.
                *is_in_comment = Some(Range {
                    start: range.start + start_pos,
                    end: range.start + start_pos + 3,
                });
            } else {
                extract_html_tag(tags, text, &range, start_pos, &mut iter, f);
            }
        }
    }
}
impl<'a, 'tcx> DocFolder for InvalidHtmlTagsLinter<'a, 'tcx> {
    /// Checks the collapsed doc comment of `item` for invalid HTML and emits
    /// the `INVALID_HTML_TAGS` lint for every problem found.
    fn fold_item(&mut self, item: Item) -> Option<Item> {
        let tcx = self.cx.tcx;
        let hir_id = match DocContext::as_local_hir_id(tcx, item.def_id) {
            Some(hir_id) => hir_id,
            None => {
                // If non-local, no need to check anything.
                return Some(self.fold_item_recur(item));
            }
        };
        let dox = item.attrs.collapsed_doc_value().unwrap_or_default();
        if !dox.is_empty() {
            // Translates a byte range inside `dox` into a source span for the lint,
            // falling back to the attribute's span when the mapping fails.
            let report_diag = |msg: &str, range: &Range<usize>| {
                let sp = match super::source_span_for_markdown_range(tcx, &dox, range, &item.attrs)
                {
                    Some(sp) => sp,
                    None => item.attr_span(tcx),
                };
                tcx.struct_span_lint_hir(crate::lint::INVALID_HTML_TAGS, hir_id, sp, |lint| {
                    lint.build(msg).emit()
                });
            };

            let mut tags = Vec::new();
            let mut is_in_comment = None;
            let mut in_code_block = false;

            let p = Parser::new_ext(&dox, main_body_opts()).into_offset_iter();

            for (event, range) in p {
                match event {
                    Event::Start(Tag::CodeBlock(_)) => in_code_block = true,
                    // HTML inside fenced code blocks is literal text, not markup.
                    Event::Html(text) | Event::Text(text) if !in_code_block => {
                        extract_tags(&mut tags, &text, range, &mut is_in_comment, &report_diag)
                    }
                    Event::End(Tag::CodeBlock(_)) => in_code_block = false,
                    _ => {}
                }
            }

            // Anything still on the stack was never closed. Use `slice::contains`
            // for the allow-list membership test instead of open-coding it with
            // `iter().find(..).is_none()`.
            for (tag, range) in tags.iter().filter(|(t, _)| {
                let t = t.to_lowercase();
                !ALLOWED_UNCLOSED.contains(&t.as_str())
            }) {
                report_diag(&format!("unclosed HTML tag `{}`", tag), range);
            }

            if let Some(range) = is_in_comment {
                report_diag("Unclosed HTML comment", &range);
            }
        }

        Some(self.fold_item_recur(item))
    }
}
Rollup merge of #89444 - notriddle:notriddle/contains-str, r=jyn514
rustdoc: use slice::contains instead of open-coding it
use super::Pass;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::main_body_opts;
use core::ops::Range;
use pulldown_cmark::{Event, Parser, Tag};
use std::iter::Peekable;
use std::str::CharIndices;
/// Pass registration for the invalid-HTML-tags check; `check_invalid_html_tags`
/// below is the entry point (it only runs the linter on nightly builds).
crate const CHECK_INVALID_HTML_TAGS: Pass = Pass {
    name: "check-invalid-html-tags",
    run: check_invalid_html_tags,
    description: "detects invalid HTML tags in doc comments",
};
/// Visitor state for the `check-invalid-html-tags` pass: just the rustdoc
/// context needed to emit lints while folding the crate.
struct InvalidHtmlTagsLinter<'a, 'tcx> {
    cx: &'a mut DocContext<'tcx>,
}
/// Entry point for the `check-invalid-html-tags` pass.
///
/// The lint is only active on nightly builds; on other channels the crate is
/// returned untouched.
crate fn check_invalid_html_tags(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
    if !cx.tcx.sess.is_nightly_build() {
        return krate;
    }
    let mut linter = InvalidHtmlTagsLinter { cx };
    linter.fold_crate(krate)
}
/// Tags that never take a closing tag, so leaving them unclosed is not an
/// error and is never reported.
const ALLOWED_UNCLOSED: &[&str] = &[
    "area", "base", "br", "col", "embed", "hr", "img", "input", "keygen", "link", "meta", "param",
    "source", "track", "wbr",
];
/// Handles a closing tag `tag_name` found at `range`: locates the matching
/// opening tag in `tags` (case-insensitively), reports every still-open tag
/// nested inside it through `f`, then pops the match. If no opening tag
/// exists, an "unopened" tag is reported instead.
fn drop_tag(
    tags: &mut Vec<(String, Range<usize>)>,
    tag_name: String,
    range: Range<usize>,
    f: &impl Fn(&str, &Range<usize>),
) {
    let tag_name_low = tag_name.to_lowercase();
    // Search from the end: the innermost matching opening tag wins.
    if let Some(pos) = tags.iter().rposition(|(t, _)| t.to_lowercase() == tag_name_low) {
        // If the tag is nested inside a "<script>" or a "<style>" tag, no warning should
        // be emitted.
        let should_not_warn = tags.iter().take(pos + 1).any(|(at, _)| {
            let at = at.to_lowercase();
            at == "script" || at == "style"
        });
        for (last_tag_name, last_tag_span) in tags.drain(pos + 1..) {
            if should_not_warn {
                continue;
            }
            let last_tag_name_low = last_tag_name.to_lowercase();
            // Tags in `ALLOWED_UNCLOSED` may legitimately be left unclosed.
            if ALLOWED_UNCLOSED.contains(&last_tag_name_low.as_str()) {
                continue;
            }
            // `tags` is used as a queue, meaning that everything after `pos` is included inside it.
            // So `<h2><h3></h2>` will look like `["h2", "h3"]`. So when closing `h2`, we will still
            // have `h3`, meaning the tag wasn't closed as it should have.
            f(&format!("unclosed HTML tag `{}`", last_tag_name), &last_tag_span);
        }
        // Remove the `tag_name` that was originally closed
        tags.pop();
    } else {
        // It can happen for example in this case: `<h2></script></h2>` (the `h2` tag isn't required
        // but it helps for the visualization).
        f(&format!("unopened HTML tag `{}`", tag_name), &range);
    }
}
/// Scans one HTML tag starting at `start_pos` (the byte offset of `<` inside `text`).
///
/// Opening tags are pushed onto `tags`; closing tags pop their match via `drop_tag`,
/// which reports mismatches through `f`. `range` is the span of `text` within the
/// whole doc comment and is used to translate `text`-local offsets into absolute
/// doc-comment offsets.
fn extract_html_tag(
    tags: &mut Vec<(String, Range<usize>)>,
    text: &str,
    range: &Range<usize>,
    start_pos: usize,
    iter: &mut Peekable<CharIndices<'_>>,
    f: &impl Fn(&str, &Range<usize>),
) {
    let mut tag_name = String::new();
    let mut is_closing = false;
    let mut prev_pos = start_pos;

    loop {
        let (pos, c) = match iter.peek() {
            Some((pos, c)) => (*pos, *c),
            // In case we reached the end of the doc comment, we want to check that it's an
            // unclosed HTML tag. For example "/// <h3".
            None => (prev_pos, '\0'),
        };
        prev_pos = pos;
        // Checking if this is a closing tag (like `</a>` for `<a>`).
        if c == '/' && tag_name.is_empty() {
            is_closing = true;
        } else if c.is_ascii_alphanumeric() {
            tag_name.push(c);
        } else {
            if !tag_name.is_empty() {
                let mut r = Range { start: range.start + start_pos, end: range.start + pos };
                if c == '>' {
                    // In case we have a tag without attribute, we can consider the span to
                    // refer to it fully.
                    r.end += 1;
                }
                if is_closing {
                    // In case we have "</div >" or even "</div      >".
                    if c != '>' {
                        if !c.is_whitespace() {
                            // It seems like it's not a valid HTML tag.
                            break;
                        }
                        let mut found = false;
                        for (new_pos, c) in text[pos..].char_indices() {
                            if !c.is_whitespace() {
                                if c == '>' {
                                    // `new_pos` comes from `text[pos..].char_indices()`, so it is
                                    // relative to `pos`; the absolute offset of `>` in `text` is
                                    // `pos + new_pos`. (The previous code dropped `pos`, producing
                                    // a span ending before the tag even starts.)
                                    r.end = range.start + pos + new_pos + 1;
                                    found = true;
                                }
                                break;
                            }
                        }
                        if !found {
                            break;
                        }
                    }
                    drop_tag(tags, tag_name, r, f);
                } else {
                    tags.push((tag_name, r));
                }
            }
            break;
        }
        iter.next();
    }
}
/// Walks `text` looking for HTML tags and HTML comments.
///
/// Discovered tags are accumulated in `tags` via `extract_html_tag`;
/// `is_in_comment` carries the span of an open `<!--` across calls, and
/// `range` maps `text`-local offsets into doc-comment offsets. `f` receives
/// diagnostics.
fn extract_tags(
    tags: &mut Vec<(String, Range<usize>)>,
    text: &str,
    range: Range<usize>,
    is_in_comment: &mut Option<Range<usize>>,
    f: &impl Fn(&str, &Range<usize>),
) {
    let mut chars = text.char_indices().peekable();

    while let Some((offset, ch)) = chars.next() {
        if is_in_comment.is_some() {
            // Inside a comment, only the terminator "-->" matters.
            if text[offset..].starts_with("-->") {
                *is_in_comment = None;
            }
            continue;
        }
        if ch != '<' {
            continue;
        }
        if text[offset..].starts_with("<!--") {
            // Skip the "!--" part. (Once `advance_by` is stable, might be nice to use it!)
            for _ in 0..3 {
                chars.next();
            }
            // Record where the comment opened in case it is never closed.
            *is_in_comment = Some(Range {
                start: range.start + offset,
                end: range.start + offset + 3,
            });
        } else {
            extract_html_tag(tags, text, &range, offset, &mut chars, f);
        }
    }
}
impl<'a, 'tcx> DocFolder for InvalidHtmlTagsLinter<'a, 'tcx> {
    /// Checks the collapsed doc comment of `item` for invalid HTML and emits
    /// the `INVALID_HTML_TAGS` lint for every problem found.
    fn fold_item(&mut self, item: Item) -> Option<Item> {
        let tcx = self.cx.tcx;
        let hir_id = match DocContext::as_local_hir_id(tcx, item.def_id) {
            Some(hir_id) => hir_id,
            None => {
                // If non-local, no need to check anything.
                return Some(self.fold_item_recur(item));
            }
        };
        let dox = item.attrs.collapsed_doc_value().unwrap_or_default();
        if !dox.is_empty() {
            // Translates a byte range inside `dox` into a source span for the lint,
            // falling back to the attribute's span when the mapping fails.
            let report_diag = |msg: &str, range: &Range<usize>| {
                let sp = match super::source_span_for_markdown_range(tcx, &dox, range, &item.attrs)
                {
                    Some(sp) => sp,
                    None => item.attr_span(tcx),
                };
                tcx.struct_span_lint_hir(crate::lint::INVALID_HTML_TAGS, hir_id, sp, |lint| {
                    lint.build(msg).emit()
                });
            };

            let mut tags = Vec::new();
            let mut is_in_comment = None;
            let mut in_code_block = false;

            let p = Parser::new_ext(&dox, main_body_opts()).into_offset_iter();

            for (event, range) in p {
                match event {
                    Event::Start(Tag::CodeBlock(_)) => in_code_block = true,
                    // HTML inside fenced code blocks is literal text, not markup.
                    Event::Html(text) | Event::Text(text) if !in_code_block => {
                        extract_tags(&mut tags, &text, range, &mut is_in_comment, &report_diag)
                    }
                    Event::End(Tag::CodeBlock(_)) => in_code_block = false,
                    _ => {}
                }
            }

            // Anything still on the stack was never closed (void elements excepted).
            for (tag, range) in tags.iter().filter(|(t, _)| {
                let t = t.to_lowercase();
                !ALLOWED_UNCLOSED.contains(&t.as_str())
            }) {
                report_diag(&format!("unclosed HTML tag `{}`", tag), range);
            }

            if let Some(range) = is_in_comment {
                report_diag("Unclosed HTML comment", &range);
            }
        }

        Some(self.fold_item_recur(item))
    }
}
|
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use self::Entry::*;
use self::VacantEntryState::*;
use collections::CollectionAllocErr;
use cell::Cell;
use borrow::Borrow;
use cmp::max;
use fmt::{self, Debug};
#[allow(deprecated)]
use hash::{Hash, Hasher, BuildHasher, SipHasher13};
use iter::{FromIterator, FusedIterator};
use mem::{self, replace};
use ops::{Deref, DerefMut, Index};
use sys;
use super::table::{self, Bucket, EmptyBucket, Fallibility, FullBucket, FullBucketMut, RawTable,
SafeHash};
use super::table::BucketState::{Empty, Full};
use super::table::Fallibility::{Fallible, Infallible};
// Smallest raw capacity a non-empty map will use (see `try_raw_capacity`).
const MIN_NONZERO_RAW_CAPACITY: usize = 32; // must be a power of two

/// The default behavior of HashMap implements a maximum load factor of 90.9%.
// Zero-sized marker type: the policy itself carries no state, only the
// capacity arithmetic in the impl below.
#[derive(Clone)]
struct DefaultResizePolicy;
impl DefaultResizePolicy {
    #[inline]
    fn new() -> DefaultResizePolicy {
        DefaultResizePolicy
    }

    /// A hash map's "capacity" is the number of elements it can hold without
    /// being resized. Its "raw capacity" is the number of slots required to
    /// provide that capacity, accounting for maximum loading. The raw capacity
    /// is always zero or a power of two.
    #[inline]
    fn try_raw_capacity(&self, len: usize) -> Result<usize, CollectionAllocErr> {
        if len == 0 {
            return Ok(0);
        }
        // Three steps: pad for the load factor (`raw_capacity >= len * 1.1`),
        // round up to a power of two, then clamp to the minimum size. Any
        // overflow along the way is a capacity overflow.
        let padded = len.checked_mul(11).map(|n| n / 10);
        match padded.and_then(|n| n.checked_next_power_of_two()) {
            Some(raw_cap) => Ok(max(MIN_NONZERO_RAW_CAPACITY, raw_cap)),
            None => Err(CollectionAllocErr::CapacityOverflow),
        }
    }

    /// Like `try_raw_capacity`, but panics on overflow.
    #[inline]
    fn raw_capacity(&self, len: usize) -> usize {
        self.try_raw_capacity(len).expect("raw_capacity overflow")
    }

    /// The capacity of the given raw capacity.
    #[inline]
    fn capacity(&self, raw_cap: usize) -> usize {
        // This doesn't have to be checked for overflow since allocation size
        // in bytes will overflow earlier than multiplication by 10.
        //
        // As per https://github.com/rust-lang/rust/pull/30991 this is
        // (raw_cap * den + den - 1) / num, i.e. ceil(raw_cap * 10 / 11).
        (raw_cap * 10 + 9) / 11
    }
}
// The main performance trick in this hashmap is called Robin Hood Hashing.
// It gains its excellent performance from one essential operation:
//
// If an insertion collides with an existing element, and that element's
// "probe distance" (how far away the element is from its ideal location)
// is higher than how far we've already probed, swap the elements.
//
// This massively lowers variance in probe distance, and allows us to get very
// high load factors with good performance. The 90% load factor I use is rather
// conservative.
//
// > Why a load factor of approximately 90%?
//
// In general, all the distances to initial buckets will converge on the mean.
// At a load factor of α, the odds of finding the target bucket after k
// probes is approximately 1-α^k. If we set this equal to 50% (since we converge
// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
// this down to make the math easier on the CPU and avoid its FPU.
// Since on average we start the probing in the middle of a cache line, this
// strategy pulls in two cache lines of hashes on every lookup. I think that's
// pretty good, but if you want to trade off some space, it could go down to one
// cache line on average with an α of 0.84.
//
// > Wait, what? Where did you get 1-α^k from?
//
// On the first probe, your odds of a collision with an existing element is α.
// The odds of doing this twice in a row is approximately α^2. For three times,
// α^3, etc. Therefore, the odds of colliding k times is α^k. The odds of NOT
// colliding after k tries is 1-α^k.
//
// The paper from 1986 cited below mentions an implementation which keeps track
// of the distance-to-initial-bucket histogram. This approach is not suitable
// for modern architectures because it requires maintaining an internal data
// structure. This allows very good first guesses, but we are most concerned
// with guessing entire cache lines, not individual indexes. Furthermore, array
// accesses are no longer linear and in one direction, as we have now. There
// is also memory and cache pressure that this would entail that would be very
// difficult to properly see in a microbenchmark.
//
// ## Future Improvements (FIXME!)
//
// Allow the load factor to be changed dynamically and/or at initialization.
//
// Also, would it be possible for us to reuse storage when growing the
// underlying table? This is exactly the use case for 'realloc', and may
// be worth exploring.
//
// ## Future Optimizations (FIXME!)
//
// Another possible design choice that I made without any real reason is
// parameterizing the raw table over keys and values. Technically, all we need
// is the size and alignment of keys and values, and the code should be just as
// efficient (well, we might need one for power-of-two size and one for not...).
// This has the potential to reduce code bloat in rust executables, without
// really losing anything except 4 words (key size, key alignment, val size,
// val alignment) which can be passed in to every call of a `RawTable` function.
// This would definitely be an avenue worth exploring if people start complaining
// about the size of rust executables.
//
// Annotate exceedingly likely branches in `table::make_hash`
// and `search_hashed` to reduce instruction cache pressure
// and mispredictions once it becomes possible (blocked on issue #11092).
//
// Shrinking the table could simply reallocate in place after moving buckets
// to the first half.
//
// The growth algorithm (fragment of the Proof of Correctness)
// --------------------
//
// The growth algorithm is basically a fast path of the naive reinsertion-
// during-resize algorithm. Other paths should never be taken.
//
// Consider growing a robin hood hashtable of capacity n. Normally, we do this
// by allocating a new table of capacity `2n`, and then individually reinsert
// each element in the old table into the new one. This guarantees that the
// new table is a valid robin hood hashtable with all the desired statistical
// properties. Remark that the order we reinsert the elements in should not
// matter. For simplicity and efficiency, we will consider only linear
// reinsertions, which consist of reinserting all elements in the old table
// into the new one by increasing order of index. However we will not be
// starting our reinsertions from index 0 in general. If we start from index
// i, for the purpose of reinsertion we will consider all elements with real
// index j < i to have virtual index n + j.
//
// Our hash generation scheme consists of generating a 64-bit hash and
// truncating the most significant bits. When moving to the new table, we
// simply introduce a new bit to the front of the hash. Therefore, if an
// element has ideal index i in the old table, it can have one of two ideal
// locations in the new table. If the new bit is 0, then the new ideal index
// is i. If the new bit is 1, then the new ideal index is n + i. Intuitively,
// we are producing two independent tables of size n, and for each element we
// independently choose which table to insert it into with equal probability.
// However, rather than wrapping around themselves on overflowing their
// indexes, the first table overflows into the second, and the second into the
// first. Visually, our new table will look something like:
//
// [yy_xxx_xxxx_xxx|xx_yyy_yyyy_yyy]
//
// Where x's are elements inserted into the first table, y's are elements
// inserted into the second, and _'s are empty sections. We now define a few
// key concepts that we will use later. Note that this is a very abstract
// perspective of the table. A real resized table would be at least half
// empty.
//
// Theorem: A linear robin hood reinsertion from the first ideal element
// produces identical results to a linear naive reinsertion from the same
// element.
//
// FIXME(Gankro, pczarn): review the proof and put it all in a separate README.md
//
// Adaptive early resizing
// ----------------------
// To protect against degenerate performance scenarios (including DOS attacks),
// the implementation includes an adaptive behavior that can resize the map
// early (before its capacity is exceeded) when suspiciously long probe sequences
// are encountered.
//
// With this algorithm in place it would be possible to turn a CPU attack into
// a memory attack due to the aggressive resizing. To prevent that the
// adaptive behavior only triggers when the map is at least half full.
// This reduces the effectiveness of the algorithm but also makes it completely safe.
//
// The previous safety measure also prevents degenerate interactions with
// really bad quality hash algorithms that can make normal inputs look like a
// DOS attack.
//
// Probe-displacement limit that triggers the adaptive early resize described
// above; the derivation of 128 follows below.
const DISPLACEMENT_THRESHOLD: usize = 128;
//
// The threshold of 128 is chosen to minimize the chance of exceeding it.
// In particular, we want that chance to be less than 10^-8 with a load of 90%.
// For displacement, the smallest constant that fits our needs is 90,
// so we round that up to 128.
//
// At a load factor of α, the odds of finding the target bucket after exactly n
// unsuccessful probes[1] are
//
// Pr_α{displacement = n} =
// (1 - α) / α * ∑_{k≥1} e^(-kα) * (kα)^(k+n) / (k + n)! * (1 - kα / (k + n + 1))
//
// We use this formula to find the probability of triggering the adaptive behavior
//
// Pr_0.909{displacement > 128} = 1.601 * 10^-11
//
// 1. Alfredo Viola (2005). Distributional analysis of Robin Hood linear probing
// hashing with buckets.
/// A hash map implemented with linear probing and Robin Hood bucket stealing.
///
/// By default, `HashMap` uses a hashing algorithm selected to provide
/// resistance against HashDoS attacks. The algorithm is randomly seeded, and a
/// reasonable best-effort is made to generate this seed from a high quality,
/// secure source of randomness provided by the host without blocking the
/// program. Because of this, the randomness of the seed depends on the output
/// quality of the system's random number generator when the seed is created.
/// In particular, seeds generated when the system's entropy pool is abnormally
/// low such as during system boot may be of a lower quality.
///
/// The default hashing algorithm is currently SipHash 1-3, though this is
/// subject to change at any point in the future. While its performance is very
/// competitive for medium sized keys, other hashing algorithms will outperform
/// it for small keys such as integers as well as large keys such as long
/// strings, though those algorithms will typically *not* protect against
/// attacks such as HashDoS.
///
/// The hashing algorithm can be replaced on a per-`HashMap` basis using the
/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. Many
/// alternative algorithms are available on crates.io, such as the [`fnv`] crate.
///
/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although
/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`.
/// If you implement these yourself, it is important that the following
/// property holds:
///
/// ```text
/// k1 == k2 -> hash(k1) == hash(k2)
/// ```
///
/// In other words, if two keys are equal, their hashes must be equal.
///
/// It is a logic error for a key to be modified in such a way that the key's
/// hash, as determined by the [`Hash`] trait, or its equality, as determined by
/// the [`Eq`] trait, changes while it is in the map. This is normally only
/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
///
/// Relevant papers/articles:
///
/// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
/// 2. Emmanuel Goossaert. ["Robin Hood
/// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
/// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
/// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// // Type inference lets us omit an explicit type signature (which
/// // would be `HashMap<String, String>` in this example).
/// let mut book_reviews = HashMap::new();
///
/// // Review some books.
/// book_reviews.insert(
/// "Adventures of Huckleberry Finn".to_string(),
/// "My favorite book.".to_string(),
/// );
/// book_reviews.insert(
/// "Grimms' Fairy Tales".to_string(),
/// "Masterpiece.".to_string(),
/// );
/// book_reviews.insert(
/// "Pride and Prejudice".to_string(),
/// "Very enjoyable.".to_string(),
/// );
/// book_reviews.insert(
/// "The Adventures of Sherlock Holmes".to_string(),
/// "Eye lyked it alot.".to_string(),
/// );
///
/// // Check for a specific one.
/// // When collections store owned values (String), they can still be
/// // queried using references (&str).
/// if !book_reviews.contains_key("Les Misérables") {
/// println!("We've got {} reviews, but Les Misérables ain't one.",
/// book_reviews.len());
/// }
///
/// // oops, this review has a lot of spelling mistakes, let's delete it.
/// book_reviews.remove("The Adventures of Sherlock Holmes");
///
/// // Look up the values associated with some keys.
/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
/// for &book in &to_find {
/// match book_reviews.get(book) {
/// Some(review) => println!("{}: {}", book, review),
/// None => println!("{} is unreviewed.", book)
/// }
/// }
///
/// // Iterate over everything.
/// for (book, review) in &book_reviews {
/// println!("{}: \"{}\"", book, review);
/// }
/// ```
///
/// `HashMap` also implements an [`Entry API`](#method.entry), which allows
/// for more complex methods of getting, setting, updating and removing keys and
/// their values:
///
/// ```
/// use std::collections::HashMap;
///
/// // type inference lets us omit an explicit type signature (which
/// // would be `HashMap<&str, u8>` in this example).
/// let mut player_stats = HashMap::new();
///
/// fn random_stat_buff() -> u8 {
/// // could actually return some random value here - let's just return
/// // some fixed value for now
/// 42
/// }
///
/// // insert a key only if it doesn't already exist
/// player_stats.entry("health").or_insert(100);
///
/// // insert a key using a function that provides a new value only if it
/// // doesn't already exist
/// player_stats.entry("defence").or_insert_with(random_stat_buff);
///
/// // update a key, guarding against the key possibly not being set
/// let stat = player_stats.entry("attack").or_insert(100);
/// *stat += random_stat_buff();
/// ```
///
/// The easiest way to use `HashMap` with a custom type as key is to derive [`Eq`] and [`Hash`].
/// We must also derive [`PartialEq`].
///
/// [`Eq`]: ../../std/cmp/trait.Eq.html
/// [`Hash`]: ../../std/hash/trait.Hash.html
/// [`PartialEq`]: ../../std/cmp/trait.PartialEq.html
/// [`RefCell`]: ../../std/cell/struct.RefCell.html
/// [`Cell`]: ../../std/cell/struct.Cell.html
/// [`default`]: #method.default
/// [`with_hasher`]: #method.with_hasher
/// [`with_capacity_and_hasher`]: #method.with_capacity_and_hasher
/// [`fnv`]: https://crates.io/crates/fnv
///
/// ```
/// use std::collections::HashMap;
///
/// #[derive(Hash, Eq, PartialEq, Debug)]
/// struct Viking {
/// name: String,
/// country: String,
/// }
///
/// impl Viking {
/// /// Create a new Viking.
/// fn new(name: &str, country: &str) -> Viking {
/// Viking { name: name.to_string(), country: country.to_string() }
/// }
/// }
///
/// // Use a HashMap to store the vikings' health points.
/// let mut vikings = HashMap::new();
///
/// vikings.insert(Viking::new("Einar", "Norway"), 25);
/// vikings.insert(Viking::new("Olaf", "Denmark"), 24);
/// vikings.insert(Viking::new("Harald", "Iceland"), 12);
///
/// // Use derived implementation to print the status of the vikings.
/// for (viking, health) in &vikings {
/// println!("{:?} has {} hp", viking, health);
/// }
/// ```
///
/// A `HashMap` with fixed list of elements can be initialized from an array:
///
/// ```
/// use std::collections::HashMap;
///
/// fn main() {
/// let timber_resources: HashMap<&str, i32> =
/// [("Norway", 100),
/// ("Denmark", 50),
/// ("Iceland", 10)]
/// .iter().cloned().collect();
/// // use the values stored in map
/// }
/// ```
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct HashMap<K, V, S = RandomState> {
    // All hashes are keyed on these values, to prevent hash collision attacks.
    hash_builder: S,

    // Backing Robin Hood table (see `super::table`).
    table: RawTable<K, V>,

    // Translates between user-visible capacity and power-of-two raw capacity.
    resize_policy: DefaultResizePolicy,
}
/// Search for a pre-hashed key.
/// If you don't already know the hash, use search or search_mut instead
#[inline]
fn search_hashed<K, V, M, F>(table: M, hash: SafeHash, is_match: F) -> InternalEntry<K, V, M>
    where M: Deref<Target = RawTable<K, V>>,
          F: FnMut(&K) -> bool
{
    // A zero-capacity table is the one place `Bucket::new` would see an
    // invalid raw bucket (undefined behavior), so answer immediately rather
    // than probing.
    if table.capacity() != 0 {
        search_hashed_nonempty(table, hash, is_match, true)
    } else {
        InternalEntry::TableIsEmpty
    }
}
/// Search for a pre-hashed key when the hash map is known to be non-empty.
///
/// Walks the probe sequence for `hash` and returns `Occupied` when `is_match`
/// accepts a key (hash equality is checked first unless `compare_hashes` is
/// `false`), or `Vacant` as soon as an empty bucket — or a bucket with a
/// smaller displacement than ours — proves the key cannot be present.
#[inline]
fn search_hashed_nonempty<K, V, M, F>(table: M, hash: SafeHash, mut is_match: F,
                                      compare_hashes: bool)
    -> InternalEntry<K, V, M>
    where M: Deref<Target = RawTable<K, V>>,
          F: FnMut(&K) -> bool
{
    // Do not check the capacity as an extra branch could slow the lookup.

    let size = table.size();
    let mut probe = Bucket::new(table, hash);
    let mut displacement = 0;

    loop {
        let full = match probe.peek() {
            Empty(bucket) => {
                // Found a hole!
                return InternalEntry::Vacant {
                    hash,
                    elem: NoElem(bucket, displacement),
                };
            }
            Full(bucket) => bucket,
        };

        let probe_displacement = full.displacement();

        if probe_displacement < displacement {
            // Found a luckier bucket than me.
            // We can finish the search early if we hit any bucket
            // with a lower distance to initial bucket than we've probed.
            return InternalEntry::Vacant {
                hash,
                elem: NeqElem(full, probe_displacement),
            };
        }

        // If the hash doesn't match, it can't be this one..
        if !compare_hashes || hash == full.hash() {
            // If the key doesn't match, it can't be this one..
            if is_match(full.read().0) {
                return InternalEntry::Occupied { elem: full };
            }
        }
        displacement += 1;
        probe = full.next();
        // A full probe of the table would mean the key is neither present nor
        // insertable, which can't happen for a valid Robin Hood table.
        debug_assert!(displacement <= size);
    }
}
/// Same as `search_hashed_nonempty` but for mutable access.
///
/// Walks the probe sequence for `hash` and returns `Occupied` when `is_match`
/// accepts a key, or `Vacant` as soon as an empty bucket — or a bucket with a
/// smaller displacement than ours — proves the key cannot be present.
#[inline]
fn search_hashed_nonempty_mut<K, V, M, F>(table: M, hash: SafeHash, mut is_match: F,
                                          compare_hashes: bool)
    -> InternalEntry<K, V, M>
    where M: DerefMut<Target = RawTable<K, V>>,
          F: FnMut(&K) -> bool
{
    // Do not check the capacity as an extra branch could slow the lookup.

    let size = table.size();
    let mut probe = Bucket::new(table, hash);
    let mut displacement = 0;

    loop {
        let mut full = match probe.peek() {
            Empty(bucket) => {
                // Found a hole!
                return InternalEntry::Vacant {
                    hash,
                    elem: NoElem(bucket, displacement),
                };
            }
            Full(bucket) => bucket,
        };

        let probe_displacement = full.displacement();

        if probe_displacement < displacement {
            // Found a luckier bucket than me.
            // We can finish the search early if we hit any bucket
            // with a lower distance to initial bucket than we've probed.
            return InternalEntry::Vacant {
                hash,
                elem: NeqElem(full, probe_displacement),
            };
        }

        // If the hash doesn't match, it can't be this one..
        // (Condition ordered like `search_hashed_nonempty` for consistency:
        // the cheap `compare_hashes` flag short-circuits the hash load.)
        if !compare_hashes || hash == full.hash() {
            // If the key doesn't match, it can't be this one..
            if is_match(full.read_mut().0) {
                return InternalEntry::Occupied { elem: full };
            }
        }
        displacement += 1;
        probe = full.next();
        debug_assert!(displacement <= size);
    }
}
/// Removes the entry in `starting_bucket` and returns its key and value along
/// with the table, then repairs the probe chain that follows: displaced
/// buckets are shifted back one slot until a bucket with displacement 0 (or a
/// hole) is reached, keeping the Robin Hood invariants without tombstones.
fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>)
    -> (K, V, &mut RawTable<K, V>)
{
    let (empty, retkey, retval) = starting_bucket.take();
    let mut gap = match empty.gap_peek() {
        Ok(b) => b,
        // Nothing follows the gap that could need shifting.
        Err(b) => return (retkey, retval, b.into_table()),
    };

    // Shift elements back into the gap while they are displaced from their
    // ideal bucket.
    while gap.full().displacement() != 0 {
        gap = match gap.shift() {
            Ok(b) => b,
            Err(b) => {
                return (retkey, retval, b.into_table());
            },
        };
    }

    // Now we've done all our shifting. Return the value we grabbed earlier.
    (retkey, retval, gap.into_table())
}
/// Perform robin hood bucket stealing at the given `bucket`. You must
/// also pass that bucket's displacement so we don't have to recalculate it.
///
/// `hash`, `key`, and `val` are the elements to "robin hood" into the hashtable.
fn robin_hood<'a, K: 'a, V: 'a>(bucket: FullBucketMut<'a, K, V>,
                                mut displacement: usize,
                                mut hash: SafeHash,
                                mut key: K,
                                mut val: V)
                                -> FullBucketMut<'a, K, V> {
    let size = bucket.table().size();
    let raw_capacity = bucket.table().capacity();
    // There can be at most `size - dib` buckets to displace, because
    // in the worst case, there are `size` elements and we already are
    // `displacement` buckets away from the initial one.
    let idx_end = (bucket.index() + size - bucket.displacement()) % raw_capacity;
    // Save the *starting point*.
    let mut bucket = bucket.stash();

    loop {
        // Evict the current occupant: we now carry its (hash, key, value) and
        // must find it a new home further along the probe sequence.
        let (old_hash, old_key, old_val) = bucket.replace(hash, key, val);
        hash = old_hash;
        key = old_key;
        val = old_val;

        loop {
            displacement += 1;
            let probe = bucket.next();
            // The displaced element can never travel as far as `idx_end`
            // (see the bound computed above).
            debug_assert!(probe.index() != idx_end);

            let full_bucket = match probe.peek() {
                Empty(bucket) => {
                    // Found a hole!
                    let bucket = bucket.put(hash, key, val);
                    // Now that it's stolen, just read the value's pointer
                    // right out of the table! Go back to the *starting point*.
                    //
                    // This use of `into_table` is misleading. It turns the
                    // bucket, which is a FullBucket on top of a
                    // FullBucketMut, into just one FullBucketMut. The "table"
                    // refers to the inner FullBucketMut in this context.
                    return bucket.into_table();
                }
                Full(bucket) => bucket,
            };

            let probe_displacement = full_bucket.displacement();

            bucket = full_bucket;

            // Robin hood! Steal the spot.
            if probe_displacement < displacement {
                displacement = probe_displacement;
                break;
            }
        }
    }
}
impl<K, V, S> HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    /// Hashes `x` using this map's `BuildHasher`.
    fn make_hash<X: ?Sized>(&self, x: &X) -> SafeHash
        where X: Hash
    {
        table::make_hash(&self.hash_builder, x)
    }

    /// Search for a key, yielding the index if it's found in the hashtable.
    /// If you already have the hash for the key lying around, or if you need an
    /// InternalEntry, use search_hashed or search_hashed_nonempty.
    #[inline]
    fn search<'a, Q: ?Sized>(&'a self, q: &Q)
        -> Option<FullBucket<K, V, &'a RawTable<K, V>>>
        where K: Borrow<Q>,
              Q: Eq + Hash
    {
        if self.is_empty() {
            return None;
        }

        let hash = self.make_hash(q);
        search_hashed_nonempty(&self.table, hash, |k| q.eq(k.borrow()), true)
            .into_occupied_bucket()
    }

    /// Mutable counterpart of `search`.
    #[inline]
    fn search_mut<'a, Q: ?Sized>(&'a mut self, q: &Q)
        -> Option<FullBucket<K, V, &'a mut RawTable<K, V>>>
        where K: Borrow<Q>,
              Q: Eq + Hash
    {
        if self.is_empty() {
            return None;
        }

        let hash = self.make_hash(q);
        search_hashed_nonempty(&mut self.table, hash, |k| q.eq(k.borrow()), true)
            .into_occupied_bucket()
    }

    // The caller should ensure that invariants by Robin Hood Hashing hold
    // and that there's space in the underlying table.
    //
    // Inserts at the first empty bucket on the probe sequence without any
    // displacement comparison — valid only when elements arrive already in
    // Robin Hood order (presumably the table-rebuild path; see the growth
    // notes earlier in this file — TODO confirm against callers).
    fn insert_hashed_ordered(&mut self, hash: SafeHash, k: K, v: V) {
        let mut buckets = Bucket::new(&mut self.table, hash);
        let start_index = buckets.index();

        loop {
            // We don't need to compare hashes for value swap.
            // Not even DIBs for Robin Hood.
            buckets = match buckets.peek() {
                Empty(empty) => {
                    empty.put(hash, k, v);
                    return;
                }
                Full(b) => b.into_bucket(),
            };
            buckets.next();
            // With free space guaranteed we can never wrap all the way around
            // to the starting bucket.
            debug_assert!(buckets.index() != start_index);
        }
    }
}
impl<K: Hash + Eq, V> HashMap<K, V, RandomState> {
    /// Creates an empty `HashMap`.
    ///
    /// The map starts with a capacity of 0, so no allocation happens until
    /// the first value is inserted.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// let mut map: HashMap<&str, i32> = HashMap::new();
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new() -> HashMap<K, V, RandomState> {
        Default::default()
    }

    /// Creates an empty `HashMap` with the specified capacity.
    ///
    /// At least `capacity` elements fit without reallocating; a `capacity`
    /// of 0 allocates nothing.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// let mut map: HashMap<&str, i32> = HashMap::with_capacity(10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(capacity: usize) -> HashMap<K, V, RandomState> {
        let hasher = Default::default();
        HashMap::with_capacity_and_hasher(capacity, hasher)
    }
}
impl<K, V, S> HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    /// Creates an empty `HashMap` which will use the given hash builder to hash
    /// keys.
    ///
    /// The created map has the default initial capacity.
    ///
    /// Warning: `hash_builder` is normally randomly generated, and
    /// is designed to allow HashMaps to be resistant to attacks that
    /// cause many collisions and very poor performance. Setting it
    /// manually using this function can expose a DoS attack vector.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::RandomState;
    ///
    /// let s = RandomState::new();
    /// let mut map = HashMap::with_hasher(s);
    /// map.insert(1, 2);
    /// ```
    #[inline]
    #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
    pub fn with_hasher(hash_builder: S) -> HashMap<K, V, S> {
        HashMap {
            hash_builder,
            resize_policy: DefaultResizePolicy::new(),
            // Zero raw capacity: no allocation until the first insert.
            table: RawTable::new(0),
        }
    }
    /// Creates an empty `HashMap` with the specified capacity, using `hash_builder`
    /// to hash the keys.
    ///
    /// The hash map will be able to hold at least `capacity` elements without
    /// reallocating. If `capacity` is 0, the hash map will not allocate.
    ///
    /// Warning: `hash_builder` is normally randomly generated, and
    /// is designed to allow HashMaps to be resistant to attacks that
    /// cause many collisions and very poor performance. Setting it
    /// manually using this function can expose a DoS attack vector.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::RandomState;
    ///
    /// let s = RandomState::new();
    /// let mut map = HashMap::with_capacity_and_hasher(10, s);
    /// map.insert(1, 2);
    /// ```
    #[inline]
    #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
    pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> HashMap<K, V, S> {
        let resize_policy = DefaultResizePolicy::new();
        // Translate the user-visible capacity into a raw (power-of-two)
        // bucket count that accounts for the load factor.
        let raw_cap = resize_policy.raw_capacity(capacity);
        HashMap {
            hash_builder,
            resize_policy,
            table: RawTable::new(raw_cap),
        }
    }
    /// Returns a reference to the map's [`BuildHasher`].
    ///
    /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::RandomState;
    ///
    /// let hasher = RandomState::new();
    /// let map: HashMap<i32, i32> = HashMap::with_hasher(hasher);
    /// let hasher: &RandomState = map.hasher();
    /// ```
    #[stable(feature = "hashmap_public_hasher", since = "1.9.0")]
    pub fn hasher(&self) -> &S {
        &self.hash_builder
    }
    /// Returns the number of elements the map can hold without reallocating.
    ///
    /// This number is a lower bound; the `HashMap<K, V>` might be able to hold
    /// more, but is guaranteed to be able to hold at least this many.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// let map: HashMap<i32, i32> = HashMap::with_capacity(100);
    /// assert!(map.capacity() >= 100);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn capacity(&self) -> usize {
        // Inverse of `raw_capacity`: usable capacity under the load factor.
        self.resize_policy.capacity(self.raw_capacity())
    }
    /// Returns the hash map's raw capacity.
    #[inline]
    fn raw_capacity(&self) -> usize {
        self.table.capacity()
    }
    /// Reserves capacity for at least `additional` more elements to be inserted
    /// in the `HashMap`. The collection may reserve more space to avoid
    /// frequent reallocations.
    ///
    /// # Panics
    ///
    /// Panics if the new allocation size overflows [`usize`].
    ///
    /// [`usize`]: ../../std/primitive.usize.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// let mut map: HashMap<&str, i32> = HashMap::new();
    /// map.reserve(10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve(&mut self, additional: usize) {
        // Infallible variant: allocation failure aborts, overflow panics.
        match self.reserve_internal(additional, Infallible) {
            Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
            // `Infallible` allocation never reports `AllocErr`.
            Err(CollectionAllocErr::AllocErr) => unreachable!(),
            Ok(()) => { /* yay */ }
        }
    }
    /// Tries to reserve capacity for at least `additional` more elements to be inserted
    /// in the given `HashMap<K,V>`. The collection may reserve more space to avoid
    /// frequent reallocations.
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(try_reserve)]
    /// use std::collections::HashMap;
    /// let mut map: HashMap<&str, isize> = HashMap::new();
    /// map.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?");
    /// ```
    #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
        self.reserve_internal(additional, Fallible)
    }
    /// Shared implementation of `reserve` and `try_reserve`; `fallibility`
    /// selects whether allocation failure is returned or aborts.
    #[inline]
    fn reserve_internal(&mut self, additional: usize, fallibility: Fallibility)
        -> Result<(), CollectionAllocErr> {
        let remaining = self.capacity() - self.len(); // this can't overflow
        if remaining < additional {
            let min_cap = self.len()
                .checked_add(additional)
                .ok_or(CollectionAllocErr::CapacityOverflow)?;
            let raw_cap = self.resize_policy.try_raw_capacity(min_cap)?;
            self.try_resize(raw_cap, fallibility)?;
        } else if self.table.tag() && remaining <= self.len() {
            // Probe sequence is too long and table is half full,
            // resize early to reduce probing length.
            let new_capacity = self.table.capacity() * 2;
            self.try_resize(new_capacity, fallibility)?;
        }
        Ok(())
    }
    /// Resizes the internal vectors to a new capacity. It's your
    /// responsibility to:
    /// 1) Ensure `new_raw_cap` is enough for all the elements, accounting
    ///    for the load factor.
    /// 2) Ensure `new_raw_cap` is a power of two or zero.
    #[inline(never)]
    #[cold]
    fn try_resize(
        &mut self,
        new_raw_cap: usize,
        fallibility: Fallibility,
    ) -> Result<(), CollectionAllocErr> {
        assert!(self.table.size() <= new_raw_cap);
        assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0);
        // Swap in a fresh table and rehash everything from the old one.
        let mut old_table = replace(
            &mut self.table,
            match fallibility {
                Infallible => RawTable::new(new_raw_cap),
                Fallible => RawTable::try_new(new_raw_cap)?,
            }
        );
        let old_size = old_table.size();
        if old_table.size() == 0 {
            return Ok(());
        }
        let mut bucket = Bucket::head_bucket(&mut old_table);
        // This is how the buckets might be laid out in memory:
        // ($ marks an initialized bucket)
        //  ________________
        // |$$$_$$$$$$_$$$$$|
        //
        // But we've skipped the entire initial cluster of buckets
        // and will continue iteration in this order:
        //  ________________
        //     |$$$$$$_$$$$$
        //                  ^ wrap around once end is reached
        //  ________________
        //  $$$_____________|
        //    ^ exit once table.size == 0
        loop {
            bucket = match bucket.peek() {
                Full(bucket) => {
                    let h = bucket.hash();
                    let (b, k, v) = bucket.take();
                    self.insert_hashed_ordered(h, k, v);
                    // `take` decrements the old table's size; stop once it
                    // has been fully drained.
                    if b.table().size() == 0 {
                        break;
                    }
                    b.into_bucket()
                }
                Empty(b) => b.into_bucket(),
            };
            bucket.next();
        }
        // Every element must have been moved into the new table.
        assert_eq!(self.table.size(), old_size);
        Ok(())
    }
    /// Shrinks the capacity of the map as much as possible. It will drop
    /// down as much as possible while maintaining the internal rules
    /// and possibly leaving some space in accordance with the resize policy.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<i32, i32> = HashMap::with_capacity(100);
    /// map.insert(1, 2);
    /// map.insert(3, 4);
    /// assert!(map.capacity() >= 100);
    /// map.shrink_to_fit();
    /// assert!(map.capacity() >= 2);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn shrink_to_fit(&mut self) {
        let new_raw_cap = self.resize_policy.raw_capacity(self.len());
        if self.raw_capacity() != new_raw_cap {
            let old_table = replace(&mut self.table, RawTable::new(new_raw_cap));
            let old_size = old_table.size();
            // Shrink the table. Naive algorithm for resizing:
            for (h, k, v) in old_table.into_iter() {
                self.insert_hashed_nocheck(h, k, v);
            }
            debug_assert_eq!(self.table.size(), old_size);
        }
    }
    /// Shrinks the capacity of the map with a lower limit. It will drop
    /// down no lower than the supplied limit while maintaining the internal rules
    /// and possibly leaving some space in accordance with the resize policy.
    ///
    /// Panics if the current capacity is smaller than the supplied
    /// minimum capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(shrink_to)]
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<i32, i32> = HashMap::with_capacity(100);
    /// map.insert(1, 2);
    /// map.insert(3, 4);
    /// assert!(map.capacity() >= 100);
    /// map.shrink_to(10);
    /// assert!(map.capacity() >= 10);
    /// map.shrink_to(0);
    /// assert!(map.capacity() >= 2);
    /// ```
    #[unstable(feature = "shrink_to", reason = "new API", issue="56431")]
    pub fn shrink_to(&mut self, min_capacity: usize) {
        assert!(self.capacity() >= min_capacity, "Tried to shrink to a larger capacity");
        // Shrink to whichever is larger: the requested floor or what the
        // current contents require.
        let new_raw_cap = self.resize_policy.raw_capacity(max(self.len(), min_capacity));
        if self.raw_capacity() != new_raw_cap {
            let old_table = replace(&mut self.table, RawTable::new(new_raw_cap));
            let old_size = old_table.size();
            // Shrink the table. Naive algorithm for resizing:
            for (h, k, v) in old_table.into_iter() {
                self.insert_hashed_nocheck(h, k, v);
            }
            debug_assert_eq!(self.table.size(), old_size);
        }
    }
    /// Inserts a pre-hashed key-value pair, without first checking
    /// that there's enough room in the buckets.
    ///
    /// Returns the value previously stored under an equal key (leaving that
    /// key untouched), or `None` if the key was not present.
    fn insert_hashed_nocheck(&mut self, hash: SafeHash, k: K, v: V) -> Option<V> {
        let entry = search_hashed(&mut self.table, hash, |key| *key == k).into_entry(k);
        match entry {
            Some(Occupied(mut elem)) => Some(elem.insert(v)),
            Some(Vacant(elem)) => {
                elem.insert(v);
                None
            }
            // The caller guaranteed room, so the table cannot be empty here.
            None => unreachable!(),
        }
    }
    /// An iterator visiting all keys in arbitrary order.
    /// The iterator element type is `&'a K`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    ///
    /// for key in map.keys() {
    ///     println!("{}", key);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn keys(&self) -> Keys<K, V> {
        Keys { inner: self.iter() }
    }
    /// An iterator visiting all values in arbitrary order.
    /// The iterator element type is `&'a V`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    ///
    /// for val in map.values() {
    ///     println!("{}", val);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn values(&self) -> Values<K, V> {
        Values { inner: self.iter() }
    }
    /// An iterator visiting all values mutably in arbitrary order.
    /// The iterator element type is `&'a mut V`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    ///
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    ///
    /// for val in map.values_mut() {
    ///     *val = *val + 10;
    /// }
    ///
    /// for val in map.values() {
    ///     println!("{}", val);
    /// }
    /// ```
    #[stable(feature = "map_values_mut", since = "1.10.0")]
    pub fn values_mut(&mut self) -> ValuesMut<K, V> {
        ValuesMut { inner: self.iter_mut() }
    }
    /// An iterator visiting all key-value pairs in arbitrary order.
    /// The iterator element type is `(&'a K, &'a V)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    ///
    /// for (key, val) in map.iter() {
    ///     println!("key: {} val: {}", key, val);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter(&self) -> Iter<K, V> {
        Iter { inner: self.table.iter() }
    }
    /// An iterator visiting all key-value pairs in arbitrary order,
    /// with mutable references to the values.
    /// The iterator element type is `(&'a K, &'a mut V)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    ///
    /// // Update all values
    /// for (_, val) in map.iter_mut() {
    ///     *val *= 2;
    /// }
    ///
    /// for (key, val) in &map {
    ///     println!("key: {} val: {}", key, val);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter_mut(&mut self) -> IterMut<K, V> {
        IterMut { inner: self.table.iter_mut() }
    }
    /// Gets the given key's corresponding entry in the map for in-place manipulation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut letters = HashMap::new();
    ///
    /// for ch in "a short treatise on fungi".chars() {
    ///     let counter = letters.entry(ch).or_insert(0);
    ///     *counter += 1;
    /// }
    ///
    /// assert_eq!(letters[&'s'], 2);
    /// assert_eq!(letters[&'t'], 3);
    /// assert_eq!(letters[&'u'], 1);
    /// assert_eq!(letters.get(&'y'), None);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn entry(&mut self, key: K) -> Entry<K, V> {
        // Gotta resize now. Reserving up front guarantees the vacant entry
        // can later be filled without reallocating.
        self.reserve(1);
        let hash = self.make_hash(&key);
        search_hashed(&mut self.table, hash, |q| q.eq(&key))
            // After `reserve(1)` the table is non-empty, so a search always
            // yields either an occupied or a vacant entry.
            .into_entry(key).expect("unreachable")
    }
    /// Returns the number of elements in the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut a = HashMap::new();
    /// assert_eq!(a.len(), 0);
    /// a.insert(1, "a");
    /// assert_eq!(a.len(), 1);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len(&self) -> usize {
        self.table.size()
    }
    /// Returns true if the map contains no elements.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut a = HashMap::new();
    /// assert!(a.is_empty());
    /// a.insert(1, "a");
    /// assert!(!a.is_empty());
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Clears the map, returning all key-value pairs as an iterator. Keeps the
    /// allocated memory for reuse.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut a = HashMap::new();
    /// a.insert(1, "a");
    /// a.insert(2, "b");
    ///
    /// for (k, v) in a.drain().take(1) {
    ///     assert!(k == 1 || k == 2);
    ///     assert!(v == "a" || v == "b");
    /// }
    ///
    /// assert!(a.is_empty());
    /// ```
    #[inline]
    #[stable(feature = "drain", since = "1.6.0")]
    pub fn drain(&mut self) -> Drain<K, V> {
        Drain { inner: self.table.drain() }
    }
    /// Clears the map, removing all key-value pairs. Keeps the allocated memory
    /// for reuse.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut a = HashMap::new();
    /// a.insert(1, "a");
    /// a.clear();
    /// assert!(a.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn clear(&mut self) {
        // Dropping the `Drain` iterator removes every remaining entry while
        // keeping the allocation.
        self.drain();
    }
    /// Returns a reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// [`Eq`]: ../../std/cmp/trait.Eq.html
    /// [`Hash`]: ../../std/hash/trait.Hash.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert(1, "a");
    /// assert_eq!(map.get(&1), Some(&"a"));
    /// assert_eq!(map.get(&2), None);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        self.search(k).map(|bucket| bucket.into_refs().1)
    }
    /// Returns the key-value pair corresponding to the supplied key.
    ///
    /// The supplied key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// [`Eq`]: ../../std/cmp/trait.Eq.html
    /// [`Hash`]: ../../std/hash/trait.Hash.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(map_get_key_value)]
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert(1, "a");
    /// assert_eq!(map.get_key_value(&1), Some((&1, &"a")));
    /// assert_eq!(map.get_key_value(&2), None);
    /// ```
    #[unstable(feature = "map_get_key_value", issue = "49347")]
    pub fn get_key_value<Q: ?Sized>(&self, k: &Q) -> Option<(&K, &V)>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        self.search(k).map(|bucket| bucket.into_refs())
    }
    /// Returns true if the map contains a value for the specified key.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// [`Eq`]: ../../std/cmp/trait.Eq.html
    /// [`Hash`]: ../../std/hash/trait.Hash.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert(1, "a");
    /// assert_eq!(map.contains_key(&1), true);
    /// assert_eq!(map.contains_key(&2), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        self.search(k).is_some()
    }
    /// Returns a mutable reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// [`Eq`]: ../../std/cmp/trait.Eq.html
    /// [`Hash`]: ../../std/hash/trait.Hash.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert(1, "a");
    /// if let Some(x) = map.get_mut(&1) {
    ///     *x = "b";
    /// }
    /// assert_eq!(map[&1], "b");
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        self.search_mut(k).map(|bucket| bucket.into_mut_refs().1)
    }
    /// Inserts a key-value pair into the map.
    ///
    /// If the map did not have this key present, [`None`] is returned.
    ///
    /// If the map did have this key present, the value is updated, and the old
    /// value is returned. The key is not updated, though; this matters for
    /// types that can be `==` without being identical. See the [module-level
    /// documentation] for more.
    ///
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    /// [module-level documentation]: index.html#insert-and-complex-keys
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// assert_eq!(map.insert(37, "a"), None);
    /// assert_eq!(map.is_empty(), false);
    ///
    /// map.insert(37, "b");
    /// assert_eq!(map.insert(37, "c"), Some("b"));
    /// assert_eq!(map[&37], "c");
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn insert(&mut self, k: K, v: V) -> Option<V> {
        let hash = self.make_hash(&k);
        // Make room before inserting; `insert_hashed_nocheck` assumes space.
        self.reserve(1);
        self.insert_hashed_nocheck(hash, k, v)
    }
    /// Removes a key from the map, returning the value at the key if the key
    /// was previously in the map.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// [`Eq`]: ../../std/cmp/trait.Eq.html
    /// [`Hash`]: ../../std/hash/trait.Hash.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert(1, "a");
    /// assert_eq!(map.remove(&1), Some("a"));
    /// assert_eq!(map.remove(&1), None);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        self.search_mut(k).map(|bucket| pop_internal(bucket).1)
    }
    /// Removes a key from the map, returning the stored key and value if the
    /// key was previously in the map.
    ///
    /// The key may be any borrowed form of the map's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// [`Eq`]: ../../std/cmp/trait.Eq.html
    /// [`Hash`]: ../../std/hash/trait.Hash.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// # fn main() {
    /// let mut map = HashMap::new();
    /// map.insert(1, "a");
    /// assert_eq!(map.remove_entry(&1), Some((1, "a")));
    /// assert_eq!(map.remove(&1), None);
    /// # }
    /// ```
    #[stable(feature = "hash_map_remove_entry", since = "1.27.0")]
    pub fn remove_entry<Q: ?Sized>(&mut self, k: &Q) -> Option<(K, V)>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        self.search_mut(k)
            .map(|bucket| {
                let (k, v, _) = pop_internal(bucket);
                (k, v)
            })
    }
    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns `false`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<i32, i32> = (0..8).map(|x|(x, x*10)).collect();
    /// map.retain(|&k, _| k % 2 == 0);
    /// assert_eq!(map.len(), 4);
    /// ```
    #[stable(feature = "retain_hash_collection", since = "1.18.0")]
    pub fn retain<F>(&mut self, mut f: F)
        where F: FnMut(&K, &mut V) -> bool
    {
        if self.table.size() == 0 {
            return;
        }
        // Count down the elements still to visit; the table's own size
        // changes underneath us as entries are popped.
        let mut elems_left = self.table.size();
        let mut bucket = Bucket::head_bucket(&mut self.table);
        bucket.prev();
        let start_index = bucket.index();
        while elems_left != 0 {
            bucket = match bucket.peek() {
                Full(mut full) => {
                    elems_left -= 1;
                    let should_remove = {
                        let (k, v) = full.read_mut();
                        !f(k, v)
                    };
                    if should_remove {
                        // `pop_internal` consumes the bucket, so capture the
                        // raw position first to resume iteration from it.
                        let prev_raw = full.raw();
                        let (_, _, t) = pop_internal(full);
                        Bucket::new_from(prev_raw, t)
                    } else {
                        full.into_bucket()
                    }
                },
                Empty(b) => {
                    b.into_bucket()
                }
            };
            bucket.prev();  // reverse iteration
            debug_assert!(elems_left == 0 || bucket.index() != start_index);
        }
    }
}
impl<K, V, S> HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    /// Creates a raw entry builder for the HashMap.
    ///
    /// Raw entries give the lowest level of control over searching and
    /// manipulating the map: they must be manually initialized with a hash and
    /// then manually searched, and insertion into a vacant entry still takes an
    /// owned key. They are useful for exotic situations such as:
    ///
    /// * Hash memoization
    /// * Deferring the creation of an owned key until it is known to be required
    /// * Using a search key that doesn't work with the Borrow trait
    /// * Using custom comparison logic without newtype wrappers
    ///
    /// Because so little is checked, it is easy to put the map into a
    /// memory-safe but inconsistent state that yields seemingly random
    /// results; higher-level, more foolproof APIs like `entry` should be
    /// preferred when possible. In particular, the hash used to initialize the
    /// raw entry must stay consistent with the hash of the key ultimately
    /// stored, because resizing may recompute hashes from the keys alone. Raw
    /// entries also hand out mutable access to keys: mutating a key in a way
    /// that changes how it hashes or compares can make entries unreachable or
    /// mask one another, and implementations are free to assume this does not
    /// happen (within the limits of memory-safety).
    #[inline(always)]
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<K, V, S> {
        // Grow up front so a vacant raw entry can be filled without resizing.
        self.reserve(1);
        RawEntryBuilderMut { map: self }
    }
    /// Creates a raw immutable entry builder for the HashMap.
    ///
    /// As with [`raw_entry_mut`], the entry must be manually initialized with
    /// a hash and manually searched. This is useful for hash memoization,
    /// search keys that don't work with the Borrow trait, and custom
    /// comparison logic without newtype wrappers.
    ///
    /// Unless you are in such a situation, higher-level and more foolproof
    /// APIs like `get` should be preferred; immutable raw entries have very
    /// limited use, and you might instead want `raw_entry_mut`.
    ///
    /// [`raw_entry_mut`]: #method.raw_entry_mut
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn raw_entry(&self) -> RawEntryBuilder<K, V, S> {
        RawEntryBuilder { map: self }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> PartialEq for HashMap<K, V, S>
    where K: Eq + Hash,
          V: PartialEq,
          S: BuildHasher
{
    /// Two maps are equal when they hold the same keys and each key maps to
    /// an equal value; iteration order is irrelevant.
    fn eq(&self, other: &HashMap<K, V, S>) -> bool {
        // Equal lengths plus "every pair of `self` occurs in `other`" is
        // sufficient; the length check lets us bail out cheaply.
        self.len() == other.len()
            && self.iter().all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
    }
}
// Marker impl: `PartialEq` above is already a full equivalence relation
// whenever both the key and value types are themselves `Eq`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> Eq for HashMap<K, V, S>
    where K: Eq + Hash,
          V: Eq,
          S: BuildHasher
{
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> Debug for HashMap<K, V, S>
    where K: Eq + Hash + Debug,
          V: Debug,
          S: BuildHasher
{
    /// Formats the map as `{key: value, ...}` using the entries' own
    /// `Debug` implementations.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_map();
        builder.entries(self.iter());
        builder.finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> Default for HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher + Default
{
    /// Creates an empty `HashMap<K, V, S>`, with the `Default` value for the hasher.
    fn default() -> HashMap<K, V, S> {
        // Delegate to `with_hasher`, supplying the hasher's default.
        HashMap::with_hasher(S::default())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, Q: ?Sized, V, S> Index<&'a Q> for HashMap<K, V, S>
    where K: Eq + Hash + Borrow<Q>,
          Q: Eq + Hash,
          S: BuildHasher
{
    type Output = V;
    /// Returns a reference to the value corresponding to the supplied key.
    ///
    /// # Panics
    ///
    /// Panics if the key is not present in the `HashMap`.
    #[inline]
    fn index(&self, key: &Q) -> &V {
        match self.get(key) {
            Some(value) => value,
            // Same message as the former `expect` call.
            None => panic!("no entry found for key"),
        }
    }
}
/// An iterator over the entries of a `HashMap`.
///
/// This `struct` is created by the [`iter`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`iter`]: struct.HashMap.html#method.iter
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, K: 'a, V: 'a> {
    // Raw-table iterator yielding shared `(&K, &V)` pairs.
    inner: table::Iter<'a, K, V>,
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Iter<'a, K, V> {
    /// Duplicates the iterator's position; no entries are cloned.
    fn clone(&self) -> Iter<'a, K, V> {
        let inner = self.inner.clone();
        Iter { inner }
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K: Debug, V: Debug> fmt::Debug for Iter<'a, K, V> {
    /// Formats the remaining entries as a list, leaving `self` untouched by
    /// walking a clone of the iterator.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut list = f.debug_list();
        list.entries(self.clone());
        list.finish()
    }
}
/// A mutable iterator over the entries of a `HashMap`.
///
/// This `struct` is created by the [`iter_mut`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`iter_mut`]: struct.HashMap.html#method.iter_mut
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, K: 'a, V: 'a> {
    // Raw-table iterator yielding `(&K, &mut V)` pairs.
    inner: table::IterMut<'a, K, V>,
}
/// An owning iterator over the entries of a `HashMap`.
///
/// This `struct` is created by the [`into_iter`] method on [`HashMap`][`HashMap`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.HashMap.html#method.into_iter
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<K, V> {
    // `pub(super)` so the sibling hash_set module can wrap it.
    pub(super) inner: table::IntoIter<K, V>,
}
/// An iterator over the keys of a `HashMap`.
///
/// This `struct` is created by the [`keys`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`keys`]: struct.HashMap.html#method.keys
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Keys<'a, K: 'a, V: 'a> {
    // Full-entry iterator; the `Iterator` impl projects out the key.
    inner: Iter<'a, K, V>,
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Keys<'a, K, V> {
    /// Duplicates the iterator's position; no keys are cloned.
    fn clone(&self) -> Keys<'a, K, V> {
        let inner = self.inner.clone();
        Keys { inner }
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K: Debug, V> fmt::Debug for Keys<'a, K, V> {
    /// Formats the remaining keys as a list by walking a clone of the
    /// iterator.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut list = f.debug_list();
        list.entries(self.clone());
        list.finish()
    }
}
/// An iterator over the values of a `HashMap`.
///
/// This `struct` is created by the [`values`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`values`]: struct.HashMap.html#method.values
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Values<'a, K: 'a, V: 'a> {
    // Full-entry iterator; the `Iterator` impl projects out the value.
    inner: Iter<'a, K, V>,
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Values<'a, K, V> {
    /// Duplicates the iterator's position; no values are cloned.
    fn clone(&self) -> Values<'a, K, V> {
        let inner = self.inner.clone();
        Values { inner }
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V: Debug> fmt::Debug for Values<'a, K, V> {
    /// Formats the remaining values as a list by walking a clone of the
    /// iterator.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut list = f.debug_list();
        list.entries(self.clone());
        list.finish()
    }
}
/// A draining iterator over the entries of a `HashMap`.
///
/// This `struct` is created by the [`drain`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`drain`]: struct.HashMap.html#method.drain
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, K: 'a, V: 'a> {
    // `pub(super)` so the sibling hash_set module can wrap it.
    pub(super) inner: table::Drain<'a, K, V>,
}
/// A mutable iterator over the values of a `HashMap`.
///
/// This `struct` is created by the [`values_mut`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`values_mut`]: struct.HashMap.html#method.values_mut
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "map_values_mut", since = "1.10.0")]
pub struct ValuesMut<'a, K: 'a, V: 'a> {
    // Full-entry mutable iterator; the `Iterator` impl projects the value.
    inner: IterMut<'a, K, V>,
}
/// Internal result of a hashed lookup: the bucket that holds the key, a slot
/// where it could be inserted, or a note that the table has no buckets at all.
enum InternalEntry<K, V, M> {
    // The key was found in this bucket.
    Occupied { elem: FullBucket<K, V, M> },
    // The key is absent; `elem` records where it would go, `hash` its hash.
    Vacant {
        hash: SafeHash,
        elem: VacantEntryState<K, V, M>,
    },
    // Zero-capacity table: not even a vacant slot exists.
    TableIsEmpty,
}
impl<K, V, M> InternalEntry<K, V, M> {
    /// Extracts the full bucket when the lookup found the key; `None` for the
    /// vacant and empty-table cases.
    #[inline]
    fn into_occupied_bucket(self) -> Option<FullBucket<K, V, M>> {
        if let InternalEntry::Occupied { elem } = self {
            Some(elem)
        } else {
            None
        }
    }
}
impl<'a, K, V> InternalEntry<K, V, &'a mut RawTable<K, V>> {
    /// Converts the lookup result into a public `Entry`, attaching the owned
    /// `key` so a vacant entry can later be filled. Yields `None` only for a
    /// zero-capacity table.
    #[inline]
    fn into_entry(self, key: K) -> Option<Entry<'a, K, V>> {
        match self {
            InternalEntry::Occupied { elem } =>
                Some(Occupied(OccupiedEntry { key: Some(key), elem })),
            InternalEntry::Vacant { hash, elem } =>
                Some(Vacant(VacantEntry { hash, key, elem })),
            InternalEntry::TableIsEmpty => None,
        }
    }
}
/// A builder for computing where in a HashMap a key-value pair would be stored.
///
/// See the [`HashMap::raw_entry_mut`] docs for usage examples.
///
/// [`HashMap::raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub struct RawEntryBuilderMut<'a, K: 'a, V: 'a, S: 'a> {
    // Exclusive borrow of the map being searched/mutated.
    map: &'a mut HashMap<K, V, S>,
}
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This is a lower-level version of [`Entry`].
///
/// This `enum` is constructed from the [`raw_entry`] method on [`HashMap`].
///
/// [`HashMap`]: struct.HashMap.html
/// [`Entry`]: enum.Entry.html
/// [`raw_entry`]: struct.HashMap.html#method.raw_entry
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub enum RawEntryMut<'a, K: 'a, V: 'a, S: 'a> {
    /// An occupied entry.
    Occupied(RawOccupiedEntryMut<'a, K, V>),
    /// A vacant entry.
    Vacant(RawVacantEntryMut<'a, K, V, S>),
}
/// A view into an occupied entry in a `HashMap`.
/// It is part of the [`RawEntryMut`] enum.
///
/// [`RawEntryMut`]: enum.RawEntryMut.html
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub struct RawOccupiedEntryMut<'a, K: 'a, V: 'a> {
    // Bucket holding the found pair, with mutable access to the whole table.
    elem: FullBucket<K, V, &'a mut RawTable<K, V>>,
}
/// A view into a vacant entry in a `HashMap`.
/// It is part of the [`RawEntryMut`] enum.
///
/// [`RawEntryMut`]: enum.RawEntryMut.html
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub struct RawVacantEntryMut<'a, K: 'a, V: 'a, S: 'a> {
    // Where the pair would be stored on insertion.
    elem: VacantEntryState<K, V, &'a mut RawTable<K, V>>,
    // Kept so `insert` can hash the caller-supplied key.
    hash_builder: &'a S,
}
/// A builder for computing where in a HashMap a key-value pair would be stored.
///
/// See the [`HashMap::raw_entry`] docs for usage examples.
///
/// [`HashMap::raw_entry`]: struct.HashMap.html#method.raw_entry
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub struct RawEntryBuilder<'a, K: 'a, V: 'a, S: 'a> {
    // Shared borrow: this builder can only produce read-only views.
    map: &'a HashMap<K, V, S>,
}
impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S>
    where S: BuildHasher,
          K: Eq + Hash,
{
    /// Create a `RawEntryMut` from the given key.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_key<Q: ?Sized>(self, k: &Q) -> RawEntryMut<'a, K, V, S>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        // Hash with the map's own hasher so the lookup agrees with `insert`.
        let mut hasher = self.map.hash_builder.build_hasher();
        k.hash(&mut hasher);
        self.from_key_hashed_nocheck(hasher.finish(), k)
    }
    /// Create a `RawEntryMut` from the given key and its hash.
    ///
    /// "nocheck": the hash is trusted to be the hash of `k`; no verification
    /// is performed here.
    #[inline]
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S>
        where K: Borrow<Q>,
              Q: Eq
    {
        self.from_hash(hash, |q| q.borrow().eq(k))
    }
    // Core lookup shared by `from_hash` and `search_bucket`; `compare_hashes`
    // selects whether candidate buckets are pre-filtered by stored hash
    // before `is_match` is consulted.
    #[inline]
    fn search<F>(self, hash: u64, is_match: F, compare_hashes: bool) -> RawEntryMut<'a, K, V, S>
        where for<'b> F: FnMut(&'b K) -> bool,
    {
        match search_hashed_nonempty_mut(&mut self.map.table,
                                         SafeHash::new(hash),
                                         is_match,
                                         compare_hashes) {
            InternalEntry::Occupied { elem } => {
                RawEntryMut::Occupied(RawOccupiedEntryMut { elem })
            }
            InternalEntry::Vacant { elem, .. } => {
                RawEntryMut::Vacant(RawVacantEntryMut {
                    elem,
                    hash_builder: &self.map.hash_builder,
                })
            }
            InternalEntry::TableIsEmpty => {
                // NOTE(review): assumes the builder is only handed out with an
                // allocated table (presumably `raw_entry_mut` reserves first) —
                // confirm at the construction site.
                unreachable!()
            }
        }
    }
    /// Create a `RawEntryMut` from the given hash.
    #[inline]
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_hash<F>(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S>
        where for<'b> F: FnMut(&'b K) -> bool,
    {
        self.search(hash, is_match, true)
    }
    /// Search possible locations for an element with hash `hash` until `is_match` returns true for
    /// one of them. There is no guarantee that all keys passed to `is_match` will have the provided
    /// hash.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn search_bucket<F>(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S>
        where for<'b> F: FnMut(&'b K) -> bool,
    {
        self.search(hash, is_match, false)
    }
}
impl<'a, K, V, S> RawEntryBuilder<'a, K, V, S>
    where S: BuildHasher,
{
    /// Access an entry by key.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_key<Q: ?Sized>(self, k: &Q) -> Option<(&'a K, &'a V)>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        // Hash the key with the map's own hasher, then defer to the
        // hash-supplied variant.
        let mut state = self.map.hash_builder.build_hasher();
        k.hash(&mut state);
        let hash = state.finish();
        self.from_key_hashed_nocheck(hash, k)
    }
    /// Access an entry by a key and its hash.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        let is_match = |q: &K| q.borrow().eq(k);
        self.from_hash(hash, is_match)
    }
    // Shared read-only lookup; `compare_hashes` decides whether stored hashes
    // pre-filter the candidates before `is_match` runs.
    fn search<F>(self, hash: u64, is_match: F, compare_hashes: bool) -> Option<(&'a K, &'a V)>
        where F: FnMut(&K) -> bool
    {
        let found = search_hashed_nonempty(&self.map.table,
                                           SafeHash::new(hash),
                                           is_match,
                                           compare_hashes);
        match found {
            InternalEntry::TableIsEmpty => unreachable!(),
            InternalEntry::Vacant { .. } => None,
            InternalEntry::Occupied { elem } => Some(elem.into_refs()),
        }
    }
    /// Access an entry by hash.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_hash<F>(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)>
        where F: FnMut(&K) -> bool
    {
        self.search(hash, is_match, true)
    }
    /// Search possible locations for an element with hash `hash` until `is_match` returns true for
    /// one of them. There is no guarantee that all keys passed to `is_match` will have the provided
    /// hash.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn search_bucket<F>(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)>
        where F: FnMut(&K) -> bool
    {
        self.search(hash, is_match, false)
    }
}
impl<'a, K, V, S> RawEntryMut<'a, K, V, S> {
    /// Ensures a value is in the entry by inserting the default if empty, and returns
    /// mutable references to the key and value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(hash_raw_entry)]
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 3);
    /// assert_eq!(map["poneyland"], 3);
    ///
    /// *map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 10).1 *= 2;
    /// assert_eq!(map["poneyland"], 6);
    /// ```
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn or_insert(self, default_key: K, default_val: V) -> (&'a mut K, &'a mut V)
        where K: Hash,
              S: BuildHasher,
    {
        match self {
            RawEntryMut::Vacant(vacant) => vacant.insert(default_key, default_val),
            RawEntryMut::Occupied(occupied) => occupied.into_key_value(),
        }
    }
    /// Ensures a value is in the entry by inserting the result of the default function if empty,
    /// and returns mutable references to the key and value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(hash_raw_entry)]
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, String> = HashMap::new();
    ///
    /// map.raw_entry_mut().from_key("poneyland").or_insert_with(|| {
    ///     ("poneyland", "hoho".to_string())
    /// });
    ///
    /// assert_eq!(map["poneyland"], "hoho".to_string());
    /// ```
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn or_insert_with<F>(self, default: F) -> (&'a mut K, &'a mut V)
        where F: FnOnce() -> (K, V),
              K: Hash,
              S: BuildHasher,
    {
        match self {
            RawEntryMut::Occupied(occupied) => occupied.into_key_value(),
            RawEntryMut::Vacant(vacant) => {
                // The closure runs only when the entry is actually vacant.
                let (key, value) = default();
                vacant.insert(key, value)
            }
        }
    }
    /// Provides in-place mutable access to an occupied entry before any
    /// potential inserts into the map.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(hash_raw_entry)]
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// map.raw_entry_mut()
    ///    .from_key("poneyland")
    ///    .and_modify(|_k, v| { *v += 1 })
    ///    .or_insert("poneyland", 42);
    /// assert_eq!(map["poneyland"], 42);
    ///
    /// map.raw_entry_mut()
    ///    .from_key("poneyland")
    ///    .and_modify(|_k, v| { *v += 1 })
    ///    .or_insert("poneyland", 0);
    /// assert_eq!(map["poneyland"], 43);
    /// ```
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn and_modify<F>(self, f: F) -> Self
        where F: FnOnce(&mut K, &mut V)
    {
        match self {
            RawEntryMut::Vacant(vacant) => RawEntryMut::Vacant(vacant),
            RawEntryMut::Occupied(mut occupied) => {
                // Scope the borrows from `get_key_value_mut` so the entry can
                // be moved back out afterwards.
                {
                    let (key, value) = occupied.get_key_value_mut();
                    f(key, value);
                }
                RawEntryMut::Occupied(occupied)
            }
        }
    }
}
impl<'a, K, V> RawOccupiedEntryMut<'a, K, V> {
    /// Gets a reference to the key in the entry.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn key(&self) -> &K {
        let (key, _) = self.elem.read();
        key
    }
    /// Gets a mutable reference to the key in the entry.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn key_mut(&mut self) -> &mut K {
        let (key, _) = self.elem.read_mut();
        key
    }
    /// Converts the entry into a mutable reference to the key in the entry
    /// with a lifetime bound to the map itself.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn into_key(self) -> &'a mut K {
        let (key, _) = self.elem.into_mut_refs();
        key
    }
    /// Gets a reference to the value in the entry.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn get(&self) -> &V {
        let (_, value) = self.elem.read();
        value
    }
    /// Converts the OccupiedEntry into a mutable reference to the value in the entry
    /// with a lifetime bound to the map itself.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn into_mut(self) -> &'a mut V {
        let (_, value) = self.elem.into_mut_refs();
        value
    }
    /// Gets a mutable reference to the value in the entry.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn get_mut(&mut self) -> &mut V {
        let (_, value) = self.elem.read_mut();
        value
    }
    /// Gets a reference to the key and value in the entry.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn get_key_value(&mut self) -> (&K, &V) {
        self.elem.read()
    }
    /// Gets a mutable reference to the key and value in the entry.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) {
        self.elem.read_mut()
    }
    /// Converts the OccupiedEntry into a mutable reference to the key and value in the entry
    /// with a lifetime bound to the map itself.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn into_key_value(self) -> (&'a mut K, &'a mut V) {
        self.elem.into_mut_refs()
    }
    /// Sets the value of the entry, and returns the entry's old value.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn insert(&mut self, mut value: V) -> V {
        mem::swap(self.get_mut(), &mut value);
        value
    }
    /// Sets the value of the entry, and returns the entry's old value.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn insert_key(&mut self, mut key: K) -> K {
        mem::swap(self.key_mut(), &mut key);
        key
    }
    /// Takes the value out of the entry, and returns it.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn remove(self) -> V {
        let (_, value, _) = pop_internal(self.elem);
        value
    }
    /// Take the ownership of the key and value from the map.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn remove_entry(self) -> (K, V) {
        let (key, value, _) = pop_internal(self.elem);
        (key, value)
    }
}
impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> {
    /// Sets the value of the entry with the VacantEntry's key,
    /// and returns a mutable reference to it.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V)
        where K: Hash,
              S: BuildHasher,
    {
        // Hash with the map's hasher (stored in the builder) so lookups agree.
        let mut hasher = self.hash_builder.build_hasher();
        key.hash(&mut hasher);
        self.insert_hashed_nocheck(hasher.finish(), key, value)
    }
    /// Sets the value of the entry with the VacantEntry's key,
    /// and returns a mutable reference to it.
    ///
    /// "nocheck": `hash` is trusted to be the hash of `key`.
    #[inline]
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) {
        let hash = SafeHash::new(hash);
        let b = match self.elem {
            // Slot holds an entry with smaller probe displacement than ours:
            // robin-hood insertion displaces it to make room.
            NeqElem(mut bucket, disp) => {
                if disp >= DISPLACEMENT_THRESHOLD {
                    // NOTE(review): the tag presumably flags an overlong probe
                    // sequence to the resize policy — confirm where it is read.
                    bucket.table_mut().set_tag(true);
                }
                robin_hood(bucket, disp, hash, key, value)
            },
            // Genuinely empty slot: store the pair directly.
            NoElem(mut bucket, disp) => {
                if disp >= DISPLACEMENT_THRESHOLD {
                    bucket.table_mut().set_tag(true);
                }
                bucket.put(hash, key, value)
            },
        };
        b.into_mut_refs()
    }
}
#[unstable(feature = "hash_raw_entry", issue = "56167")]
impl<'a, K, V, S> Debug for RawEntryBuilderMut<'a, K, V, S> {
    /// Formats as `RawEntryBuilderMut`, without exposing the borrowed map.
    ///
    /// Fixed to use the type's own name: it previously printed
    /// `RawEntryBuilder`, which made this type's Debug output byte-identical
    /// to `RawEntryBuilder`'s (and inconsistent with the other raw-entry
    /// Debug impls in this module, which all print their full type name).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("RawEntryBuilderMut")
            .finish()
    }
}
#[unstable(feature = "hash_raw_entry", issue = "56167")]
impl<'a, K: Debug, V: Debug, S> Debug for RawEntryMut<'a, K, V, S> {
    // Both variants render as a `RawEntry(..)` tuple wrapping the inner entry.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut tuple = f.debug_tuple("RawEntry");
        match *self {
            RawEntryMut::Vacant(ref vacant) => tuple.field(vacant),
            RawEntryMut::Occupied(ref occupied) => tuple.field(occupied),
        };
        tuple.finish()
    }
}
#[unstable(feature = "hash_raw_entry", issue = "56167")]
impl<'a, K: Debug, V: Debug> Debug for RawOccupiedEntryMut<'a, K, V> {
    // Shows the entry's key and value as named struct fields.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_struct("RawOccupiedEntryMut");
        builder.field("key", self.key());
        builder.field("value", self.get());
        builder.finish()
    }
}
#[unstable(feature = "hash_raw_entry", issue = "56167")]
impl<'a, K, V, S> Debug for RawVacantEntryMut<'a, K, V, S> {
    // No fields printed: the pending bucket and hasher borrow carry no
    // user-facing state.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_struct("RawVacantEntryMut");
        builder.finish()
    }
}
#[unstable(feature = "hash_raw_entry", issue = "56167")]
impl<'a, K, V, S> Debug for RawEntryBuilder<'a, K, V, S> {
    // No fields printed: the builder only holds a borrow of the map.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_struct("RawEntryBuilder");
        builder.finish()
    }
}
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This `enum` is constructed from the [`entry`] method on [`HashMap`].
///
/// [`HashMap`]: struct.HashMap.html
/// [`entry`]: struct.HashMap.html#method.entry
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Entry<'a, K: 'a, V: 'a> {
    /// An occupied entry.
    // NOTE: the rest of this module matches on the bare `Occupied`/`Vacant`
    // names, so the variants are evidently `use`d into scope elsewhere.
    #[stable(feature = "rust1", since = "1.0.0")]
    Occupied(#[stable(feature = "rust1", since = "1.0.0")]
             OccupiedEntry<'a, K, V>),
    /// A vacant entry.
    #[stable(feature = "rust1", since = "1.0.0")]
    Vacant(#[stable(feature = "rust1", since = "1.0.0")]
           VacantEntry<'a, K, V>),
}
#[stable(feature = "debug_hash_map", since = "1.12.0")]
impl<'a, K: 'a + Debug, V: 'a + Debug> Debug for Entry<'a, K, V> {
    // Both variants render as an `Entry(..)` tuple wrapping the inner entry.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut tuple = f.debug_tuple("Entry");
        match *self {
            Vacant(ref vacant) => tuple.field(vacant),
            Occupied(ref occupied) => tuple.field(occupied),
        };
        tuple.finish()
    }
}
/// A view into an occupied entry in a `HashMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
    // The key used for the lookup, retained for `replace_entry`/`replace_key`;
    // `take_key` moves it out, leaving `None`.
    key: Option<K>,
    // Bucket holding the found pair, with mutable access to the table.
    elem: FullBucket<K, V, &'a mut RawTable<K, V>>,
}
#[stable(feature = "debug_hash_map", since = "1.12.0")]
impl<'a, K: 'a + Debug, V: 'a + Debug> Debug for OccupiedEntry<'a, K, V> {
    // Shows the entry's key and value as named struct fields.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_struct("OccupiedEntry");
        builder.field("key", self.key());
        builder.field("value", self.get());
        builder.finish()
    }
}
/// A view into a vacant entry in a `HashMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VacantEntry<'a, K: 'a, V: 'a> {
    // Hash of `key`, precomputed during the lookup so `insert` need not
    // re-hash.
    hash: SafeHash,
    // The key the caller searched for; consumed on insertion.
    key: K,
    // Where the pair would be stored on insertion.
    elem: VacantEntryState<K, V, &'a mut RawTable<K, V>>,
}
#[stable(feature = "debug_hash_map", since = "1.12.0")]
impl<'a, K: 'a + Debug, V: 'a> Debug for VacantEntry<'a, K, V> {
    // Only the key is shown; there is no value in a vacant entry.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut tuple = f.debug_tuple("VacantEntry");
        tuple.field(self.key());
        tuple.finish()
    }
}
/// Possible states of a VacantEntry.
///
/// The `usize` in both variants is the probe displacement of the slot; it is
/// compared against `DISPLACEMENT_THRESHOLD` on insertion.
enum VacantEntryState<K, V, M> {
    /// The index is occupied, but the key to insert has precedence,
    /// and will kick the current one out on insertion.
    NeqElem(FullBucket<K, V, M>, usize),
    /// The index is genuinely vacant.
    NoElem(EmptyBucket<K, V, M>, usize),
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V, S> IntoIterator for &'a HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    type Item = (&'a K, &'a V);
    type IntoIter = Iter<'a, K, V>;
    /// Delegates to `HashMap::iter`, so `for (k, v) in &map` borrows each
    /// entry immutably.
    fn into_iter(self) -> Iter<'a, K, V> {
        self.iter()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V, S> IntoIterator for &'a mut HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    type Item = (&'a K, &'a mut V);
    type IntoIter = IterMut<'a, K, V>;
    /// Delegates to `HashMap::iter_mut`, so `for (k, v) in &mut map` yields
    /// shared keys with mutable values.
    fn into_iter(self) -> IterMut<'a, K, V> {
        self.iter_mut()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> IntoIterator for HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    type Item = (K, V);
    type IntoIter = IntoIter<K, V>;
    /// Creates a consuming iterator, that is, one that moves each key-value
    /// pair out of the map in arbitrary order. The map cannot be used after
    /// calling this.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    ///
    /// // Not possible with .iter()
    /// let vec: Vec<(&str, i32)> = map.into_iter().collect();
    /// ```
    fn into_iter(self) -> IntoIter<K, V> {
        // Consumes the raw table; ordering is whatever the table yields.
        IntoIter { inner: self.table.into_iter() }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Iter<'a, K, V> {
    type Item = (&'a K, &'a V);
    // Straight delegation to the raw-table iterator.
    #[inline]
    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        self.inner.next()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
    // The raw-table iterator knows exactly how many entries remain.
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
// Once exhausted, `Iter` keeps returning `None` (it delegates to `inner`).
impl<'a, K, V> FusedIterator for Iter<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);
    // Straight delegation: keys stay shared, values are handed out mutably.
    #[inline]
    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
        self.inner.next()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V> fmt::Debug for IterMut<'a, K, V>
    where K: fmt::Debug,
          V: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Prints the remaining entries via a fresh (non-consuming) borrow of
        // the inner iterator.
        f.debug_list()
            .entries(self.inner.iter())
            .finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (K, V);
    // The raw iterator yields (hash, key, value); drop the stored hash.
    #[inline]
    fn next(&mut self) -> Option<(K, V)> {
        self.inner.next().map(|(_, k, v)| (k, v))
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<K, V> FusedIterator for IntoIter<K, V> {}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<K: Debug, V: Debug> fmt::Debug for IntoIter<K, V> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Prints remaining entries without consuming the iterator.
        f.debug_list()
            .entries(self.inner.iter())
            .finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
#[inline]
fn next(&mut self) -> Option<(&'a K)> {
self.inner.next().map(|(k, _)| k)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
#[inline]
fn len(&self) -> usize {
self.inner.len()
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for Keys<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
#[inline]
fn next(&mut self) -> Option<(&'a V)> {
self.inner.next().map(|(_, v)| v)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
#[inline]
fn len(&self) -> usize {
self.inner.len()
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for Values<'a, K, V> {}
#[stable(feature = "map_values_mut", since = "1.10.0")]
impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
type Item = &'a mut V;
#[inline]
fn next(&mut self) -> Option<(&'a mut V)> {
self.inner.next().map(|(_, v)| v)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[stable(feature = "map_values_mut", since = "1.10.0")]
impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> {
#[inline]
fn len(&self) -> usize {
self.inner.len()
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V> fmt::Debug for ValuesMut<'a, K, V>
where K: fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list()
.entries(self.inner.inner.iter())
.finish()
}
}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, K, V> Iterator for Drain<'a, K, V> {
    type Item = (K, V);
    // The raw drain yields (hash, key, value); drop the stored hash.
    #[inline]
    fn next(&mut self) -> Option<(K, V)> {
        self.inner.next().map(|(_, k, v)| (k, v))
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for Drain<'a, K, V> {}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V> fmt::Debug for Drain<'a, K, V>
    where K: fmt::Debug,
          V: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Prints the entries still pending removal, without advancing the
        // drain.
        f.debug_list()
            .entries(self.inner.iter())
            .finish()
    }
}
impl<'a, K, V> Entry<'a, K, V> {
    #[stable(feature = "rust1", since = "1.0.0")]
    /// Ensures a value is in the entry by inserting the default if empty, and returns
    /// a mutable reference to the value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// map.entry("poneyland").or_insert(3);
    /// assert_eq!(map["poneyland"], 3);
    ///
    /// *map.entry("poneyland").or_insert(10) *= 2;
    /// assert_eq!(map["poneyland"], 6);
    /// ```
    pub fn or_insert(self, default: V) -> &'a mut V {
        match self {
            Vacant(vacant) => vacant.insert(default),
            Occupied(occupied) => occupied.into_mut(),
        }
    }
    #[stable(feature = "rust1", since = "1.0.0")]
    /// Ensures a value is in the entry by inserting the result of the default function if empty,
    /// and returns a mutable reference to the value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, String> = HashMap::new();
    /// let s = "hoho".to_string();
    ///
    /// map.entry("poneyland").or_insert_with(|| s);
    ///
    /// assert_eq!(map["poneyland"], "hoho".to_string());
    /// ```
    pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
        match self {
            // The closure runs only when the entry is actually vacant.
            Vacant(vacant) => vacant.insert(default()),
            Occupied(occupied) => occupied.into_mut(),
        }
    }
    /// Returns a reference to this entry's key.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
    /// ```
    #[stable(feature = "map_entry_keys", since = "1.10.0")]
    pub fn key(&self) -> &K {
        match *self {
            Occupied(ref occupied) => occupied.key(),
            Vacant(ref vacant) => vacant.key(),
        }
    }
    /// Provides in-place mutable access to an occupied entry before any
    /// potential inserts into the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// map.entry("poneyland")
    ///    .and_modify(|e| { *e += 1 })
    ///    .or_insert(42);
    /// assert_eq!(map["poneyland"], 42);
    ///
    /// map.entry("poneyland")
    ///    .and_modify(|e| { *e += 1 })
    ///    .or_insert(42);
    /// assert_eq!(map["poneyland"], 43);
    /// ```
    #[stable(feature = "entry_and_modify", since = "1.26.0")]
    pub fn and_modify<F>(self, f: F) -> Self
        where F: FnOnce(&mut V)
    {
        match self {
            Vacant(vacant) => Vacant(vacant),
            Occupied(mut occupied) => {
                f(occupied.get_mut());
                Occupied(occupied)
            }
        }
    }
}
impl<'a, K, V: Default> Entry<'a, K, V> {
    #[stable(feature = "entry_or_default", since = "1.28.0")]
    /// Ensures a value is in the entry by inserting the default value if empty,
    /// and returns a mutable reference to the value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() {
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, Option<u32>> = HashMap::new();
    /// map.entry("poneyland").or_default();
    ///
    /// assert_eq!(map["poneyland"], None);
    /// # }
    /// ```
    pub fn or_default(self) -> &'a mut V {
        match self {
            Vacant(vacant) => vacant.insert(V::default()),
            Occupied(occupied) => occupied.into_mut(),
        }
    }
}
impl<'a, K, V> OccupiedEntry<'a, K, V> {
    /// Gets a reference to the key in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
    /// ```
    #[stable(feature = "map_entry_keys", since = "1.10.0")]
    pub fn key(&self) -> &K {
        self.elem.read().0
    }
    /// Take the ownership of the key and value from the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// if let Entry::Occupied(o) = map.entry("poneyland") {
    ///     // We delete the entry from the map.
    ///     o.remove_entry();
    /// }
    ///
    /// assert_eq!(map.contains_key("poneyland"), false);
    /// ```
    #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
    pub fn remove_entry(self) -> (K, V) {
        // pop_internal also returns the bucket index, which is not needed here.
        let (k, v, _) = pop_internal(self.elem);
        (k, v)
    }
    /// Gets a reference to the value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// if let Entry::Occupied(o) = map.entry("poneyland") {
    ///     assert_eq!(o.get(), &12);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get(&self) -> &V {
        self.elem.read().1
    }
    /// Gets a mutable reference to the value in the entry.
    ///
    /// If you need a reference to the `OccupiedEntry` which may outlive the
    /// destruction of the `Entry` value, see [`into_mut`].
    ///
    /// [`into_mut`]: #method.into_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// assert_eq!(map["poneyland"], 12);
    /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
    ///     *o.get_mut() += 10;
    ///     assert_eq!(*o.get(), 22);
    ///
    ///     // We can use the same Entry multiple times.
    ///     *o.get_mut() += 2;
    /// }
    ///
    /// assert_eq!(map["poneyland"], 24);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self) -> &mut V {
        self.elem.read_mut().1
    }
    /// Converts the OccupiedEntry into a mutable reference to the value in the entry
    /// with a lifetime bound to the map itself.
    ///
    /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
    ///
    /// [`get_mut`]: #method.get_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// assert_eq!(map["poneyland"], 12);
    /// if let Entry::Occupied(o) = map.entry("poneyland") {
    ///     *o.into_mut() += 10;
    /// }
    ///
    /// assert_eq!(map["poneyland"], 22);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_mut(self) -> &'a mut V {
        self.elem.into_mut_refs().1
    }
    /// Sets the value of the entry, and returns the entry's old value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
    ///     assert_eq!(o.insert(15), 12);
    /// }
    ///
    /// assert_eq!(map["poneyland"], 15);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn insert(&mut self, value: V) -> V {
        // Consistent with `RawOccupiedEntryMut::insert`: swap the new value in
        // and return the old one (equivalent to the previous mem::swap dance).
        mem::replace(self.get_mut(), value)
    }
    /// Takes the value out of the entry, and returns it.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// if let Entry::Occupied(o) = map.entry("poneyland") {
    ///     assert_eq!(o.remove(), 12);
    /// }
    ///
    /// assert_eq!(map.contains_key("poneyland"), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn remove(self) -> V {
        pop_internal(self.elem).1
    }
    /// Returns a key that was used for search.
    ///
    /// The key was retained for further use.
    // Leaves `self.key` as `None`; `replace_entry`/`replace_key` must not be
    // called afterwards (they unwrap the retained key).
    fn take_key(&mut self) -> Option<K> {
        self.key.take()
    }
    /// Replaces the entry, returning the old key and value. The new key in the hash map will be
    /// the key used to create this entry.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(map_entry_replace)]
    /// use std::collections::hash_map::{Entry, HashMap};
    /// use std::rc::Rc;
    ///
    /// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
    /// map.insert(Rc::new("Stringthing".to_string()), 15);
    ///
    /// let my_key = Rc::new("Stringthing".to_string());
    ///
    /// if let Entry::Occupied(entry) = map.entry(my_key) {
    ///     // Also replace the key with a handle to our other key.
    ///     let (old_key, old_value): (Rc<String>, u32) = entry.replace_entry(16);
    /// }
    ///
    /// ```
    #[unstable(feature = "map_entry_replace", issue = "44286")]
    pub fn replace_entry(mut self, value: V) -> (K, V) {
        let (old_key, old_value) = self.elem.read_mut();
        // Invariant: `self.key` is still Some here — it is only drained by
        // `take_key`, which is private to this module.
        let old_key = mem::replace(old_key, self.key.unwrap());
        let old_value = mem::replace(old_value, value);
        (old_key, old_value)
    }
    /// Replaces the key in the hash map with the key used to create this entry.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(map_entry_replace)]
    /// use std::collections::hash_map::{Entry, HashMap};
    /// use std::rc::Rc;
    ///
    /// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
    /// let mut known_strings: Vec<Rc<String>> = Vec::new();
    ///
    /// // Initialise known strings, run program, etc.
    ///
    /// reclaim_memory(&mut map, &known_strings);
    ///
    /// fn reclaim_memory(map: &mut HashMap<Rc<String>, u32>, known_strings: &[Rc<String>] ) {
    ///     for s in known_strings {
    ///         if let Entry::Occupied(entry) = map.entry(s.clone()) {
    ///             // Replaces the entry's key with our version of it in `known_strings`.
    ///             entry.replace_key();
    ///         }
    ///     }
    /// }
    /// ```
    #[unstable(feature = "map_entry_replace", issue = "44286")]
    pub fn replace_key(mut self) -> K {
        let (old_key, _) = self.elem.read_mut();
        // Same invariant as `replace_entry`: `self.key` must still be Some.
        mem::replace(old_key, self.key.unwrap())
    }
}
impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> {
    /// Gets a reference to the key that would be used when inserting a value
    /// through the `VacantEntry`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
    /// ```
    #[stable(feature = "map_entry_keys", since = "1.10.0")]
    pub fn key(&self) -> &K {
        &self.key
    }
    /// Take ownership of the key.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// if let Entry::Vacant(v) = map.entry("poneyland") {
    ///     v.into_key();
    /// }
    /// ```
    #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
    pub fn into_key(self) -> K {
        self.key
    }
    /// Sets the value of the entry with the VacantEntry's key,
    /// and returns a mutable reference to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// if let Entry::Vacant(o) = map.entry("poneyland") {
    ///     o.insert(37);
    /// }
    /// assert_eq!(map["poneyland"], 37);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn insert(self, value: V) -> &'a mut V {
        // Uses the hash precomputed during the original lookup.
        let b = match self.elem {
            // Slot holds an entry with smaller probe displacement than ours:
            // robin-hood insertion displaces it to make room.
            NeqElem(mut bucket, disp) => {
                if disp >= DISPLACEMENT_THRESHOLD {
                    // NOTE(review): the tag presumably flags an overlong probe
                    // sequence to the resize policy — confirm where it is read.
                    bucket.table_mut().set_tag(true);
                }
                robin_hood(bucket, disp, self.hash, self.key, value)
            },
            // Genuinely empty slot: store the pair directly.
            NoElem(mut bucket, disp) => {
                if disp >= DISPLACEMENT_THRESHOLD {
                    bucket.table_mut().set_tag(true);
                }
                bucket.put(self.hash, self.key, value)
            },
        };
        b.into_mut_refs().1
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> FromIterator<(K, V)> for HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher + Default
{
    /// Builds a map from an iterator of key-value pairs, using a
    /// default-constructed hasher state.
    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> HashMap<K, V, S> {
        let mut result = HashMap::with_hasher(S::default());
        result.extend(iter);
        result
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> Extend<(K, V)> for HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    /// Inserts every pair from `iter`; later duplicates overwrite earlier ones.
    fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
        // Keys may be already present or show multiple times in the iterator.
        // An empty map reserves the full lower size hint; a non-empty one
        // reserves half of it (rounded up), so at most two resizes happen in
        // the worst case.
        let iter = iter.into_iter();
        let lower = iter.size_hint().0;
        let reserve = if self.is_empty() { lower } else { (lower + 1) / 2 };
        self.reserve(reserve);
        for (key, value) in iter {
            self.insert(key, value);
        }
    }
}
#[stable(feature = "hash_extend_copy", since = "1.4.0")]
impl<'a, K, V, S> Extend<(&'a K, &'a V)> for HashMap<K, V, S>
    where K: Eq + Hash + Copy,
          V: Copy,
          S: BuildHasher
{
    /// Copies each borrowed pair (both types are `Copy`) and forwards to the
    /// by-value `Extend` implementation.
    fn extend<T: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: T) {
        let copied = iter.into_iter().map(|(&k, &v)| (k, v));
        self.extend(copied);
    }
}
/// `RandomState` is the default state for [`HashMap`] types.
///
/// A particular instance of `RandomState` will create the same instances of
/// [`Hasher`], but the hashers created by two different `RandomState`
/// instances are unlikely to produce the same result for the same values.
///
/// [`HashMap`]: struct.HashMap.html
/// [`Hasher`]: ../../hash/trait.Hasher.html
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use std::collections::hash_map::RandomState;
///
/// let s = RandomState::new();
/// let mut map = HashMap::with_hasher(s);
/// map.insert(1, 2);
/// ```
#[derive(Clone)]
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
pub struct RandomState {
// Two 64-bit key halves fed to `SipHasher13::new_with_keys`; seeded
// per-thread from the OS in `RandomState::new`.
k0: u64,
k1: u64,
}
impl RandomState {
/// Constructs a new `RandomState` that is initialized with random keys.
///
/// # Examples
///
/// ```
/// use std::collections::hash_map::RandomState;
///
/// let s = RandomState::new();
/// ```
#[inline]
#[allow(deprecated)]
// rand
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
pub fn new() -> RandomState {
// Historically this function did not cache keys from the OS and instead
// simply always called `rand::thread_rng().gen()` twice. In #31356 it
// was discovered, however, that because we re-seed the thread-local RNG
// from the OS periodically that this can cause excessive slowdown when
// many hash maps are created on a thread. To solve this performance
// trap we cache the first set of randomly generated keys per-thread.
//
// Later in #36481 it was discovered that exposing a deterministic
// iteration order allows a form of DOS attack. To counter that we
// increment one of the seeds on every RandomState creation, giving
// every corresponding HashMap a different iteration order.
thread_local!(static KEYS: Cell<(u64, u64)> = {
Cell::new(sys::hashmap_random_keys())
});
KEYS.with(|keys| {
let (k0, k1) = keys.get();
// Store the bumped k0 for the *next* RandomState; this instance keeps
// the pre-increment value, so successive states get distinct seeds.
keys.set((k0.wrapping_add(1), k1));
RandomState { k0: k0, k1: k1 }
})
}
}
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
impl BuildHasher for RandomState {
    type Hasher = DefaultHasher;

    /// Produces a `DefaultHasher` keyed with this state's seed pair; every
    /// hasher built from the same `RandomState` hashes identically.
    #[inline]
    #[allow(deprecated)]
    fn build_hasher(&self) -> DefaultHasher {
        DefaultHasher(SipHasher13::new_with_keys(self.k0, self.k1))
    }
}
/// The default [`Hasher`] used by [`RandomState`].
///
/// The internal algorithm is not specified, and so it and its hashes should
/// not be relied upon over releases.
///
/// [`RandomState`]: struct.RandomState.html
/// [`Hasher`]: ../../hash/trait.Hasher.html
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
#[allow(deprecated)]
#[derive(Clone, Debug)]
// Newtype over SipHasher13 (deprecated as a public type, hence the allow).
pub struct DefaultHasher(SipHasher13);
impl DefaultHasher {
/// Creates a new `DefaultHasher`.
///
/// This hasher is not guaranteed to be the same as all other
/// `DefaultHasher` instances, but is the same as all other `DefaultHasher`
/// instances created through `new` or `default`.
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
#[allow(deprecated)]
pub fn new() -> DefaultHasher {
// Fixed zero keys: unlike `RandomState`, `new` is deterministic, so this
// constructor on its own provides no hash-flooding resistance.
DefaultHasher(SipHasher13::new_with_keys(0, 0))
}
}
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
impl Default for DefaultHasher {
    /// Delegates to [`new`][DefaultHasher::new]; see its documentation for
    /// the guarantees it makes.
    fn default() -> DefaultHasher {
        DefaultHasher::new()
    }
}
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
impl Hasher for DefaultHasher {
    // Both methods simply forward to the wrapped SipHasher13.
    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        self.0.write(bytes)
    }

    #[inline]
    fn finish(&self) -> u64 {
        self.0.finish()
    }
}
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
impl Default for RandomState {
    /// Equivalent to [`RandomState::new`]: a freshly, randomly seeded state.
    #[inline]
    fn default() -> RandomState {
        RandomState::new()
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for RandomState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The key halves are deliberately not printed: exposing the SipHash
        // seed would undermine its hash-flooding protection. `pad` keeps
        // width/alignment formatting flags working.
        f.pad("RandomState { .. }")
    }
}
// `Recover` supports the set-like use of a map (`HashSet` is a
// `HashMap<K, ()>`): it retrieves, removes, or swaps the stored *key*
// itself rather than the unit value.
impl<K, S, Q: ?Sized> super::Recover<Q> for HashMap<K, (), S>
where K: Eq + Hash + Borrow<Q>,
S: BuildHasher,
Q: Eq + Hash
{
type Key = K;
#[inline]
fn get(&self, key: &Q) -> Option<&K> {
// `into_refs().0` borrows the stored key out of the found bucket.
self.search(key).map(|bucket| bucket.into_refs().0)
}
fn take(&mut self, key: &Q) -> Option<K> {
// Remove the matching entry and return its owned key.
self.search_mut(key).map(|bucket| pop_internal(bucket).0)
}
#[inline]
fn replace(&mut self, key: K) -> Option<K> {
self.reserve(1);
match self.entry(key) {
Occupied(mut occupied) => {
// Swap the caller's key into the bucket in place and return the
// key that was stored before. `take_key` presumably yields the key
// passed to `entry` above — confirm against `OccupiedEntry`.
let key = occupied.take_key().unwrap();
Some(mem::replace(occupied.elem.read_mut().0, key))
}
Vacant(vacant) => {
vacant.insert(());
None
}
}
}
}
// Compile-time-only witnesses that the map and its iterator types are
// covariant in their key/value lifetime parameters: each identity function
// typechecks only if a `'static` instantiation coerces to a shorter
// lifetime. Never called at runtime, hence the dead_code allow.
#[allow(dead_code)]
fn assert_covariance() {
fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> {
v
}
fn map_val<'new>(v: HashMap<u8, &'static str>) -> HashMap<u8, &'new str> {
v
}
fn iter_key<'a, 'new>(v: Iter<'a, &'static str, u8>) -> Iter<'a, &'new str, u8> {
v
}
fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> {
v
}
fn into_iter_key<'new>(v: IntoIter<&'static str, u8>) -> IntoIter<&'new str, u8> {
v
}
fn into_iter_val<'new>(v: IntoIter<u8, &'static str>) -> IntoIter<u8, &'new str> {
v
}
fn keys_key<'a, 'new>(v: Keys<'a, &'static str, u8>) -> Keys<'a, &'new str, u8> {
v
}
fn keys_val<'a, 'new>(v: Keys<'a, u8, &'static str>) -> Keys<'a, u8, &'new str> {
v
}
fn values_key<'a, 'new>(v: Values<'a, &'static str, u8>) -> Values<'a, &'new str, u8> {
v
}
fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> {
v
}
// Drain is also covariant in its borrow lifetime.
fn drain<'new>(d: Drain<'static, &'static str, &'static str>)
-> Drain<'new, &'new str, &'new str> {
d
}
}
#[cfg(test)]
mod test_map {
use super::HashMap;
use super::Entry::{Occupied, Vacant};
use super::RandomState;
use cell::RefCell;
use rand::{thread_rng, Rng};
use realstd::collections::CollectionAllocErr::*;
use realstd::mem::size_of;
use realstd::usize;
#[test]
fn test_zero_capacities() {
type HM = HashMap<i32, i32>;
let m = HM::new();
assert_eq!(m.capacity(), 0);
let m = HM::default();
assert_eq!(m.capacity(), 0);
let m = HM::with_hasher(RandomState::new());
assert_eq!(m.capacity(), 0);
let m = HM::with_capacity(0);
assert_eq!(m.capacity(), 0);
let m = HM::with_capacity_and_hasher(0, RandomState::new());
assert_eq!(m.capacity(), 0);
let mut m = HM::new();
m.insert(1, 1);
m.insert(2, 2);
m.remove(&1);
m.remove(&2);
m.shrink_to_fit();
assert_eq!(m.capacity(), 0);
let mut m = HM::new();
m.reserve(0);
assert_eq!(m.capacity(), 0);
}
#[test]
fn test_create_capacity_zero() {
let mut m = HashMap::with_capacity(0);
assert!(m.insert(1, 1).is_none());
assert!(m.contains_key(&1));
assert!(!m.contains_key(&0));
}
#[test]
fn test_insert() {
let mut m = HashMap::new();
assert_eq!(m.len(), 0);
assert!(m.insert(1, 2).is_none());
assert_eq!(m.len(), 1);
assert!(m.insert(2, 4).is_none());
assert_eq!(m.len(), 2);
assert_eq!(*m.get(&1).unwrap(), 2);
assert_eq!(*m.get(&2).unwrap(), 4);
}
#[test]
fn test_clone() {
let mut m = HashMap::new();
assert_eq!(m.len(), 0);
assert!(m.insert(1, 2).is_none());
assert_eq!(m.len(), 1);
assert!(m.insert(2, 4).is_none());
assert_eq!(m.len(), 2);
let m2 = m.clone();
assert_eq!(*m2.get(&1).unwrap(), 2);
assert_eq!(*m2.get(&2).unwrap(), 4);
assert_eq!(m2.len(), 2);
}
thread_local! { static DROP_VECTOR: RefCell<Vec<i32>> = RefCell::new(Vec::new()) }
#[derive(Hash, PartialEq, Eq)]
struct Droppable {
k: usize,
}
impl Droppable {
fn new(k: usize) -> Droppable {
DROP_VECTOR.with(|slot| {
slot.borrow_mut()[k] += 1;
});
Droppable { k }
}
}
impl Drop for Droppable {
fn drop(&mut self) {
DROP_VECTOR.with(|slot| {
slot.borrow_mut()[self.k] -= 1;
});
}
}
impl Clone for Droppable {
fn clone(&self) -> Droppable {
Droppable::new(self.k)
}
}
#[test]
fn test_drops() {
DROP_VECTOR.with(|slot| {
*slot.borrow_mut() = vec![0; 200];
});
{
let mut m = HashMap::new();
DROP_VECTOR.with(|v| {
for i in 0..200 {
assert_eq!(v.borrow()[i], 0);
}
});
for i in 0..100 {
let d1 = Droppable::new(i);
let d2 = Droppable::new(i + 100);
m.insert(d1, d2);
}
DROP_VECTOR.with(|v| {
for i in 0..200 {
assert_eq!(v.borrow()[i], 1);
}
});
for i in 0..50 {
let k = Droppable::new(i);
let v = m.remove(&k);
assert!(v.is_some());
DROP_VECTOR.with(|v| {
assert_eq!(v.borrow()[i], 1);
assert_eq!(v.borrow()[i+100], 1);
});
}
DROP_VECTOR.with(|v| {
for i in 0..50 {
assert_eq!(v.borrow()[i], 0);
assert_eq!(v.borrow()[i+100], 0);
}
for i in 50..100 {
assert_eq!(v.borrow()[i], 1);
assert_eq!(v.borrow()[i+100], 1);
}
});
}
DROP_VECTOR.with(|v| {
for i in 0..200 {
assert_eq!(v.borrow()[i], 0);
}
});
}
#[test]
fn test_into_iter_drops() {
DROP_VECTOR.with(|v| {
*v.borrow_mut() = vec![0; 200];
});
let hm = {
let mut hm = HashMap::new();
DROP_VECTOR.with(|v| {
for i in 0..200 {
assert_eq!(v.borrow()[i], 0);
}
});
for i in 0..100 {
let d1 = Droppable::new(i);
let d2 = Droppable::new(i + 100);
hm.insert(d1, d2);
}
DROP_VECTOR.with(|v| {
for i in 0..200 {
assert_eq!(v.borrow()[i], 1);
}
});
hm
};
// By the way, ensure that cloning doesn't screw up the dropping.
drop(hm.clone());
{
let mut half = hm.into_iter().take(50);
DROP_VECTOR.with(|v| {
for i in 0..200 {
assert_eq!(v.borrow()[i], 1);
}
});
for _ in half.by_ref() {}
DROP_VECTOR.with(|v| {
let nk = (0..100)
.filter(|&i| v.borrow()[i] == 1)
.count();
let nv = (0..100)
.filter(|&i| v.borrow()[i + 100] == 1)
.count();
assert_eq!(nk, 50);
assert_eq!(nv, 50);
});
};
DROP_VECTOR.with(|v| {
for i in 0..200 {
assert_eq!(v.borrow()[i], 0);
}
});
}
#[test]
fn test_empty_remove() {
let mut m: HashMap<i32, bool> = HashMap::new();
assert_eq!(m.remove(&0), None);
}
#[test]
fn test_empty_entry() {
let mut m: HashMap<i32, bool> = HashMap::new();
match m.entry(0) {
Occupied(_) => panic!(),
Vacant(_) => {}
}
assert!(*m.entry(0).or_insert(true));
assert_eq!(m.len(), 1);
}
#[test]
fn test_empty_iter() {
let mut m: HashMap<i32, bool> = HashMap::new();
assert_eq!(m.drain().next(), None);
assert_eq!(m.keys().next(), None);
assert_eq!(m.values().next(), None);
assert_eq!(m.values_mut().next(), None);
assert_eq!(m.iter().next(), None);
assert_eq!(m.iter_mut().next(), None);
assert_eq!(m.len(), 0);
assert!(m.is_empty());
assert_eq!(m.into_iter().next(), None);
}
#[test]
fn test_lots_of_insertions() {
let mut m = HashMap::new();
// Try this a few times to make sure we never screw up the hashmap's
// internal state.
for _ in 0..10 {
assert!(m.is_empty());
for i in 1..1001 {
assert!(m.insert(i, i).is_none());
for j in 1..=i {
let r = m.get(&j);
assert_eq!(r, Some(&j));
}
for j in i + 1..1001 {
let r = m.get(&j);
assert_eq!(r, None);
}
}
for i in 1001..2001 {
assert!(!m.contains_key(&i));
}
// remove forwards
for i in 1..1001 {
assert!(m.remove(&i).is_some());
for j in 1..=i {
assert!(!m.contains_key(&j));
}
for j in i + 1..1001 {
assert!(m.contains_key(&j));
}
}
for i in 1..1001 {
assert!(!m.contains_key(&i));
}
for i in 1..1001 {
assert!(m.insert(i, i).is_none());
}
// remove backwards
for i in (1..1001).rev() {
assert!(m.remove(&i).is_some());
for j in i..1001 {
assert!(!m.contains_key(&j));
}
for j in 1..i {
assert!(m.contains_key(&j));
}
}
}
}
#[test]
fn test_find_mut() {
let mut m = HashMap::new();
assert!(m.insert(1, 12).is_none());
assert!(m.insert(2, 8).is_none());
assert!(m.insert(5, 14).is_none());
let new = 100;
match m.get_mut(&5) {
None => panic!(),
Some(x) => *x = new,
}
assert_eq!(m.get(&5), Some(&new));
}
#[test]
fn test_insert_overwrite() {
    let mut m = HashMap::new();
    // Fresh key: nothing to displace.
    assert!(m.insert(1, 2).is_none());
    assert_eq!(*m.get(&1).unwrap(), 2);
    // Re-inserting the same key overwrites and returns the old value.
    // (Was `assert!(!m.insert(1, 3).is_none())` — double negation is both
    // unidiomatic and weaker; pin the exact returned value instead.)
    assert_eq!(m.insert(1, 3), Some(2));
    assert_eq!(*m.get(&1).unwrap(), 3);
}
#[test]
fn test_insert_conflicts() {
let mut m = HashMap::with_capacity(4);
assert!(m.insert(1, 2).is_none());
assert!(m.insert(5, 3).is_none());
assert!(m.insert(9, 4).is_none());
assert_eq!(*m.get(&9).unwrap(), 4);
assert_eq!(*m.get(&5).unwrap(), 3);
assert_eq!(*m.get(&1).unwrap(), 2);
}
#[test]
fn test_conflict_remove() {
let mut m = HashMap::with_capacity(4);
assert!(m.insert(1, 2).is_none());
assert_eq!(*m.get(&1).unwrap(), 2);
assert!(m.insert(5, 3).is_none());
assert_eq!(*m.get(&1).unwrap(), 2);
assert_eq!(*m.get(&5).unwrap(), 3);
assert!(m.insert(9, 4).is_none());
assert_eq!(*m.get(&1).unwrap(), 2);
assert_eq!(*m.get(&5).unwrap(), 3);
assert_eq!(*m.get(&9).unwrap(), 4);
assert!(m.remove(&1).is_some());
assert_eq!(*m.get(&9).unwrap(), 4);
assert_eq!(*m.get(&5).unwrap(), 3);
}
#[test]
fn test_is_empty() {
let mut m = HashMap::with_capacity(4);
assert!(m.insert(1, 2).is_none());
assert!(!m.is_empty());
assert!(m.remove(&1).is_some());
assert!(m.is_empty());
}
#[test]
fn test_remove() {
    // The first removal yields the stored value; the second finds nothing.
    let mut map = HashMap::new();
    map.insert(1, 2);
    assert_eq!(map.remove(&1), Some(2));
    assert_eq!(map.remove(&1), None);
}
#[test]
fn test_remove_entry() {
let mut m = HashMap::new();
m.insert(1, 2);
assert_eq!(m.remove_entry(&1), Some((1, 2)));
assert_eq!(m.remove(&1), None);
}
#[test]
fn test_iterate() {
let mut m = HashMap::with_capacity(4);
for i in 0..32 {
assert!(m.insert(i, i*2).is_none());
}
assert_eq!(m.len(), 32);
let mut observed: u32 = 0;
for (k, v) in &m {
assert_eq!(*v, *k * 2);
observed |= 1 << *k;
}
assert_eq!(observed, 0xFFFF_FFFF);
}
#[test]
fn test_keys() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_iter().collect();
let keys: Vec<_> = map.keys().cloned().collect();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
assert!(keys.contains(&2));
assert!(keys.contains(&3));
}
#[test]
fn test_values() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_iter().collect();
let values: Vec<_> = map.values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&'a'));
assert!(values.contains(&'b'));
assert!(values.contains(&'c'));
}
#[test]
fn test_values_mut() {
let vec = vec![(1, 1), (2, 2), (3, 3)];
let mut map: HashMap<_, _> = vec.into_iter().collect();
for value in map.values_mut() {
*value = (*value) * 2
}
let values: Vec<_> = map.values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&2));
assert!(values.contains(&4));
assert!(values.contains(&6));
}
#[test]
fn test_find() {
let mut m = HashMap::new();
assert!(m.get(&1).is_none());
m.insert(1, 2);
match m.get(&1) {
None => panic!(),
Some(v) => assert_eq!(*v, 2),
}
}
#[test]
fn test_eq() {
    // Maps compare equal iff they hold exactly the same key/value pairs,
    // independent of insertion order.
    let mut left = HashMap::new();
    for &(k, v) in &[(1, 2), (2, 3), (3, 4)] {
        left.insert(k, v);
    }
    let mut right = HashMap::new();
    right.insert(1, 2);
    right.insert(2, 3);
    assert!(left != right);
    right.insert(3, 4);
    assert_eq!(left, right);
}
#[test]
fn test_show() {
let mut map = HashMap::new();
let empty: HashMap<i32, i32> = HashMap::new();
map.insert(1, 2);
map.insert(3, 4);
let map_str = format!("{:?}", map);
assert!(map_str == "{1: 2, 3: 4}" ||
map_str == "{3: 4, 1: 2}");
assert_eq!(format!("{:?}", empty), "{}");
}
#[test]
fn test_expand() {
let mut m = HashMap::new();
assert_eq!(m.len(), 0);
assert!(m.is_empty());
let mut i = 0;
let old_raw_cap = m.raw_capacity();
while old_raw_cap == m.raw_capacity() {
m.insert(i, i);
i += 1;
}
assert_eq!(m.len(), i);
assert!(!m.is_empty());
}
#[test]
fn test_behavior_resize_policy() {
let mut m = HashMap::new();
assert_eq!(m.len(), 0);
assert_eq!(m.raw_capacity(), 0);
assert!(m.is_empty());
m.insert(0, 0);
m.remove(&0);
assert!(m.is_empty());
let initial_raw_cap = m.raw_capacity();
m.reserve(initial_raw_cap);
let raw_cap = m.raw_capacity();
assert_eq!(raw_cap, initial_raw_cap * 2);
let mut i = 0;
for _ in 0..raw_cap * 3 / 4 {
m.insert(i, i);
i += 1;
}
// three quarters full
assert_eq!(m.len(), i);
assert_eq!(m.raw_capacity(), raw_cap);
for _ in 0..raw_cap / 4 {
m.insert(i, i);
i += 1;
}
// half full
let new_raw_cap = m.raw_capacity();
assert_eq!(new_raw_cap, raw_cap * 2);
for _ in 0..raw_cap / 2 - 1 {
i -= 1;
m.remove(&i);
assert_eq!(m.raw_capacity(), new_raw_cap);
}
// A little more than one quarter full.
m.shrink_to_fit();
assert_eq!(m.raw_capacity(), raw_cap);
// again, a little more than half full
for _ in 0..raw_cap / 2 - 1 {
i -= 1;
m.remove(&i);
}
m.shrink_to_fit();
assert_eq!(m.len(), i);
assert!(!m.is_empty());
assert_eq!(m.raw_capacity(), initial_raw_cap);
}
#[test]
fn test_reserve_shrink_to_fit() {
let mut m = HashMap::new();
m.insert(0, 0);
m.remove(&0);
assert!(m.capacity() >= m.len());
for i in 0..128 {
m.insert(i, i);
}
m.reserve(256);
let usable_cap = m.capacity();
for i in 128..(128 + 256) {
m.insert(i, i);
assert_eq!(m.capacity(), usable_cap);
}
for i in 100..(128 + 256) {
assert_eq!(m.remove(&i), Some(i));
}
m.shrink_to_fit();
assert_eq!(m.len(), 100);
assert!(!m.is_empty());
assert!(m.capacity() >= m.len());
for i in 0..100 {
assert_eq!(m.remove(&i), Some(i));
}
m.shrink_to_fit();
m.insert(0, 0);
assert_eq!(m.len(), 1);
assert!(m.capacity() >= m.len());
assert_eq!(m.remove(&0), Some(0));
}
#[test]
fn test_from_iter() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let map: HashMap<_, _> = xs.iter().cloned().collect();
for &(k, v) in &xs {
assert_eq!(map.get(&k), Some(&v));
}
}
#[test]
fn test_size_hint() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let map: HashMap<_, _> = xs.iter().cloned().collect();
let mut iter = map.iter();
for _ in iter.by_ref().take(3) {}
assert_eq!(iter.size_hint(), (3, Some(3)));
}
#[test]
fn test_iter_len() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let map: HashMap<_, _> = xs.iter().cloned().collect();
let mut iter = map.iter();
for _ in iter.by_ref().take(3) {}
assert_eq!(iter.len(), 3);
}
#[test]
fn test_mut_size_hint() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let mut map: HashMap<_, _> = xs.iter().cloned().collect();
let mut iter = map.iter_mut();
for _ in iter.by_ref().take(3) {}
assert_eq!(iter.size_hint(), (3, Some(3)));
}
#[test]
fn test_iter_mut_len() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let mut map: HashMap<_, _> = xs.iter().cloned().collect();
let mut iter = map.iter_mut();
for _ in iter.by_ref().take(3) {}
assert_eq!(iter.len(), 3);
}
#[test]
fn test_index() {
let mut map = HashMap::new();
map.insert(1, 2);
map.insert(2, 1);
map.insert(3, 4);
assert_eq!(map[&2], 1);
}
#[test]
#[should_panic]
fn test_index_nonexistent() {
let mut map = HashMap::new();
map.insert(1, 2);
map.insert(2, 1);
map.insert(3, 4);
map[&4];
}
#[test]
fn test_entry() {
let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
let mut map: HashMap<_, _> = xs.iter().cloned().collect();
// Existing key (insert)
match map.entry(1) {
Vacant(_) => unreachable!(),
Occupied(mut view) => {
assert_eq!(view.get(), &10);
assert_eq!(view.insert(100), 10);
}
}
assert_eq!(map.get(&1).unwrap(), &100);
assert_eq!(map.len(), 6);
// Existing key (update)
match map.entry(2) {
Vacant(_) => unreachable!(),
Occupied(mut view) => {
let v = view.get_mut();
let new_v = (*v) * 10;
*v = new_v;
}
}
assert_eq!(map.get(&2).unwrap(), &200);
assert_eq!(map.len(), 6);
// Existing key (take)
match map.entry(3) {
Vacant(_) => unreachable!(),
Occupied(view) => {
assert_eq!(view.remove(), 30);
}
}
assert_eq!(map.get(&3), None);
assert_eq!(map.len(), 5);
// Inexistent key (insert)
match map.entry(10) {
Occupied(_) => unreachable!(),
Vacant(view) => {
assert_eq!(*view.insert(1000), 1000);
}
}
assert_eq!(map.get(&10).unwrap(), &1000);
assert_eq!(map.len(), 6);
}
#[test]
fn test_entry_take_doesnt_corrupt() {
#![allow(deprecated)] //rand
// Test for #19292
fn check(m: &HashMap<i32, ()>) {
for k in m.keys() {
assert!(m.contains_key(k),
"{} is in keys() but not in the map?", k);
}
}
let mut m = HashMap::new();
let mut rng = thread_rng();
// Populate the map with some items.
for _ in 0..50 {
let x = rng.gen_range(-10, 10);
m.insert(x, ());
}
for _ in 0..1000 {
let x = rng.gen_range(-10, 10);
match m.entry(x) {
Vacant(_) => {}
Occupied(e) => {
e.remove();
}
}
check(&m);
}
}
#[test]
fn test_extend_ref() {
let mut a = HashMap::new();
a.insert(1, "one");
let mut b = HashMap::new();
b.insert(2, "two");
b.insert(3, "three");
a.extend(&b);
assert_eq!(a.len(), 3);
assert_eq!(a[&1], "one");
assert_eq!(a[&2], "two");
assert_eq!(a[&3], "three");
}
#[test]
fn test_capacity_not_less_than_len() {
let mut a = HashMap::new();
let mut item = 0;
for _ in 0..116 {
a.insert(item, 0);
item += 1;
}
assert!(a.capacity() > a.len());
let free = a.capacity() - a.len();
for _ in 0..free {
a.insert(item, 0);
item += 1;
}
assert_eq!(a.len(), a.capacity());
// Insert at capacity should cause allocation.
a.insert(item, 0);
assert!(a.capacity() > a.len());
}
#[test]
fn test_occupied_entry_key() {
    let mut a = HashMap::new();
    let key = "hello there";
    let value = "value goes here";
    assert!(a.is_empty());
    // `&str` is `Copy`; the original called `.clone()` here, which merely
    // copies the reference (clippy: clone_on_copy). Pass the values directly.
    a.insert(key, value);
    assert_eq!(a.len(), 1);
    assert_eq!(a[key], value);
    match a.entry(key) {
        Vacant(_) => panic!(),
        Occupied(e) => assert_eq!(key, *e.key()),
    }
    assert_eq!(a.len(), 1);
    assert_eq!(a[key], value);
}
#[test]
fn test_vacant_entry_key() {
let mut a = HashMap::new();
let key = "hello there";
let value = "value goes here";
assert!(a.is_empty());
match a.entry(key.clone()) {
Occupied(_) => panic!(),
Vacant(e) => {
assert_eq!(key, *e.key());
e.insert(value.clone());
}
}
assert_eq!(a.len(), 1);
assert_eq!(a[key], value);
}
#[test]
fn test_retain() {
let mut map: HashMap<i32, i32> = (0..100).map(|x|(x, x*10)).collect();
map.retain(|&k, _| k % 2 == 0);
assert_eq!(map.len(), 50);
assert_eq!(map[&2], 20);
assert_eq!(map[&4], 40);
assert_eq!(map[&6], 60);
}
#[test]
fn test_adaptive() {
const TEST_LEN: usize = 5000;
// by cloning we get maps with the same hasher seed
let mut first = HashMap::new();
let mut second = first.clone();
first.extend((0..TEST_LEN).map(|i| (i, i)));
second.extend((TEST_LEN..TEST_LEN * 2).map(|i| (i, i)));
for (&k, &v) in &second {
let prev_cap = first.capacity();
let expect_grow = first.len() == prev_cap;
first.insert(k, v);
if !expect_grow && first.capacity() != prev_cap {
return;
}
}
panic!("Adaptive early resize failed");
}
#[test]
fn test_try_reserve() {
let mut empty_bytes: HashMap<u8,u8> = HashMap::new();
const MAX_USIZE: usize = usize::MAX;
// HashMap and RawTables use complicated size calculations
// hashes_size is sizeof(HashUint) * capacity;
// pairs_size is sizeof((K. V)) * capacity;
// alignment_hashes_size is 8
// alignment_pairs size is 4
let size_of_multiplier = (size_of::<usize>() + size_of::<(u8, u8)>()).next_power_of_two();
// The following formula is used to calculate the new capacity
let max_no_ovf = ((MAX_USIZE / 11) * 10) / size_of_multiplier - 1;
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
} else { panic!("usize::MAX should trigger an overflow!"); }
if size_of::<usize>() < 8 {
if let Err(CapacityOverflow) = empty_bytes.try_reserve(max_no_ovf) {
} else { panic!("isize::MAX + 1 should trigger a CapacityOverflow!") }
} else {
if let Err(AllocErr) = empty_bytes.try_reserve(max_no_ovf) {
} else { panic!("isize::MAX + 1 should trigger an OOM!") }
}
}
#[test]
fn test_raw_entry() {
use super::RawEntryMut::{Occupied, Vacant};
let xs = [(1i32, 10i32), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
let mut map: HashMap<_, _> = xs.iter().cloned().collect();
let compute_hash = |map: &HashMap<i32, i32>, k: i32| -> u64 {
use core::hash::{BuildHasher, Hash, Hasher};
let mut hasher = map.hasher().build_hasher();
k.hash(&mut hasher);
hasher.finish()
};
// Existing key (insert)
match map.raw_entry_mut().from_key(&1) {
Vacant(_) => unreachable!(),
Occupied(mut view) => {
assert_eq!(view.get(), &10);
assert_eq!(view.insert(100), 10);
}
}
let hash1 = compute_hash(&map, 1);
assert_eq!(map.raw_entry().from_key(&1).unwrap(), (&1, &100));
assert_eq!(map.raw_entry().from_hash(hash1, |k| *k == 1).unwrap(), (&1, &100));
assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash1, &1).unwrap(), (&1, &100));
assert_eq!(map.raw_entry().search_bucket(hash1, |k| *k == 1).unwrap(), (&1, &100));
assert_eq!(map.len(), 6);
// Existing key (update)
match map.raw_entry_mut().from_key(&2) {
Vacant(_) => unreachable!(),
Occupied(mut view) => {
let v = view.get_mut();
let new_v = (*v) * 10;
*v = new_v;
}
}
let hash2 = compute_hash(&map, 2);
assert_eq!(map.raw_entry().from_key(&2).unwrap(), (&2, &200));
assert_eq!(map.raw_entry().from_hash(hash2, |k| *k == 2).unwrap(), (&2, &200));
assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash2, &2).unwrap(), (&2, &200));
assert_eq!(map.raw_entry().search_bucket(hash2, |k| *k == 2).unwrap(), (&2, &200));
assert_eq!(map.len(), 6);
// Existing key (take)
let hash3 = compute_hash(&map, 3);
match map.raw_entry_mut().from_key_hashed_nocheck(hash3, &3) {
Vacant(_) => unreachable!(),
Occupied(view) => {
assert_eq!(view.remove_entry(), (3, 30));
}
}
assert_eq!(map.raw_entry().from_key(&3), None);
assert_eq!(map.raw_entry().from_hash(hash3, |k| *k == 3), None);
assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash3, &3), None);
assert_eq!(map.raw_entry().search_bucket(hash3, |k| *k == 3), None);
assert_eq!(map.len(), 5);
// Nonexistent key (insert)
match map.raw_entry_mut().from_key(&10) {
Occupied(_) => unreachable!(),
Vacant(view) => {
assert_eq!(view.insert(10, 1000), (&mut 10, &mut 1000));
}
}
assert_eq!(map.raw_entry().from_key(&10).unwrap(), (&10, &1000));
assert_eq!(map.len(), 6);
// Ensure all lookup methods produce equivalent results.
for k in 0..12 {
let hash = compute_hash(&map, k);
let v = map.get(&k).cloned();
let kv = v.as_ref().map(|v| (&k, v));
assert_eq!(map.raw_entry().from_key(&k), kv);
assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv);
assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv);
assert_eq!(map.raw_entry().search_bucket(hash, |q| *q == k), kv);
match map.raw_entry_mut().from_key(&k) {
Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
Vacant(_) => assert_eq!(v, None),
}
match map.raw_entry_mut().from_key_hashed_nocheck(hash, &k) {
Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
Vacant(_) => assert_eq!(v, None),
}
match map.raw_entry_mut().from_hash(hash, |q| *q == k) {
Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
Vacant(_) => assert_eq!(v, None),
}
match map.raw_entry_mut().search_bucket(hash, |q| *q == k) {
Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
Vacant(_) => assert_eq!(v, None),
}
}
}
}
Rollup merge of #56561 - Zoxc:too-raw, r=Gankro
Fix bug in from_key_hashed_nocheck
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use self::Entry::*;
use self::VacantEntryState::*;
use intrinsics::unlikely;
use collections::CollectionAllocErr;
use cell::Cell;
use borrow::Borrow;
use cmp::max;
use fmt::{self, Debug};
#[allow(deprecated)]
use hash::{Hash, Hasher, BuildHasher, SipHasher13};
use iter::{FromIterator, FusedIterator};
use mem::{self, replace};
use ops::{Deref, DerefMut, Index};
use sys;
use super::table::{self, Bucket, EmptyBucket, Fallibility, FullBucket, FullBucketMut, RawTable,
SafeHash};
use super::table::BucketState::{Empty, Full};
use super::table::Fallibility::{Fallible, Infallible};
// Floor for any non-empty table's slot count; being a power of two keeps
// raw capacities powers of two (see `try_raw_capacity` below).
const MIN_NONZERO_RAW_CAPACITY: usize = 32; // must be a power of two
/// The default behavior of HashMap implements a maximum load factor of 90.9%.
// (10/11 ≈ 0.909 — the ratio used throughout `DefaultResizePolicy`.)
#[derive(Clone)]
struct DefaultResizePolicy;
impl DefaultResizePolicy {
    #[inline]
    fn new() -> DefaultResizePolicy {
        DefaultResizePolicy
    }

    /// A hash map's "capacity" is the number of elements it can hold without
    /// being resized. Its "raw capacity" is the number of slots required to
    /// provide that capacity, accounting for maximum loading. The raw capacity
    /// is always zero or a power of two.
    #[inline]
    fn try_raw_capacity(&self, len: usize) -> Result<usize, CollectionAllocErr> {
        if len == 0 {
            return Ok(0);
        }
        // 1. Account for loading: `raw_capacity >= len * 1.1`.
        // 2. Round up to a power of two.
        // 3. Clamp to the minimum non-zero size.
        let loaded = len.checked_mul(11)
            .map(|product| product / 10)
            .and_then(|slots| slots.checked_next_power_of_two())
            .ok_or(CollectionAllocErr::CapacityOverflow)?;
        Ok(max(MIN_NONZERO_RAW_CAPACITY, loaded))
    }

    /// Like `try_raw_capacity`, but panics on overflow.
    #[inline]
    fn raw_capacity(&self, len: usize) -> usize {
        self.try_raw_capacity(len).expect("raw_capacity overflow")
    }

    /// The capacity of the given raw capacity.
    #[inline]
    fn capacity(&self, raw_cap: usize) -> usize {
        // This doesn't have to be checked for overflow since allocation size
        // in bytes will overflow earlier than multiplication by 10.
        //
        // As per https://github.com/rust-lang/rust/pull/30991 this rounds up:
        // (raw_cap * den + (den - 1)) / num, with den = 10 and num = 11.
        (raw_cap * 10 + 9) / 11
    }
}
// The main performance trick in this hashmap is called Robin Hood Hashing.
// It gains its excellent performance from one essential operation:
//
// If an insertion collides with an existing element, and that element's
// "probe distance" (how far away the element is from its ideal location)
// is higher than how far we've already probed, swap the elements.
//
// This massively lowers variance in probe distance, and allows us to get very
// high load factors with good performance. The 90% load factor I use is rather
// conservative.
//
// > Why a load factor of approximately 90%?
//
// In general, all the distances to initial buckets will converge on the mean.
// At a load factor of α, the odds of finding the target bucket after k
// probes is approximately 1-α^k. If we set this equal to 50% (since we converge
// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
// this down to make the math easier on the CPU and avoid its FPU.
// Since on average we start the probing in the middle of a cache line, this
// strategy pulls in two cache lines of hashes on every lookup. I think that's
// pretty good, but if you want to trade off some space, it could go down to one
// cache line on average with an α of 0.84.
//
// > Wait, what? Where did you get 1-α^k from?
//
// On the first probe, your odds of a collision with an existing element is α.
// The odds of doing this twice in a row is approximately α^2. For three times,
// α^3, etc. Therefore, the odds of colliding k times is α^k. The odds of NOT
// colliding after k tries is 1-α^k.
//
// The paper from 1986 cited below mentions an implementation which keeps track
// of the distance-to-initial-bucket histogram. This approach is not suitable
// for modern architectures because it requires maintaining an internal data
// structure. This allows very good first guesses, but we are most concerned
// with guessing entire cache lines, not individual indexes. Furthermore, array
// accesses are no longer linear and in one direction, as we have now. There
// is also memory and cache pressure that this would entail that would be very
// difficult to properly see in a microbenchmark.
//
// ## Future Improvements (FIXME!)
//
// Allow the load factor to be changed dynamically and/or at initialization.
//
// Also, would it be possible for us to reuse storage when growing the
// underlying table? This is exactly the use case for 'realloc', and may
// be worth exploring.
//
// ## Future Optimizations (FIXME!)
//
// Another possible design choice that I made without any real reason is
// parameterizing the raw table over keys and values. Technically, all we need
// is the size and alignment of keys and values, and the code should be just as
// efficient (well, we might need one for power-of-two size and one for not...).
// This has the potential to reduce code bloat in rust executables, without
// really losing anything except 4 words (key size, key alignment, val size,
// val alignment) which can be passed in to every call of a `RawTable` function.
// This would definitely be an avenue worth exploring if people start complaining
// about the size of rust executables.
//
// Annotate exceedingly likely branches in `table::make_hash`
// and `search_hashed` to reduce instruction cache pressure
// and mispredictions once it becomes possible (blocked on issue #11092).
//
// Shrinking the table could simply reallocate in place after moving buckets
// to the first half.
//
// The growth algorithm (fragment of the Proof of Correctness)
// --------------------
//
// The growth algorithm is basically a fast path of the naive reinsertion-
// during-resize algorithm. Other paths should never be taken.
//
// Consider growing a robin hood hashtable of capacity n. Normally, we do this
// by allocating a new table of capacity `2n`, and then individually reinsert
// each element in the old table into the new one. This guarantees that the
// new table is a valid robin hood hashtable with all the desired statistical
// properties. Remark that the order we reinsert the elements in should not
// matter. For simplicity and efficiency, we will consider only linear
// reinsertions, which consist of reinserting all elements in the old table
// into the new one by increasing order of index. However we will not be
// starting our reinsertions from index 0 in general. If we start from index
// i, for the purpose of reinsertion we will consider all elements with real
// index j < i to have virtual index n + j.
//
// Our hash generation scheme consists of generating a 64-bit hash and
// truncating the most significant bits. When moving to the new table, we
// simply introduce a new bit to the front of the hash. Therefore, if an
// element has ideal index i in the old table, it can have one of two ideal
// locations in the new table. If the new bit is 0, then the new ideal index
// is i. If the new bit is 1, then the new ideal index is n + i. Intuitively,
// we are producing two independent tables of size n, and for each element we
// independently choose which table to insert it into with equal probability.
// However, rather than wrapping around themselves on overflowing their
// indexes, the first table overflows into the second, and the second into the
// first. Visually, our new table will look something like:
//
// [yy_xxx_xxxx_xxx|xx_yyy_yyyy_yyy]
//
// Where x's are elements inserted into the first table, y's are elements
// inserted into the second, and _'s are empty sections. We now define a few
// key concepts that we will use later. Note that this is a very abstract
// perspective of the table. A real resized table would be at least half
// empty.
//
// Theorem: A linear robin hood reinsertion from the first ideal element
// produces identical results to a linear naive reinsertion from the same
// element.
//
// FIXME(Gankro, pczarn): review the proof and put it all in a separate README.md
//
// Adaptive early resizing
// ----------------------
// To protect against degenerate performance scenarios (including DOS attacks),
// the implementation includes an adaptive behavior that can resize the map
// early (before its capacity is exceeded) when suspiciously long probe sequences
// are encountered.
//
// With this algorithm in place it would be possible to turn a CPU attack into
// a memory attack due to the aggressive resizing. To prevent that the
// adaptive behavior only triggers when the map is at least half full.
// This reduces the effectiveness of the algorithm but also makes it completely safe.
//
// The previous safety measure also prevents degenerate interactions with
// really bad quality hash algorithms that can make normal inputs look like a
// DOS attack.
//
// Maximum probe-sequence length tolerated before an insertion tags the table
// for an adaptive early resize (see "Adaptive early resizing" above).
const DISPLACEMENT_THRESHOLD: usize = 128;
//
// The threshold of 128 is chosen to minimize the chance of exceeding it.
// In particular, we want that chance to be less than 10^-8 with a load of 90%.
// For displacement, the smallest constant that fits our needs is 90,
// so we round that up to 128.
//
// At a load factor of α, the odds of finding the target bucket after exactly n
// unsuccessful probes[1] are
//
// Pr_α{displacement = n} =
// (1 - α) / α * ∑_{k≥1} e^(-kα) * (kα)^(k+n) / (k + n)! * (1 - kα / (k + n + 1))
//
// We use this formula to find the probability of triggering the adaptive behavior
//
// Pr_0.909{displacement > 128} = 1.601 * 10^-11
//
// 1. Alfredo Viola (2005). Distributional analysis of Robin Hood linear probing
// hashing with buckets.
/// A hash map implemented with linear probing and Robin Hood bucket stealing.
///
/// By default, `HashMap` uses a hashing algorithm selected to provide
/// resistance against HashDoS attacks. The algorithm is randomly seeded, and a
/// reasonable best-effort is made to generate this seed from a high quality,
/// secure source of randomness provided by the host without blocking the
/// program. Because of this, the randomness of the seed depends on the output
/// quality of the system's random number generator when the seed is created.
/// In particular, seeds generated when the system's entropy pool is abnormally
/// low such as during system boot may be of a lower quality.
///
/// The default hashing algorithm is currently SipHash 1-3, though this is
/// subject to change at any point in the future. While its performance is very
/// competitive for medium sized keys, other hashing algorithms will outperform
/// it for small keys such as integers as well as large keys such as long
/// strings, though those algorithms will typically *not* protect against
/// attacks such as HashDoS.
///
/// The hashing algorithm can be replaced on a per-`HashMap` basis using the
/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. Many
/// alternative algorithms are available on crates.io, such as the [`fnv`] crate.
///
/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although
/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`.
/// If you implement these yourself, it is important that the following
/// property holds:
///
/// ```text
/// k1 == k2 -> hash(k1) == hash(k2)
/// ```
///
/// In other words, if two keys are equal, their hashes must be equal.
///
/// It is a logic error for a key to be modified in such a way that the key's
/// hash, as determined by the [`Hash`] trait, or its equality, as determined by
/// the [`Eq`] trait, changes while it is in the map. This is normally only
/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
///
/// Relevant papers/articles:
///
/// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
/// 2. Emmanuel Goossaert. ["Robin Hood
/// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
/// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
/// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// // Type inference lets us omit an explicit type signature (which
/// // would be `HashMap<String, String>` in this example).
/// let mut book_reviews = HashMap::new();
///
/// // Review some books.
/// book_reviews.insert(
/// "Adventures of Huckleberry Finn".to_string(),
/// "My favorite book.".to_string(),
/// );
/// book_reviews.insert(
/// "Grimms' Fairy Tales".to_string(),
/// "Masterpiece.".to_string(),
/// );
/// book_reviews.insert(
/// "Pride and Prejudice".to_string(),
/// "Very enjoyable.".to_string(),
/// );
/// book_reviews.insert(
/// "The Adventures of Sherlock Holmes".to_string(),
/// "Eye lyked it alot.".to_string(),
/// );
///
/// // Check for a specific one.
/// // When collections store owned values (String), they can still be
/// // queried using references (&str).
/// if !book_reviews.contains_key("Les Misérables") {
/// println!("We've got {} reviews, but Les Misérables ain't one.",
/// book_reviews.len());
/// }
///
/// // oops, this review has a lot of spelling mistakes, let's delete it.
/// book_reviews.remove("The Adventures of Sherlock Holmes");
///
/// // Look up the values associated with some keys.
/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
/// for &book in &to_find {
/// match book_reviews.get(book) {
/// Some(review) => println!("{}: {}", book, review),
/// None => println!("{} is unreviewed.", book)
/// }
/// }
///
/// // Iterate over everything.
/// for (book, review) in &book_reviews {
/// println!("{}: \"{}\"", book, review);
/// }
/// ```
///
/// `HashMap` also implements an [`Entry API`](#method.entry), which allows
/// for more complex methods of getting, setting, updating and removing keys and
/// their values:
///
/// ```
/// use std::collections::HashMap;
///
/// // type inference lets us omit an explicit type signature (which
/// // would be `HashMap<&str, u8>` in this example).
/// let mut player_stats = HashMap::new();
///
/// fn random_stat_buff() -> u8 {
/// // could actually return some random value here - let's just return
/// // some fixed value for now
/// 42
/// }
///
/// // insert a key only if it doesn't already exist
/// player_stats.entry("health").or_insert(100);
///
/// // insert a key using a function that provides a new value only if it
/// // doesn't already exist
/// player_stats.entry("defence").or_insert_with(random_stat_buff);
///
/// // update a key, guarding against the key possibly not being set
/// let stat = player_stats.entry("attack").or_insert(100);
/// *stat += random_stat_buff();
/// ```
///
/// The easiest way to use `HashMap` with a custom type as key is to derive [`Eq`] and [`Hash`].
/// We must also derive [`PartialEq`].
///
/// [`Eq`]: ../../std/cmp/trait.Eq.html
/// [`Hash`]: ../../std/hash/trait.Hash.html
/// [`PartialEq`]: ../../std/cmp/trait.PartialEq.html
/// [`RefCell`]: ../../std/cell/struct.RefCell.html
/// [`Cell`]: ../../std/cell/struct.Cell.html
/// [`default`]: #method.default
/// [`with_hasher`]: #method.with_hasher
/// [`with_capacity_and_hasher`]: #method.with_capacity_and_hasher
/// [`fnv`]: https://crates.io/crates/fnv
///
/// ```
/// use std::collections::HashMap;
///
/// #[derive(Hash, Eq, PartialEq, Debug)]
/// struct Viking {
/// name: String,
/// country: String,
/// }
///
/// impl Viking {
/// /// Create a new Viking.
/// fn new(name: &str, country: &str) -> Viking {
/// Viking { name: name.to_string(), country: country.to_string() }
/// }
/// }
///
/// // Use a HashMap to store the vikings' health points.
/// let mut vikings = HashMap::new();
///
/// vikings.insert(Viking::new("Einar", "Norway"), 25);
/// vikings.insert(Viking::new("Olaf", "Denmark"), 24);
/// vikings.insert(Viking::new("Harald", "Iceland"), 12);
///
/// // Use derived implementation to print the status of the vikings.
/// for (viking, health) in &vikings {
/// println!("{:?} has {} hp", viking, health);
/// }
/// ```
///
/// A `HashMap` with fixed list of elements can be initialized from an array:
///
/// ```
/// use std::collections::HashMap;
///
/// fn main() {
/// let timber_resources: HashMap<&str, i32> =
/// [("Norway", 100),
/// ("Denmark", 50),
/// ("Iceland", 10)]
/// .iter().cloned().collect();
/// // use the values stored in map
/// }
/// ```
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct HashMap<K, V, S = RandomState> {
// All hashes are keyed on these values, to prevent hash collision attacks.
hash_builder: S,
// Open-addressed Robin Hood table holding the (hash, key, value) triples.
table: RawTable<K, V>,
// Translates between user-visible capacity and raw table capacity,
// accounting for the load factor.
resize_policy: DefaultResizePolicy,
}
/// Search for a pre-hashed key.
/// If you don't already know the hash, use search or search_mut instead.
///
/// This wrapper exists solely to handle the zero-capacity case: `Bucket::new`
/// must never see an empty table's raw bucket, so we short-circuit before the
/// non-empty fast path is entered.
#[inline]
fn search_hashed<K, V, M, F>(table: M, hash: SafeHash, is_match: F) -> InternalEntry<K, V, M>
    where M: Deref<Target = RawTable<K, V>>,
          F: FnMut(&K) -> bool
{
    // A capacity of zero means there are no buckets at all — report that
    // directly rather than letting Bucket::new touch invalid state.
    match table.capacity() {
        0 => InternalEntry::TableIsEmpty,
        _ => search_hashed_nonempty(table, hash, is_match, true),
    }
}
/// Search for a pre-hashed key when the hash map is known to be non-empty.
///
/// Walks the Robin Hood probe sequence from `hash`'s ideal bucket and returns:
/// * `Occupied` — a full bucket whose hash (unless `compare_hashes` is false)
///   and key both match;
/// * `Vacant`/`NoElem` — an empty bucket was reached first;
/// * `Vacant`/`NeqElem` — a bucket "richer" than our probe (smaller
///   displacement) was reached, so by the Robin Hood invariant the key
///   cannot appear further along the sequence.
///
/// The caller must ensure `table.capacity() > 0` (see `search_hashed`).
#[inline]
fn search_hashed_nonempty<K, V, M, F>(table: M, hash: SafeHash, mut is_match: F,
compare_hashes: bool)
-> InternalEntry<K, V, M>
where M: Deref<Target = RawTable<K, V>>,
F: FnMut(&K) -> bool
{
// Do not check the capacity as an extra branch could slow the lookup.
let size = table.size();
let mut probe = Bucket::new(table, hash);
// Buckets probed so far; equals the displacement the key would have if
// inserted at the current position.
let mut displacement = 0;
loop {
let full = match probe.peek() {
Empty(bucket) => {
// Found a hole!
return InternalEntry::Vacant {
hash,
elem: NoElem(bucket, displacement),
};
}
Full(bucket) => bucket,
};
let probe_displacement = full.displacement();
if probe_displacement < displacement {
// Found a luckier bucket than me.
// We can finish the search early if we hit any bucket
// with a lower distance to initial bucket than we've probed.
return InternalEntry::Vacant {
hash,
elem: NeqElem(full, probe_displacement),
};
}
// If the hash doesn't match, it can't be this one..
if !compare_hashes || hash == full.hash() {
// If the key doesn't match, it can't be this one..
if is_match(full.read().0) {
return InternalEntry::Occupied { elem: full };
}
}
displacement += 1;
probe = full.next();
// A probe sequence can never exceed the number of stored elements.
debug_assert!(displacement <= size);
}
}
/// Same as `search_hashed_nonempty` but for mutable access.
///
/// Kept in lockstep with the immutable version above; the only intended
/// differences are the `DerefMut` bound and the use of `read_mut`.
#[inline]
fn search_hashed_nonempty_mut<K, V, M, F>(table: M, hash: SafeHash, mut is_match: F,
compare_hashes: bool)
-> InternalEntry<K, V, M>
where M: DerefMut<Target = RawTable<K, V>>,
F: FnMut(&K) -> bool
{
// Do not check the capacity as an extra branch could slow the lookup.
let size = table.size();
let mut probe = Bucket::new(table, hash);
// Buckets probed so far; equals the displacement the key would have if
// inserted at the current position.
let mut displacement = 0;
loop {
let mut full = match probe.peek() {
Empty(bucket) => {
// Found a hole!
return InternalEntry::Vacant {
hash,
elem: NoElem(bucket, displacement),
};
}
Full(bucket) => bucket,
};
let probe_displacement = full.displacement();
if probe_displacement < displacement {
// Found a luckier bucket than me.
// We can finish the search early if we hit any bucket
// with a lower distance to initial bucket than we've probed.
return InternalEntry::Vacant {
hash,
elem: NeqElem(full, probe_displacement),
};
}
// If the hash doesn't match, it can't be this one..
// NOTE: test the cheap `compare_hashes` flag first, in the same order
// as the immutable version; a disabled comparison then skips the hash
// read entirely.
if !compare_hashes || hash == full.hash() {
// If the key doesn't match, it can't be this one..
if is_match(full.read_mut().0) {
return InternalEntry::Occupied { elem: full };
}
}
displacement += 1;
probe = full.next();
// A probe sequence can never exceed the number of stored elements.
debug_assert!(displacement <= size);
}
}
/// Removes the element in `starting_bucket` and restores the Robin Hood
/// invariant via backward-shift deletion: every subsequent element with a
/// non-zero displacement is shifted one bucket back toward its ideal slot,
/// closing the gap left by the removal.
///
/// Returns the removed key/value plus the table so the caller can keep using it.
fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>)
-> (K, V, &mut RawTable<K, V>)
{
// Empty the target bucket, keeping the key/value to hand back later.
let (empty, retkey, retval) = starting_bucket.take();
// `gap_peek` fails (Err) when the next bucket is empty — nothing to shift.
let mut gap = match empty.gap_peek() {
Ok(b) => b,
Err(b) => return (retkey, retval, b.into_table()),
};
// Keep shifting while the bucket after the gap is displaced from its ideal
// slot; a displacement of 0 means it is already home and must not move.
while gap.full().displacement() != 0 {
gap = match gap.shift() {
Ok(b) => b,
Err(b) => {
return (retkey, retval, b.into_table());
},
};
}
// Now we've done all our shifting. Return the value we grabbed earlier.
(retkey, retval, gap.into_table())
}
/// Perform robin hood bucket stealing at the given `bucket`. You must
/// also pass that bucket's displacement so we don't have to recalculate it.
///
/// `hash`, `key`, and `val` are the elements to "robin hood" into the hashtable.
///
/// The displaced occupant is carried forward and re-inserted by the same
/// rule, cascading until an empty bucket absorbs the final displaced element.
/// Returns the bucket where the *original* element ended up.
fn robin_hood<'a, K: 'a, V: 'a>(bucket: FullBucketMut<'a, K, V>,
mut displacement: usize,
mut hash: SafeHash,
mut key: K,
mut val: V)
-> FullBucketMut<'a, K, V> {
let size = bucket.table().size();
let raw_capacity = bucket.table().capacity();
// There can be at most `size - dib` buckets to displace, because
// in the worst case, there are `size` elements and we already are
// `displacement` buckets away from the initial one.
let idx_end = (bucket.index() + size - bucket.displacement()) % raw_capacity;
// Save the *starting point*.
let mut bucket = bucket.stash();
loop {
// Steal the current bucket; the previous occupant becomes the element
// we now need to find a home for.
let (old_hash, old_key, old_val) = bucket.replace(hash, key, val);
hash = old_hash;
key = old_key;
val = old_val;
// Probe forward for a bucket the displaced element may take.
loop {
displacement += 1;
let probe = bucket.next();
// The bound derived above guarantees the scan terminates.
debug_assert!(probe.index() != idx_end);
let full_bucket = match probe.peek() {
Empty(bucket) => {
// Found a hole!
let bucket = bucket.put(hash, key, val);
// Now that it's stolen, just read the value's pointer
// right out of the table! Go back to the *starting point*.
//
// This use of `into_table` is misleading. It turns the
// bucket, which is a FullBucket on top of a
// FullBucketMut, into just one FullBucketMut. The "table"
// refers to the inner FullBucketMut in this context.
return bucket.into_table();
}
Full(bucket) => bucket,
};
let probe_displacement = full_bucket.displacement();
bucket = full_bucket;
// Robin hood! Steal the spot.
if probe_displacement < displacement {
displacement = probe_displacement;
break;
}
}
}
}
impl<K, V, S> HashMap<K, V, S>
where K: Eq + Hash,
S: BuildHasher
{
/// Hashes `x` with this map's `BuildHasher`.
fn make_hash<X: ?Sized>(&self, x: &X) -> SafeHash
where X: Hash
{
table::make_hash(&self.hash_builder, x)
}
/// Search for a key, yielding the index if it's found in the hashtable.
/// If you already have the hash for the key lying around, or if you need an
/// InternalEntry, use search_hashed or search_hashed_nonempty.
#[inline]
fn search<'a, Q: ?Sized>(&'a self, q: &Q)
-> Option<FullBucket<K, V, &'a RawTable<K, V>>>
where K: Borrow<Q>,
Q: Eq + Hash
{
// The empty check upholds `search_hashed_nonempty`'s precondition that
// the table has nonzero capacity.
if self.is_empty() {
return None;
}
let hash = self.make_hash(q);
search_hashed_nonempty(&self.table, hash, |k| q.eq(k.borrow()), true)
.into_occupied_bucket()
}
/// Mutable counterpart of `search`; yields a bucket with mutable table access.
#[inline]
fn search_mut<'a, Q: ?Sized>(&'a mut self, q: &Q)
-> Option<FullBucket<K, V, &'a mut RawTable<K, V>>>
where K: Borrow<Q>,
Q: Eq + Hash
{
// The empty check upholds `search_hashed_nonempty`'s precondition that
// the table has nonzero capacity.
if self.is_empty() {
return None;
}
let hash = self.make_hash(q);
search_hashed_nonempty(&mut self.table, hash, |k| q.eq(k.borrow()), true)
.into_occupied_bucket()
}
// The caller should ensure that invariants by Robin Hood Hashing hold
// and that there's space in the underlying table.
//
// Used by resize: keys arrive already in Robin Hood order, so a linear
// scan for the first empty bucket is sufficient — no hash comparisons
// or displacement checks needed.
fn insert_hashed_ordered(&mut self, hash: SafeHash, k: K, v: V) {
let mut buckets = Bucket::new(&mut self.table, hash);
let start_index = buckets.index();
loop {
// We don't need to compare hashes for value swap.
// Not even DIBs for Robin Hood.
buckets = match buckets.peek() {
Empty(empty) => {
empty.put(hash, k, v);
return;
}
Full(b) => b.into_bucket(),
};
buckets.next();
// The caller guaranteed free space, so we must find a hole before
// wrapping all the way around.
debug_assert!(buckets.index() != start_index);
}
}
}
impl<K: Hash + Eq, V> HashMap<K, V, RandomState> {
    /// Creates an empty `HashMap`.
    ///
    /// No memory is allocated up front: the map starts with a capacity of 0
    /// and only allocates once something is inserted.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// let mut map: HashMap<&str, i32> = HashMap::new();
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new() -> HashMap<K, V, RandomState> {
        Default::default()
    }
    /// Creates an empty `HashMap` able to hold at least `capacity` elements
    /// without reallocating.
    ///
    /// Passing a `capacity` of 0 performs no allocation at all.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// let mut map: HashMap<&str, i32> = HashMap::with_capacity(10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(capacity: usize) -> HashMap<K, V, RandomState> {
        // Delegate with a freshly seeded default hasher.
        let hash_builder = Default::default();
        HashMap::with_capacity_and_hasher(capacity, hash_builder)
    }
}
impl<K, V, S> HashMap<K, V, S>
where K: Eq + Hash,
S: BuildHasher
{
/// Creates an empty `HashMap` which will use the given hash builder to hash
/// keys.
///
/// The created map has the default initial capacity (no allocation).
///
/// Warning: `hash_builder` is normally randomly generated, and
/// is designed to allow HashMaps to be resistant to attacks that
/// cause many collisions and very poor performance. Setting it
/// manually using this function can expose a DoS attack vector.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use std::collections::hash_map::RandomState;
///
/// let s = RandomState::new();
/// let mut map = HashMap::with_hasher(s);
/// map.insert(1, 2);
/// ```
#[inline]
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
pub fn with_hasher(hash_builder: S) -> HashMap<K, V, S> {
    // A zero-sized raw table: nothing is allocated until the first insert.
    HashMap {
        table: RawTable::new(0),
        resize_policy: DefaultResizePolicy::new(),
        hash_builder,
    }
}
/// Creates an empty `HashMap` with the specified capacity, using `hash_builder`
/// to hash the keys.
///
/// The map will hold at least `capacity` elements before it reallocates;
/// a `capacity` of 0 allocates nothing.
///
/// Warning: `hash_builder` is normally randomly generated, and
/// is designed to allow HashMaps to be resistant to attacks that
/// cause many collisions and very poor performance. Setting it
/// manually using this function can expose a DoS attack vector.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use std::collections::hash_map::RandomState;
///
/// let s = RandomState::new();
/// let mut map = HashMap::with_capacity_and_hasher(10, s);
/// map.insert(1, 2);
/// ```
#[inline]
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> HashMap<K, V, S> {
    let resize_policy = DefaultResizePolicy::new();
    // Translate the user-visible capacity into a raw (power-of-two)
    // table capacity honoring the load factor.
    let raw_cap = resize_policy.raw_capacity(capacity);
    HashMap {
        table: RawTable::new(raw_cap),
        resize_policy,
        hash_builder,
    }
}
/// Returns a reference to the map's [`BuildHasher`].
///
/// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use std::collections::hash_map::RandomState;
///
/// let hasher = RandomState::new();
/// let map: HashMap<i32, i32> = HashMap::with_hasher(hasher);
/// let hasher: &RandomState = map.hasher();
/// ```
#[stable(feature = "hashmap_public_hasher", since = "1.9.0")]
pub fn hasher(&self) -> &S {
&self.hash_builder
}
/// Returns the number of elements the map can hold without reallocating.
///
/// This number is a lower bound; the `HashMap<K, V>` might be able to hold
/// more, but is guaranteed to be able to hold at least this many.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// let map: HashMap<i32, i32> = HashMap::with_capacity(100);
/// assert!(map.capacity() >= 100);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
    // The resize policy converts the raw (power-of-two) capacity back into
    // the user-visible capacity, accounting for the load factor.
    let raw = self.raw_capacity();
    self.resize_policy.capacity(raw)
}
/// Returns the hash map's raw capacity (the number of buckets in the
/// underlying table, before the load factor is applied).
#[inline]
fn raw_capacity(&self) -> usize {
    let table = &self.table;
    table.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// in the `HashMap`. The collection may reserve more space to avoid
/// frequent reallocations.
///
/// # Panics
///
/// Panics if the new allocation size overflows [`usize`].
///
/// [`usize`]: ../../std/primitive.usize.html
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// let mut map: HashMap<&str, i32> = HashMap::new();
/// map.reserve(10);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
    // Infallible reservation: turn the only possible error into the
    // documented panic; the allocator error cannot occur on this path.
    if let Err(err) = self.reserve_internal(additional, Infallible) {
        match err {
            CollectionAllocErr::CapacityOverflow => panic!("capacity overflow"),
            CollectionAllocErr::AllocErr => unreachable!(),
        }
    }
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
/// in the given `HashMap<K,V>`. The collection may reserve more space to avoid
/// frequent reallocations.
///
/// # Errors
///
/// Returns an error if the capacity computation overflows or the allocator
/// reports a failure; the map is left unchanged in that case.
///
/// # Examples
///
/// ```
/// #![feature(try_reserve)]
/// use std::collections::HashMap;
/// let mut map: HashMap<&str, isize> = HashMap::new();
/// map.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?");
/// ```
#[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
    // Same machinery as `reserve`, but errors are surfaced to the caller
    // instead of panicking.
    self.reserve_internal(additional, Fallible)
}
// Shared implementation of `reserve` / `try_reserve`.
//
// Grows the table when `additional` elements would not fit, and also
// performs the adaptive early resize (see DISPLACEMENT_THRESHOLD notes
// above) when the table has been tagged for long probe sequences and is
// at least half full.
#[inline]
fn reserve_internal(&mut self, additional: usize, fallibility: Fallibility)
-> Result<(), CollectionAllocErr> {
let remaining = self.capacity() - self.len(); // this can't overflow
if remaining < additional {
// Not enough room: compute the minimum capacity needed, guarding the
// addition against overflow, then resize.
let min_cap = self.len()
.checked_add(additional)
.ok_or(CollectionAllocErr::CapacityOverflow)?;
let raw_cap = self.resize_policy.try_raw_capacity(min_cap)?;
self.try_resize(raw_cap, fallibility)?;
} else if self.table.tag() && remaining <= self.len() {
// Probe sequence is too long and table is half full,
// resize early to reduce probing length.
let new_capacity = self.table.capacity() * 2;
self.try_resize(new_capacity, fallibility)?;
}
Ok(())
}
/// Resizes the internal vectors to a new capacity. It's your
/// responsibility to:
/// 1) Ensure `new_raw_cap` is enough for all the elements, accounting
/// for the load factor.
/// 2) Ensure `new_raw_cap` is a power of two or zero.
///
/// Implements the fast growth algorithm described in the header comments:
/// elements are moved in linear order starting from the first ideal
/// (displacement-0) element, which provably reproduces a naive reinsertion.
#[inline(never)]
#[cold]
fn try_resize(
&mut self,
new_raw_cap: usize,
fallibility: Fallibility,
) -> Result<(), CollectionAllocErr> {
assert!(self.table.size() <= new_raw_cap);
assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0);
// Swap in the fresh table; `old_table` now owns all existing elements.
let mut old_table = replace(
&mut self.table,
match fallibility {
Infallible => RawTable::new(new_raw_cap),
Fallible => RawTable::try_new(new_raw_cap)?,
}
);
let old_size = old_table.size();
if old_table.size() == 0 {
return Ok(());
}
// Start at the first element with displacement 0, skipping the initial
// cluster (required for correctness of the ordered reinsertion).
let mut bucket = Bucket::head_bucket(&mut old_table);
// This is how the buckets might be laid out in memory:
// ($ marks an initialized bucket)
// ________________
// |$$$_$$$$$$_$$$$$|
//
// But we've skipped the entire initial cluster of buckets
// and will continue iteration in this order:
// ________________
// |$$$$$$_$$$$$
// ^ wrap around once end is reached
// ________________
// $$$_____________|
// ^ exit once table.size == 0
loop {
bucket = match bucket.peek() {
Full(bucket) => {
let h = bucket.hash();
// `take` removes the element from the old table, so the old
// table's size counts down to 0 as we go.
let (b, k, v) = bucket.take();
self.insert_hashed_ordered(h, k, v);
if b.table().size() == 0 {
break;
}
b.into_bucket()
}
Empty(b) => b.into_bucket(),
};
bucket.next();
}
// Every element must have been moved across.
assert_eq!(self.table.size(), old_size);
Ok(())
}
/// Shrinks the capacity of the map as much as possible. It will drop
/// down as much as possible while maintaining the internal rules
/// and possibly leaving some space in accordance with the resize policy.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map: HashMap<i32, i32> = HashMap::with_capacity(100);
/// map.insert(1, 2);
/// map.insert(3, 4);
/// assert!(map.capacity() >= 100);
/// map.shrink_to_fit();
/// assert!(map.capacity() >= 2);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn shrink_to_fit(&mut self) {
let new_raw_cap = self.resize_policy.raw_capacity(self.len());
if self.raw_capacity() != new_raw_cap {
let old_table = replace(&mut self.table, RawTable::new(new_raw_cap));
let old_size = old_table.size();
// Shrink the table. Naive algorithm for resizing:
for (h, k, v) in old_table.into_iter() {
self.insert_hashed_nocheck(h, k, v);
}
debug_assert_eq!(self.table.size(), old_size);
}
}
/// Shrinks the capacity of the map with a lower limit. It will drop
/// down no lower than the supplied limit while maintaining the internal rules
/// and possibly leaving some space in accordance with the resize policy.
///
/// Panics if the current capacity is smaller than the supplied
/// minimum capacity.
///
/// # Examples
///
/// ```
/// #![feature(shrink_to)]
/// use std::collections::HashMap;
///
/// let mut map: HashMap<i32, i32> = HashMap::with_capacity(100);
/// map.insert(1, 2);
/// map.insert(3, 4);
/// assert!(map.capacity() >= 100);
/// map.shrink_to(10);
/// assert!(map.capacity() >= 10);
/// map.shrink_to(0);
/// assert!(map.capacity() >= 2);
/// ```
#[unstable(feature = "shrink_to", reason = "new API", issue="56431")]
pub fn shrink_to(&mut self, min_capacity: usize) {
    assert!(self.capacity() >= min_capacity, "Tried to shrink to a larger capacity");
    // Target the larger of the element count and the requested floor.
    let target = max(self.len(), min_capacity);
    let new_raw_cap = self.resize_policy.raw_capacity(target);
    if new_raw_cap == self.raw_capacity() {
        return;
    }
    // Swap in a smaller table and rehash everything into it (naive
    // reinsertion — shrinking is not on any hot path).
    let old_table = replace(&mut self.table, RawTable::new(new_raw_cap));
    let old_size = old_table.size();
    for (h, k, v) in old_table.into_iter() {
        self.insert_hashed_nocheck(h, k, v);
    }
    debug_assert_eq!(self.table.size(), old_size);
}
/// Inserts a pre-hashed key-value pair without first checking that the
/// buckets have room — the caller must have reserved space already.
///
/// Returns the previous value if `k` was present (leaving the table
/// structure untouched), or `None` after inserting a fresh entry.
fn insert_hashed_nocheck(&mut self, hash: SafeHash, k: K, v: V) -> Option<V> {
    let found = search_hashed(&mut self.table, hash, |key| *key == k).into_entry(k);
    match found {
        Some(Occupied(mut elem)) => {
            // Key already present: swap in the new value, hand back the old.
            Some(elem.insert(v))
        }
        Some(Vacant(elem)) => {
            elem.insert(v);
            None
        }
        // The caller reserved space, so the table cannot be empty/full here.
        None => unreachable!(),
    }
}
/// An iterator visiting all keys in arbitrary order.
/// The iterator element type is `&'a K`.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// for key in map.keys() {
///     println!("{}", key);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn keys(&self) -> Keys<K, V> {
    // Thin adapter over the full-pair iterator that projects out the keys.
    let inner = self.iter();
    Keys { inner }
}
/// An iterator visiting all values in arbitrary order.
/// The iterator element type is `&'a V`.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// for val in map.values() {
///     println!("{}", val);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn values(&self) -> Values<K, V> {
    // Thin adapter over the full-pair iterator that projects out the values.
    let inner = self.iter();
    Values { inner }
}
/// An iterator visiting all values mutably in arbitrary order.
/// The iterator element type is `&'a mut V`.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
///
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// for val in map.values_mut() {
///     *val = *val + 10;
/// }
///
/// for val in map.values() {
///     println!("{}", val);
/// }
/// ```
#[stable(feature = "map_values_mut", since = "1.10.0")]
pub fn values_mut(&mut self) -> ValuesMut<K, V> {
    // Mutable projection over the pair iterator; keys stay immutable.
    let inner = self.iter_mut();
    ValuesMut { inner }
}
/// An iterator visiting all key-value pairs in arbitrary order.
/// The iterator element type is `(&'a K, &'a V)`.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// for (key, val) in map.iter() {
///     println!("key: {} val: {}", key, val);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<K, V> {
    // Wrap the raw table's bucket iterator in the public `Iter` type.
    let inner = self.table.iter();
    Iter { inner }
}
/// An iterator visiting all key-value pairs in arbitrary order,
/// with mutable references to the values.
/// The iterator element type is `(&'a K, &'a mut V)`.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
///     *val *= 2;
/// }
///
/// for (key, val) in &map {
///     println!("key: {} val: {}", key, val);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
    // Wrap the raw table's mutable bucket iterator in the public type.
    let inner = self.table.iter_mut();
    IterMut { inner }
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut letters = HashMap::new();
///
/// for ch in "a short treatise on fungi".chars() {
///     let counter = letters.entry(ch).or_insert(0);
///     *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn entry(&mut self, key: K) -> Entry<K, V> {
    // Resize up front: the returned entry captures a bucket position into
    // the table, so the table must not need to grow while it is alive.
    self.reserve(1);
    let hash = self.make_hash(&key);
    // `into_entry` is `None` only when the table has no buckets
    // (`InternalEntry::TableIsEmpty`), which `reserve(1)` just ruled out.
    search_hashed(&mut self.table, hash, |q| q.eq(&key))
        .into_entry(key).expect("unreachable")
}
/// Returns the number of elements in the map.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut a = HashMap::new();
/// assert_eq!(a.len(), 0);
/// a.insert(1, "a");
/// assert_eq!(a.len(), 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize {
    // The raw table maintains the live element count; O(1).
    self.table.size()
}
/// Returns `true` if the map holds no elements at all.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut a = HashMap::new();
/// assert!(a.is_empty());
/// a.insert(1, "a");
/// assert!(!a.is_empty());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
    // Equivalent to `self.len() == 0`; reads the table's count directly.
    self.table.size() == 0
}
/// Empties the map, handing back every key-value pair through an iterator.
/// The allocated memory is kept for reuse.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut a = HashMap::new();
/// a.insert(1, "a");
/// a.insert(2, "b");
///
/// for (k, v) in a.drain().take(1) {
///     assert!(k == 1 || k == 2);
///     assert!(v == "a" || v == "b");
/// }
///
/// assert!(a.is_empty());
/// ```
#[inline]
#[stable(feature = "drain", since = "1.6.0")]
pub fn drain(&mut self) -> Drain<K, V> {
    // The raw table's drain does the heavy lifting; wrap it for the public API.
    let inner = self.table.drain();
    Drain { inner }
}
/// Removes every key-value pair from the map. The allocated memory is
/// retained for future inserts.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut a = HashMap::new();
/// a.insert(1, "a");
/// a.clear();
/// assert!(a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn clear(&mut self) {
    // Dropping the drain iterator removes all remaining entries
    // while keeping the table's allocation.
    drop(self.drain());
}
/// Returns a reference to the value stored for `k`, if any.
///
/// The key may be any borrowed form of the map's key type, but
/// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
/// the key type.
///
/// [`Eq`]: ../../std/cmp/trait.Eq.html
/// [`Hash`]: ../../std/hash/trait.Hash.html
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert(1, "a");
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
    where K: Borrow<Q>,
          Q: Hash + Eq
{
    // Find the full bucket for the key, then project out the value.
    let bucket = self.search(k)?;
    Some(bucket.into_refs().1)
}
/// Returns the key-value pair stored for the supplied key, if any.
///
/// The supplied key may be any borrowed form of the map's key type, but
/// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
/// the key type.
///
/// [`Eq`]: ../../std/cmp/trait.Eq.html
/// [`Hash`]: ../../std/hash/trait.Hash.html
///
/// # Examples
///
/// ```
/// #![feature(map_get_key_value)]
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert(1, "a");
/// assert_eq!(map.get_key_value(&1), Some((&1, &"a")));
/// assert_eq!(map.get_key_value(&2), None);
/// ```
#[unstable(feature = "map_get_key_value", issue = "49347")]
pub fn get_key_value<Q: ?Sized>(&self, k: &Q) -> Option<(&K, &V)>
    where K: Borrow<Q>,
          Q: Hash + Eq
{
    // Like `get`, but hand back both halves of the bucket's pair.
    let bucket = self.search(k)?;
    Some(bucket.into_refs())
}
/// Returns `true` if the map has a value stored for the specified key.
///
/// The key may be any borrowed form of the map's key type, but
/// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
/// the key type.
///
/// [`Eq`]: ../../std/cmp/trait.Eq.html
/// [`Hash`]: ../../std/hash/trait.Hash.html
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert(1, "a");
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
    where K: Borrow<Q>,
          Q: Hash + Eq
{
    // A key is present exactly when a lookup for it succeeds.
    self.get(k).is_some()
}
/// Returns a mutable reference to the value stored for `k`, if any.
///
/// The key may be any borrowed form of the map's key type, but
/// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
/// the key type.
///
/// [`Eq`]: ../../std/cmp/trait.Eq.html
/// [`Hash`]: ../../std/hash/trait.Hash.html
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert(1, "a");
/// if let Some(x) = map.get_mut(&1) {
///     *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
    where K: Borrow<Q>,
          Q: Hash + Eq
{
    // Mutable search, then project the bucket onto its value slot.
    let bucket = self.search_mut(k)?;
    Some(bucket.into_mut_refs().1)
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, [`None`] is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical. See the [module-level
/// documentation] for more.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [module-level documentation]: index.html#insert-and-complex-keys
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// assert_eq!(map.insert(37, "a"), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b");
/// assert_eq!(map.insert(37, "c"), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn insert(&mut self, k: K, v: V) -> Option<V> {
    // Hash first, then guarantee room for one more element before the
    // hashed insert (which assumes capacity is available).
    let hash = self.make_hash(&k);
    self.reserve(1);
    self.insert_hashed_nocheck(hash, k, v)
}
/// Removes a key from the map, returning the value previously stored
/// there, if any.
///
/// The key may be any borrowed form of the map's key type, but
/// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
/// the key type.
///
/// [`Eq`]: ../../std/cmp/trait.Eq.html
/// [`Hash`]: ../../std/hash/trait.Hash.html
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
    where K: Borrow<Q>,
          Q: Hash + Eq
{
    // Locate the bucket, pop it out of the table, and keep only the value.
    let bucket = self.search_mut(k)?;
    let (_, value, _) = pop_internal(bucket);
    Some(value)
}
/// Removes a key from the map, returning both the stored key and the
/// stored value if the key was present.
///
/// The key may be any borrowed form of the map's key type, but
/// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
/// the key type.
///
/// [`Eq`]: ../../std/cmp/trait.Eq.html
/// [`Hash`]: ../../std/hash/trait.Hash.html
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// # fn main() {
/// let mut map = HashMap::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove_entry(&1), Some((1, "a")));
/// assert_eq!(map.remove(&1), None);
/// # }
/// ```
#[stable(feature = "hash_map_remove_entry", since = "1.27.0")]
pub fn remove_entry<Q: ?Sized>(&mut self, k: &Q) -> Option<(K, V)>
    where K: Borrow<Q>,
          Q: Hash + Eq
{
    // Same as `remove`, but the owned key is surfaced along with the value.
    let bucket = self.search_mut(k)?;
    let (key, value, _) = pop_internal(bucket);
    Some((key, value))
}
/// Retains only the elements specified by the predicate.
///
/// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns `false`.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
///
/// let mut map: HashMap<i32, i32> = (0..8).map(|x|(x, x*10)).collect();
/// map.retain(|&k, _| k % 2 == 0);
/// assert_eq!(map.len(), 4);
/// ```
#[stable(feature = "retain_hash_collection", since = "1.18.0")]
pub fn retain<F>(&mut self, mut f: F)
    where F: FnMut(&K, &mut V) -> bool
{
    // An empty table has no head bucket to start from.
    if self.table.size() == 0 {
        return;
    }
    let mut elems_left = self.table.size();
    let mut bucket = Bucket::head_bucket(&mut self.table);
    bucket.prev();
    // Remember where the walk began so progress can be asserted below.
    let start_index = bucket.index();
    while elems_left != 0 {
        bucket = match bucket.peek() {
            Full(mut full) => {
                elems_left -= 1;
                // Scope the mutable read so `full` can be consumed below.
                let should_remove = {
                    let (k, v) = full.read_mut();
                    !f(k, v)
                };
                if should_remove {
                    // Capture the raw position before popping; removal
                    // consumes the bucket and hands back the table.
                    let prev_raw = full.raw();
                    let (_, _, t) = pop_internal(full);
                    Bucket::new_from(prev_raw, t)
                } else {
                    full.into_bucket()
                }
            },
            Empty(b) => {
                b.into_bucket()
            }
        };
        // Reverse iteration: stepping backwards after each peek — NOTE(review):
        // presumably so `pop_internal`'s entry shifting cannot skip elements.
        bucket.prev();
        // Either all elements have been visited, or we haven't wrapped
        // back around to the starting index.
        debug_assert!(elems_left == 0 || bucket.index() != start_index);
    }
}
}
impl<K, V, S> HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    /// Creates a raw entry builder for the HashMap.
    ///
    /// Raw entries provide the lowest level of control for searching and
    /// manipulating a map. They must be manually initialized with a hash and
    /// then manually searched. After this, insertions into a vacant entry
    /// still require an owned key to be provided.
    ///
    /// Raw entries are useful for such exotic situations as:
    ///
    /// * Hash memoization
    /// * Deferring the creation of an owned key until it is known to be required
    /// * Using a search key that doesn't work with the Borrow trait
    /// * Using custom comparison logic without newtype wrappers
    ///
    /// Because raw entries provide much more low-level control, it's much easier
    /// to put the HashMap into an inconsistent state which, while memory-safe,
    /// will cause the map to produce seemingly random results. Higher-level and
    /// more foolproof APIs like `entry` should be preferred when possible.
    ///
    /// In particular, the hash used to initialized the raw entry must still be
    /// consistent with the hash of the key that is ultimately stored in the entry.
    /// This is because implementations of HashMap may need to recompute hashes
    /// when resizing, at which point only the keys are available.
    ///
    /// Raw entries give mutable access to the keys. This must not be used
    /// to modify how the key would compare or hash, as the map will not re-evaluate
    /// where the key should go, meaning the keys may become "lost" if their
    /// location does not reflect their state. For instance, if you change a key
    /// so that the map now contains keys which compare equal, search may start
    /// acting erratically, with two keys randomly masking each other. Implementations
    /// are free to assume this doesn't happen (within the limits of memory-safety).
    #[inline(always)]
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<K, V, S> {
        // Reserve before handing out the builder: a later vacant insert
        // through it must not need to grow the table.
        self.reserve(1);
        RawEntryBuilderMut { map: self }
    }

    /// Creates a raw immutable entry builder for the HashMap.
    ///
    /// Raw entries provide the lowest level of control for searching and
    /// manipulating a map. They must be manually initialized with a hash and
    /// then manually searched.
    ///
    /// This is useful for
    /// * Hash memoization
    /// * Using a search key that doesn't work with the Borrow trait
    /// * Using custom comparison logic without newtype wrappers
    ///
    /// Unless you are in such a situation, higher-level and more foolproof APIs like
    /// `get` should be preferred.
    ///
    /// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn raw_entry(&self) -> RawEntryBuilder<K, V, S> {
        // Read-only: no reservation is needed since nothing can be inserted.
        RawEntryBuilder { map: self }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> PartialEq for HashMap<K, V, S>
    where K: Eq + Hash,
          V: PartialEq,
          S: BuildHasher
{
    /// Two maps are equal when they have the same length and every key in
    /// `self` maps to an equal value in `other`.
    fn eq(&self, other: &HashMap<K, V, S>) -> bool {
        // Length check first: it is O(1) and rules out most mismatches.
        self.len() == other.len() &&
            self.iter().all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> Eq for HashMap<K, V, S>
    where K: Eq + Hash,
          V: Eq,
          S: BuildHasher
{
    // `Eq` is a marker trait: no methods, it merely promises that the
    // `PartialEq` impl above is a total equivalence when `V: Eq`.
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> Debug for HashMap<K, V, S>
    where K: Eq + Hash + Debug,
          V: Debug,
          S: BuildHasher
{
    /// Formats the map as `{key: value, ...}` via the standard map builder.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_map();
        builder.entries(self.iter());
        builder.finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> Default for HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher + Default
{
    /// Creates an empty `HashMap<K, V, S>`, with the `Default` value for the hasher.
    fn default() -> HashMap<K, V, S> {
        // Build the hasher first, then hand it to the with-hasher constructor.
        let hasher = S::default();
        HashMap::with_hasher(hasher)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, Q: ?Sized, V, S> Index<&'a Q> for HashMap<K, V, S>
    where K: Eq + Hash + Borrow<Q>,
          Q: Eq + Hash,
          S: BuildHasher
{
    type Output = V;

    /// Returns a reference to the value corresponding to the supplied key.
    ///
    /// # Panics
    ///
    /// Panics if the key is not present in the `HashMap`.
    #[inline]
    fn index(&self, key: &Q) -> &V {
        // Indexing is infallible by contract, so a miss is a hard panic.
        self.get(key).expect("no entry found for key")
    }
}
/// An iterator over the entries of a `HashMap`.
///
/// This `struct` is created by the [`iter`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`iter`]: struct.HashMap.html#method.iter
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, K: 'a, V: 'a> {
    // Raw-table iterator that performs the actual traversal.
    inner: table::Iter<'a, K, V>,
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Iter<'a, K, V> {
    /// Clones the underlying raw-table iterator; cheap, as it only borrows.
    fn clone(&self) -> Iter<'a, K, V> {
        let inner = self.inner.clone();
        Iter { inner }
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K: Debug, V: Debug> fmt::Debug for Iter<'a, K, V> {
    /// Lists the remaining entries; iterates a clone so `&self` suffices.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list().entries(self.clone()).finish()
    }
}
/// A mutable iterator over the entries of a `HashMap`.
///
/// This `struct` is created by the [`iter_mut`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`iter_mut`]: struct.HashMap.html#method.iter_mut
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, K: 'a, V: 'a> {
    // Raw-table mutable iterator that performs the actual traversal.
    inner: table::IterMut<'a, K, V>,
}
/// An owning iterator over the entries of a `HashMap`.
///
/// This `struct` is created by the [`into_iter`] method on [`HashMap`][`HashMap`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.HashMap.html#method.into_iter
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<K, V> {
    // `pub(super)` so the sibling hash modules can construct it directly.
    pub(super) inner: table::IntoIter<K, V>,
}
/// An iterator over the keys of a `HashMap`.
///
/// This `struct` is created by the [`keys`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`keys`]: struct.HashMap.html#method.keys
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Keys<'a, K: 'a, V: 'a> {
    // Full-pair iterator; the `Iterator` impl projects out the key half.
    inner: Iter<'a, K, V>,
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Keys<'a, K, V> {
    /// Clones the wrapped pair iterator; cheap, as it only borrows.
    fn clone(&self) -> Keys<'a, K, V> {
        let inner = self.inner.clone();
        Keys { inner }
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K: Debug, V> fmt::Debug for Keys<'a, K, V> {
    /// Lists the remaining keys; iterates a clone so `&self` suffices.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list().entries(self.clone()).finish()
    }
}
/// An iterator over the values of a `HashMap`.
///
/// This `struct` is created by the [`values`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`values`]: struct.HashMap.html#method.values
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Values<'a, K: 'a, V: 'a> {
    // Full-pair iterator; the `Iterator` impl projects out the value half.
    inner: Iter<'a, K, V>,
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Values<'a, K, V> {
    /// Clones the wrapped pair iterator; cheap, as it only borrows.
    fn clone(&self) -> Values<'a, K, V> {
        let inner = self.inner.clone();
        Values { inner }
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V: Debug> fmt::Debug for Values<'a, K, V> {
    /// Lists the remaining values; iterates a clone so `&self` suffices.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list().entries(self.clone()).finish()
    }
}
/// A draining iterator over the entries of a `HashMap`.
///
/// This `struct` is created by the [`drain`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`drain`]: struct.HashMap.html#method.drain
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, K: 'a, V: 'a> {
    // `pub(super)` so the sibling hash modules can construct it directly.
    pub(super) inner: table::Drain<'a, K, V>,
}
/// A mutable iterator over the values of a `HashMap`.
///
/// This `struct` is created by the [`values_mut`] method on [`HashMap`]. See its
/// documentation for more.
///
/// [`values_mut`]: struct.HashMap.html#method.values_mut
/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "map_values_mut", since = "1.10.0")]
pub struct ValuesMut<'a, K: 'a, V: 'a> {
    // Mutable pair iterator; the `Iterator` impl projects out the value half.
    inner: IterMut<'a, K, V>,
}
/// Outcome of a raw table search, later adapted into the public entry types.
enum InternalEntry<K, V, M> {
    /// The key was found; `elem` is its full bucket.
    Occupied { elem: FullBucket<K, V, M> },
    /// The key was not found; `hash` and `elem` describe where it would go.
    Vacant {
        hash: SafeHash,
        elem: VacantEntryState<K, V, M>,
    },
    /// The table has no buckets at all, so no position can be described.
    TableIsEmpty,
}
impl<K, V, M> InternalEntry<K, V, M> {
    /// Returns the full bucket if this is an `Occupied` result, else `None`.
    #[inline]
    fn into_occupied_bucket(self) -> Option<FullBucket<K, V, M>> {
        if let InternalEntry::Occupied { elem } = self {
            Some(elem)
        } else {
            None
        }
    }
}
impl<'a, K, V> InternalEntry<K, V, &'a mut RawTable<K, V>> {
    /// Adapts an internal search result into the public `Entry` type.
    ///
    /// Returns `None` only for `TableIsEmpty`; callers that have already
    /// reserved capacity can safely `expect` the result (see `HashMap::entry`).
    #[inline]
    fn into_entry(self, key: K) -> Option<Entry<'a, K, V>> {
        match self {
            InternalEntry::Occupied { elem } => {
                // The owned key is stored as `Some` so it can later be
                // taken by value without cloning.
                Some(Occupied(OccupiedEntry {
                    key: Some(key),
                    elem,
                }))
            }
            InternalEntry::Vacant { hash, elem } => {
                Some(Vacant(VacantEntry {
                    hash,
                    key,
                    elem,
                }))
            }
            InternalEntry::TableIsEmpty => None,
        }
    }
}
/// A builder for computing where in a HashMap a key-value pair would be stored.
///
/// See the [`HashMap::raw_entry_mut`] docs for usage examples.
///
/// [`HashMap::raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub struct RawEntryBuilderMut<'a, K: 'a, V: 'a, S: 'a> {
    // Exclusive borrow of the map; capacity was reserved when this was built.
    map: &'a mut HashMap<K, V, S>,
}
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This is a lower-level version of [`Entry`].
///
/// This `enum` is constructed from the [`raw_entry`] method on [`HashMap`].
///
/// [`HashMap`]: struct.HashMap.html
/// [`Entry`]: enum.Entry.html
/// [`raw_entry`]: struct.HashMap.html#method.raw_entry
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub enum RawEntryMut<'a, K: 'a, V: 'a, S: 'a> {
    /// An occupied entry.
    Occupied(RawOccupiedEntryMut<'a, K, V>),
    /// A vacant entry.
    Vacant(RawVacantEntryMut<'a, K, V, S>),
}
/// A view into an occupied entry in a `HashMap`.
/// It is part of the [`RawEntryMut`] enum.
///
/// [`RawEntryMut`]: enum.RawEntryMut.html
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub struct RawOccupiedEntryMut<'a, K: 'a, V: 'a> {
    // Full bucket holding the found pair, borrowing the table mutably.
    elem: FullBucket<K, V, &'a mut RawTable<K, V>>,
}
/// A view into a vacant entry in a `HashMap`.
/// It is part of the [`RawEntryMut`] enum.
///
/// [`RawEntryMut`]: enum.RawEntryMut.html
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub struct RawVacantEntryMut<'a, K: 'a, V: 'a, S: 'a> {
    // Insertion point discovered by the search.
    elem: VacantEntryState<K, V, &'a mut RawTable<K, V>>,
    // Kept so `insert` can hash the eventual owned key.
    hash_builder: &'a S,
}
/// A builder for computing where in a HashMap a key-value pair would be stored.
///
/// See the [`HashMap::raw_entry`] docs for usage examples.
///
/// [`HashMap::raw_entry`]: struct.HashMap.html#method.raw_entry
#[unstable(feature = "hash_raw_entry", issue = "56167")]
pub struct RawEntryBuilder<'a, K: 'a, V: 'a, S: 'a> {
    // Shared borrow only: this builder supports lookups, never inserts.
    map: &'a HashMap<K, V, S>,
}
impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S>
    where S: BuildHasher,
          K: Eq + Hash,
{
    /// Create a `RawEntryMut` from the given key.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_key<Q: ?Sized>(self, k: &Q) -> RawEntryMut<'a, K, V, S>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        // Compute the hash with the map's own hasher, then defer to the
        // precomputed-hash path.
        let mut hasher = self.map.hash_builder.build_hasher();
        k.hash(&mut hasher);
        self.from_key_hashed_nocheck(hasher.finish(), k)
    }

    /// Create a `RawEntryMut` from the given key and its hash.
    #[inline]
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S>
        where K: Borrow<Q>,
              Q: Eq
    {
        self.from_hash(hash, |q| q.borrow().eq(k))
    }

    /// Shared search core for the `from_*`/`search_bucket` entry points.
    /// `compare_hashes` selects whether candidate buckets are filtered by
    /// stored hash before `is_match` is consulted.
    #[inline]
    fn search<F>(self, hash: u64, is_match: F, compare_hashes: bool) -> RawEntryMut<'a, K, V, S>
        where for<'b> F: FnMut(&'b K) -> bool,
    {
        match search_hashed_nonempty_mut(&mut self.map.table,
                                         SafeHash::new(hash),
                                         is_match,
                                         compare_hashes) {
            InternalEntry::Occupied { elem } => {
                RawEntryMut::Occupied(RawOccupiedEntryMut { elem })
            }
            InternalEntry::Vacant { elem, .. } => {
                RawEntryMut::Vacant(RawVacantEntryMut {
                    elem,
                    hash_builder: &self.map.hash_builder,
                })
            }
            InternalEntry::TableIsEmpty => {
                // `raw_entry_mut` reserved capacity, so the table cannot
                // be bucket-less here.
                unreachable!()
            }
        }
    }

    /// Create a `RawEntryMut` from the given hash.
    #[inline]
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_hash<F>(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S>
        where for<'b> F: FnMut(&'b K) -> bool,
    {
        self.search(hash, is_match, true)
    }

    /// Search possible locations for an element with hash `hash` until `is_match` returns true for
    /// one of them. There is no guarantee that all keys passed to `is_match` will have the provided
    /// hash.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn search_bucket<F>(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S>
        where for<'b> F: FnMut(&'b K) -> bool,
    {
        self.search(hash, is_match, false)
    }
}
impl<'a, K, V, S> RawEntryBuilder<'a, K, V, S>
    where S: BuildHasher,
{
    /// Access an entry by key.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_key<Q: ?Sized>(self, k: &Q) -> Option<(&'a K, &'a V)>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        // Hash with the map's own hasher, then defer to the hashed path.
        let mut hasher = self.map.hash_builder.build_hasher();
        k.hash(&mut hasher);
        self.from_key_hashed_nocheck(hasher.finish(), k)
    }

    /// Access an entry by a key and its hash.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)>
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        self.from_hash(hash, |q| q.borrow().eq(k))
    }

    /// Shared read-only search core; `compare_hashes` selects whether
    /// candidates are filtered by stored hash before `is_match` runs.
    fn search<F>(self, hash: u64, is_match: F, compare_hashes: bool) -> Option<(&'a K, &'a V)>
        where F: FnMut(&K) -> bool
    {
        // `unlikely` is an intrinsic branch-prediction hint (hence the
        // `unsafe`); the guard keeps the nonempty search's precondition.
        if unsafe { unlikely(self.map.table.size() == 0) } {
            return None;
        }
        match search_hashed_nonempty(&self.map.table,
                                     SafeHash::new(hash),
                                     is_match,
                                     compare_hashes) {
            InternalEntry::Occupied { elem } => Some(elem.into_refs()),
            InternalEntry::Vacant { .. } => None,
            // Ruled out by the size check above.
            InternalEntry::TableIsEmpty => unreachable!(),
        }
    }

    /// Access an entry by hash.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn from_hash<F>(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)>
        where F: FnMut(&K) -> bool
    {
        self.search(hash, is_match, true)
    }

    /// Search possible locations for an element with hash `hash` until `is_match` returns true for
    /// one of them. There is no guarantee that all keys passed to `is_match` will have the provided
    /// hash.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn search_bucket<F>(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)>
        where F: FnMut(&K) -> bool
    {
        self.search(hash, is_match, false)
    }
}
impl<'a, K, V, S> RawEntryMut<'a, K, V, S> {
    /// Ensures a value is in the entry by inserting the default if empty, and returns
    /// mutable references to the key and value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(hash_raw_entry)]
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 3);
    /// assert_eq!(map["poneyland"], 3);
    ///
    /// *map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 10).1 *= 2;
    /// assert_eq!(map["poneyland"], 6);
    /// ```
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn or_insert(self, default_key: K, default_val: V) -> (&'a mut K, &'a mut V)
        where K: Hash,
              S: BuildHasher,
    {
        match self {
            // Already present: the supplied defaults are simply dropped.
            RawEntryMut::Occupied(entry) => entry.into_key_value(),
            RawEntryMut::Vacant(entry) => entry.insert(default_key, default_val),
        }
    }

    /// Ensures a value is in the entry by inserting the result of the default function if empty,
    /// and returns mutable references to the key and value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(hash_raw_entry)]
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, String> = HashMap::new();
    ///
    /// map.raw_entry_mut().from_key("poneyland").or_insert_with(|| {
    ///     ("poneyland", "hoho".to_string())
    /// });
    ///
    /// assert_eq!(map["poneyland"], "hoho".to_string());
    /// ```
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn or_insert_with<F>(self, default: F) -> (&'a mut K, &'a mut V)
        where F: FnOnce() -> (K, V),
              K: Hash,
              S: BuildHasher,
    {
        match self {
            RawEntryMut::Occupied(entry) => entry.into_key_value(),
            RawEntryMut::Vacant(entry) => {
                // `default` is only invoked when the entry is vacant.
                let (k, v) = default();
                entry.insert(k, v)
            }
        }
    }

    /// Provides in-place mutable access to an occupied entry before any
    /// potential inserts into the map.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(hash_raw_entry)]
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// map.raw_entry_mut()
    ///    .from_key("poneyland")
    ///    .and_modify(|_k, v| { *v += 1 })
    ///    .or_insert("poneyland", 42);
    /// assert_eq!(map["poneyland"], 42);
    ///
    /// map.raw_entry_mut()
    ///    .from_key("poneyland")
    ///    .and_modify(|_k, v| { *v += 1 })
    ///    .or_insert("poneyland", 0);
    /// assert_eq!(map["poneyland"], 43);
    /// ```
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn and_modify<F>(self, f: F) -> Self
        where F: FnOnce(&mut K, &mut V)
    {
        match self {
            RawEntryMut::Occupied(mut entry) => {
                // Inner block scopes the mutable borrow from
                // `get_key_value_mut` so `entry` can be moved back out.
                {
                    let (k, v) = entry.get_key_value_mut();
                    f(k, v);
                }
                RawEntryMut::Occupied(entry)
            },
            RawEntryMut::Vacant(entry) => RawEntryMut::Vacant(entry),
        }
    }
}
impl<'a, K, V> RawOccupiedEntryMut<'a, K, V> {
    /// Gets a shared reference to the entry's key.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn key(&self) -> &K {
        self.elem.read().0
    }

    /// Gets a mutable reference to the entry's key.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn key_mut(&mut self) -> &mut K {
        self.elem.read_mut().0
    }

    /// Consumes the entry, yielding a mutable key reference whose lifetime
    /// is bound to the map itself.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn into_key(self) -> &'a mut K {
        self.elem.into_mut_refs().0
    }

    /// Gets a shared reference to the entry's value.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn get(&self) -> &V {
        self.elem.read().1
    }

    /// Consumes the entry, yielding a mutable value reference whose lifetime
    /// is bound to the map itself.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn into_mut(self) -> &'a mut V {
        self.elem.into_mut_refs().1
    }

    /// Gets a mutable reference to the entry's value.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn get_mut(&mut self) -> &mut V {
        self.elem.read_mut().1
    }

    /// Gets shared references to both the key and the value.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn get_key_value(&mut self) -> (&K, &V) {
        self.elem.read()
    }

    /// Gets mutable references to both the key and the value.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) {
        self.elem.read_mut()
    }

    /// Consumes the entry, yielding mutable references to the key and value
    /// whose lifetimes are bound to the map itself.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn into_key_value(self) -> (&'a mut K, &'a mut V) {
        self.elem.into_mut_refs()
    }

    /// Replaces the entry's value, returning the previous one.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn insert(&mut self, value: V) -> V {
        mem::replace(self.get_mut(), value)
    }

    /// Replaces the entry's key, returning the previous one.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn remove(self) -> V {
        let (_, value, _) = pop_internal(self.elem);
        value
    }

    /// Removes the entry from the map and returns its value.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn insert_key(&mut self, key: K) -> K {
        mem::replace(self.key_mut(), key)
    }

    /// Removes the entry from the map, returning ownership of both the key
    /// and the value.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn remove_entry(self) -> (K, V) {
        let (key, value, _) = pop_internal(self.elem);
        (key, value)
    }
}
impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> {
    /// Sets the value of the entry with the VacantEntry's key,
    /// and returns a mutable reference to it.
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V)
        where K: Hash,
              S: BuildHasher,
    {
        // Hash the owned key with the map's hasher, then defer to the
        // precomputed-hash insert.
        let mut hasher = self.hash_builder.build_hasher();
        key.hash(&mut hasher);
        self.insert_hashed_nocheck(hasher.finish(), key, value)
    }

    /// Sets the value of the entry with the VacantEntry's key,
    /// and returns a mutable reference to it.
    #[inline]
    #[unstable(feature = "hash_raw_entry", issue = "56167")]
    pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) {
        let hash = SafeHash::new(hash);
        let b = match self.elem {
            // Slot is occupied by an element with a smaller displacement:
            // robin-hood displacement kicks it forward to make room.
            NeqElem(mut bucket, disp) => {
                if disp >= DISPLACEMENT_THRESHOLD {
                    // NOTE(review): tagging appears to flag long probe
                    // chains for the table — confirm semantics in table module.
                    bucket.table_mut().set_tag(true);
                }
                robin_hood(bucket, disp, hash, key, value)
            },
            // Slot is genuinely empty: plain insertion.
            NoElem(mut bucket, disp) => {
                if disp >= DISPLACEMENT_THRESHOLD {
                    bucket.table_mut().set_tag(true);
                }
                bucket.put(hash, key, value)
            },
        };
        b.into_mut_refs()
    }
}
#[unstable(feature = "hash_raw_entry", issue = "56167")]
impl<'a, K, V, S> Debug for RawEntryBuilderMut<'a, K, V, S> {
    /// Prints only the type name; the wrapped map is omitted because its
    /// keys/values need not implement `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Fix: previously printed "RawEntryBuilder" — the name of the
        // *immutable* builder type — misidentifying this type in debug
        // output. Every other Debug impl in this module prints the type's
        // own name.
        f.debug_struct("RawEntryBuilderMut")
            .finish()
    }
}
#[unstable(feature = "hash_raw_entry", issue = "56167")]
impl<'a, K: Debug, V: Debug, S> Debug for RawEntryMut<'a, K, V, S> {
    /// Formats as `RawEntry(...)` wrapping whichever variant is held.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut tuple = f.debug_tuple("RawEntry");
        match *self {
            RawEntryMut::Vacant(ref v) => tuple.field(v),
            RawEntryMut::Occupied(ref o) => tuple.field(o),
        };
        tuple.finish()
    }
}
#[unstable(feature = "hash_raw_entry", issue = "56167")]
impl<'a, K: Debug, V: Debug> Debug for RawOccupiedEntryMut<'a, K, V> {
    /// Shows the entry's key and value via the struct builder.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_struct("RawOccupiedEntryMut");
        builder.field("key", self.key());
        builder.field("value", self.get());
        builder.finish()
    }
}
#[unstable(feature = "hash_raw_entry", issue = "56167")]
impl<'a, K, V, S> Debug for RawVacantEntryMut<'a, K, V, S> {
    /// Prints only the type name; a vacant entry has nothing to show.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("RawVacantEntryMut").finish()
    }
}
#[unstable(feature = "hash_raw_entry", issue = "56167")]
impl<'a, K, V, S> Debug for RawEntryBuilder<'a, K, V, S> {
    /// Prints only the type name; the borrowed map is omitted.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("RawEntryBuilder").finish()
    }
}
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This `enum` is constructed from the [`entry`] method on [`HashMap`].
///
/// [`HashMap`]: struct.HashMap.html
/// [`entry`]: struct.HashMap.html#method.entry
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Entry<'a, K: 'a, V: 'a> {
    /// An occupied entry.
    #[stable(feature = "rust1", since = "1.0.0")]
    Occupied(#[stable(feature = "rust1", since = "1.0.0")]
             OccupiedEntry<'a, K, V>),
    /// A vacant entry.
    #[stable(feature = "rust1", since = "1.0.0")]
    Vacant(#[stable(feature = "rust1", since = "1.0.0")]
           VacantEntry<'a, K, V>),
}
#[stable(feature= "debug_hash_map", since = "1.12.0")]
impl<'a, K: 'a + Debug, V: 'a + Debug> Debug for Entry<'a, K, V> {
    /// Formats as `Entry(...)` wrapping whichever variant is held.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut tuple = f.debug_tuple("Entry");
        match *self {
            Vacant(ref v) => tuple.field(v),
            Occupied(ref o) => tuple.field(o),
        };
        tuple.finish()
    }
}
/// A view into an occupied entry in a `HashMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
    // The key this entry was created with. It is an `Option` (not a bare `K`)
    // because `take_key` moves it out and `replace_entry`/`replace_key`
    // consume it via `unwrap`.
    key: Option<K>,
    // Bucket known to be full, mutably borrowing the map's raw table for 'a.
    elem: FullBucket<K, V, &'a mut RawTable<K, V>>,
}
#[stable(feature= "debug_hash_map", since = "1.12.0")]
impl<'a, K: 'a + Debug, V: 'a + Debug> Debug for OccupiedEntry<'a, K, V> {
    /// Shows the entry's key and value as named struct fields.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_struct("OccupiedEntry");
        builder.field("key", self.key());
        builder.field("value", self.get());
        builder.finish()
    }
}
/// A view into a vacant entry in a `HashMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VacantEntry<'a, K: 'a, V: 'a> {
    // Precomputed hash for `key`; `insert` hands it to the table so the key
    // is not rehashed on insertion.
    hash: SafeHash,
    // The key the entry will be inserted under (returned by `key`/`into_key`).
    key: K,
    // Where the insertion will happen (see `VacantEntryState` below).
    elem: VacantEntryState<K, V, &'a mut RawTable<K, V>>,
}
#[stable(feature= "debug_hash_map", since = "1.12.0")]
impl<'a, K: 'a + Debug, V: 'a> Debug for VacantEntry<'a, K, V> {
    /// Shows only the key — a vacant entry has no value yet.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("VacantEntry").field(self.key()).finish()
    }
}
/// Possible states of a VacantEntry.
// The `usize` in both variants is the probe displacement that
// `VacantEntry::insert` compares against DISPLACEMENT_THRESHOLD.
enum VacantEntryState<K, V, M> {
    /// The index is occupied, but the key to insert has precedence,
    /// and will kick the current one out on insertion.
    NeqElem(FullBucket<K, V, M>, usize),
    /// The index is genuinely vacant.
    NoElem(EmptyBucket<K, V, M>, usize),
}
// `for (k, v) in &map` — delegates to `HashMap::iter`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V, S> IntoIterator for &'a HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    type Item = (&'a K, &'a V);
    type IntoIter = Iter<'a, K, V>;
    fn into_iter(self) -> Iter<'a, K, V> {
        self.iter()
    }
}
// `for (k, v) in &mut map` — delegates to `HashMap::iter_mut`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V, S> IntoIterator for &'a mut HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    type Item = (&'a K, &'a mut V);
    type IntoIter = IterMut<'a, K, V>;
    fn into_iter(self) -> IterMut<'a, K, V> {
        self.iter_mut()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> IntoIterator for HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    type Item = (K, V);
    type IntoIter = IntoIter<K, V>;
    /// Creates a consuming iterator, that is, one that moves each key-value
    /// pair out of the map in arbitrary order. The map cannot be used after
    /// calling this.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map = HashMap::new();
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    ///
    /// // Not possible with .iter()
    /// let vec: Vec<(&str, i32)> = map.into_iter().collect();
    /// ```
    fn into_iter(self) -> IntoIter<K, V> {
        // Consumes the raw table directly; its iterator yields owned entries.
        IntoIter { inner: self.table.into_iter() }
    }
}
// Borrowing iterator over `(&K, &V)` pairs: thin delegates to the raw-table
// iterator stored in `inner`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Iter<'a, K, V> {
    type Item = (&'a K, &'a V);
    #[inline]
    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        self.inner.next()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
// Fused marker: relies on the table iterator never resuming after `None`
// (invariant lives in the raw-table code, not visible here).
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for Iter<'a, K, V> {}
// Mutable-value iterator over `(&K, &mut V)` pairs: delegates to `inner`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);
    #[inline]
    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
        self.inner.next()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V> fmt::Debug for IterMut<'a, K, V>
    where K: fmt::Debug,
          V: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Debug-prints the remaining entries via a fresh shared-borrow
        // iterator, so formatting does not consume `self`.
        f.debug_list()
            .entries(self.inner.iter())
            .finish()
    }
}
// Owning iterator over `(K, V)` pairs.
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (K, V);
    #[inline]
    fn next(&mut self) -> Option<(K, V)> {
        // The raw iterator yields a 3-tuple; the dropped first component is
        // table-internal (presumably the stored hash — confirm in raw table).
        self.inner.next().map(|(_, k, v)| (k, v))
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<K, V> FusedIterator for IntoIter<K, V> {}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<K: Debug, V: Debug> fmt::Debug for IntoIter<K, V> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Non-consuming debug view of the remaining entries.
        f.debug_list()
            .entries(self.inner.iter())
            .finish()
    }
}
// Key-only projection of `Iter`: drops the value from each pair.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Keys<'a, K, V> {
    type Item = &'a K;
    #[inline]
    fn next(&mut self) -> Option<(&'a K)> {
        self.inner.next().map(|(k, _)| k)
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for Keys<'a, K, V> {}
// Value-only projection of `Iter`: drops the key from each pair.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Values<'a, K, V> {
    type Item = &'a V;
    #[inline]
    fn next(&mut self) -> Option<(&'a V)> {
        self.inner.next().map(|(_, v)| v)
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for Values<'a, K, V> {}
// Mutable value-only projection of `IterMut`.
#[stable(feature = "map_values_mut", since = "1.10.0")]
impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
    type Item = &'a mut V;
    #[inline]
    fn next(&mut self) -> Option<(&'a mut V)> {
        self.inner.next().map(|(_, v)| v)
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "map_values_mut", since = "1.10.0")]
impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> {
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V> fmt::Debug for ValuesMut<'a, K, V>
    where K: fmt::Debug,
          V: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // `inner` is the wrapped IterMut; `inner.inner` is its raw iterator,
        // borrowed shared here so Debug does not consume anything.
        f.debug_list()
            .entries(self.inner.inner.iter())
            .finish()
    }
}
// Draining iterator: yields owned `(K, V)` pairs while emptying the map.
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, K, V> Iterator for Drain<'a, K, V> {
    type Item = (K, V);
    #[inline]
    fn next(&mut self) -> Option<(K, V)> {
        // Same 3-tuple shape as IntoIter; the first component is dropped.
        self.inner.next().map(|(_, k, v)| (k, v))
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
    #[inline]
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, K, V> FusedIterator for Drain<'a, K, V> {}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V> fmt::Debug for Drain<'a, K, V>
    where K: fmt::Debug,
          V: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Debug view of entries not yet drained.
        f.debug_list()
            .entries(self.inner.iter())
            .finish()
    }
}
impl<'a, K, V> Entry<'a, K, V> {
    #[stable(feature = "rust1", since = "1.0.0")]
    /// Ensures a value is in the entry by inserting the default if empty, and returns
    /// a mutable reference to the value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// map.entry("poneyland").or_insert(3);
    /// assert_eq!(map["poneyland"], 3);
    ///
    /// *map.entry("poneyland").or_insert(10) *= 2;
    /// assert_eq!(map["poneyland"], 6);
    /// ```
    pub fn or_insert(self, default: V) -> &'a mut V {
        match self {
            // Occupied: `default` is dropped unused.
            Occupied(entry) => entry.into_mut(),
            Vacant(entry) => entry.insert(default),
        }
    }
    #[stable(feature = "rust1", since = "1.0.0")]
    /// Ensures a value is in the entry by inserting the result of the default function if empty,
    /// and returns a mutable reference to the value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, String> = HashMap::new();
    /// let s = "hoho".to_string();
    ///
    /// map.entry("poneyland").or_insert_with(|| s);
    ///
    /// assert_eq!(map["poneyland"], "hoho".to_string());
    /// ```
    pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
        match self {
            // Occupied: `default` is never invoked (lazy evaluation).
            Occupied(entry) => entry.into_mut(),
            Vacant(entry) => entry.insert(default()),
        }
    }
    /// Returns a reference to this entry's key.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
    /// ```
    #[stable(feature = "map_entry_keys", since = "1.10.0")]
    pub fn key(&self) -> &K {
        match *self {
            Occupied(ref entry) => entry.key(),
            Vacant(ref entry) => entry.key(),
        }
    }
    /// Provides in-place mutable access to an occupied entry before any
    /// potential inserts into the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// map.entry("poneyland")
    ///    .and_modify(|e| { *e += 1 })
    ///    .or_insert(42);
    /// assert_eq!(map["poneyland"], 42);
    ///
    /// map.entry("poneyland")
    ///    .and_modify(|e| { *e += 1 })
    ///    .or_insert(42);
    /// assert_eq!(map["poneyland"], 43);
    /// ```
    #[stable(feature = "entry_and_modify", since = "1.26.0")]
    pub fn and_modify<F>(self, f: F) -> Self
        where F: FnOnce(&mut V)
    {
        // Rebuilds the same variant after (possibly) running `f`, so the
        // entry can keep being chained (e.g. with `or_insert`).
        match self {
            Occupied(mut entry) => {
                f(entry.get_mut());
                Occupied(entry)
            },
            Vacant(entry) => Vacant(entry),
        }
    }
}
// Only available when V has a Default — hence the separate impl block.
impl<'a, K, V: Default> Entry<'a, K, V> {
    #[stable(feature = "entry_or_default", since = "1.28.0")]
    /// Ensures a value is in the entry by inserting the default value if empty,
    /// and returns a mutable reference to the value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() {
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, Option<u32>> = HashMap::new();
    /// map.entry("poneyland").or_default();
    ///
    /// assert_eq!(map["poneyland"], None);
    /// # }
    /// ```
    pub fn or_default(self) -> &'a mut V {
        match self {
            Occupied(entry) => entry.into_mut(),
            Vacant(entry) => entry.insert(Default::default()),
        }
    }
}
impl<'a, K, V> OccupiedEntry<'a, K, V> {
    /// Gets a reference to the key in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
    /// ```
    #[stable(feature = "map_entry_keys", since = "1.10.0")]
    pub fn key(&self) -> &K {
        // Reads the key stored in the bucket, not `self.key`.
        self.elem.read().0
    }
    /// Take the ownership of the key and value from the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// if let Entry::Occupied(o) = map.entry("poneyland") {
    ///     // We delete the entry from the map.
    ///     o.remove_entry();
    /// }
    ///
    /// assert_eq!(map.contains_key("poneyland"), false);
    /// ```
    #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
    pub fn remove_entry(self) -> (K, V) {
        // Third tuple component from pop_internal is discarded
        // (table-internal bookkeeping — see pop_internal's definition).
        let (k, v, _) = pop_internal(self.elem);
        (k, v)
    }
    /// Gets a reference to the value in the entry.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// if let Entry::Occupied(o) = map.entry("poneyland") {
    ///     assert_eq!(o.get(), &12);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get(&self) -> &V {
        self.elem.read().1
    }
    /// Gets a mutable reference to the value in the entry.
    ///
    /// If you need a reference to the `OccupiedEntry` which may outlive the
    /// destruction of the `Entry` value, see [`into_mut`].
    ///
    /// [`into_mut`]: #method.into_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// assert_eq!(map["poneyland"], 12);
    /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
    ///     *o.get_mut() += 10;
    ///     assert_eq!(*o.get(), 22);
    ///
    ///     // We can use the same Entry multiple times.
    ///     *o.get_mut() += 2;
    /// }
    ///
    /// assert_eq!(map["poneyland"], 24);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self) -> &mut V {
        self.elem.read_mut().1
    }
    /// Converts the OccupiedEntry into a mutable reference to the value in the entry
    /// with a lifetime bound to the map itself.
    ///
    /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
    ///
    /// [`get_mut`]: #method.get_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// assert_eq!(map["poneyland"], 12);
    /// if let Entry::Occupied(o) = map.entry("poneyland") {
    ///     *o.into_mut() += 10;
    /// }
    ///
    /// assert_eq!(map["poneyland"], 22);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_mut(self) -> &'a mut V {
        // Consumes the entry, releasing the borrow back to the map's lifetime.
        self.elem.into_mut_refs().1
    }
    /// Sets the value of the entry, and returns the entry's old value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
    ///     assert_eq!(o.insert(15), 12);
    /// }
    ///
    /// assert_eq!(map["poneyland"], 15);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn insert(&mut self, mut value: V) -> V {
        // Swap in place: after the swap, `value` holds the old value.
        let old_value = self.get_mut();
        mem::swap(&mut value, old_value);
        value
    }
    /// Takes the value out of the entry, and returns it.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.entry("poneyland").or_insert(12);
    ///
    /// if let Entry::Occupied(o) = map.entry("poneyland") {
    ///     assert_eq!(o.remove(), 12);
    /// }
    ///
    /// assert_eq!(map.contains_key("poneyland"), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn remove(self) -> V {
        pop_internal(self.elem).1
    }
    /// Returns a key that was used for search.
    ///
    /// The key was retained for further use.
    // Internal helper: moves the retained lookup key out, leaving `None`.
    fn take_key(&mut self) -> Option<K> {
        self.key.take()
    }
    /// Replaces the entry, returning the old key and value. The new key in the hash map will be
    /// the key used to create this entry.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(map_entry_replace)]
    /// use std::collections::hash_map::{Entry, HashMap};
    /// use std::rc::Rc;
    ///
    /// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
    /// map.insert(Rc::new("Stringthing".to_string()), 15);
    ///
    /// let my_key = Rc::new("Stringthing".to_string());
    ///
    /// if let Entry::Occupied(entry) = map.entry(my_key) {
    ///     // Also replace the key with a handle to our other key.
    ///     let (old_key, old_value): (Rc<String>, u32) = entry.replace_entry(16);
    /// }
    ///
    /// ```
    #[unstable(feature = "map_entry_replace", issue = "44286")]
    pub fn replace_entry(mut self, value: V) -> (K, V) {
        // `unwrap` assumes the retained key has not been taken —
        // NOTE(review): upheld as long as `take_key` was not called first.
        let (old_key, old_value) = self.elem.read_mut();
        let old_key = mem::replace(old_key, self.key.unwrap());
        let old_value = mem::replace(old_value, value);
        (old_key, old_value)
    }
    /// Replaces the key in the hash map with the key used to create this entry.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(map_entry_replace)]
    /// use std::collections::hash_map::{Entry, HashMap};
    /// use std::rc::Rc;
    ///
    /// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
    /// let mut known_strings: Vec<Rc<String>> = Vec::new();
    ///
    /// // Initialise known strings, run program, etc.
    ///
    /// reclaim_memory(&mut map, &known_strings);
    ///
    /// fn reclaim_memory(map: &mut HashMap<Rc<String>, u32>, known_strings: &[Rc<String>] ) {
    ///     for s in known_strings {
    ///         if let Entry::Occupied(entry) = map.entry(s.clone()) {
    ///             // Replaces the entry's key with our version of it in `known_strings`.
    ///             entry.replace_key();
    ///         }
    ///     }
    /// }
    /// ```
    #[unstable(feature = "map_entry_replace", issue = "44286")]
    pub fn replace_key(mut self) -> K {
        // Same `unwrap` invariant as `replace_entry` above.
        let (old_key, _) = self.elem.read_mut();
        mem::replace(old_key, self.key.unwrap())
    }
}
impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> {
    /// Gets a reference to the key that would be used when inserting a value
    /// through the `VacantEntry`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
    /// ```
    #[stable(feature = "map_entry_keys", since = "1.10.0")]
    pub fn key(&self) -> &K {
        &self.key
    }
    /// Take ownership of the key.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// if let Entry::Vacant(v) = map.entry("poneyland") {
    ///     v.into_key();
    /// }
    /// ```
    #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
    pub fn into_key(self) -> K {
        self.key
    }
    /// Sets the value of the entry with the VacantEntry's key,
    /// and returns a mutable reference to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use std::collections::hash_map::Entry;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    ///
    /// if let Entry::Vacant(o) = map.entry("poneyland") {
    ///     o.insert(37);
    /// }
    /// assert_eq!(map["poneyland"], 37);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn insert(self, value: V) -> &'a mut V {
        let b = match self.elem {
            // Slot holds another key: displace it Robin Hood-style.
            NeqElem(mut bucket, disp) => {
                // Long probe sequences set the table's tag — consumed
                // elsewhere (presumably to trigger resizing/rehash; the
                // policy lives in the table code, not visible here).
                if disp >= DISPLACEMENT_THRESHOLD {
                    bucket.table_mut().set_tag(true);
                }
                robin_hood(bucket, disp, self.hash, self.key, value)
            },
            // Slot is genuinely empty: store directly.
            NoElem(mut bucket, disp) => {
                if disp >= DISPLACEMENT_THRESHOLD {
                    bucket.table_mut().set_tag(true);
                }
                bucket.put(self.hash, self.key, value)
            },
        };
        b.into_mut_refs().1
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> FromIterator<(K, V)> for HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher + Default
{
    /// Builds a map from an iterator of key-value pairs by creating an
    /// empty map with the default hasher and delegating to `extend`.
    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> HashMap<K, V, S> {
        let mut result: HashMap<K, V, S> = HashMap::with_hasher(S::default());
        result.extend(iter);
        result
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> Extend<(K, V)> for HashMap<K, V, S>
    where K: Eq + Hash,
          S: BuildHasher
{
    fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
        // Keys may be already present or show multiple times in the iterator.
        // Reserve the entire hint lower bound if the map is empty.
        // Otherwise reserve half the hint (rounded up), so the map
        // will only resize twice in the worst case.
        let iter = iter.into_iter();
        let reserve = if self.is_empty() {
            iter.size_hint().0
        } else {
            (iter.size_hint().0 + 1) / 2
        };
        self.reserve(reserve);
        // `insert` handles duplicates: later pairs overwrite earlier ones.
        for (k, v) in iter {
            self.insert(k, v);
        }
    }
}
// Convenience impl: extend from borrowed pairs when K and V are Copy,
// delegating to the owned-pair Extend impl above.
#[stable(feature = "hash_extend_copy", since = "1.4.0")]
impl<'a, K, V, S> Extend<(&'a K, &'a V)> for HashMap<K, V, S>
    where K: Eq + Hash + Copy,
          V: Copy,
          S: BuildHasher
{
    fn extend<T: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: T) {
        self.extend(iter.into_iter().map(|(&key, &value)| (key, value)));
    }
}
/// `RandomState` is the default state for [`HashMap`] types.
///
/// A particular instance `RandomState` will create the same instances of
/// [`Hasher`], but the hashers created by two different `RandomState`
/// instances are unlikely to produce the same result for the same values.
///
/// [`HashMap`]: struct.HashMap.html
/// [`Hasher`]: ../../hash/trait.Hasher.html
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use std::collections::hash_map::RandomState;
///
/// let s = RandomState::new();
/// let mut map = HashMap::with_hasher(s);
/// map.insert(1, 2);
/// ```
#[derive(Clone)]
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
pub struct RandomState {
    // Seed pair fed to SipHasher13 in `build_hasher` (`new_with_keys(k0, k1)`).
    k0: u64,
    k1: u64,
}
impl RandomState {
    /// Constructs a new `RandomState` that is initialized with random keys.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::hash_map::RandomState;
    ///
    /// let s = RandomState::new();
    /// ```
    #[inline]
    #[allow(deprecated)]
    // rand
    #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
    pub fn new() -> RandomState {
        // Historically this function did not cache keys from the OS and instead
        // simply always called `rand::thread_rng().gen()` twice. In #31356 it
        // was discovered, however, that because we re-seed the thread-local RNG
        // from the OS periodically that this can cause excessive slowdown when
        // many hash maps are created on a thread. To solve this performance
        // trap we cache the first set of randomly generated keys per-thread.
        //
        // Later in #36481 it was discovered that exposing a deterministic
        // iteration order allows a form of DOS attack. To counter that we
        // increment one of the seeds on every RandomState creation, giving
        // every corresponding HashMap a different iteration order.
        thread_local!(static KEYS: Cell<(u64, u64)> = {
            Cell::new(sys::hashmap_random_keys())
        });
        KEYS.with(|keys| {
            let (k0, k1) = keys.get();
            keys.set((k0.wrapping_add(1), k1));
            // Field-init shorthand (was `k0: k0, k1: k1`) for consistency
            // with the rest of the file, e.g. `Droppable { k }`; flagged by
            // clippy::redundant_field_names.
            RandomState { k0, k1 }
        })
    }
}
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
impl BuildHasher for RandomState {
    type Hasher = DefaultHasher;
    #[inline]
    #[allow(deprecated)]
    // Every hasher built from the same RandomState shares the same key pair,
    // so equal values hash equally within one map.
    fn build_hasher(&self) -> DefaultHasher {
        DefaultHasher(SipHasher13::new_with_keys(self.k0, self.k1))
    }
}
/// The default [`Hasher`] used by [`RandomState`].
///
/// The internal algorithm is not specified, and so it and its hashes should
/// not be relied upon over releases.
///
/// [`RandomState`]: struct.RandomState.html
/// [`Hasher`]: ../../hash/trait.Hasher.html
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
#[allow(deprecated)]
#[derive(Clone, Debug)]
// Newtype over the (deprecated-but-internal) SipHasher13 implementation.
pub struct DefaultHasher(SipHasher13);
impl DefaultHasher {
    /// Creates a new `DefaultHasher`.
    ///
    /// This hasher is not guaranteed to be the same as all other
    /// `DefaultHasher` instances, but is the same as all other `DefaultHasher`
    /// instances created through `new` or `default`.
    #[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
    #[allow(deprecated)]
    pub fn new() -> DefaultHasher {
        // Fixed zero keys — unlike RandomState, `new` is deterministic.
        DefaultHasher(SipHasher13::new_with_keys(0, 0))
    }
}
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
impl Default for DefaultHasher {
    /// Creates a new `DefaultHasher` using [`new`][DefaultHasher::new].
    /// See its documentation for more.
    fn default() -> DefaultHasher {
        DefaultHasher::new()
    }
}
// Hasher: pure delegation to the wrapped SipHasher13.
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
impl Hasher for DefaultHasher {
    #[inline]
    fn write(&mut self, msg: &[u8]) {
        self.0.write(msg)
    }
    #[inline]
    fn finish(&self) -> u64 {
        self.0.finish()
    }
}
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
impl Default for RandomState {
    /// Constructs a new `RandomState`.
    #[inline]
    fn default() -> RandomState {
        RandomState::new()
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for RandomState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Keys are deliberately not printed: they are the map's DoS defense.
        f.pad("RandomState { .. }")
    }
}
// Key-recovery backend used by HashSet (a HashMap with `()` values):
// lets set operations get/take/replace the *stored* key for a lookup key.
impl<K, S, Q: ?Sized> super::Recover<Q> for HashMap<K, (), S>
    where K: Eq + Hash + Borrow<Q>,
          S: BuildHasher,
          Q: Eq + Hash
{
    type Key = K;
    #[inline]
    fn get(&self, key: &Q) -> Option<&K> {
        self.search(key).map(|bucket| bucket.into_refs().0)
    }
    fn take(&mut self, key: &Q) -> Option<K> {
        self.search_mut(key).map(|bucket| pop_internal(bucket).0)
    }
    #[inline]
    fn replace(&mut self, key: K) -> Option<K> {
        // Reserve up front so the Vacant branch can insert without resizing.
        self.reserve(1);
        match self.entry(key) {
            Occupied(mut occupied) => {
                // Swap the stored key for the (equal) lookup key; return old.
                let key = occupied.take_key().unwrap();
                Some(mem::replace(occupied.elem.read_mut().0, key))
            }
            Vacant(vacant) => {
                vacant.insert(());
                None
            }
        }
    }
}
// Compile-time-only checks: each function typechecks only if the named
// type is covariant in its key/value (and, for iterators, borrow) lifetimes.
// Never called at runtime, hence `dead_code`.
#[allow(dead_code)]
fn assert_covariance() {
    fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> {
        v
    }
    fn map_val<'new>(v: HashMap<u8, &'static str>) -> HashMap<u8, &'new str> {
        v
    }
    fn iter_key<'a, 'new>(v: Iter<'a, &'static str, u8>) -> Iter<'a, &'new str, u8> {
        v
    }
    fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> {
        v
    }
    fn into_iter_key<'new>(v: IntoIter<&'static str, u8>) -> IntoIter<&'new str, u8> {
        v
    }
    fn into_iter_val<'new>(v: IntoIter<u8, &'static str>) -> IntoIter<u8, &'new str> {
        v
    }
    fn keys_key<'a, 'new>(v: Keys<'a, &'static str, u8>) -> Keys<'a, &'new str, u8> {
        v
    }
    fn keys_val<'a, 'new>(v: Keys<'a, u8, &'static str>) -> Keys<'a, u8, &'new str> {
        v
    }
    fn values_key<'a, 'new>(v: Values<'a, &'static str, u8>) -> Values<'a, &'new str, u8> {
        v
    }
    fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> {
        v
    }
    fn drain<'new>(d: Drain<'static, &'static str, &'static str>)
                   -> Drain<'new, &'new str, &'new str> {
        d
    }
}
#[cfg(test)]
mod test_map {
use super::HashMap;
use super::Entry::{Occupied, Vacant};
use super::RandomState;
use cell::RefCell;
use rand::{thread_rng, Rng};
use realstd::collections::CollectionAllocErr::*;
use realstd::mem::size_of;
use realstd::usize;
#[test]
fn test_zero_capacities() {
    type HM = HashMap<i32, i32>;

    // Every construction path must start with no allocated capacity.
    let constructed = [
        HM::new(),
        HM::default(),
        HM::with_hasher(RandomState::new()),
        HM::with_capacity(0),
        HM::with_capacity_and_hasher(0, RandomState::new()),
    ];
    for m in &constructed {
        assert_eq!(m.capacity(), 0);
    }

    // Emptying a map and shrinking must return it to zero capacity.
    let mut m = HM::new();
    m.insert(1, 1);
    m.insert(2, 2);
    m.remove(&1);
    m.remove(&2);
    m.shrink_to_fit();
    assert_eq!(m.capacity(), 0);

    // Reserving nothing must not allocate.
    let mut m = HM::new();
    m.reserve(0);
    assert_eq!(m.capacity(), 0);
}
#[test]
fn test_create_capacity_zero() {
    // Insertion into a zero-capacity map must allocate and succeed.
    let mut map = HashMap::with_capacity(0);
    assert_eq!(map.insert(1, 1), None);
    assert!(map.contains_key(&1));
    assert_eq!(map.contains_key(&0), false);
}
#[test]
fn test_insert() {
    let mut map = HashMap::new();
    assert_eq!(map.len(), 0);

    // Fresh keys report no previous value and grow the map by one each.
    for (i, &(k, v)) in [(1, 2), (2, 4)].iter().enumerate() {
        assert_eq!(map.insert(k, v), None);
        assert_eq!(map.len(), i + 1);
    }
    assert_eq!(map[&1], 2);
    assert_eq!(map[&2], 4);
}
#[test]
fn test_clone() {
    let mut original = HashMap::new();
    assert_eq!(original.len(), 0);
    assert_eq!(original.insert(1, 2), None);
    assert_eq!(original.len(), 1);
    assert_eq!(original.insert(2, 4), None);
    assert_eq!(original.len(), 2);

    // The clone must be an independent map with identical contents.
    let copy = original.clone();
    assert_eq!(copy[&1], 2);
    assert_eq!(copy[&2], 4);
    assert_eq!(copy.len(), 2);
}
// Per-thread table of live-instance counts, indexed by `Droppable::k`.
// `new`/`clone` increment slot k; `drop` decrements it, letting the drop
// tests below assert exactly how many instances of each id are alive.
thread_local! { static DROP_VECTOR: RefCell<Vec<i32>> = RefCell::new(Vec::new()) }
#[derive(Hash, PartialEq, Eq)]
struct Droppable {
    k: usize,
}
impl Droppable {
    fn new(k: usize) -> Droppable {
        // Record the birth of instance `k`.
        DROP_VECTOR.with(|slot| {
            slot.borrow_mut()[k] += 1;
        });
        Droppable { k }
    }
}
impl Drop for Droppable {
    fn drop(&mut self) {
        // Record the death of instance `k`.
        DROP_VECTOR.with(|slot| {
            slot.borrow_mut()[self.k] -= 1;
        });
    }
}
impl Clone for Droppable {
    fn clone(&self) -> Droppable {
        // Goes through `new` so the clone is counted too.
        Droppable::new(self.k)
    }
}
#[test]
fn test_drops() {
    // Reset counters: ids 0..100 are keys, 100..200 are values.
    DROP_VECTOR.with(|slot| {
        *slot.borrow_mut() = vec![0; 200];
    });
    {
        let mut m = HashMap::new();
        DROP_VECTOR.with(|v| {
            for i in 0..200 {
                assert_eq!(v.borrow()[i], 0);
            }
        });
        for i in 0..100 {
            let d1 = Droppable::new(i);
            let d2 = Droppable::new(i + 100);
            m.insert(d1, d2);
        }
        DROP_VECTOR.with(|v| {
            for i in 0..200 {
                assert_eq!(v.borrow()[i], 1);
            }
        });
        // Remove the first half; the lookup key `k` is a temporary and its
        // drop cancels its own increment, so counts stay interpretable.
        for i in 0..50 {
            let k = Droppable::new(i);
            let v = m.remove(&k);
            assert!(v.is_some());
            DROP_VECTOR.with(|v| {
                assert_eq!(v.borrow()[i], 1);
                assert_eq!(v.borrow()[i+100], 1);
            });
        }
        DROP_VECTOR.with(|v| {
            for i in 0..50 {
                assert_eq!(v.borrow()[i], 0);
                assert_eq!(v.borrow()[i+100], 0);
            }
            for i in 50..100 {
                assert_eq!(v.borrow()[i], 1);
                assert_eq!(v.borrow()[i+100], 1);
            }
        });
    }
    // Map went out of scope: everything must have been dropped exactly once.
    DROP_VECTOR.with(|v| {
        for i in 0..200 {
            assert_eq!(v.borrow()[i], 0);
        }
    });
}
#[test]
fn test_into_iter_drops() {
    // Same counter scheme as test_drops: keys 0..100, values 100..200.
    DROP_VECTOR.with(|v| {
        *v.borrow_mut() = vec![0; 200];
    });
    let hm = {
        let mut hm = HashMap::new();
        DROP_VECTOR.with(|v| {
            for i in 0..200 {
                assert_eq!(v.borrow()[i], 0);
            }
        });
        for i in 0..100 {
            let d1 = Droppable::new(i);
            let d2 = Droppable::new(i + 100);
            hm.insert(d1, d2);
        }
        DROP_VECTOR.with(|v| {
            for i in 0..200 {
                assert_eq!(v.borrow()[i], 1);
            }
        });
        hm
    };
    // By the way, ensure that cloning doesn't screw up the dropping.
    drop(hm.clone());
    {
        // Consume only half the map, then drop the iterator: the iterator's
        // own drop must release the unconsumed half.
        let mut half = hm.into_iter().take(50);
        DROP_VECTOR.with(|v| {
            for i in 0..200 {
                assert_eq!(v.borrow()[i], 1);
            }
        });
        for _ in half.by_ref() {}
        DROP_VECTOR.with(|v| {
            // Iteration order is arbitrary, so only the *count* of still-live
            // keys/values is predictable, not which ones.
            let nk = (0..100)
                .filter(|&i| v.borrow()[i] == 1)
                .count();
            let nv = (0..100)
                .filter(|&i| v.borrow()[i + 100] == 1)
                .count();
            assert_eq!(nk, 50);
            assert_eq!(nv, 50);
        });
    };
    DROP_VECTOR.with(|v| {
        for i in 0..200 {
            assert_eq!(v.borrow()[i], 0);
        }
    });
}
#[test]
fn test_empty_remove() {
    // Removing from a map that never held the key yields None.
    let mut map: HashMap<i32, bool> = HashMap::new();
    assert_eq!(map.remove(&0), None);
}
#[test]
fn test_empty_entry() {
    let mut map: HashMap<i32, bool> = HashMap::new();
    // The entry for an absent key must be Vacant.
    assert!(match map.entry(0) {
        Vacant(_) => true,
        Occupied(_) => false,
    });
    // or_insert on the vacant entry stores and returns the default.
    assert!(*map.entry(0).or_insert(true));
    assert_eq!(map.len(), 1);
}
#[test]
fn test_empty_iter() {
    let mut map: HashMap<i32, bool> = HashMap::new();
    // Every iterator flavour over an empty map is immediately exhausted.
    assert!(map.drain().next().is_none());
    assert!(map.keys().next().is_none());
    assert!(map.values().next().is_none());
    assert!(map.values_mut().next().is_none());
    assert!(map.iter().next().is_none());
    assert!(map.iter_mut().next().is_none());
    assert_eq!(map.len(), 0);
    assert!(map.is_empty());
    assert!(map.into_iter().next().is_none());
}
#[test]
fn test_lots_of_insertions() {
    let mut m = HashMap::new();
    // Try this a few times to make sure we never screw up the hashmap's
    // internal state.
    for _ in 0..10 {
        assert!(m.is_empty());
        // Insert 1..=1000; after each insert, everything inserted so far must
        // be findable and everything not yet inserted must be absent.
        for i in 1..1001 {
            assert!(m.insert(i, i).is_none());
            for j in 1..=i {
                let r = m.get(&j);
                assert_eq!(r, Some(&j));
            }
            for j in i + 1..1001 {
                let r = m.get(&j);
                assert_eq!(r, None);
            }
        }
        for i in 1001..2001 {
            assert!(!m.contains_key(&i));
        }
        // remove forwards
        for i in 1..1001 {
            assert!(m.remove(&i).is_some());
            for j in 1..=i {
                assert!(!m.contains_key(&j));
            }
            for j in i + 1..1001 {
                assert!(m.contains_key(&j));
            }
        }
        for i in 1..1001 {
            assert!(!m.contains_key(&i));
        }
        for i in 1..1001 {
            assert!(m.insert(i, i).is_none());
        }
        // remove backwards
        for i in (1..1001).rev() {
            assert!(m.remove(&i).is_some());
            for j in i..1001 {
                assert!(!m.contains_key(&j));
            }
            for j in 1..i {
                assert!(m.contains_key(&j));
            }
        }
    }
}
#[test]
fn test_find_mut() {
    let mut map = HashMap::new();
    for &(k, v) in &[(1, 12), (2, 8), (5, 14)] {
        assert!(map.insert(k, v).is_none());
    }
    // Mutation through get_mut must be visible through get.
    let updated = 100;
    *map.get_mut(&5).expect("key 5 must be present") = updated;
    assert_eq!(map.get(&5), Some(&updated));
}
#[test]
fn test_insert_overwrite() {
    let mut m = HashMap::new();
    assert!(m.insert(1, 2).is_none());
    assert_eq!(*m.get(&1).unwrap(), 2);
    // Overwriting an existing key returns the old value. Idiom fix: use
    // `is_some()` instead of the double negative `!...is_none()`.
    assert!(m.insert(1, 3).is_some());
    assert_eq!(*m.get(&1).unwrap(), 3);
}
#[test]
fn test_insert_conflicts() {
    // Small capacity so the keys presumably probe-collide (the test name
    // suggests this) — either way all pairs must remain retrievable.
    let mut map = HashMap::with_capacity(4);
    for &(k, v) in &[(1, 2), (5, 3), (9, 4)] {
        assert!(map.insert(k, v).is_none());
    }
    for &(k, v) in &[(9, 4), (5, 3), (1, 2)] {
        assert_eq!(*map.get(&k).unwrap(), v);
    }
}
#[test]
fn test_conflict_remove() {
    let mut map = HashMap::with_capacity(4);
    let pairs = [(1, 2), (5, 3), (9, 4)];
    // After each insertion, every pair inserted so far must still be readable.
    for i in 0..pairs.len() {
        let (k, v) = pairs[i];
        assert!(map.insert(k, v).is_none());
        for &(pk, pv) in &pairs[..i + 1] {
            assert_eq!(*map.get(&pk).unwrap(), pv);
        }
    }
    // Removing one key must leave the others intact.
    assert!(map.remove(&1).is_some());
    assert_eq!(*map.get(&9).unwrap(), 4);
    assert_eq!(*map.get(&5).unwrap(), 3);
}
#[test]
fn test_is_empty() {
    // is_empty must track insert/remove exactly.
    let mut map = HashMap::with_capacity(4);
    assert_eq!(map.insert(1, 2), None);
    assert_eq!(map.is_empty(), false);
    assert_eq!(map.remove(&1), Some(2));
    assert_eq!(map.is_empty(), true);
}
#[test]
fn test_remove() {
    // The first removal yields the stored value; a second removal of
    // the same key is a no-op returning None.
    let mut map = HashMap::new();
    map.insert(1, 2);
    assert_eq!(map.remove(&1), Some(2));
    assert_eq!(map.remove(&1), None);
}
#[test]
fn test_remove_entry() {
    // remove_entry returns the owned (key, value) pair and leaves the
    // map without the key.
    let mut map = HashMap::new();
    map.insert(1, 2);
    assert_eq!(map.remove_entry(&1), Some((1, 2)));
    assert_eq!(map.remove(&1), None);
}
#[test]
fn test_iterate() {
    // Iteration must visit each of the 32 inserted pairs exactly once;
    // coverage is tracked with one bit per key.
    let mut map = HashMap::with_capacity(4);
    for key in 0..32 {
        assert!(map.insert(key, key * 2).is_none());
    }
    assert_eq!(map.len(), 32);
    let mut seen: u32 = 0;
    for (key, value) in &map {
        assert_eq!(*value, *key * 2);
        seen |= 1 << *key;
    }
    assert_eq!(seen, u32::max_value());
}
#[test]
fn test_keys() {
    // keys() yields every key exactly once, in arbitrary order.
    let map: HashMap<_, _> =
        vec![(1, 'a'), (2, 'b'), (3, 'c')].into_iter().collect();
    let mut keys: Vec<_> = map.keys().cloned().collect();
    keys.sort();
    assert_eq!(keys, [1, 2, 3]);
}
#[test]
fn test_values() {
    // values() yields every value exactly once, in arbitrary order.
    let map: HashMap<_, _> =
        vec![(1, 'a'), (2, 'b'), (3, 'c')].into_iter().collect();
    let mut values: Vec<_> = map.values().cloned().collect();
    values.sort();
    assert_eq!(values, ['a', 'b', 'c']);
}
#[test]
fn test_values_mut() {
    // Mutations made through values_mut() must be reflected in later
    // reads of the map.
    let mut map: HashMap<_, _> =
        vec![(1, 1), (2, 2), (3, 3)].into_iter().collect();
    for slot in map.values_mut() {
        *slot *= 2;
    }
    let mut doubled: Vec<_> = map.values().cloned().collect();
    doubled.sort();
    assert_eq!(doubled, [2, 4, 6]);
}
#[test]
fn test_find() {
    // get() is None before insertion and Some(&value) afterwards.
    let mut map = HashMap::new();
    assert!(map.get(&1).is_none());
    map.insert(1, 2);
    let found = map.get(&1).expect("key 1 was just inserted");
    assert_eq!(*found, 2);
}
#[test]
fn test_eq() {
    // Two maps compare equal exactly when they hold the same entries,
    // irrespective of insertion order or internal layout.
    let m1: HashMap<_, _> =
        vec![(1, 2), (2, 3), (3, 4)].into_iter().collect();
    let mut m2: HashMap<_, _> = vec![(1, 2), (2, 3)].into_iter().collect();
    assert!(m1 != m2);
    m2.insert(3, 4);
    assert_eq!(m1, m2);
}
#[test]
fn test_show() {
    // Debug output lists entries in unspecified order; an empty map
    // renders as "{}".
    let mut map = HashMap::new();
    map.insert(1, 2);
    map.insert(3, 4);
    let rendered = format!("{:?}", map);
    assert!(rendered == "{1: 2, 3: 4}" || rendered == "{3: 4, 1: 2}");
    let empty: HashMap<i32, i32> = HashMap::new();
    assert_eq!(format!("{:?}", empty), "{}");
}
#[test]
fn test_expand() {
    // Insert entries until the table's raw capacity changes; afterwards
    // len() must equal the number of insertions performed.
    // NOTE(review): raw_capacity() is an internal testing hook of this
    // map implementation, not part of the public HashMap API.
    let mut m = HashMap::new();
    assert_eq!(m.len(), 0);
    assert!(m.is_empty());
    let mut i = 0;
    let old_raw_cap = m.raw_capacity();
    while old_raw_cap == m.raw_capacity() {
        m.insert(i, i);
        i += 1;
    }
    assert_eq!(m.len(), i);
    assert!(!m.is_empty());
}
#[test]
fn test_behavior_resize_policy() {
    // Pin the grow/shrink policy: reserve doubles the raw capacity,
    // the table stays put until the load threshold is crossed, and
    // shrink_to_fit returns to the smallest capacity that fits.
    // NOTE(review): raw_capacity() is an internal testing hook; the
    // exact thresholds below (3/4 full, etc.) are implementation
    // details this test deliberately pins.
    let mut m = HashMap::new();
    assert_eq!(m.len(), 0);
    assert_eq!(m.raw_capacity(), 0);
    assert!(m.is_empty());
    m.insert(0, 0);
    m.remove(&0);
    assert!(m.is_empty());
    let initial_raw_cap = m.raw_capacity();
    m.reserve(initial_raw_cap);
    let raw_cap = m.raw_capacity();
    assert_eq!(raw_cap, initial_raw_cap * 2);
    let mut i = 0;
    for _ in 0..raw_cap * 3 / 4 {
        m.insert(i, i);
        i += 1;
    }
    // three quarters full
    assert_eq!(m.len(), i);
    assert_eq!(m.raw_capacity(), raw_cap);
    for _ in 0..raw_cap / 4 {
        m.insert(i, i);
        i += 1;
    }
    // half full (the table doubled when the load threshold was hit)
    let new_raw_cap = m.raw_capacity();
    assert_eq!(new_raw_cap, raw_cap * 2);
    for _ in 0..raw_cap / 2 - 1 {
        i -= 1;
        m.remove(&i);
        assert_eq!(m.raw_capacity(), new_raw_cap);
    }
    // A little more than one quarter full.
    m.shrink_to_fit();
    assert_eq!(m.raw_capacity(), raw_cap);
    // again, a little more than half full
    for _ in 0..raw_cap / 2 - 1 {
        i -= 1;
        m.remove(&i);
    }
    m.shrink_to_fit();
    assert_eq!(m.len(), i);
    assert!(!m.is_empty());
    assert_eq!(m.raw_capacity(), initial_raw_cap);
}
#[test]
fn test_reserve_shrink_to_fit() {
    // reserve() must guarantee room for the requested extra insertions
    // without reallocating, and shrink_to_fit must never drop capacity
    // below len(). The exact op sequence matters; do not reorder.
    let mut m = HashMap::new();
    m.insert(0, 0);
    m.remove(&0);
    assert!(m.capacity() >= m.len());
    for i in 0..128 {
        m.insert(i, i);
    }
    m.reserve(256);
    let usable_cap = m.capacity();
    // 256 inserts after reserve(256) must not change the capacity.
    for i in 128..(128 + 256) {
        m.insert(i, i);
        assert_eq!(m.capacity(), usable_cap);
    }
    for i in 100..(128 + 256) {
        assert_eq!(m.remove(&i), Some(i));
    }
    m.shrink_to_fit();
    assert_eq!(m.len(), 100);
    assert!(!m.is_empty());
    assert!(m.capacity() >= m.len());
    for i in 0..100 {
        assert_eq!(m.remove(&i), Some(i));
    }
    m.shrink_to_fit();
    m.insert(0, 0);
    assert_eq!(m.len(), 1);
    assert!(m.capacity() >= m.len());
    assert_eq!(m.remove(&0), Some(0));
}
#[test]
fn test_from_iter() {
    // Collecting from a pair iterator must retain every pair.
    let pairs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
    let map: HashMap<_, _> = pairs.iter().cloned().collect();
    for &(key, value) in pairs.iter() {
        assert_eq!(map.get(&key), Some(&value));
    }
}
#[test]
fn test_size_hint() {
    // After consuming 3 of 6 items, the hint must be exactly (3, Some(3)).
    let pairs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
    let map: HashMap<_, _> = pairs.iter().cloned().collect();
    let mut iter = map.iter();
    iter.by_ref().take(3).for_each(drop);
    assert_eq!(iter.size_hint(), (3, Some(3)));
}
#[test]
fn test_iter_len() {
    // ExactSizeIterator::len must track the remaining item count.
    let pairs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
    let map: HashMap<_, _> = pairs.iter().cloned().collect();
    let mut iter = map.iter();
    iter.by_ref().take(3).for_each(drop);
    assert_eq!(iter.len(), 3);
}
#[test]
fn test_mut_size_hint() {
    // The mutable iterator's size_hint must also shrink as items are
    // consumed.
    let pairs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
    let mut map: HashMap<_, _> = pairs.iter().cloned().collect();
    let mut iter = map.iter_mut();
    iter.by_ref().take(3).for_each(drop);
    assert_eq!(iter.size_hint(), (3, Some(3)));
}
#[test]
fn test_iter_mut_len() {
    // ExactSizeIterator::len on iter_mut must track remaining items.
    let pairs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
    let mut map: HashMap<_, _> = pairs.iter().cloned().collect();
    let mut iter = map.iter_mut();
    iter.by_ref().take(3).for_each(drop);
    assert_eq!(iter.len(), 3);
}
#[test]
fn test_index() {
    // Index syntax returns the value associated with a present key.
    let map: HashMap<_, _> =
        vec![(1, 2), (2, 1), (3, 4)].into_iter().collect();
    assert_eq!(map[&2], 1);
}
#[test]
#[should_panic]
fn test_index_nonexistent() {
    // Indexing with an absent key must panic.
    let map: HashMap<_, _> =
        vec![(1, 2), (2, 1), (3, 4)].into_iter().collect();
    let _ = map[&4];
}
#[test]
fn test_entry() {
    // Drive the Entry API through its four cases: overwrite via
    // Occupied::insert, in-place update via get_mut, removal via
    // remove, and insertion through a Vacant entry.
    // NOTE(review): Vacant/Occupied come from the enclosing module's
    // imports (Entry variants).
    let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
    let mut map: HashMap<_, _> = xs.iter().cloned().collect();
    // Existing key (insert)
    match map.entry(1) {
        Vacant(_) => unreachable!(),
        Occupied(mut view) => {
            assert_eq!(view.get(), &10);
            assert_eq!(view.insert(100), 10);
        }
    }
    assert_eq!(map.get(&1).unwrap(), &100);
    assert_eq!(map.len(), 6);
    // Existing key (update)
    match map.entry(2) {
        Vacant(_) => unreachable!(),
        Occupied(mut view) => {
            let v = view.get_mut();
            let new_v = (*v) * 10;
            *v = new_v;
        }
    }
    assert_eq!(map.get(&2).unwrap(), &200);
    assert_eq!(map.len(), 6);
    // Existing key (take)
    match map.entry(3) {
        Vacant(_) => unreachable!(),
        Occupied(view) => {
            assert_eq!(view.remove(), 30);
        }
    }
    assert_eq!(map.get(&3), None);
    assert_eq!(map.len(), 5);
    // Inexistent key (insert)
    match map.entry(10) {
        Occupied(_) => unreachable!(),
        Vacant(view) => {
            assert_eq!(*view.insert(1000), 1000);
        }
    }
    assert_eq!(map.get(&10).unwrap(), &1000);
    assert_eq!(map.len(), 6);
}
#[test]
fn test_entry_take_doesnt_corrupt() {
    #![allow(deprecated)] //rand
    // Test for #19292: randomly removing entries through the Entry API
    // must never leave the table in a state where keys() disagrees
    // with contains_key().
    fn check(m: &HashMap<i32, ()>) {
        for k in m.keys() {
            assert!(m.contains_key(k),
                    "{} is in keys() but not in the map?", k);
        }
    }
    let mut m = HashMap::new();
    let mut rng = thread_rng();
    // Populate the map with some items.
    for _ in 0..50 {
        let x = rng.gen_range(-10, 10);
        m.insert(x, ());
    }
    for _ in 0..1000 {
        let x = rng.gen_range(-10, 10);
        match m.entry(x) {
            Vacant(_) => {}
            Occupied(e) => {
                e.remove();
            }
        }
        check(&m);
    }
}
#[test]
fn test_extend_ref() {
    // Extending from &HashMap copies entries without consuming the
    // source map.
    let mut target = HashMap::new();
    target.insert(1, "one");
    let mut source = HashMap::new();
    source.insert(2, "two");
    source.insert(3, "three");
    target.extend(&source);
    assert_eq!(target.len(), 3);
    assert_eq!(target[&1], "one");
    assert_eq!(target[&2], "two");
    assert_eq!(target[&3], "three");
}
#[test]
fn test_capacity_not_less_than_len() {
    // capacity() must stay >= len() at all times; filling to exactly
    // capacity and inserting one more must trigger a reallocation.
    let mut a = HashMap::new();
    let mut item = 0;
    for _ in 0..116 {
        a.insert(item, 0);
        item += 1;
    }
    assert!(a.capacity() > a.len());
    // Fill the remaining usable capacity exactly.
    let free = a.capacity() - a.len();
    for _ in 0..free {
        a.insert(item, 0);
        item += 1;
    }
    assert_eq!(a.len(), a.capacity());
    // Insert at capacity should cause allocation.
    a.insert(item, 0);
    assert!(a.capacity() > a.len());
}
#[test]
fn test_occupied_entry_key() {
    // An occupied entry must report the same key it was looked up with.
    // NOTE(review): key is &str here, so .clone() just copies the
    // reference — intentional, left as-is.
    let mut a = HashMap::new();
    let key = "hello there";
    let value = "value goes here";
    assert!(a.is_empty());
    a.insert(key.clone(), value.clone());
    assert_eq!(a.len(), 1);
    assert_eq!(a[key], value);
    match a.entry(key.clone()) {
        Vacant(_) => panic!(),
        Occupied(e) => assert_eq!(key, *e.key()),
    }
    assert_eq!(a.len(), 1);
    assert_eq!(a[key], value);
}
#[test]
fn test_vacant_entry_key() {
    // A vacant entry must report the probed key, and inserting through
    // it must store under that key.
    let mut a = HashMap::new();
    let key = "hello there";
    let value = "value goes here";
    assert!(a.is_empty());
    match a.entry(key.clone()) {
        Occupied(_) => panic!(),
        Vacant(e) => {
            assert_eq!(key, *e.key());
            e.insert(value.clone());
        }
    }
    assert_eq!(a.len(), 1);
    assert_eq!(a[key], value);
}
#[test]
fn test_retain() {
    // retain keeps exactly the entries whose predicate holds.
    let mut map: HashMap<i32, i32> = (0..100).map(|x| (x, x * 10)).collect();
    map.retain(|&key, _| key % 2 == 0);
    assert_eq!(map.len(), 50);
    for key in &[2, 4, 6] {
        assert_eq!(map[key], key * 10);
    }
}
#[test]
fn test_adaptive() {
    // Adaptive-hashing regression test: two maps cloned from the same
    // empty map share a hasher seed, so inserting the second map's
    // keys into the first must eventually force an early (adaptive)
    // resize — i.e. a capacity change before the map is full.
    const TEST_LEN: usize = 5000;
    // by cloning we get maps with the same hasher seed
    let mut first = HashMap::new();
    let mut second = first.clone();
    first.extend((0..TEST_LEN).map(|i| (i, i)));
    second.extend((TEST_LEN..TEST_LEN * 2).map(|i| (i, i)));
    for (&k, &v) in &second {
        let prev_cap = first.capacity();
        let expect_grow = first.len() == prev_cap;
        first.insert(k, v);
        if !expect_grow && first.capacity() != prev_cap {
            // Resized while not full: adaptive behavior confirmed.
            return;
        }
    }
    panic!("Adaptive early resize failed");
}
#[test]
fn test_try_reserve() {
    // try_reserve must fail gracefully: CapacityOverflow for requests
    // that overflow the size math, AllocErr for requests the allocator
    // cannot satisfy (on 64-bit targets).
    let mut empty_bytes: HashMap<u8,u8> = HashMap::new();
    const MAX_USIZE: usize = usize::MAX;
    // HashMap and RawTables use complicated size calculations
    // hashes_size is sizeof(HashUint) * capacity;
    // pairs_size is sizeof((K, V)) * capacity;
    // alignment_hashes_size is 8
    // alignment_pairs size is 4
    let size_of_multiplier = (size_of::<usize>() + size_of::<(u8, u8)>()).next_power_of_two();
    // The following formula is used to calculate the new capacity
    let max_no_ovf = ((MAX_USIZE / 11) * 10) / size_of_multiplier - 1;
    if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
    } else { panic!("usize::MAX should trigger an overflow!"); }
    if size_of::<usize>() < 8 {
        if let Err(CapacityOverflow) = empty_bytes.try_reserve(max_no_ovf) {
        } else { panic!("isize::MAX + 1 should trigger a CapacityOverflow!") }
    } else {
        if let Err(AllocErr) = empty_bytes.try_reserve(max_no_ovf) {
        } else { panic!("isize::MAX + 1 should trigger an OOM!") }
    }
}
#[test]
fn test_raw_entry() {
    // Exercise the raw entry API (hash-level access): all four lookup
    // flavors (from_key, from_hash, from_key_hashed_nocheck,
    // search_bucket) must agree with each other and with plain get().
    use super::RawEntryMut::{Occupied, Vacant};
    let xs = [(1i32, 10i32), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
    let mut map: HashMap<_, _> = xs.iter().cloned().collect();
    // Compute a key's hash with the map's own hasher state.
    let compute_hash = |map: &HashMap<i32, i32>, k: i32| -> u64 {
        use core::hash::{BuildHasher, Hash, Hasher};
        let mut hasher = map.hasher().build_hasher();
        k.hash(&mut hasher);
        hasher.finish()
    };
    // Existing key (insert)
    match map.raw_entry_mut().from_key(&1) {
        Vacant(_) => unreachable!(),
        Occupied(mut view) => {
            assert_eq!(view.get(), &10);
            assert_eq!(view.insert(100), 10);
        }
    }
    let hash1 = compute_hash(&map, 1);
    assert_eq!(map.raw_entry().from_key(&1).unwrap(), (&1, &100));
    assert_eq!(map.raw_entry().from_hash(hash1, |k| *k == 1).unwrap(), (&1, &100));
    assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash1, &1).unwrap(), (&1, &100));
    assert_eq!(map.raw_entry().search_bucket(hash1, |k| *k == 1).unwrap(), (&1, &100));
    assert_eq!(map.len(), 6);
    // Existing key (update)
    match map.raw_entry_mut().from_key(&2) {
        Vacant(_) => unreachable!(),
        Occupied(mut view) => {
            let v = view.get_mut();
            let new_v = (*v) * 10;
            *v = new_v;
        }
    }
    let hash2 = compute_hash(&map, 2);
    assert_eq!(map.raw_entry().from_key(&2).unwrap(), (&2, &200));
    assert_eq!(map.raw_entry().from_hash(hash2, |k| *k == 2).unwrap(), (&2, &200));
    assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash2, &2).unwrap(), (&2, &200));
    assert_eq!(map.raw_entry().search_bucket(hash2, |k| *k == 2).unwrap(), (&2, &200));
    assert_eq!(map.len(), 6);
    // Existing key (take)
    let hash3 = compute_hash(&map, 3);
    match map.raw_entry_mut().from_key_hashed_nocheck(hash3, &3) {
        Vacant(_) => unreachable!(),
        Occupied(view) => {
            assert_eq!(view.remove_entry(), (3, 30));
        }
    }
    assert_eq!(map.raw_entry().from_key(&3), None);
    assert_eq!(map.raw_entry().from_hash(hash3, |k| *k == 3), None);
    assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash3, &3), None);
    assert_eq!(map.raw_entry().search_bucket(hash3, |k| *k == 3), None);
    assert_eq!(map.len(), 5);
    // Nonexistent key (insert)
    match map.raw_entry_mut().from_key(&10) {
        Occupied(_) => unreachable!(),
        Vacant(view) => {
            assert_eq!(view.insert(10, 1000), (&mut 10, &mut 1000));
        }
    }
    assert_eq!(map.raw_entry().from_key(&10).unwrap(), (&10, &1000));
    assert_eq!(map.len(), 6);
    // Ensure all lookup methods produce equivalent results.
    for k in 0..12 {
        let hash = compute_hash(&map, k);
        let v = map.get(&k).cloned();
        let kv = v.as_ref().map(|v| (&k, v));
        assert_eq!(map.raw_entry().from_key(&k), kv);
        assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv);
        assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv);
        assert_eq!(map.raw_entry().search_bucket(hash, |q| *q == k), kv);
        match map.raw_entry_mut().from_key(&k) {
            Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
            Vacant(_) => assert_eq!(v, None),
        }
        match map.raw_entry_mut().from_key_hashed_nocheck(hash, &k) {
            Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
            Vacant(_) => assert_eq!(v, None),
        }
        match map.raw_entry_mut().from_hash(hash, |q| *q == k) {
            Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
            Vacant(_) => assert_eq!(v, None),
        }
        match map.raw_entry_mut().search_bucket(hash, |q| *q == k) {
            Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
            Vacant(_) => assert_eq!(v, None),
        }
    }
}
}
|
use std::ops::Bound;
use super::*;
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Node {
    // Either index pointers (interior node) or key/value records (leaf).
    pub(crate) data: Data,
    // Page id of the right sibling, if any.
    pub(crate) next: Option<PageId>,
    // Inclusive lower bound of this node's key range.
    pub(crate) lo: IVec,
    // Exclusive upper bound; empty means unbounded on the right
    // (see predecessor()).
    pub(crate) hi: IVec,
    // Child currently being merged into this node, if a merge is
    // in flight.
    pub(crate) merging_child: Option<PageId>,
    // True once this node has been capped for merging into a sibling.
    pub(crate) merging: bool,
    // Number of leading bytes of `lo` shared by every key stored here;
    // stored keys are truncated by this amount.
    prefix_len: u8,
}
impl Node {
    /// Re-attach this node's shared prefix to a stored (truncated)
    /// key, yielding the full key.
    fn prefix_decode(&self, key: &[u8]) -> IVec {
        prefix::decode(self.prefix(), key)
    }

    /// Strip this node's shared prefix from a full key before storing
    /// or searching. The key must lie in the node's [lo, hi) range
    /// (empty hi = unbounded on the right).
    fn prefix_encode<'a>(&self, key: &'a [u8]) -> &'a [u8] {
        assert!(*self.lo <= *key);
        if !self.hi.is_empty() {
            assert!(*self.hi > *key);
        }
        &key[self.prefix_len as usize..]
    }

    /// The byte prefix shared by all keys in this node: the first
    /// `prefix_len` bytes of the lo bound.
    fn prefix(&self) -> &[u8] {
        &self.lo[..self.prefix_len as usize]
    }

    /// Apply a delta (`Frag`) to this node in place.
    ///
    /// Panics if the node is already merging, or if handed a `Base`
    /// frag (bases start chains; they are never applied on top).
    pub(crate) fn apply(&mut self, frag: &Frag) {
        use self::Frag::*;
        assert!(
            !self.merging,
            "somehow a frag was applied to a node after it was merged"
        );
        match *frag {
            Set(ref k, ref v) => {
                self.set_leaf(k.clone(), v.clone());
            }
            Del(ref k) => {
                self.del_leaf(k);
            }
            Base(_) => panic!("trying to apply a Base to frag {:?}", self),
            ParentMergeIntention(pid) => {
                assert!(
                    self.merging_child.is_none(),
                    "trying to merge {:?} into node {:?} which \
                    is already merging another child",
                    frag,
                    self
                );
                self.merging_child = Some(pid);
            }
            ParentMergeConfirm => {
                assert!(self.merging_child.is_some());
                let merged_child = self.merging_child.take().expect(
                    "we should have a specific \
                    child that was merged if this \
                    frag appears here",
                );
                self.data.parent_merge_confirm(merged_child);
            }
            ChildMergeCap => {
                self.merging = true;
            }
        }
    }

    /// Insert or overwrite a record in a leaf node.
    /// Panics on index nodes.
    pub(crate) fn set_leaf(&mut self, key: IVec, val: IVec) {
        if let Data::Leaf(ref mut records) = self.data {
            // Records are kept sorted by key; binary search finds the
            // overwrite slot or the insertion point.
            let search = records.binary_search_by_key(&&key, |(k, _)| k);
            match search {
                Ok(idx) => records[idx] = (key, val),
                Err(idx) => records.insert(idx, (key, val)),
            }
        } else {
            panic!("tried to Set a value to an index");
        }
    }

    /// Remove a record from a leaf node, if present.
    /// Panics on index nodes.
    pub(crate) fn del_leaf(&mut self, key: &IVec) {
        if let Data::Leaf(ref mut records) = self.data {
            let search = records.binary_search_by_key(&key, |(k, _)| k);
            if let Ok(idx) = search {
                records.remove(idx);
            }
        } else {
            panic!("tried to attach a Del to an Index chain");
        }
    }

    /// Record a child split in this (index) parent by inserting a new
    /// pointer at the split key. Returns false when the pointer already
    /// exists (a racing thread installed it first).
    pub(crate) fn parent_split(&mut self, at: &[u8], to: PageId) -> bool {
        if let Data::Index(ref mut pointers) = self.data {
            let encoded_sep = &at[self.prefix_len as usize..];
            match pointers.binary_search_by_key(&encoded_sep, |(k, _)| k) {
                Ok(_) => {
                    debug!(
                        "parent_split skipped because \
                        parent already contains child at split point \
                        due to deep race"
                    );
                    return false;
                }
                Err(idx) => pointers.insert(idx, (IVec::from(encoded_sep), to)),
            }
        } else {
            panic!("tried to attach a ParentSplit to a Leaf chain");
        }
        true
    }

    /// Split this node roughly in half, returning (left, right).
    /// The left half keeps `lo` and gains `hi = split point`; the
    /// right half covers [split point, old hi). `next` on the left is
    /// intentionally cleared — the caller is expected to fix it up.
    pub(crate) fn split(mut self) -> (Node, Node) {
        // Splits the sorted pairs, computes the (possibly suffix-
        // truncated) split key, and re-truncates the right half's keys
        // for its larger shared prefix.
        fn split_inner<T>(
            xs: &mut Vec<(IVec, T)>,
            left_prefix: &[u8],
            right_max: &[u8],
            suffix_truncation: bool,
        ) -> (IVec, u8, Vec<(IVec, T)>)
        where
            T: Clone + Ord,
        {
            let right = xs.split_off(xs.len() / 2 + 1);
            let right_min = &right[0].0;
            let left_max = &xs.last().unwrap().0;
            // When splitting, the prefix can only grow or stay the
            // same size, because all keys already shared the same
            // prefix before. Here we figure out if we can shave
            // off additional bytes in the key.
            let max_additional =
                u8::max_value() - u8::try_from(left_prefix.len()).unwrap();
            let right_additional_prefix_len = right_min
                .iter()
                .zip(right_max.iter())
                .take_while(|(a, b)| a == b)
                .take(max_additional as usize)
                .count();
            let necessary_split_len = if suffix_truncation {
                // we can only perform suffix truncation when
                // choosing the split points for leaf nodes.
                // split points bubble up into indexes, but
                // an important invariant is that for indexes
                // the first item always matches the lo key,
                // otherwise ranges would be permanently
                // inaccessible by falling into the gap
                // during a split.
                let smallest_suffix = right_min
                    .iter()
                    .zip(left_max.iter())
                    .take_while(|(a, b)| a == b)
                    .count()
                    + 1;
                // we cannot suffix truncate the split point so
                // aggressively that we cause prefix encoding
                // to degrade
                std::cmp::max(smallest_suffix, right_additional_prefix_len)
            } else {
                right_min.len()
            };
            let split_point: IVec =
                prefix::decode(left_prefix, &right_min[..necessary_split_len]);
            assert!(!split_point.is_empty());
            let mut right_data = Vec::with_capacity(right.len());
            for (k, v) in right {
                let k: IVec = if right_additional_prefix_len > 0 {
                    // shave off additional prefixed bytes
                    IVec::from(&k[right_additional_prefix_len..])
                } else {
                    k.clone()
                };
                right_data.push((k, v.clone()));
            }
            (
                split_point,
                u8::try_from(right_additional_prefix_len).unwrap(),
                right_data,
            )
        }
        let prefix = &self.lo[..self.prefix_len as usize];
        let right_max = &self.hi[self.prefix_len as usize..];
        let (split, right_additional_prefix_len, right_data) = match self.data {
            Data::Index(ref mut pointers) => {
                let (split, prefix_len, right) =
                    split_inner(pointers, prefix, right_max, false);
                (split, prefix_len, Data::Index(right))
            }
            Data::Leaf(ref mut items) => {
                let (split, prefix_len, right) =
                    split_inner(items, prefix, right_max, true);
                (split, prefix_len, Data::Leaf(right))
            }
        };
        let right = Node {
            data: right_data,
            next: self.next,
            lo: split.clone(),
            hi: self.hi.clone(),
            merging_child: None,
            merging: false,
            prefix_len: self.prefix_len + right_additional_prefix_len,
        };
        self.hi = split;
        // intentionally make this the end to make
        // any issues pop out with setting it
        // correctly after the split.
        self.next = None;
        // Sanity checks: bounds must be coherent and the prefix must
        // never exceed either bound's length.
        if self.hi.is_empty() {
            assert_eq!(self.prefix_len, 0);
        }
        assert!(!(self.lo.is_empty() && self.hi.is_empty()));
        assert!(!(self.lo.is_empty() && (self.prefix_len > 0)));
        assert!(self.lo.len() >= self.prefix_len as usize);
        assert!(self.hi.len() >= self.prefix_len as usize);
        assert!(!(right.lo.is_empty() && right.hi.is_empty()));
        assert!(!(right.lo.is_empty() && (right.prefix_len > 0)));
        assert!(right.lo.len() >= right.prefix_len as usize);
        assert!(right.hi.len() >= right.prefix_len as usize);
        (self, right)
    }

    /// Produce a new node holding this node's data plus everything in
    /// `right` (its right sibling), re-encoding keys for the merged
    /// node's (possibly shorter) prefix.
    pub(crate) fn receive_merge(&self, right: &Node) -> Node {
        fn receive_merge_inner<T>(
            old_prefix: &[u8],
            new_prefix_len: usize,
            left_data: &mut Vec<(IVec, T)>,
            right_data: &[(IVec, T)],
        ) where
            T: Clone,
        {
            // When merging, the prefix can only shrink or
            // stay the same length. Here we figure out if
            // we need to add previous prefixed bytes.
            for (k, v) in right_data {
                let k = if new_prefix_len == old_prefix.len() {
                    k.clone()
                } else {
                    prefix::reencode(old_prefix, k, new_prefix_len)
                };
                left_data.push((k, v.clone()));
            }
        }
        let mut merged = self.clone();
        // Shared prefix of the merged range, capped at u8::MAX bytes.
        let new_prefix_len = right
            .hi
            .iter()
            .zip(self.lo.iter())
            .take_while(|(a, b)| a == b)
            .take(u8::max_value() as usize)
            .count();
        match (&mut merged.data, &right.data) {
            (
                Data::Index(ref mut left_pointers),
                Data::Index(ref right_pointers),
            ) => {
                receive_merge_inner(
                    right.prefix(),
                    new_prefix_len,
                    left_pointers,
                    right_pointers.as_ref(),
                );
            }
            (Data::Leaf(ref mut left_items), Data::Leaf(ref right_items)) => {
                receive_merge_inner(
                    right.prefix(),
                    new_prefix_len,
                    left_items,
                    right_items.as_ref(),
                );
            }
            _ => panic!("Can't merge incompatible Data!"),
        }
        merged.hi = right.hi.clone();
        merged.next = right.next;
        merged
    }

    /// Whether this node's key range extends far enough to the right
    /// to cover the given upper bound (empty hi = rightmost node).
    pub(crate) fn contains_upper_bound(&self, bound: &Bound<IVec>) -> bool {
        match bound {
            Bound::Excluded(bound) if self.hi >= *bound => true,
            Bound::Included(bound) if self.hi > *bound => true,
            _ => self.hi.is_empty(),
        }
    }

    /// Whether this node's key range extends far enough to the left to
    /// cover the given lower bound. `is_forward` distinguishes forward
    /// iteration (an excluded bound equal to lo is still covered).
    pub(crate) fn contains_lower_bound(
        &self,
        bound: &Bound<IVec>,
        is_forward: bool,
    ) -> bool {
        match bound {
            Bound::Excluded(bound)
                if self.lo < *bound || (is_forward && *bound == self.lo) =>
            {
                true
            }
            Bound::Included(bound) if self.lo <= *bound => true,
            Bound::Unbounded if !is_forward => self.hi.is_empty(),
            _ => self.lo.is_empty(),
        }
    }

    /// First (key, value) pair in this leaf at or after `bound`,
    /// decoded to a full key. None if nothing qualifies.
    pub(crate) fn successor(
        &self,
        bound: &Bound<IVec>,
    ) -> Option<(IVec, IVec)> {
        assert!(!self.data.is_index());
        // This encoding happens this way because
        // keys cannot be lower than the node's lo key.
        let predecessor_key = match bound {
            Bound::Unbounded => self.prefix_encode(&self.lo),
            Bound::Included(b) | Bound::Excluded(b) => {
                let max = std::cmp::max(b, &self.lo);
                self.prefix_encode(max)
            }
        };
        let records = self.data.leaf_ref().unwrap();
        let search = records.binary_search_by_key(&predecessor_key, |(k, _)| k);
        let idx = match search {
            Ok(idx) => idx,
            Err(idx) if idx < records.len() => idx,
            _ => return None,
        };
        for (k, v) in &records[idx..] {
            match bound {
                Bound::Excluded(b) if b[self.prefix_len as usize..] == **k => {
                    // keep going because we wanted to exclude
                    // this key.
                    continue;
                }
                _ => {}
            }
            let decoded_key = self.prefix_decode(&k);
            return Some((decoded_key, v.clone()));
        }
        None
    }

    /// Last (key, value) pair in this leaf at or before `bound`,
    /// decoded to a full key. None if nothing qualifies.
    pub(crate) fn predecessor(
        &self,
        bound: &Bound<IVec>,
    ) -> Option<(IVec, IVec)> {
        // A 1 MiB run of 0xFF bytes used as a stand-in "maximum key"
        // for the unbounded case on the rightmost node.
        static MAX_IVEC: Lazy<IVec, fn() -> IVec> = Lazy::new(init_max_ivec);
        fn init_max_ivec() -> IVec {
            let base = vec![255; 1024 * 1024];
            IVec::from(base)
        }
        assert!(!self.data.is_index());
        // This encoding happens this way because
        // the rightmost (unbounded) node has
        // a hi key represented by the empty slice
        let successor_key = match bound {
            Bound::Unbounded => {
                if self.hi.is_empty() {
                    MAX_IVEC.clone()
                } else {
                    IVec::from(self.prefix_encode(&self.hi))
                }
            }
            Bound::Included(b) => {
                let min = if self.hi.is_empty() {
                    b
                } else {
                    std::cmp::min(b, &self.hi)
                };
                IVec::from(self.prefix_encode(min))
            }
            Bound::Excluded(b) => {
                let min = if self.hi.is_empty() {
                    b
                } else {
                    std::cmp::min(b, &self.hi)
                };
                let encoded = &min[self.prefix_len as usize..];
                IVec::from(encoded)
            }
        };
        let records = self.data.leaf_ref().unwrap();
        let search = records.binary_search_by_key(&&successor_key, |(k, _)| k);
        let idx = match search {
            Ok(idx) => idx,
            Err(idx) if idx > 0 => idx - 1,
            _ => return None,
        };
        // Walk backwards, skipping an excluded bound key if hit.
        for (k, v) in records[0..=idx].iter().rev() {
            match bound {
                Bound::Excluded(b)
                    if b.len() >= self.prefix_len as usize
                        && b[self.prefix_len as usize..] == **k =>
                {
                    // keep going because we wanted to exclude
                    // this key.
                    continue;
                }
                _ => {}
            }
            let decoded_key = self.prefix_decode(&k);
            return Some((decoded_key, v.clone()));
        }
        None
    }

    /// leaf_pair_for_key finds an existing value pair for a given key.
    pub(crate) fn leaf_pair_for_key(
        &self,
        key: &[u8],
    ) -> Option<(&IVec, &IVec)> {
        let records = self
            .data
            .leaf_ref()
            .expect("leaf_pair_for_key called on index node");
        let suffix = &key[self.prefix_len as usize..];
        let search = records.binary_search_by_key(&suffix, |(k, _)| k).ok();
        search.map(|idx| (&records[idx].0, &records[idx].1))
    }

    /// node_kv_pair returns either existing (node/key, value) pair or
    /// (node/key, none) where a node/key is node level encoded key.
    pub fn node_kv_pair(&self, key: &[u8]) -> (IVec, Option<IVec>) {
        if let Some((k, v)) = self.leaf_pair_for_key(key.as_ref()) {
            (k.clone(), Some(v.clone()))
        } else {
            let encoded_key = IVec::from(&key[self.prefix_len as usize..]);
            let encoded_val = None;
            (encoded_key, encoded_val)
        }
    }

    /// Whether this node is over its fanout threshold and safe to
    /// split (not already involved in a merge).
    pub(crate) fn should_split(&self) -> bool {
        // Tiny thresholds under test configs, presumably to exercise
        // split paths frequently.
        let threshold = if cfg!(any(test, feature = "lock_free_delays")) {
            2
        } else if self.data.is_index() {
            256
        } else {
            16
        };
        let size_checks = self.data.len() > threshold;
        let safety_checks = self.merging_child.is_none() && !self.merging;
        size_checks && safety_checks
    }

    /// Whether this node is under its occupancy threshold and safe to
    /// merge (not already involved in a merge).
    pub(crate) fn should_merge(&self) -> bool {
        let threshold = if cfg!(any(test, feature = "lock_free_delays")) {
            1
        } else if self.data.is_index() {
            64
        } else {
            4
        };
        let size_checks = self.data.len() < threshold;
        let safety_checks = self.merging_child.is_none() && !self.merging;
        size_checks && safety_checks
    }

    /// True when no merge involving this node is currently in flight.
    pub(crate) fn can_merge_child(&self) -> bool {
        self.merging_child.is_none() && !self.merging
    }

    /// For an index node, find the child whose range contains `key`,
    /// returning its position and page id.
    /// NOTE(review): relies on binary_search_lub (defined elsewhere) —
    /// presumably a least-upper-bound search over the separators.
    pub(crate) fn index_next_node(&self, key: &[u8]) -> (usize, PageId) {
        let records =
            self.data.index_ref().expect("index_next_node called on leaf");
        let suffix = &key[self.prefix_len as usize..];
        let search = binary_search_lub(suffix, records);
        let index = search.expect("failed to traverse index");
        (index, records[index].1)
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Data {
    // Interior node: separator keys mapped to child page ids.
    Index(Vec<(IVec, PageId)>),
    // Leaf node: sorted key/value records.
    Leaf(Vec<(IVec, IVec)>),
}
impl Default for Data {
    /// A fresh node starts out as an empty leaf.
    fn default() -> Data {
        Data::Leaf(Vec::new())
    }
}
impl Data {
    /// Number of child pointers (index node) or key/value records
    /// (leaf node).
    pub(crate) fn len(&self) -> usize {
        match *self {
            Data::Index(ref pointers) => pointers.len(),
            Data::Leaf(ref items) => items.len(),
        }
    }

    /// Drop the pointer to a child that has been merged away.
    ///
    /// Panics when called on leaf data, or when `merged_child_pid` is
    /// not present among the pointers (both indicate a broken merge
    /// protocol upstream).
    pub(crate) fn parent_merge_confirm(&mut self, merged_child_pid: PageId) {
        match self {
            Data::Index(ref mut pointers) => {
                let idx = pointers
                    .iter()
                    .position(|(_k, c)| *c == merged_child_pid)
                    .unwrap();
                let _ = pointers.remove(idx);
            }
            _ => panic!("parent_merge_confirm called on leaf data"),
        }
    }

    /// Borrow the leaf records, or `None` for an index node.
    pub(crate) fn leaf_ref(&self) -> Option<&Vec<(IVec, IVec)>> {
        match *self {
            Data::Index(_) => None,
            Data::Leaf(ref items) => Some(items),
        }
    }

    /// Borrow the index pointers, or `None` for a leaf node.
    pub(crate) fn index_ref(&self) -> Option<&Vec<(IVec, PageId)>> {
        match *self {
            Data::Index(ref pointers) => Some(pointers),
            Data::Leaf(_) => None,
        }
    }

    /// `true` iff this is an index (interior) node.
    pub(crate) fn is_index(&self) -> bool {
        // idiom: matches! replaces the manual
        // `if let … { true } else { false }` pattern.
        matches!(self, Data::Index(..))
    }

    /// `true` iff this is a leaf node.
    pub(crate) fn is_leaf(&self) -> bool {
        matches!(self, Data::Leaf(..))
    }
}
Put more types in backticks in docs
use std::ops::Bound;
use super::*;
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Node {
    // Either index pointers (interior node) or key/value records (leaf).
    pub(crate) data: Data,
    // Page id of the right sibling, if any.
    pub(crate) next: Option<PageId>,
    // Inclusive lower bound of this node's key range.
    pub(crate) lo: IVec,
    // Exclusive upper bound; empty means unbounded on the right.
    pub(crate) hi: IVec,
    // Child currently being merged into this node, if a merge is
    // in flight.
    pub(crate) merging_child: Option<PageId>,
    // True once this node has been capped for merging into a sibling.
    pub(crate) merging: bool,
    // Number of leading bytes of `lo` shared by every key stored here;
    // stored keys are truncated by this amount.
    prefix_len: u8,
}
impl Node {
fn prefix_decode(&self, key: &[u8]) -> IVec {
prefix::decode(self.prefix(), key)
}
fn prefix_encode<'a>(&self, key: &'a [u8]) -> &'a [u8] {
assert!(*self.lo <= *key);
if !self.hi.is_empty() {
assert!(*self.hi > *key);
}
&key[self.prefix_len as usize..]
}
fn prefix(&self) -> &[u8] {
&self.lo[..self.prefix_len as usize]
}
pub(crate) fn apply(&mut self, frag: &Frag) {
use self::Frag::*;
assert!(
!self.merging,
"somehow a frag was applied to a node after it was merged"
);
match *frag {
Set(ref k, ref v) => {
self.set_leaf(k.clone(), v.clone());
}
Del(ref k) => {
self.del_leaf(k);
}
Base(_) => panic!("trying to apply a Base to frag {:?}", self),
ParentMergeIntention(pid) => {
assert!(
self.merging_child.is_none(),
"trying to merge {:?} into node {:?} which \
is already merging another child",
frag,
self
);
self.merging_child = Some(pid);
}
ParentMergeConfirm => {
assert!(self.merging_child.is_some());
let merged_child = self.merging_child.take().expect(
"we should have a specific \
child that was merged if this \
frag appears here",
);
self.data.parent_merge_confirm(merged_child);
}
ChildMergeCap => {
self.merging = true;
}
}
}
pub(crate) fn set_leaf(&mut self, key: IVec, val: IVec) {
if let Data::Leaf(ref mut records) = self.data {
let search = records.binary_search_by_key(&&key, |(k, _)| k);
match search {
Ok(idx) => records[idx] = (key, val),
Err(idx) => records.insert(idx, (key, val)),
}
} else {
panic!("tried to Set a value to an index");
}
}
pub(crate) fn del_leaf(&mut self, key: &IVec) {
if let Data::Leaf(ref mut records) = self.data {
let search = records.binary_search_by_key(&key, |(k, _)| k);
if let Ok(idx) = search {
records.remove(idx);
}
} else {
panic!("tried to attach a Del to an Index chain");
}
}
pub(crate) fn parent_split(&mut self, at: &[u8], to: PageId) -> bool {
if let Data::Index(ref mut pointers) = self.data {
let encoded_sep = &at[self.prefix_len as usize..];
match pointers.binary_search_by_key(&encoded_sep, |(k, _)| k) {
Ok(_) => {
debug!(
"parent_split skipped because \
parent already contains child at split point \
due to deep race"
);
return false;
}
Err(idx) => pointers.insert(idx, (IVec::from(encoded_sep), to)),
}
} else {
panic!("tried to attach a ParentSplit to a Leaf chain");
}
true
}
pub(crate) fn split(mut self) -> (Node, Node) {
fn split_inner<T>(
xs: &mut Vec<(IVec, T)>,
left_prefix: &[u8],
right_max: &[u8],
suffix_truncation: bool,
) -> (IVec, u8, Vec<(IVec, T)>)
where
T: Clone + Ord,
{
let right = xs.split_off(xs.len() / 2 + 1);
let right_min = &right[0].0;
let left_max = &xs.last().unwrap().0;
// When splitting, the prefix can only grow or stay the
// same size, because all keys already shared the same
// prefix before. Here we figure out if we can shave
// off additional bytes in the key.
let max_additional =
u8::max_value() - u8::try_from(left_prefix.len()).unwrap();
let right_additional_prefix_len = right_min
.iter()
.zip(right_max.iter())
.take_while(|(a, b)| a == b)
.take(max_additional as usize)
.count();
let necessary_split_len = if suffix_truncation {
// we can only perform suffix truncation when
// choosing the split points for leaf nodes.
// split points bubble up into indexes, but
// an important invariant is that for indexes
// the first item always matches the lo key,
// otherwise ranges would be permanently
// inaccessible by falling into the gap
// during a split.
let smallest_suffix = right_min
.iter()
.zip(left_max.iter())
.take_while(|(a, b)| a == b)
.count()
+ 1;
// we cannot suffix truncate the split point so
// aggressively that we cause prefix encoding
// to degrade
std::cmp::max(smallest_suffix, right_additional_prefix_len)
} else {
right_min.len()
};
let split_point: IVec =
prefix::decode(left_prefix, &right_min[..necessary_split_len]);
assert!(!split_point.is_empty());
let mut right_data = Vec::with_capacity(right.len());
for (k, v) in right {
let k: IVec = if right_additional_prefix_len > 0 {
// shave off additional prefixed bytes
IVec::from(&k[right_additional_prefix_len..])
} else {
k.clone()
};
right_data.push((k, v.clone()));
}
(
split_point,
u8::try_from(right_additional_prefix_len).unwrap(),
right_data,
)
}
let prefix = &self.lo[..self.prefix_len as usize];
let right_max = &self.hi[self.prefix_len as usize..];
let (split, right_additional_prefix_len, right_data) = match self.data {
Data::Index(ref mut pointers) => {
let (split, prefix_len, right) =
split_inner(pointers, prefix, right_max, false);
(split, prefix_len, Data::Index(right))
}
Data::Leaf(ref mut items) => {
let (split, prefix_len, right) =
split_inner(items, prefix, right_max, true);
(split, prefix_len, Data::Leaf(right))
}
};
let right = Node {
data: right_data,
next: self.next,
lo: split.clone(),
hi: self.hi.clone(),
merging_child: None,
merging: false,
prefix_len: self.prefix_len + right_additional_prefix_len,
};
self.hi = split;
// intentionally make this the end to make
// any issues pop out with setting it
// correctly after the split.
self.next = None;
if self.hi.is_empty() {
assert_eq!(self.prefix_len, 0);
}
assert!(!(self.lo.is_empty() && self.hi.is_empty()));
assert!(!(self.lo.is_empty() && (self.prefix_len > 0)));
assert!(self.lo.len() >= self.prefix_len as usize);
assert!(self.hi.len() >= self.prefix_len as usize);
assert!(!(right.lo.is_empty() && right.hi.is_empty()));
assert!(!(right.lo.is_empty() && (right.prefix_len > 0)));
assert!(right.lo.len() >= right.prefix_len as usize);
assert!(right.hi.len() >= right.prefix_len as usize);
(self, right)
}
/// Absorbs the contents of `right` (the immediate right sibling)
/// into a copy of `self`, returning the merged node.
///
/// The caller must ensure `right` really is the node directly after
/// `self`; the recomputed prefix below assumes the merged node spans
/// `[self.lo, right.hi)`.
pub(crate) fn receive_merge(&self, right: &Node) -> Node {
    // Re-encodes the right-hand entries under the merged node's
    // (possibly shorter) prefix and appends them to the left data.
    fn receive_merge_inner<T>(
        old_prefix: &[u8],
        new_prefix_len: usize,
        left_data: &mut Vec<(IVec, T)>,
        right_data: &[(IVec, T)],
    ) where
        T: Clone,
    {
        // When merging, the prefix can only shrink or
        // stay the same length. Here we figure out if
        // we need to add previous prefixed bytes.
        for (k, v) in right_data {
            let k = if new_prefix_len == old_prefix.len() {
                // Same prefix length: the stored key is already valid.
                k.clone()
            } else {
                // Prefix shrank: restore the bytes that the right
                // node had elided from its keys.
                prefix::reencode(old_prefix, k, new_prefix_len)
            };
            left_data.push((k, v.clone()));
        }
    }

    let mut merged = self.clone();

    // The merged node spans [self.lo, right.hi), so its shared prefix
    // is the number of leading bytes those two keys agree on, capped
    // because prefix_len is stored as a u8.
    //
    // NOTE(review): `merged.prefix_len` is never set to
    // `new_prefix_len` below, while the right-hand keys ARE re-encoded
    // to `new_prefix_len` — confirm the surrounding invariants
    // guarantee these agree (e.g. that new_prefix_len always equals
    // self.prefix_len here).
    let new_prefix_len = right
        .hi
        .iter()
        .zip(self.lo.iter())
        .take_while(|(a, b)| a == b)
        .take(u8::max_value() as usize)
        .count();

    match (&mut merged.data, &right.data) {
        (
            Data::Index(ref mut left_pointers),
            Data::Index(ref right_pointers),
        ) => {
            receive_merge_inner(
                right.prefix(),
                new_prefix_len,
                left_pointers,
                right_pointers.as_ref(),
            );
        }
        (Data::Leaf(ref mut left_items), Data::Leaf(ref right_items)) => {
            receive_merge_inner(
                right.prefix(),
                new_prefix_len,
                left_items,
                right_items.as_ref(),
            );
        }
        // Merging an index into a leaf (or vice versa) indicates a
        // logic error in the tree-maintenance code.
        _ => panic!("Can't merge incompatible Data!"),
    }

    // Adopt the right sibling's upper bound and successor link.
    merged.hi = right.hi.clone();
    merged.next = right.next;
    merged
}
/// True when this node's key space reaches at least as far as the
/// supplied upper bound.
pub(crate) fn contains_upper_bound(&self, bound: &Bound<IVec>) -> bool {
    // An empty `hi` marks the rightmost (unbounded) node, which
    // contains every possible upper bound.
    let reaches = match bound {
        Bound::Excluded(b) => self.hi >= *b,
        Bound::Included(b) => self.hi > *b,
        Bound::Unbounded => false,
    };
    reaches || self.hi.is_empty()
}
/// True when this node's key space starts at or before the supplied
/// lower bound (direction-sensitive for unbounded reverse scans).
pub(crate) fn contains_lower_bound(
    &self,
    bound: &Bound<IVec>,
    is_forward: bool,
) -> bool {
    // An empty `lo` marks the leftmost node, which contains every
    // bounded lower bound.
    match bound {
        Bound::Excluded(b) => {
            self.lo < *b
                || (is_forward && *b == self.lo)
                || self.lo.is_empty()
        }
        Bound::Included(b) => self.lo <= *b || self.lo.is_empty(),
        Bound::Unbounded => {
            // A reverse scan starts from the rightmost node, a
            // forward scan from the leftmost.
            if is_forward {
                self.lo.is_empty()
            } else {
                self.hi.is_empty()
            }
        }
    }
}
/// Returns the first key/value pair at or after `bound` in this leaf
/// node, with the key fully decoded (node prefix restored).
///
/// # Panics
/// Panics when called on an index node.
pub(crate) fn successor(
    &self,
    bound: &Bound<IVec>,
) -> Option<(IVec, IVec)> {
    assert!(!self.data.is_index());

    // This encoding happens this way because
    // keys cannot be lower than the node's lo key.
    let predecessor_key = match bound {
        Bound::Unbounded => self.prefix_encode(&self.lo),
        Bound::Included(b) | Bound::Excluded(b) => {
            let max = std::cmp::max(b, &self.lo);
            self.prefix_encode(max)
        }
    };

    let records = self.data.leaf_ref().unwrap();
    let search = records.binary_search_by_key(&predecessor_key, |(k, _)| k);

    // Scan forward from the first record at or after the target.
    let idx = match search {
        Ok(idx) => idx,
        Err(idx) if idx < records.len() => idx,
        _ => return None,
    };

    for (k, v) in &records[idx..] {
        match bound {
            // BUG FIX: guard the slice with a length check, mirroring
            // `predecessor`. Without it, an excluded bound shorter
            // than this node's prefix would panic on the range index.
            Bound::Excluded(b)
                if b.len() >= self.prefix_len as usize
                    && b[self.prefix_len as usize..] == **k =>
            {
                // keep going because we wanted to exclude
                // this key.
                continue;
            }
            _ => {}
        }
        let decoded_key = self.prefix_decode(&k);
        return Some((decoded_key, v.clone()));
    }
    None
}
/// Returns the last key/value pair at or before `bound` in this leaf
/// node, with the key fully decoded (node prefix restored).
///
/// # Panics
/// Panics when called on an index node.
pub(crate) fn predecessor(
    &self,
    bound: &Bound<IVec>,
) -> Option<(IVec, IVec)> {
    // Sentinel key that compares greater than any plausible stored
    // key; used as the search target on the rightmost node, whose hi
    // key is the empty slice.
    static MAX_IVEC: Lazy<IVec, fn() -> IVec> = Lazy::new(init_max_ivec);

    fn init_max_ivec() -> IVec {
        // NOTE(review): assumes no stored key exceeds 1 MiB of 0xff
        // bytes — confirm against the tree's key size limits.
        let base = vec![255; 1024 * 1024];
        IVec::from(base)
    }

    assert!(!self.data.is_index());

    // This encoding happens this way because
    // the rightmost (unbounded) node has
    // a hi key represented by the empty slice
    let successor_key = match bound {
        Bound::Unbounded => {
            if self.hi.is_empty() {
                MAX_IVEC.clone()
            } else {
                IVec::from(self.prefix_encode(&self.hi))
            }
        }
        Bound::Included(b) => {
            // Clamp the bound to this node's hi key (when bounded).
            let min = if self.hi.is_empty() {
                b
            } else {
                std::cmp::min(b, &self.hi)
            };
            IVec::from(self.prefix_encode(min))
        }
        Bound::Excluded(b) => {
            let min = if self.hi.is_empty() {
                b
            } else {
                std::cmp::min(b, &self.hi)
            };
            // Strips the node prefix directly from the clamped bound.
            let encoded = &min[self.prefix_len as usize..];
            IVec::from(encoded)
        }
    };

    let records = self.data.leaf_ref().unwrap();
    let search = records.binary_search_by_key(&&successor_key, |(k, _)| k);

    // Start scanning backwards from the last record at or before the
    // target.
    let idx = match search {
        Ok(idx) => idx,
        Err(idx) if idx > 0 => idx - 1,
        _ => return None,
    };

    for (k, v) in records[0..=idx].iter().rev() {
        match bound {
            // The length check prevents a slicing panic when the
            // excluded bound is shorter than this node's prefix.
            Bound::Excluded(b)
                if b.len() >= self.prefix_len as usize
                    && b[self.prefix_len as usize..] == **k =>
            {
                // keep going because we wanted to exclude
                // this key.
                continue;
            }
            _ => {}
        }
        let decoded_key = self.prefix_decode(&k);
        return Some((decoded_key, v.clone()));
    }
    None
}
/// leaf_pair_for_key finds an existing value pair for a given key.
///
/// Returns `None` when the key is absent. Panics on an index node.
pub(crate) fn leaf_pair_for_key(
    &self,
    key: &[u8],
) -> Option<(&IVec, &IVec)> {
    let records = self
        .data
        .leaf_ref()
        .expect("leaf_pair_for_key called on index node");

    // Keys are stored with the node prefix stripped, so search on the
    // suffix only.
    let suffix = &key[self.prefix_len as usize..];

    match records.binary_search_by_key(&suffix, |(k, _)| k) {
        Ok(idx) => {
            let (k, v) = &records[idx];
            Some((k, v))
        }
        Err(_) => None,
    }
}
/// `node_kv_pair` returns either existing (node/key, value) pair or
/// (node/key, none) where a node/key is node level encoded key.
pub fn node_kv_pair(&self, key: &[u8]) -> (IVec, Option<IVec>) {
    match self.leaf_pair_for_key(key.as_ref()) {
        Some((k, v)) => (k.clone(), Some(v.clone())),
        None => {
            // Key absent: hand back the prefix-stripped key so the
            // caller can still address this slot.
            let encoded_key = IVec::from(&key[self.prefix_len as usize..]);
            (encoded_key, None)
        }
    }
}
/// True when this node holds enough entries to be split, and is not
/// currently involved in a merge.
pub(crate) fn should_split(&self) -> bool {
    // Test builds use tiny nodes so the split machinery is exercised
    // constantly.
    let threshold = if cfg!(any(test, feature = "lock_free_delays")) {
        2
    } else if self.data.is_index() {
        256
    } else {
        16
    };

    // A node participating in a merge must not split concurrently.
    self.data.len() > threshold
        && self.merging_child.is_none()
        && !self.merging
}
/// True when this node is small enough to be merged into a sibling,
/// and is not already involved in a merge.
pub(crate) fn should_merge(&self) -> bool {
    // Test builds use tiny thresholds so merges happen frequently.
    let threshold = if cfg!(any(test, feature = "lock_free_delays")) {
        1
    } else if self.data.is_index() {
        64
    } else {
        4
    };

    // A node already merging must not start another merge.
    self.data.len() < threshold
        && self.merging_child.is_none()
        && !self.merging
}
/// True when this parent is free to begin merging one of its children.
pub(crate) fn can_merge_child(&self) -> bool {
    !(self.merging || self.merging_child.is_some())
}
/// Locates the child responsible for `key`, returning its slot in the
/// index alongside its page id. Panics when called on a leaf.
pub(crate) fn index_next_node(&self, key: &[u8]) -> (usize, PageId) {
    let records =
        self.data.index_ref().expect("index_next_node called on leaf");

    // Strip the shared prefix before searching the stored suffixes.
    let suffix = &key[self.prefix_len as usize..];

    let index = binary_search_lub(suffix, records)
        .expect("failed to traverse index");
    (index, records[index].1)
}
}
/// The payload of a tree node: either child pointers (index node) or
/// key/value pairs (leaf node). Keys in both variants are stored with
/// the node's shared prefix stripped.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Data {
    /// Separator keys paired with the page ids of their children.
    Index(Vec<(IVec, PageId)>),
    /// Sorted key/value pairs for a leaf node.
    Leaf(Vec<(IVec, IVec)>),
}
impl Default for Data {
    /// A fresh node starts life as an empty leaf.
    fn default() -> Data {
        Data::Leaf(Vec::new())
    }
}
impl Data {
    /// Number of entries (pointers or items) held in this node.
    pub(crate) fn len(&self) -> usize {
        match self {
            Data::Index(pointers) => pointers.len(),
            Data::Leaf(items) => items.len(),
        }
    }

    /// Removes the pointer to `merged_child_pid` once its merge has
    /// been committed. Panics when called on leaf data or when the
    /// child is not present.
    pub(crate) fn parent_merge_confirm(&mut self, merged_child_pid: PageId) {
        if let Data::Index(pointers) = self {
            let idx = pointers
                .iter()
                .position(|(_k, c)| *c == merged_child_pid)
                .unwrap();
            let _ = pointers.remove(idx);
        } else {
            panic!("parent_merge_confirm called on leaf data")
        }
    }

    /// Borrows the leaf items, or `None` for an index node.
    pub(crate) fn leaf_ref(&self) -> Option<&Vec<(IVec, IVec)>> {
        match self {
            Data::Leaf(items) => Some(items),
            Data::Index(_) => None,
        }
    }

    /// Borrows the index pointers, or `None` for a leaf node.
    pub(crate) fn index_ref(&self) -> Option<&Vec<(IVec, PageId)>> {
        match self {
            Data::Index(pointers) => Some(pointers),
            Data::Leaf(_) => None,
        }
    }

    /// True when this is an index (interior) node.
    pub(crate) fn is_index(&self) -> bool {
        match self {
            Data::Index(..) => true,
            Data::Leaf(..) => false,
        }
    }

    /// True when this is a leaf node.
    pub(crate) fn is_leaf(&self) -> bool {
        match self {
            Data::Leaf(..) => true,
            Data::Index(..) => false,
        }
    }
}
|
//!
//! Desync pipes provide a way to generate and process streams via a `Desync` object
//!
//! Pipes are an excellent way to interface `Desync` objects and the futures library. Piping
//! a stream into a `Desync` object is equivalent to spawning it with an executor, except
//! without the need to dedicate a thread to running it.
//!
//! There are two kinds of pipe. The `pipe_in` function creates a pipe that processes each
//! value made available from a stream on a desync object as they arrive, producing no
//! results. This is useful for cases where a `Desync` object is being used as the endpoint
//! for some data processing (for example, to insert the results of an operation into an
//! asynchronous database object).
//!
//! The `pipe` function pipes data through an object. For every input value, it produces
//! an output value. This is good for creating streams that perform some kind of asynchronous
//! processing operation or that need to access data from inside a `Desync` object.
//!
//! Here's an example of using `pipe_in` to store data in a `HashSet`:
//!
//! ```
//! # extern crate futures;
//! # extern crate desync;
//! # use std::collections::HashSet;
//! # use std::sync::*;
//! #
//! use futures::sync::mpsc;
//! use futures::executor;
//! use desync::*;
//!
//! let desync_hashset = Arc::new(Desync::new(HashSet::new()));
//! let (sender, receiver) = mpsc::channel(5);
//!
//! pipe_in(Arc::clone(&desync_hashset), receiver, |hashset, value| { value.map(|value| hashset.insert(value)); });
//!
//! let mut sender = executor::spawn(sender);
//! sender.wait_send("Test".to_string());
//! sender.wait_send("Another value".to_string());
//! #
//! # assert!(desync_hashset.sync(|hashset| hashset.contains(&("Test".to_string()))))
//! ```
//!
use super::desync::*;
use futures::*;
use futures::executor;
use futures::executor::Spawn;
use std::sync::*;
use std::result::Result;
use std::collections::VecDeque;
lazy_static! {
    /// The shared queue where we monitor for updates to the active pipe streams
    /// (a single process-wide monitor drives every pipe created by this module)
    static ref PIPE_MONITOR: PipeMonitor = PipeMonitor::new();
}

/// The maximum number of items to queue on a pipe stream before we stop accepting new input
/// (default only; `PipeStream::set_backpressure_depth` overrides it per stream)
const PIPE_BACKPRESSURE_COUNT: usize = 5;
///
/// Pipes a stream into a desync object. Whenever an item becomes available on the stream, the
/// processing function is called asynchronously with the item that was received.
///
/// This takes a weak reference to the passed in `Desync` object, so the pipe will stop if it's
/// the only thing referencing this object.
///
/// Piping a stream to a `Desync` like this will cause it to start executing: ie, this is
/// similar to calling `executor::spawn(stream)`, except that the stream will immediately
/// start draining into the `Desync` object.
///
pub fn pipe_in<Core, S, ProcessFn>(desync: Arc<Desync<Core>>, stream: S, process: ProcessFn)
where Core: 'static+Send,
S: 'static+Send+Stream,
S::Item: Send,
S::Error: Send,
ProcessFn: 'static+Send+FnMut(&mut Core, Result<S::Item, S::Error>) -> () {
// Need a mutable version of the stream
let mut stream = stream;
// We stop processing once the desync object is no longer used anywhere else
let desync = Arc::downgrade(&desync);
// Wrap the process fn up so we can call it asynchronously
// (it doesn't really need to be in a mutex as it's only called by our object but we need to make it pass Rust's checks and we don't have a way to specify this at the moment)
let process = Arc::new(Mutex::new(process));
// Monitor the stream
PIPE_MONITOR.monitor(move || {
loop {
let desync = desync.upgrade();
if let Some(desync) = desync {
// Read the current status of the stream
let process = Arc::clone(&process);
let next = stream.poll();
match next {
// Just wait if the stream is not ready
Ok(Async::NotReady) => { return Ok(Async::NotReady); },
// Stop processing when the stream is finished
Ok(Async::Ready(None)) => { return Ok(Async::Ready(())); }
// Stream returned a value
Ok(Async::Ready(Some(next))) => {
// Process the value on the stream
desync.async(move |core| {
let mut process = process.lock().unwrap();
let process = &mut *process;
process(core, Ok(next));
});
},
// Stream returned an error
Err(e) => {
// Process the error on the stream
desync.async(move |core| {
let mut process = process.lock().unwrap();
let process = &mut *process;
process(core, Err(e));
});
},
}
} else {
// The desync target is no longer available - indicate that we've completed monitoring
return Ok(Async::Ready(()));
}
}
});
}
///
/// Pipes a stream into this object. Whenever an item becomes available on the stream, the
/// processing function is called asynchronously with the item that was received. The
/// return value is placed onto the output stream.
///
/// Unlike `pipe_in`, this keeps a strong reference to the `Desync` object so the processing
/// will continue so long as the input stream has data and the output stream is not dropped.
///
/// The input stream will start executing and reading values immediately when this is called.
/// Dropping the output stream will cause the pipe to be closed (the input stream will be
/// dropped and no further processing will occur).
///
/// Only one item at a time is read from the input stream: the next item is
/// not polled for until the previous one has finished processing, which
/// creates backpressure while the processing function is running.
///
/// This example demonstrates how to create a simple demonstration pipe that takes hashset values
/// and returns a stream indicating whether or not they were already included:
///
/// ```
/// # extern crate futures;
/// # extern crate desync;
/// # use std::collections::HashSet;
/// # use std::sync::*;
/// #
/// use futures::sync::mpsc;
/// use futures::executor;
/// use desync::*;
///
/// let desync_hashset = Arc::new(Desync::new(HashSet::new()));
/// let (sender, receiver) = mpsc::channel::<String>(5);
///
/// let value_inserted = pipe(Arc::clone(&desync_hashset), receiver,
/// |hashset, value| { value.map(|value| (value.clone(), hashset.insert(value))) });
///
/// let mut sender = executor::spawn(sender);
/// sender.wait_send("Test".to_string());
/// sender.wait_send("Another value".to_string());
/// sender.wait_send("Test".to_string());
///
/// let mut value_inserted = executor::spawn(value_inserted);
/// assert!(value_inserted.wait_stream() == Some(Ok(("Test".to_string(), true))));
/// assert!(value_inserted.wait_stream() == Some(Ok(("Another value".to_string(), true))));
/// assert!(value_inserted.wait_stream() == Some(Ok(("Test".to_string(), false))));
/// ```
///
pub fn pipe<Core, S, Output, OutputErr, ProcessFn>(desync: Arc<Desync<Core>>, stream: S, process: ProcessFn) -> PipeStream<Output, OutputErr>
where Core: 'static+Send,
      S: 'static+Send+Stream,
      S::Item: Send,
      S::Error: Send,
      Output: 'static+Send,
      OutputErr: 'static+Send,
      ProcessFn: 'static+Send+FnMut(&mut Core, Result<S::Item, S::Error>) -> Result<Output, OutputErr> {
    // Fetch the input stream and prepare the process function for async calling
    let mut input_stream = stream;
    let process = Arc::new(Mutex::new(process));

    // Create the output stream
    let output_stream = PipeStream::new();
    let stream_core = Arc::clone(&output_stream.core);
    let stream_core = Arc::downgrade(&stream_core);

    // Monitor the input stream and pass data to the output stream
    PIPE_MONITOR.monitor(move || {
        loop {
            let stream_core = stream_core.upgrade();

            if let Some(stream_core) = stream_core {
                // Defer processing if the stream core is full
                {
                    // Fetch the core
                    let mut stream_core = stream_core.lock().unwrap();

                    // If the pending queue is full, then stop processing events
                    if stream_core.pending.len() >= stream_core.max_pipe_depth {
                        // Wake when the stream accepts some input
                        stream_core.backpressure_release_notify = Some(task::current());

                        // Go back to sleep without reading from the stream
                        return Ok(Async::NotReady);
                    }
                }

                // Read the current status of the stream
                let process = Arc::clone(&process);
                let next = input_stream.poll();
                let mut next_item;

                // Work out what the next item to pass to the process function should be
                match next {
                    // Just wait if the stream is not ready
                    Ok(Async::NotReady) => { return Ok(Async::NotReady); },

                    // Stop processing when the input stream is finished
                    Ok(Async::Ready(None)) => {
                        desync.async(move |_core| {
                            // Mark the target stream as closed
                            let notify = {
                                let mut stream_core = stream_core.lock().unwrap();
                                stream_core.closed = true;
                                stream_core.notify.take()
                            };
                            notify.map(|notify| notify.notify());
                        });

                        // Pipe has finished
                        return Ok(Async::Ready(()));
                    }

                    // Stream returned a value
                    Ok(Async::Ready(Some(next))) => next_item = Ok(next),

                    // Stream returned an error
                    Err(e) => next_item = Err(e),
                }

                // Send the next item to be processed
                // BUG FIX: previously the loop immediately polled the input
                // stream again after dispatching, so items piled up on the
                // Desync without any backpressure while the processing
                // function was running. Wait for processing to finish.
                let when_finished = task::current();

                desync.async(move |core| {
                    // Process the next item
                    let mut process = process.lock().unwrap();
                    let process = &mut *process;
                    let next_item = process(core, next_item);

                    // Send to the pipe stream
                    let notify = {
                        let mut stream_core = stream_core.lock().unwrap();
                        stream_core.pending.push_back(next_item);
                        stream_core.notify.take()
                    };
                    notify.map(|notify| notify.notify());

                    // Wake the monitor so it polls for the next value
                    when_finished.notify();
                });

                // Poll again when the task is complete
                return Ok(Async::NotReady);
            } else {
                // We stop processing once nothing is reading from the target stream
                return Ok(Async::Ready(()));
            }
        }
    });

    // The pipe stream is the result
    output_stream
}
///
/// The shared data for a pipe stream
///
/// Shared (behind an `Arc<Mutex<_>>`) between the reading `PipeStream`
/// and the monitor closure that feeds it.
///
struct PipeStreamCore<Item, Error> {
    /// The maximum number of items we allow to be queued in this stream before producing backpressure
    max_pipe_depth: usize,

    /// The pending data for this stream
    pending: VecDeque<Result<Item, Error>>,

    /// True if the input stream has closed (the stream is closed once this is true and there are no more pending items)
    closed: bool,

    /// The task to notify when the stream changes
    notify: Option<task::Task>,

    /// The task to notify when we reduce the amount of pending data
    backpressure_release_notify: Option<task::Task>
}
///
/// A stream generated by a pipe
///
/// Yields the values produced by the pipe's processing function, in order.
///
pub struct PipeStream<Item, Error> {
    // Shared with the monitor closure created in `pipe`
    core: Arc<Mutex<PipeStreamCore<Item, Error>>>
}
impl<Item, Error> PipeStream<Item, Error> {
    ///
    /// Creates a new, empty, pipestream
    ///
    fn new() -> PipeStream<Item, Error> {
        // Start with an empty queue and the default backpressure depth
        let core = PipeStreamCore {
            max_pipe_depth: PIPE_BACKPRESSURE_COUNT,
            pending: VecDeque::new(),
            closed: false,
            notify: None,
            backpressure_release_notify: None
        };

        PipeStream {
            core: Arc::new(Mutex::new(core))
        }
    }

    ///
    /// Sets the number of items that this pipe stream will buffer before producing backpressure
    ///
    /// If this call is not made, this will be set to 5.
    ///
    pub fn set_backpressure_depth(&mut self, max_depth: usize) {
        let mut core = self.core.lock().unwrap();
        core.max_pipe_depth = max_depth;
    }
}
impl<Item, Error> Drop for PipeStream<Item, Error> {
    fn drop(&mut self) {
        let mut core = self.core.lock().unwrap();

        // Flush the pending queue
        // (drops any queued items so their resources are released now)
        core.pending = VecDeque::new();

        // TODO: wake the monitor and stop listening to the source stream
        // (Right now this will happen next time the source stream produces data)
    }
}
impl<Item, Error> Stream for PipeStream<Item, Error> {
    type Item = Item;
    type Error = Error;

    fn poll(&mut self) -> Poll<Option<Item>, Error> {
        // Fetch the core
        let mut core = self.core.lock().unwrap();

        match core.pending.pop_front() {
            Some(item) => {
                // Consumed an item, so release any backpressure on the pipe
                if let Some(notify) = core.backpressure_release_notify.take() {
                    notify.notify();
                }

                match item {
                    Ok(value) => Ok(Async::Ready(Some(value))),
                    Err(err) => Err(err)
                }
            }

            // No more data will be returned from this stream
            None if core.closed => Ok(Async::Ready(None)),

            // Stream not ready: wake us when something arrives
            None => {
                core.notify = Some(task::current());
                Ok(Async::NotReady)
            }
        }
    }
}
///
/// The main polling component for that implements the stream pipes
///
/// Stateless: the per-pipe polling state lives in `Desync` cells
/// created by `monitor`.
///
struct PipeMonitor {
}
///
/// Provides the 'Notify' interface for a polling function with a particular ID
///
struct PipeNotify<PollFn: Send> {
    // The Desync cell holding the spawned future, so it can be re-polled on notification
    next_poll: Arc<Desync<Option<Spawn<PollFn>>>>
}
impl PipeMonitor {
    ///
    /// Creates a new poll thread
    ///
    pub fn new() -> PipeMonitor {
        // Stateless: this type only groups the polling entry points
        PipeMonitor {
        }
    }

    ///
    /// Performs a polling operation on a poll
    ///
    /// `this_poll` holds the spawned future being driven; `next_poll` is the
    /// same `Desync` cell, passed along so the notifier can re-poll later.
    ///
    fn poll<PollFn>(this_poll: &mut Option<Spawn<PollFn>>, next_poll: Arc<Desync<Option<Spawn<PollFn>>>>)
    where PollFn: 'static+Send+Future<Item=(), Error=()> {
        // If the polling function exists...
        if let Some(mut poll) = this_poll.take() {
            // Create a notification
            let notify = PipeNotify {
                next_poll: next_poll
            };
            let notify = Arc::new(notify);

            // Poll the function
            // BUG FIX: the reference operator here had been mangled into
            // `¬ify` (an entity-encoding corruption of `&notify`),
            // which does not compile.
            let poll_result = poll.poll_future_notify(&notify, 0);

            // Keep the polling function alive if it has not finished yet
            if poll_result != Ok(Async::Ready(())) {
                // The take() call means that the polling won't continue unless we pass it forward like this
                *this_poll = Some(poll);
            }
        }
    }

    ///
    /// Adds a polling function to the current thread. It will be called using the futures
    /// notification system (ie, can call things like the stream poll function)
    ///
    pub fn monitor<PollFn>(&self, poll_fn: PollFn)
    where PollFn: 'static+Send+FnMut() -> Poll<(), ()> {
        // Turn the polling function into a future (it will complete when monitoring is complete)
        let poll_fn = future::poll_fn(poll_fn);

        // Spawn it with an executor
        let poll_fn = executor::spawn(poll_fn);

        // Create a desync object for polling
        let poll_fn = Arc::new(Desync::new(Some(poll_fn)));
        let next_poll = Arc::clone(&poll_fn);

        // Perform the initial polling
        poll_fn.sync(move |poll_fn| Self::poll(poll_fn, next_poll));
    }
}
impl<PollFn> executor::Notify for PipeNotify<PollFn>
where PollFn: 'static+Send+Future<Item=(), Error=()> {
    fn notify(&self, _id: usize) {
        // Poll the future whenever we're notified
        // (sync on the Desync serialises re-polls of the same future)
        let next_poll = Arc::clone(&self.next_poll);
        self.next_poll.sync(move |poll_fn| PipeMonitor::poll(poll_fn, next_poll));
    }
}
Don't immediately poll the stream for a new value until the previous one has finished processing.
This creates backpressure while the processing function is running, which is the behaviour we want.
//!
//! Desync pipes provide a way to generate and process streams via a `Desync` object
//!
//! Pipes are an excellent way to interface `Desync` objects and the futures library. Piping
//! a stream into a `Desync` object is equivalent to spawning it with an executor, except
//! without the need to dedicate a thread to running it.
//!
//! There are two kinds of pipe. The `pipe_in` function creates a pipe that processes each
//! value made available from a stream on a desync object as they arrive, producing no
//! results. This is useful for cases where a `Desync` object is being used as the endpoint
//! for some data processing (for example, to insert the results of an operation into an
//! asynchronous database object).
//!
//! The `pipe` function pipes data through an object. For every input value, it produces
//! an output value. This is good for creating streams that perform some kind of asynchronous
//! processing operation or that need to access data from inside a `Desync` object.
//!
//! Here's an example of using `pipe_in` to store data in a `HashSet`:
//!
//! ```
//! # extern crate futures;
//! # extern crate desync;
//! # use std::collections::HashSet;
//! # use std::sync::*;
//! #
//! use futures::sync::mpsc;
//! use futures::executor;
//! use desync::*;
//!
//! let desync_hashset = Arc::new(Desync::new(HashSet::new()));
//! let (sender, receiver) = mpsc::channel(5);
//!
//! pipe_in(Arc::clone(&desync_hashset), receiver, |hashset, value| { value.map(|value| hashset.insert(value)); });
//!
//! let mut sender = executor::spawn(sender);
//! sender.wait_send("Test".to_string());
//! sender.wait_send("Another value".to_string());
//! #
//! # assert!(desync_hashset.sync(|hashset| hashset.contains(&("Test".to_string()))))
//! ```
//!
use super::desync::*;
use futures::*;
use futures::executor;
use futures::executor::Spawn;
use std::sync::*;
use std::result::Result;
use std::collections::VecDeque;
lazy_static! {
    /// The shared queue where we monitor for updates to the active pipe streams
    /// (a single process-wide monitor drives every pipe created by this module)
    static ref PIPE_MONITOR: PipeMonitor = PipeMonitor::new();
}

/// The maximum number of items to queue on a pipe stream before we stop accepting new input
/// (default only; `PipeStream::set_backpressure_depth` overrides it per stream)
const PIPE_BACKPRESSURE_COUNT: usize = 5;
///
/// Pipes a stream into a desync object. Whenever an item becomes available on the stream, the
/// processing function is called asynchronously with the item that was received.
///
/// This takes a weak reference to the passed in `Desync` object, so the pipe will stop if it's
/// the only thing referencing this object.
///
/// Piping a stream to a `Desync` like this will cause it to start executing: ie, this is
/// similar to calling `executor::spawn(stream)`, except that the stream will immediately
/// start draining into the `Desync` object.
///
/// Only one item at a time is read from the stream: the next item is not
/// polled for until the previous one has finished processing.
///
pub fn pipe_in<Core, S, ProcessFn>(desync: Arc<Desync<Core>>, stream: S, process: ProcessFn)
where Core: 'static+Send,
      S: 'static+Send+Stream,
      S::Item: Send,
      S::Error: Send,
      ProcessFn: 'static+Send+FnMut(&mut Core, Result<S::Item, S::Error>) -> () {
    // Need a mutable version of the stream
    let mut stream = stream;

    // We stop processing once the desync object is no longer used anywhere else
    let desync = Arc::downgrade(&desync);

    // Wrap the process fn up so we can call it asynchronously
    // (it doesn't really need to be in a mutex as it's only called by our object but we need to make it pass Rust's checks and we don't have a way to specify this at the moment)
    let process = Arc::new(Mutex::new(process));

    // Monitor the stream
    PIPE_MONITOR.monitor(move || {
        loop {
            let desync = desync.upgrade();

            if let Some(desync) = desync {
                // Read the current status of the stream
                let process = Arc::clone(&process);
                let next = stream.poll();

                match next {
                    // Just wait if the stream is not ready
                    Ok(Async::NotReady) => { return Ok(Async::NotReady); },

                    // Stop processing when the stream is finished
                    Ok(Async::Ready(None)) => { return Ok(Async::Ready(())); }

                    // Stream returned a value
                    Ok(Async::Ready(Some(next))) => {
                        let when_ready = task::current();

                        // Process the value on the stream
                        desync.async(move |core| {
                            let mut process = process.lock().unwrap();
                            let process = &mut *process;
                            process(core, Ok(next));

                            when_ready.notify();
                        });

                        // Wake again when the processing finishes
                        return Ok(Async::NotReady);
                    },

                    // Stream returned an error
                    Err(e) => {
                        // CONSISTENCY FIX: errors previously bypassed the
                        // wait-for-processing behaviour applied to ordinary
                        // values (the loop polled again immediately). Apply
                        // the same backpressure here.
                        let when_ready = task::current();

                        // Process the error on the stream
                        desync.async(move |core| {
                            let mut process = process.lock().unwrap();
                            let process = &mut *process;
                            process(core, Err(e));

                            when_ready.notify();
                        });

                        return Ok(Async::NotReady);
                    },
                }
            } else {
                // The desync target is no longer available - indicate that we've completed monitoring
                return Ok(Async::Ready(()));
            }
        }
    });
}
///
/// Pipes a stream into this object. Whenever an item becomes available on the stream, the
/// processing function is called asynchronously with the item that was received. The
/// return value is placed onto the output stream.
///
/// Unlike `pipe_in`, this keeps a strong reference to the `Desync` object so the processing
/// will continue so long as the input stream has data and the output stream is not dropped.
///
/// The input stream will start executing and reading values immediately when this is called.
/// Dropping the output stream will cause the pipe to be closed (the input stream will be
/// dropped and no further processing will occur).
///
/// This example demonstrates how to create a simple demonstration pipe that takes hashset values
/// and returns a stream indicating whether or not they were already included:
///
/// ```
/// # extern crate futures;
/// # extern crate desync;
/// # use std::collections::HashSet;
/// # use std::sync::*;
/// #
/// use futures::sync::mpsc;
/// use futures::executor;
/// use desync::*;
///
/// let desync_hashset = Arc::new(Desync::new(HashSet::new()));
/// let (sender, receiver) = mpsc::channel::<String>(5);
///
/// let value_inserted = pipe(Arc::clone(&desync_hashset), receiver,
/// |hashset, value| { value.map(|value| (value.clone(), hashset.insert(value))) });
///
/// let mut sender = executor::spawn(sender);
/// sender.wait_send("Test".to_string());
/// sender.wait_send("Another value".to_string());
/// sender.wait_send("Test".to_string());
///
/// let mut value_inserted = executor::spawn(value_inserted);
/// assert!(value_inserted.wait_stream() == Some(Ok(("Test".to_string(), true))));
/// assert!(value_inserted.wait_stream() == Some(Ok(("Another value".to_string(), true))));
/// assert!(value_inserted.wait_stream() == Some(Ok(("Test".to_string(), false))));
/// ```
///
pub fn pipe<Core, S, Output, OutputErr, ProcessFn>(desync: Arc<Desync<Core>>, stream: S, process: ProcessFn) -> PipeStream<Output, OutputErr>
where Core: 'static+Send,
      S: 'static+Send+Stream,
      S::Item: Send,
      S::Error: Send,
      Output: 'static+Send,
      OutputErr: 'static+Send,
      ProcessFn: 'static+Send+FnMut(&mut Core, Result<S::Item, S::Error>) -> Result<Output, OutputErr> {
    // Fetch the input stream and prepare the process function for async calling
    let mut input_stream = stream;
    let process = Arc::new(Mutex::new(process));

    // Create the output stream
    let output_stream = PipeStream::new();
    let stream_core = Arc::clone(&output_stream.core);
    // Weak reference: dropping the output PipeStream ends the pipe
    let stream_core = Arc::downgrade(&stream_core);

    // Monitor the input stream and pass data to the output stream
    PIPE_MONITOR.monitor(move || {
        loop {
            let stream_core = stream_core.upgrade();

            if let Some(stream_core) = stream_core {
                // Defer processing if the stream core is full
                {
                    // Fetch the core
                    let mut stream_core = stream_core.lock().unwrap();

                    // If the pending queue is full, then stop processing events
                    if stream_core.pending.len() >= stream_core.max_pipe_depth {
                        // Wake when the stream accepts some input
                        stream_core.backpressure_release_notify = Some(task::current());

                        // Go back to sleep without reading from the stream
                        return Ok(Async::NotReady);
                    }
                }

                // Read the current status of the stream
                let process = Arc::clone(&process);
                let next = input_stream.poll();
                let mut next_item;

                // Work out what the next item to pass to the process function should be
                match next {
                    // Just wait if the stream is not ready
                    Ok(Async::NotReady) => { return Ok(Async::NotReady); },

                    // Stop processing when the input stream is finished
                    Ok(Async::Ready(None)) => {
                        desync.async(move |_core| {
                            // Mark the target stream as closed
                            let notify = {
                                let mut stream_core = stream_core.lock().unwrap();
                                stream_core.closed = true;
                                stream_core.notify.take()
                            };
                            notify.map(|notify| notify.notify());
                        });

                        // Pipe has finished
                        return Ok(Async::Ready(()));
                    }

                    // Stream returned a value
                    Ok(Async::Ready(Some(next))) => next_item = Ok(next),

                    // Stream returned an error
                    Err(e) => next_item = Err(e),
                }

                // Send the next item to be processed
                // (task::current() must be captured here, inside the poll
                // context, so the closure can wake this monitor later)
                let when_finished = task::current();

                desync.async(move |core| {
                    // Process the next item
                    let mut process = process.lock().unwrap();
                    let process = &mut *process;
                    let next_item = process(core, next_item);

                    // Send to the pipe stream
                    let notify = {
                        let mut stream_core = stream_core.lock().unwrap();
                        stream_core.pending.push_back(next_item);
                        stream_core.notify.take()
                    };
                    notify.map(|notify| notify.notify());

                    // Only one item in flight at a time: waking here (rather
                    // than looping immediately) provides backpressure while
                    // the processing function runs
                    when_finished.notify();
                });

                // Poll again when the task is complete
                return Ok(Async::NotReady);
            } else {
                // We stop processing once nothing is reading from the target stream
                return Ok(Async::Ready(()));
            }
        }
    });

    // The pipe stream is the result
    output_stream
}
///
/// The shared data for a pipe stream
///
/// Shared (behind an `Arc<Mutex<_>>`) between the reading `PipeStream`
/// and the monitor closure that feeds it.
///
struct PipeStreamCore<Item, Error> {
    /// The maximum number of items we allow to be queued in this stream before producing backpressure
    max_pipe_depth: usize,

    /// The pending data for this stream
    pending: VecDeque<Result<Item, Error>>,

    /// True if the input stream has closed (the stream is closed once this is true and there are no more pending items)
    closed: bool,

    /// The task to notify when the stream changes
    notify: Option<task::Task>,

    /// The task to notify when we reduce the amount of pending data
    backpressure_release_notify: Option<task::Task>
}
///
/// A stream generated by a pipe
///
/// Yields the values produced by the pipe's processing function, in order.
///
pub struct PipeStream<Item, Error> {
    // Shared with the monitor closure created in `pipe`
    core: Arc<Mutex<PipeStreamCore<Item, Error>>>
}
impl<Item, Error> PipeStream<Item, Error> {
    ///
    /// Creates a new, empty, pipestream
    ///
    fn new() -> PipeStream<Item, Error> {
        // Start with an empty queue and the default backpressure depth
        let core = PipeStreamCore {
            max_pipe_depth: PIPE_BACKPRESSURE_COUNT,
            pending: VecDeque::new(),
            closed: false,
            notify: None,
            backpressure_release_notify: None
        };

        PipeStream {
            core: Arc::new(Mutex::new(core))
        }
    }

    ///
    /// Sets the number of items that this pipe stream will buffer before producing backpressure
    ///
    /// If this call is not made, this will be set to 5.
    ///
    pub fn set_backpressure_depth(&mut self, max_depth: usize) {
        let mut core = self.core.lock().unwrap();
        core.max_pipe_depth = max_depth;
    }
}
impl<Item, Error> Drop for PipeStream<Item, Error> {
    fn drop(&mut self) {
        let mut core = self.core.lock().unwrap();

        // Flush the pending queue
        // (drops any queued items so their resources are released now)
        core.pending = VecDeque::new();

        // TODO: wake the monitor and stop listening to the source stream
        // (Right now this will happen next time the source stream produces data)
    }
}
impl<Item, Error> Stream for PipeStream<Item, Error> {
    type Item = Item;
    type Error = Error;

    fn poll(&mut self) -> Poll<Option<Item>, Error> {
        // Fetch the core
        let mut core = self.core.lock().unwrap();

        match core.pending.pop_front() {
            Some(next) => {
                // We freed a slot in the queue: release any producer waiting on backpressure
                if let Some(notify) = core.backpressure_release_notify.take() {
                    notify.notify();
                }

                // Forward the queued item or error as-is
                next.map(|item| Async::Ready(Some(item)))
            }

            // Input closed and queue drained: the stream is finished
            None if core.closed => Ok(Async::Ready(None)),

            // Stream not ready: remember the task so the producer can wake us
            None => {
                core.notify = Some(task::current());
                Ok(Async::NotReady)
            }
        }
    }
}
///
/// The main polling component that implements the stream pipes
///
struct PipeMonitor {
}
///
/// Provides the 'Notify' interface for a polling function with a particular ID
///
struct PipeNotify<PollFn: Send> {
    /// Desync cell holding the spawned polling future; notifications re-poll it here
    next_poll: Arc<Desync<Option<Spawn<PollFn>>>>
}
impl PipeMonitor {
    ///
    /// Creates a new poll thread
    ///
    pub fn new() -> PipeMonitor {
        PipeMonitor {
        }
    }

    ///
    /// Performs a polling operation on a poll
    ///
    /// `this_poll` holds the spawned future; `next_poll` is the desync cell it
    /// lives in, which the notify handler uses to schedule the next poll.
    ///
    fn poll<PollFn>(this_poll: &mut Option<Spawn<PollFn>>, next_poll: Arc<Desync<Option<Spawn<PollFn>>>>)
    where PollFn: 'static+Send+Future<Item=(), Error=()> {
        // If the polling function exists...
        if let Some(mut poll) = this_poll.take() {
            // Create a notification
            let notify = PipeNotify {
                next_poll: next_poll
            };
            let notify = Arc::new(notify);

            // Poll the function
            // (Fixed: the source contained `¬ify` — an HTML-entity mangling of `&notify` — which does not compile)
            let poll_result = poll.poll_future_notify(&notify, 0);

            // Keep the polling function alive if it has not finished yet
            if poll_result != Ok(Async::Ready(())) {
                // The take() call means that the polling won't continue unless we pass it forward like this
                *this_poll = Some(poll);
            }
        }
    }

    ///
    /// Adds a polling function to the current thread. It will be called using the futures
    /// notification system (ie, can call things like the stream poll function)
    ///
    pub fn monitor<PollFn>(&self, poll_fn: PollFn)
    where PollFn: 'static+Send+FnMut() -> Poll<(), ()> {
        // Turn the polling function into a future (it will complete when monitoring is complete)
        let poll_fn = future::poll_fn(poll_fn);

        // Spawn it with an executor
        let poll_fn = executor::spawn(poll_fn);

        // Create a desync object for polling
        let poll_fn = Arc::new(Desync::new(Some(poll_fn)));
        let next_poll = Arc::clone(&poll_fn);

        // Perform the initial polling
        poll_fn.sync(move |poll_fn| Self::poll(poll_fn, next_poll));
    }
}
impl<PollFn> executor::Notify for PipeNotify<PollFn>
where PollFn: 'static+Send+Future<Item=(), Error=()> {
    /// Re-polls the monitored future whenever the executor signals readiness
    fn notify(&self, _id: usize) {
        let queue = Arc::clone(&self.next_poll);
        self.next_poll.sync(move |pending| PipeMonitor::poll(pending, queue));
    }
}
// --------------------------------------------------------------------------
use std::cell::RefCell;
use std::collections::HashSet;
use std::convert::TryFrom;
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::{atomic::AtomicBool, Arc};
use calloop::{generic::Generic, InsertError, LoopHandle, Source};
use drm::control::{
connector, crtc, plane, property, Device as ControlDevice, Event, Mode, PlaneResourceHandles, PlaneType,
ResourceHandles,
};
use drm::{ClientCapability, Device as BasicDevice, DriverCapability};
use nix::libc::dev_t;
use nix::sys::stat::fstat;
pub(super) mod atomic;
pub(super) mod legacy;
use super::error::Error;
use super::surface::{atomic::AtomicDrmSurface, legacy::LegacyDrmSurface, DrmSurface, DrmSurfaceInternal};
use crate::backend::allocator::{Format, Fourcc, Modifier};
use atomic::AtomicDrmDevice;
use legacy::LegacyDrmDevice;
/// An open drm device
pub struct DrmDevice<A: AsRawFd + 'static> {
    // st_rdev of the underlying node, obtained via fstat in `new`
    pub(super) dev_id: dev_t,
    // Atomic or legacy backend; shared with surfaces created from this device
    pub(crate) internal: Arc<DrmDeviceInternal<A>>,
    // User-installed callback invoked from `process_events`
    handler: Rc<RefCell<Option<Box<dyn DeviceHandler>>>>,
    #[cfg(feature = "backend_session")]
    pub(super) links: RefCell<Vec<crate::signaling::SignalToken>>,
    // True if the UniversalPlanes client capability could be enabled
    has_universal_planes: bool,
    resources: ResourceHandles,
    planes: PlaneResourceHandles,
    pub(super) logger: ::slog::Logger,
}
impl<A: AsRawFd + 'static> AsRawFd for DrmDevice<A> {
    /// Exposes the fd of whichever backend (atomic or legacy) is active
    fn as_raw_fd(&self) -> RawFd {
        match self.internal.as_ref() {
            DrmDeviceInternal::Atomic(dev) => dev.fd.as_raw_fd(),
            DrmDeviceInternal::Legacy(dev) => dev.fd.as_raw_fd(),
        }
    }
}
// Marker impls: with AsRawFd available, DrmDevice can act as a drm-rs basic
// and control device (ioctl access goes through the raw fd).
impl<A: AsRawFd + 'static> BasicDevice for DrmDevice<A> {}
impl<A: AsRawFd + 'static> ControlDevice for DrmDevice<A> {}
/// Wrapper around the open drm node, remembering whether we hold the drm master lock.
pub struct FdWrapper<A: AsRawFd + 'static> {
    fd: A,
    // True if `acquire_master_lock` succeeded in `DrmDevice::new`; released on drop
    pub(super) privileged: bool,
    logger: ::slog::Logger,
}
impl<A: AsRawFd + 'static> AsRawFd for FdWrapper<A> {
    // Plain delegation to the wrapped fd
    fn as_raw_fd(&self) -> RawFd {
        self.fd.as_raw_fd()
    }
}
// Marker impls so the wrapper itself can issue drm ioctls (acquire/release master, caps, ...)
impl<A: AsRawFd + 'static> BasicDevice for FdWrapper<A> {}
impl<A: AsRawFd + 'static> ControlDevice for FdWrapper<A> {}
impl<A: AsRawFd + 'static> Drop for FdWrapper<A> {
    fn drop(&mut self) {
        info!(self.logger, "Dropping device: {:?}", self.dev_path());

        // Only give the master lock back if we actually acquired it on open
        if !self.privileged {
            return;
        }
        if let Err(err) = self.release_master_lock() {
            error!(self.logger, "Failed to drop drm master state. Error: {}", err);
        }
    }
}
/// Backend selected at runtime: atomic modesetting when available, legacy otherwise
/// (see `DrmDevice::create_internal`).
pub enum DrmDeviceInternal<A: AsRawFd + 'static> {
    Atomic(AtomicDrmDevice<A>),
    Legacy(LegacyDrmDevice<A>),
}
impl<A: AsRawFd + 'static> AsRawFd for DrmDeviceInternal<A> {
    fn as_raw_fd(&self) -> RawFd {
        // Both variants wrap the same kind of fd; forward to the active one
        match self {
            DrmDeviceInternal::Atomic(dev) => dev.fd.as_raw_fd(),
            DrmDeviceInternal::Legacy(dev) => dev.fd.as_raw_fd(),
        }
    }
}
// Marker impls: the internal device is also a full drm control device
impl<A: AsRawFd + 'static> BasicDevice for DrmDeviceInternal<A> {}
impl<A: AsRawFd + 'static> ControlDevice for DrmDeviceInternal<A> {}
impl<A: AsRawFd + 'static> DrmDevice<A> {
/// Create a new [`DrmDevice`] from an open drm node
///
/// # Arguments
///
/// - `fd` - Open drm node
/// - `disable_connectors` - Setting this to true will initialize all connectors \
///     as disabled on device creation. smithay enables connectors, when attached \
///     to a surface, and disables them, when detached. Setting this to `false` \
///     requires usage of `drm-rs` to disable unused connectors to prevent them \
///     showing garbage, but will also prevent flickering of already turned on \
///     connectors (assuming you won't change the resolution).
/// - `logger` - Optional [`slog::Logger`] to be used by this device.
///
/// # Return
///
/// Returns an error if the file is no valid drm node or the device is not accessible.
pub fn new<L>(fd: A, disable_connectors: bool, logger: L) -> Result<Self, Error>
where
    A: AsRawFd + 'static,
    L: Into<Option<::slog::Logger>>,
{
    let log = crate::slog_or_fallback(logger).new(o!("smithay_module" => "backend_drm"));
    info!(log, "DrmDevice initializing");

    // The device id (st_rdev) uniquely identifies the drm char device
    let dev_id = fstat(fd.as_raw_fd()).map_err(Error::UnableToGetDeviceId)?.st_rdev;
    let active = Arc::new(AtomicBool::new(true));

    let dev = Arc::new({
        let mut dev = FdWrapper {
            fd,
            privileged: false,
            logger: log.clone(),
        };

        // We want to modeset, so we better be the master, if we run via a tty session.
        // This is only needed on older kernels. Newer kernels grant this permission,
        // if no other process is already the *master*. So we skip over this error.
        if dev.acquire_master_lock().is_err() {
            warn!(log, "Unable to become drm master, assuming unprivileged mode");
        } else {
            dev.privileged = true;
        }

        dev
    });

    // Universal planes expose cursor/overlay planes in addition to the primary ones
    let has_universal_planes = dev
        .set_client_capability(ClientCapability::UniversalPlanes, true)
        .is_ok();
    let resources = dev.resource_handles().map_err(|source| Error::Access {
        errmsg: "Error loading resource handles",
        dev: dev.dev_path(),
        source,
    })?;
    let planes = dev.plane_handles().map_err(|source| Error::Access {
        errmsg: "Error loading plane handles",
        dev: dev.dev_path(),
        source,
    })?;

    // Pick the atomic or legacy implementation for this device
    let internal = Arc::new(DrmDevice::create_internal(
        dev,
        active,
        disable_connectors,
        log.clone(),
    )?);

    Ok(DrmDevice {
        dev_id,
        internal,
        handler: Rc::new(RefCell::new(None)),
        #[cfg(feature = "backend_session")]
        links: RefCell::new(Vec::new()),
        has_universal_planes,
        resources,
        planes,
        logger: log,
    })
}
/// Chooses between the atomic and the legacy implementation.
/// `SMITHAY_USE_LEGACY` forces the legacy (non-atomic) code path for debugging.
fn create_internal(
    dev: Arc<FdWrapper<A>>,
    active: Arc<AtomicBool>,
    disable_connectors: bool,
    log: ::slog::Logger,
) -> Result<DrmDeviceInternal<A>, Error> {
    let force_legacy = match std::env::var("SMITHAY_USE_LEGACY") {
        Ok(value) => {
            let lowered = value.to_lowercase();
            value == "1" || lowered == "true" || lowered == "yes" || lowered == "y"
        }
        Err(_) => false,
    };
    if force_legacy {
        info!(log, "SMITHAY_USE_LEGACY is set. Forcing LegacyDrmDevice.");
    }

    // Always try to enable the atomic capability (it has a side effect on the
    // device), even when the legacy path is forced afterwards.
    let has_atomic = dev.set_client_capability(ClientCapability::Atomic, true).is_ok();

    Ok(if has_atomic && !force_legacy {
        DrmDeviceInternal::Atomic(AtomicDrmDevice::new(dev, active, disable_connectors, log)?)
    } else {
        info!(log, "Falling back to LegacyDrmDevice");
        DrmDeviceInternal::Legacy(LegacyDrmDevice::new(dev, active, disable_connectors, log)?)
    })
}
/// Processes any open events of the underlying file descriptor.
///
/// You should not call this function manually, but rather use
/// [`device_bind`] to register the device
/// to an [`EventLoop`](calloop::EventLoop)
/// and call this function when the device becomes readable
/// to synchronize your rendering to the vblank events of the open crtc's
pub fn process_events(&mut self) {
    match self.receive_events() {
        Ok(events) => {
            for event in events {
                match event {
                    // Page flips are forwarded to the installed handler as vblank callbacks
                    Event::PageFlip(event) => {
                        trace!(self.logger, "Got a page-flip event for crtc ({:?})", event.crtc);
                        if let Some(handler) = self.handler.borrow_mut().as_mut() {
                            handler.vblank(event.crtc);
                        }
                    }
                    // Everything else is only logged
                    _ => {
                        trace!(
                            self.logger,
                            "Got a non-page-flip event of device '{:?}'.",
                            self.dev_path()
                        );
                    }
                }
            }
        }
        Err(source) => {
            if let Some(handler) = self.handler.borrow_mut().as_mut() {
                handler.error(Error::Access {
                    errmsg: "Error processing drm events",
                    dev: self.dev_path(),
                    source,
                });
            }
        }
    }
}
/// Returns if the underlying implementation uses atomic-modesetting or not.
pub fn is_atomic(&self) -> bool {
    matches!(*self.internal, DrmDeviceInternal::Atomic(_))
}
/// Assigns a [`DeviceHandler`] called during event processing.
///
/// See [`device_bind`] and [`DeviceHandler`]
pub fn set_handler(&mut self, handler: impl DeviceHandler + 'static) {
    let boxed: Box<dyn DeviceHandler + 'static> = Box::new(handler);
    self.handler.replace(Some(boxed));
}
/// Clear a set [`DeviceHandler`](trait.DeviceHandler.html), if any
pub fn clear_handler(&mut self) {
    self.handler.replace(None);
}
/// Returns a list of crtcs for this device
pub fn crtcs(&self) -> &[crtc::Handle] {
    // Cached at construction time from `resource_handles()`
    self.resources.crtcs()
}
/// Returns a set of available planes for a given crtc
pub fn planes(&self, crtc: &crtc::Handle) -> Result<Planes, Error> {
    let mut primary = None;
    let mut cursor = None;
    let mut overlay = Vec::new();

    for plane in self.planes.planes() {
        let info = self.get_plane(*plane).map_err(|source| Error::Access {
            errmsg: "Failed to get plane information",
            dev: self.dev_path(),
            source,
        })?;

        // Skip planes that cannot be driven by the requested crtc
        if !self.resources.filter_crtcs(info.possible_crtcs()).contains(crtc) {
            continue;
        }

        match self.plane_type(*plane)? {
            PlaneType::Primary => primary = Some(*plane),
            PlaneType::Cursor => cursor = Some(*plane),
            PlaneType::Overlay => overlay.push(*plane),
        }
    }

    Ok(Planes {
        primary: primary.expect("Crtc has no primary plane"),
        cursor,
        // Cursor/overlay planes are only usable with universal planes enabled
        overlay: if !self.has_universal_planes { None } else { Some(overlay) },
    })
}
/// Determines whether `plane` is a primary, cursor or overlay plane by
/// reading its "type" property from the kernel.
fn plane_type(&self, plane: plane::Handle) -> Result<PlaneType, Error> {
    let props = self.get_properties(plane).map_err(|source| Error::Access {
        errmsg: "Failed to get properties of plane",
        dev: self.dev_path(),
        source,
    })?;
    let (ids, vals) = props.as_props_and_values();
    // Scan all properties for the one named "type" and map its value onto PlaneType
    for (&id, &val) in ids.iter().zip(vals.iter()) {
        let info = self.get_property(id).map_err(|source| Error::Access {
            errmsg: "Failed to get property info",
            dev: self.dev_path(),
            source,
        })?;
        if info.name().to_str().map(|x| x == "type").unwrap_or(false) {
            return Ok(match val {
                x if x == (PlaneType::Primary as u64) => PlaneType::Primary,
                x if x == (PlaneType::Cursor as u64) => PlaneType::Cursor,
                _ => PlaneType::Overlay,
            });
        }
    }
    // NOTE(review): panics if the kernel ever reports a plane without a "type"
    // property — assumed impossible for drm planes; confirm against the drm spec.
    unreachable!()
}
/// Creates a new rendering surface.
///
/// # Arguments
///
/// Initialization of surfaces happens through the types provided by
/// [`drm-rs`](drm).
///
/// - [`crtc`](drm::control::crtc)s represent scanout engines of the device pointing to one framebuffer. \
/// Their responsibility is to read the data of the framebuffer and export it into an "Encoder". \
/// The number of crtc's represent the number of independant output devices the hardware may handle.
/// - [`plane`](drm::control::plane)s represent a single plane on a crtc, which is composite together with
/// other planes on the same crtc to present the final image.
/// - [`mode`](drm::control::Mode) describes the resolution and rate of images produced by the crtc and \
/// has to be compatible with the provided `connectors`.
/// - [`connectors`] - List of connectors driven by the crtc. At least one(!) connector needs to be \
/// attached to a crtc in smithay.
pub fn create_surface(
&self,
crtc: crtc::Handle,
plane: plane::Handle,
mode: Mode,
connectors: &[connector::Handle],
) -> Result<DrmSurface<A>, Error> {
if connectors.is_empty() {
return Err(Error::SurfaceWithoutConnectors(crtc));
}
let info = self.get_plane(plane).map_err(|source| Error::Access {
errmsg: "Failed to get plane info",
dev: self.dev_path(),
source,
})?;
let filter = info.possible_crtcs();
if !self.resources.filter_crtcs(filter).contains(&crtc) {
return Err(Error::PlaneNotCompatible(crtc, plane));
}
let active = match &*self.internal {
DrmDeviceInternal::Atomic(dev) => dev.active.clone(),
DrmDeviceInternal::Legacy(dev) => dev.active.clone(),
};
let internal = if self.is_atomic() {
let mapping = match &*self.internal {
DrmDeviceInternal::Atomic(dev) => dev.prop_mapping.clone(),
_ => unreachable!(),
};
DrmSurfaceInternal::Atomic(AtomicDrmSurface::new(
self.internal.clone(),
active,
crtc,
plane,
mapping,
mode,
connectors,
self.logger.clone(),
)?)
} else {
if self.plane_type(plane)? != PlaneType::Primary {
return Err(Error::NonPrimaryPlane(plane));
}
DrmSurfaceInternal::Legacy(LegacyDrmSurface::new(
self.internal.clone(),
active,
crtc,
mode,
connectors,
self.logger.clone(),
)?)
};
// get plane formats
let plane_info = self.get_plane(plane).map_err(|source| Error::Access {
errmsg: "Error loading plane info",
dev: self.dev_path(),
source,
})?;
let mut formats = HashSet::new();
for code in plane_info
.formats()
.iter()
.flat_map(|x| Fourcc::try_from(*x).ok())
{
formats.insert(Format {
code,
modifier: Modifier::Invalid,
});
}
if let (Ok(1), &DrmSurfaceInternal::Atomic(ref surf)) = (
self.get_driver_capability(DriverCapability::AddFB2Modifiers),
&internal,
) {
let set = self.get_properties(plane).map_err(|source| Error::Access {
errmsg: "Failed to query properties",
dev: self.dev_path(),
source,
})?;
if let Ok(prop) = surf.plane_prop_handle(plane, "IN_FORMATS") {
let prop_info = self.get_property(prop).map_err(|source| Error::Access {
errmsg: "Failed to query property",
dev: self.dev_path(),
source,
})?;
let (handles, raw_values) = set.as_props_and_values();
let raw_value = raw_values[handles
.iter()
.enumerate()
.find_map(|(i, handle)| if *handle == prop { Some(i) } else { None })
.unwrap()];
if let property::Value::Blob(blob) = prop_info.value_type().convert_value(raw_value) {
let data = self.get_property_blob(blob).map_err(|source| Error::Access {
errmsg: "Failed to query property blob data",
dev: self.dev_path(),
source,
})?;
// be careful here, we have no idea about the alignment inside the blob, so always copy using `read_unaligned`,
// although slice::from_raw_parts would be so much nicer to iterate and to read.
unsafe {
let fmt_mod_blob_ptr = data.as_ptr() as *const drm_ffi::drm_format_modifier_blob;
let fmt_mod_blob = &*fmt_mod_blob_ptr;
let formats_ptr: *const u32 = fmt_mod_blob_ptr
.cast::<u8>()
.offset(fmt_mod_blob.formats_offset as isize)
as *const _;
let modifiers_ptr: *const drm_ffi::drm_format_modifier = fmt_mod_blob_ptr
.cast::<u8>()
.offset(fmt_mod_blob.modifiers_offset as isize)
as *const _;
let formats_ptr = formats_ptr as *const u32;
let modifiers_ptr = modifiers_ptr as *const drm_ffi::drm_format_modifier;
for i in 0..fmt_mod_blob.count_modifiers {
let mod_info = modifiers_ptr.offset(i as isize).read_unaligned();
for j in 0..64 {
if mod_info.formats & (1u64 << j) != 0 {
let code = Fourcc::try_from(
formats_ptr
.offset((j + mod_info.offset) as isize)
.read_unaligned(),
)
.ok();
let modifier = Modifier::from(mod_info.modifier);
if let Some(code) = code {
formats.insert(Format { code, modifier });
}
}
}
}
}
}
}
} else if self.plane_type(plane)? == PlaneType::Cursor {
// Force a LINEAR layout for the cursor if the driver doesn't support modifiers
for format in formats.clone() {
formats.insert(Format {
code: format.code,
modifier: Modifier::Linear,
});
}
}
if formats.is_empty() {
formats.insert(Format {
code: Fourcc::Argb8888,
modifier: Modifier::Invalid,
});
}
info!(
self.logger,
"Supported scan-out formats for plane ({:?}): {:?}", plane, formats
);
Ok(DrmSurface {
crtc,
plane,
internal: Arc::new(internal),
formats,
})
}
/// Returns the device_id of the underlying drm node
pub fn device_id(&self) -> dev_t {
    // Captured from fstat(st_rdev) in `new`
    self.dev_id
}
}
/// A set of planes as supported by a crtc
pub struct Planes {
    /// The primary plane of the crtc
    pub primary: plane::Handle,
    /// The cursor plane of the crtc, if available
    pub cursor: Option<plane::Handle>,
    /// Overlay planes supported by the crtc, if available
    /// (`None` when the universal-planes capability is not enabled)
    pub overlay: Option<Vec<plane::Handle>>,
}
impl<A: AsRawFd + 'static> DrmDeviceInternal<A> {
    /// Resets the drm state; delegates to the active backend implementation.
    pub(super) fn reset_state(&self) -> Result<(), Error> {
        match self {
            DrmDeviceInternal::Atomic(dev) => dev.reset_state(),
            DrmDeviceInternal::Legacy(dev) => dev.reset_state(),
        }
    }
}
/// Trait to receive events of a bound [`DrmDevice`]
///
/// See [`device_bind`]
pub trait DeviceHandler {
    /// A vblank blank event on the provided crtc has happened
    fn vblank(&mut self, crtc: crtc::Handle);
    /// An error happened while processing events
    fn error(&mut self, error: Error);
}
/// Trait representing open devices that *may* return a `Path`
pub trait DevPath {
    /// Returns the path of the open device if possible
    fn dev_path(&self) -> Option<PathBuf>;
}
impl<A: AsRawFd> DevPath for A {
    fn dev_path(&self) -> Option<PathBuf> {
        use std::fs;

        // Resolve the fd's procfs symlink to the underlying device node path
        fs::read_link(format!("/proc/self/fd/{:?}", self.as_raw_fd())).ok()
    }
}
/// calloop source associated with a Device
pub type DrmSource<A> = Generic<DrmDevice<A>>;
/// Bind a `Device` to an [`EventLoop`](calloop::EventLoop),
///
/// This will cause it to receive events and feed them into a previously
/// set [`DeviceHandler`](DeviceHandler).
pub fn device_bind<A, Data>(
    handle: &LoopHandle<Data>,
    device: DrmDevice<A>,
) -> ::std::result::Result<Source<DrmSource<A>>, InsertError<DrmSource<A>>>
where
    A: AsRawFd + 'static,
    Data: 'static,
{
    // Level-triggered readability: fire whenever the drm fd has pending events
    handle.insert_source(
        Generic::new(device, calloop::Interest::Readable, calloop::Mode::Level),
        |_, drm, _| {
            drm.process_events();
            Ok(())
        },
    )
}
// drm: Do not spam ALL supported plane formats
use std::cell::RefCell;
use std::collections::HashSet;
use std::convert::TryFrom;
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::{atomic::AtomicBool, Arc};
use calloop::{generic::Generic, InsertError, LoopHandle, Source};
use drm::control::{
connector, crtc, plane, property, Device as ControlDevice, Event, Mode, PlaneResourceHandles, PlaneType,
ResourceHandles,
};
use drm::{ClientCapability, Device as BasicDevice, DriverCapability};
use nix::libc::dev_t;
use nix::sys::stat::fstat;
pub(super) mod atomic;
pub(super) mod legacy;
use super::error::Error;
use super::surface::{atomic::AtomicDrmSurface, legacy::LegacyDrmSurface, DrmSurface, DrmSurfaceInternal};
use crate::backend::allocator::{Format, Fourcc, Modifier};
use atomic::AtomicDrmDevice;
use legacy::LegacyDrmDevice;
/// An open drm device
pub struct DrmDevice<A: AsRawFd + 'static> {
    // st_rdev of the underlying node, obtained via fstat in `new`
    pub(super) dev_id: dev_t,
    // Atomic or legacy backend; shared with surfaces created from this device
    pub(crate) internal: Arc<DrmDeviceInternal<A>>,
    // User-installed callback invoked from `process_events`
    handler: Rc<RefCell<Option<Box<dyn DeviceHandler>>>>,
    #[cfg(feature = "backend_session")]
    pub(super) links: RefCell<Vec<crate::signaling::SignalToken>>,
    // True if the UniversalPlanes client capability could be enabled
    has_universal_planes: bool,
    resources: ResourceHandles,
    planes: PlaneResourceHandles,
    pub(super) logger: ::slog::Logger,
}
impl<A: AsRawFd + 'static> AsRawFd for DrmDevice<A> {
    /// Exposes the fd of whichever backend (atomic or legacy) is active
    fn as_raw_fd(&self) -> RawFd {
        match self.internal.as_ref() {
            DrmDeviceInternal::Atomic(dev) => dev.fd.as_raw_fd(),
            DrmDeviceInternal::Legacy(dev) => dev.fd.as_raw_fd(),
        }
    }
}
// Marker impls: with AsRawFd available, DrmDevice can act as a drm-rs basic
// and control device (ioctl access goes through the raw fd).
impl<A: AsRawFd + 'static> BasicDevice for DrmDevice<A> {}
impl<A: AsRawFd + 'static> ControlDevice for DrmDevice<A> {}
/// Wrapper around the open drm node, remembering whether we hold the drm master lock.
pub struct FdWrapper<A: AsRawFd + 'static> {
    fd: A,
    // True if `acquire_master_lock` succeeded in `DrmDevice::new`; released on drop
    pub(super) privileged: bool,
    logger: ::slog::Logger,
}
impl<A: AsRawFd + 'static> AsRawFd for FdWrapper<A> {
    // Plain delegation to the wrapped fd
    fn as_raw_fd(&self) -> RawFd {
        self.fd.as_raw_fd()
    }
}
// Marker impls so the wrapper itself can issue drm ioctls (acquire/release master, caps, ...)
impl<A: AsRawFd + 'static> BasicDevice for FdWrapper<A> {}
impl<A: AsRawFd + 'static> ControlDevice for FdWrapper<A> {}
impl<A: AsRawFd + 'static> Drop for FdWrapper<A> {
    fn drop(&mut self) {
        info!(self.logger, "Dropping device: {:?}", self.dev_path());

        // Only give the master lock back if we actually acquired it on open
        if !self.privileged {
            return;
        }
        if let Err(err) = self.release_master_lock() {
            error!(self.logger, "Failed to drop drm master state. Error: {}", err);
        }
    }
}
/// Backend selected at runtime: atomic modesetting when available, legacy otherwise
/// (see `DrmDevice::create_internal`).
pub enum DrmDeviceInternal<A: AsRawFd + 'static> {
    Atomic(AtomicDrmDevice<A>),
    Legacy(LegacyDrmDevice<A>),
}
impl<A: AsRawFd + 'static> AsRawFd for DrmDeviceInternal<A> {
    fn as_raw_fd(&self) -> RawFd {
        // Both variants wrap the same kind of fd; forward to the active one
        match self {
            DrmDeviceInternal::Atomic(dev) => dev.fd.as_raw_fd(),
            DrmDeviceInternal::Legacy(dev) => dev.fd.as_raw_fd(),
        }
    }
}
// Marker impls: the internal device is also a full drm control device
impl<A: AsRawFd + 'static> BasicDevice for DrmDeviceInternal<A> {}
impl<A: AsRawFd + 'static> ControlDevice for DrmDeviceInternal<A> {}
impl<A: AsRawFd + 'static> DrmDevice<A> {
/// Create a new [`DrmDevice`] from an open drm node
///
/// # Arguments
///
/// - `fd` - Open drm node
/// - `disable_connectors` - Setting this to true will initialize all connectors \
///     as disabled on device creation. smithay enables connectors, when attached \
///     to a surface, and disables them, when detached. Setting this to `false` \
///     requires usage of `drm-rs` to disable unused connectors to prevent them \
///     showing garbage, but will also prevent flickering of already turned on \
///     connectors (assuming you won't change the resolution).
/// - `logger` - Optional [`slog::Logger`] to be used by this device.
///
/// # Return
///
/// Returns an error if the file is no valid drm node or the device is not accessible.
pub fn new<L>(fd: A, disable_connectors: bool, logger: L) -> Result<Self, Error>
where
    A: AsRawFd + 'static,
    L: Into<Option<::slog::Logger>>,
{
    let log = crate::slog_or_fallback(logger).new(o!("smithay_module" => "backend_drm"));
    info!(log, "DrmDevice initializing");

    // The device id (st_rdev) uniquely identifies the drm char device
    let dev_id = fstat(fd.as_raw_fd()).map_err(Error::UnableToGetDeviceId)?.st_rdev;
    let active = Arc::new(AtomicBool::new(true));

    let dev = Arc::new({
        let mut dev = FdWrapper {
            fd,
            privileged: false,
            logger: log.clone(),
        };

        // We want to modeset, so we better be the master, if we run via a tty session.
        // This is only needed on older kernels. Newer kernels grant this permission,
        // if no other process is already the *master*. So we skip over this error.
        if dev.acquire_master_lock().is_err() {
            warn!(log, "Unable to become drm master, assuming unprivileged mode");
        } else {
            dev.privileged = true;
        }

        dev
    });

    // Universal planes expose cursor/overlay planes in addition to the primary ones
    let has_universal_planes = dev
        .set_client_capability(ClientCapability::UniversalPlanes, true)
        .is_ok();
    let resources = dev.resource_handles().map_err(|source| Error::Access {
        errmsg: "Error loading resource handles",
        dev: dev.dev_path(),
        source,
    })?;
    let planes = dev.plane_handles().map_err(|source| Error::Access {
        errmsg: "Error loading plane handles",
        dev: dev.dev_path(),
        source,
    })?;

    // Pick the atomic or legacy implementation for this device
    let internal = Arc::new(DrmDevice::create_internal(
        dev,
        active,
        disable_connectors,
        log.clone(),
    )?);

    Ok(DrmDevice {
        dev_id,
        internal,
        handler: Rc::new(RefCell::new(None)),
        #[cfg(feature = "backend_session")]
        links: RefCell::new(Vec::new()),
        has_universal_planes,
        resources,
        planes,
        logger: log,
    })
}
/// Chooses between the atomic and the legacy implementation.
/// `SMITHAY_USE_LEGACY` forces the legacy (non-atomic) code path for debugging.
fn create_internal(
    dev: Arc<FdWrapper<A>>,
    active: Arc<AtomicBool>,
    disable_connectors: bool,
    log: ::slog::Logger,
) -> Result<DrmDeviceInternal<A>, Error> {
    let force_legacy = match std::env::var("SMITHAY_USE_LEGACY") {
        Ok(value) => {
            let lowered = value.to_lowercase();
            value == "1" || lowered == "true" || lowered == "yes" || lowered == "y"
        }
        Err(_) => false,
    };
    if force_legacy {
        info!(log, "SMITHAY_USE_LEGACY is set. Forcing LegacyDrmDevice.");
    }

    // Always try to enable the atomic capability (it has a side effect on the
    // device), even when the legacy path is forced afterwards.
    let has_atomic = dev.set_client_capability(ClientCapability::Atomic, true).is_ok();

    Ok(if has_atomic && !force_legacy {
        DrmDeviceInternal::Atomic(AtomicDrmDevice::new(dev, active, disable_connectors, log)?)
    } else {
        info!(log, "Falling back to LegacyDrmDevice");
        DrmDeviceInternal::Legacy(LegacyDrmDevice::new(dev, active, disable_connectors, log)?)
    })
}
/// Processes any open events of the underlying file descriptor.
///
/// You should not call this function manually, but rather use
/// [`device_bind`] to register the device
/// to an [`EventLoop`](calloop::EventLoop)
/// and call this function when the device becomes readable
/// to synchronize your rendering to the vblank events of the open crtc's
pub fn process_events(&mut self) {
    match self.receive_events() {
        Ok(events) => {
            for event in events {
                match event {
                    // Page flips are forwarded to the installed handler as vblank callbacks
                    Event::PageFlip(event) => {
                        trace!(self.logger, "Got a page-flip event for crtc ({:?})", event.crtc);
                        if let Some(handler) = self.handler.borrow_mut().as_mut() {
                            handler.vblank(event.crtc);
                        }
                    }
                    // Everything else is only logged
                    _ => {
                        trace!(
                            self.logger,
                            "Got a non-page-flip event of device '{:?}'.",
                            self.dev_path()
                        );
                    }
                }
            }
        }
        Err(source) => {
            if let Some(handler) = self.handler.borrow_mut().as_mut() {
                handler.error(Error::Access {
                    errmsg: "Error processing drm events",
                    dev: self.dev_path(),
                    source,
                });
            }
        }
    }
}
/// Returns if the underlying implementation uses atomic-modesetting or not.
pub fn is_atomic(&self) -> bool {
    matches!(*self.internal, DrmDeviceInternal::Atomic(_))
}
/// Assigns a [`DeviceHandler`] called during event processing.
///
/// See [`device_bind`] and [`DeviceHandler`]
pub fn set_handler(&mut self, handler: impl DeviceHandler + 'static) {
    let boxed: Box<dyn DeviceHandler + 'static> = Box::new(handler);
    self.handler.replace(Some(boxed));
}
/// Clear a set [`DeviceHandler`](trait.DeviceHandler.html), if any
pub fn clear_handler(&mut self) {
    self.handler.replace(None);
}
/// Returns a list of crtcs for this device
pub fn crtcs(&self) -> &[crtc::Handle] {
    // Cached at construction time from `resource_handles()`
    self.resources.crtcs()
}
/// Returns a set of available planes for a given crtc
pub fn planes(&self, crtc: &crtc::Handle) -> Result<Planes, Error> {
    let mut primary = None;
    let mut cursor = None;
    let mut overlay = Vec::new();

    for plane in self.planes.planes() {
        let info = self.get_plane(*plane).map_err(|source| Error::Access {
            errmsg: "Failed to get plane information",
            dev: self.dev_path(),
            source,
        })?;

        // Skip planes that cannot be driven by the requested crtc
        if !self.resources.filter_crtcs(info.possible_crtcs()).contains(crtc) {
            continue;
        }

        match self.plane_type(*plane)? {
            PlaneType::Primary => primary = Some(*plane),
            PlaneType::Cursor => cursor = Some(*plane),
            PlaneType::Overlay => overlay.push(*plane),
        }
    }

    Ok(Planes {
        primary: primary.expect("Crtc has no primary plane"),
        cursor,
        // Cursor/overlay planes are only usable with universal planes enabled
        overlay: if !self.has_universal_planes { None } else { Some(overlay) },
    })
}
/// Determines whether `plane` is a primary, cursor or overlay plane by
/// reading its "type" property from the kernel.
fn plane_type(&self, plane: plane::Handle) -> Result<PlaneType, Error> {
    let props = self.get_properties(plane).map_err(|source| Error::Access {
        errmsg: "Failed to get properties of plane",
        dev: self.dev_path(),
        source,
    })?;
    let (ids, vals) = props.as_props_and_values();
    // Scan all properties for the one named "type" and map its value onto PlaneType
    for (&id, &val) in ids.iter().zip(vals.iter()) {
        let info = self.get_property(id).map_err(|source| Error::Access {
            errmsg: "Failed to get property info",
            dev: self.dev_path(),
            source,
        })?;
        if info.name().to_str().map(|x| x == "type").unwrap_or(false) {
            return Ok(match val {
                x if x == (PlaneType::Primary as u64) => PlaneType::Primary,
                x if x == (PlaneType::Cursor as u64) => PlaneType::Cursor,
                _ => PlaneType::Overlay,
            });
        }
    }
    // NOTE(review): panics if the kernel ever reports a plane without a "type"
    // property — assumed impossible for drm planes; confirm against the drm spec.
    unreachable!()
}
/// Creates a new rendering surface.
///
/// # Arguments
///
/// Initialization of surfaces happens through the types provided by
/// [`drm-rs`](drm).
///
/// - [`crtc`](drm::control::crtc)s represent scanout engines of the device pointing to one framebuffer. \
///   Their responsibility is to read the data of the framebuffer and export it into an "Encoder". \
///   The number of crtc's represent the number of independant output devices the hardware may handle.
/// - [`plane`](drm::control::plane)s represent a single plane on a crtc, which is composite together with
///   other planes on the same crtc to present the final image.
/// - [`mode`](drm::control::Mode) describes the resolution and rate of images produced by the crtc and \
///   has to be compatible with the provided `connectors`.
/// - [`connectors`] - List of connectors driven by the crtc. At least one(!) connector needs to be \
///   attached to a crtc in smithay.
pub fn create_surface(
    &self,
    crtc: crtc::Handle,
    plane: plane::Handle,
    mode: Mode,
    connectors: &[connector::Handle],
) -> Result<DrmSurface<A>, Error> {
    if connectors.is_empty() {
        return Err(Error::SurfaceWithoutConnectors(crtc));
    }

    // The plane must be usable on the requested crtc
    let info = self.get_plane(plane).map_err(|source| Error::Access {
        errmsg: "Failed to get plane info",
        dev: self.dev_path(),
        source,
    })?;
    let filter = info.possible_crtcs();
    if !self.resources.filter_crtcs(filter).contains(&crtc) {
        return Err(Error::PlaneNotCompatible(crtc, plane));
    }

    let active = match &*self.internal {
        DrmDeviceInternal::Atomic(dev) => dev.active.clone(),
        DrmDeviceInternal::Legacy(dev) => dev.active.clone(),
    };

    let internal = if self.is_atomic() {
        let mapping = match &*self.internal {
            DrmDeviceInternal::Atomic(dev) => dev.prop_mapping.clone(),
            _ => unreachable!(),
        };
        DrmSurfaceInternal::Atomic(AtomicDrmSurface::new(
            self.internal.clone(),
            active,
            crtc,
            plane,
            mapping,
            mode,
            connectors,
            self.logger.clone(),
        )?)
    } else {
        // The legacy API can only drive the primary plane
        if self.plane_type(plane)? != PlaneType::Primary {
            return Err(Error::NonPrimaryPlane(plane));
        }
        DrmSurfaceInternal::Legacy(LegacyDrmSurface::new(
            self.internal.clone(),
            active,
            crtc,
            mode,
            connectors,
            self.logger.clone(),
        )?)
    };

    // get plane formats
    let plane_info = self.get_plane(plane).map_err(|source| Error::Access {
        errmsg: "Error loading plane info",
        dev: self.dev_path(),
        source,
    })?;
    let mut formats = HashSet::new();
    for code in plane_info
        .formats()
        .iter()
        .flat_map(|x| Fourcc::try_from(*x).ok())
    {
        formats.insert(Format {
            code,
            modifier: Modifier::Invalid,
        });
    }

    // If the driver supports modifiers, extend the list with the per-format
    // modifiers advertised via the plane's IN_FORMATS property blob.
    if let (Ok(1), &DrmSurfaceInternal::Atomic(ref surf)) = (
        self.get_driver_capability(DriverCapability::AddFB2Modifiers),
        &internal,
    ) {
        let set = self.get_properties(plane).map_err(|source| Error::Access {
            errmsg: "Failed to query properties",
            dev: self.dev_path(),
            source,
        })?;
        if let Ok(prop) = surf.plane_prop_handle(plane, "IN_FORMATS") {
            let prop_info = self.get_property(prop).map_err(|source| Error::Access {
                errmsg: "Failed to query property",
                dev: self.dev_path(),
                source,
            })?;
            let (handles, raw_values) = set.as_props_and_values();
            let raw_value = raw_values[handles
                .iter()
                .enumerate()
                .find_map(|(i, handle)| if *handle == prop { Some(i) } else { None })
                .unwrap()];
            if let property::Value::Blob(blob) = prop_info.value_type().convert_value(raw_value) {
                let data = self.get_property_blob(blob).map_err(|source| Error::Access {
                    errmsg: "Failed to query property blob data",
                    dev: self.dev_path(),
                    source,
                })?;
                // be careful here, we have no idea about the alignment inside the blob, so always copy using `read_unaligned`,
                // although slice::from_raw_parts would be so much nicer to iterate and to read.
                unsafe {
                    let fmt_mod_blob_ptr = data.as_ptr() as *const drm_ffi::drm_format_modifier_blob;
                    let fmt_mod_blob = &*fmt_mod_blob_ptr;

                    let formats_ptr: *const u32 = fmt_mod_blob_ptr
                        .cast::<u8>()
                        .offset(fmt_mod_blob.formats_offset as isize)
                        as *const _;
                    let modifiers_ptr: *const drm_ffi::drm_format_modifier = fmt_mod_blob_ptr
                        .cast::<u8>()
                        .offset(fmt_mod_blob.modifiers_offset as isize)
                        as *const _;
                    let formats_ptr = formats_ptr as *const u32;
                    let modifiers_ptr = modifiers_ptr as *const drm_ffi::drm_format_modifier;

                    for i in 0..fmt_mod_blob.count_modifiers {
                        let mod_info = modifiers_ptr.offset(i as isize).read_unaligned();
                        // Each modifier entry carries a 64-bit mask of the formats
                        // (starting at `mod_info.offset`) it applies to
                        for j in 0..64 {
                            if mod_info.formats & (1u64 << j) != 0 {
                                let code = Fourcc::try_from(
                                    formats_ptr
                                        .offset((j + mod_info.offset) as isize)
                                        .read_unaligned(),
                                )
                                .ok();
                                let modifier = Modifier::from(mod_info.modifier);
                                if let Some(code) = code {
                                    formats.insert(Format { code, modifier });
                                }
                            }
                        }
                    }
                }
            }
        }
    } else if self.plane_type(plane)? == PlaneType::Cursor {
        // Force a LINEAR layout for the cursor if the driver doesn't support modifiers
        for format in formats.clone() {
            formats.insert(Format {
                code: format.code,
                modifier: Modifier::Linear,
            });
        }
    }

    if formats.is_empty() {
        formats.insert(Format {
            code: Fourcc::Argb8888,
            modifier: Modifier::Invalid,
        });
    }

    // Logged at trace level: the full format list can be very long
    trace!(
        self.logger,
        "Supported scan-out formats for plane ({:?}): {:?}", plane, formats
    );

    Ok(DrmSurface {
        crtc,
        plane,
        internal: Arc::new(internal),
        formats,
    })
}
/// Returns the `device_id` (`dev_t`) of the underlying drm node.
///
/// This is the value cached at device creation; no syscall is performed.
pub fn device_id(&self) -> dev_t {
    self.dev_id
}
}
/// A set of planes as supported by a crtc.
///
/// Obtained by querying the device; a crtc always has a primary plane,
/// while cursor and overlay planes are optional driver capabilities.
pub struct Planes {
    /// The primary plane of the crtc (always present).
    pub primary: plane::Handle,
    /// The cursor plane of the crtc, if available.
    pub cursor: Option<plane::Handle>,
    /// Overlay planes supported by the crtc, if available.
    pub overlay: Option<Vec<plane::Handle>>,
}
impl<A: AsRawFd + 'static> DrmDeviceInternal<A> {
    /// Resets the drm state by delegating to the wrapped atomic or legacy
    /// device implementation, whichever this device was opened as.
    pub(super) fn reset_state(&self) -> Result<(), Error> {
        match self {
            DrmDeviceInternal::Atomic(dev) => dev.reset_state(),
            DrmDeviceInternal::Legacy(dev) => dev.reset_state(),
        }
    }
}
/// Trait to receive events of a bound [`DrmDevice`]
///
/// See [`device_bind`]
pub trait DeviceHandler {
    /// A vblank event on the provided crtc has happened
    fn vblank(&mut self, crtc: crtc::Handle);
    /// An error happened while processing events
    fn error(&mut self, error: Error);
}
/// Trait representing open devices that *may* return a `Path`
pub trait DevPath {
    /// Returns the path of the open device if possible.
    ///
    /// Returns `None` when the path cannot be resolved (e.g. the fd does
    /// not correspond to a path-backed file).
    fn dev_path(&self) -> Option<PathBuf>;
}
impl<A: AsRawFd> DevPath for A {
    /// Resolve the path behind this file descriptor by following the
    /// procfs symlink `/proc/self/fd/<fd>`; yields `None` on any error.
    fn dev_path(&self) -> Option<PathBuf> {
        let fd = self.as_raw_fd();
        let proc_link = format!("/proc/self/fd/{:?}", fd);
        match std::fs::read_link(proc_link) {
            Ok(path) => Some(path),
            Err(_) => None,
        }
    }
}
/// calloop source associated with a Device.
///
/// Wraps the [`DrmDevice`] in calloop's [`Generic`] fd-based event source.
pub type DrmSource<A> = Generic<DrmDevice<A>>;
/// Bind a `Device` to an [`EventLoop`](calloop::EventLoop),
///
/// This will cause it to receive events and feed them into a previously
/// set [`DeviceHandler`](DeviceHandler).
///
/// Returns the inserted [`Source`] on success, or an [`InsertError`]
/// carrying back the source if the event loop rejected it.
pub fn device_bind<A, Data>(
    handle: &LoopHandle<Data>,
    device: DrmDevice<A>,
) -> ::std::result::Result<Source<DrmSource<A>>, InsertError<DrmSource<A>>>
where
    A: AsRawFd + 'static,
    Data: 'static,
{
    // Level-triggered readability: drm events are drained by process_events.
    let source = Generic::new(device, calloop::Interest::Readable, calloop::Mode::Level);
    handle.insert_source(source, |_, source, _| {
        // Errors are reported through the registered DeviceHandler, so the
        // calloop callback itself always succeeds.
        source.process_events();
        Ok(())
    })
}
|
#![allow(non_snake_case)]
#[macro_use]
extern crate log;
#[macro_use]
extern crate ash;
extern crate byteorder;
#[macro_use]
extern crate derivative;
extern crate gfx_hal as hal;
#[macro_use]
extern crate lazy_static;
#[cfg(feature = "use-rtld-next")]
extern crate shared_library;
extern crate smallvec;
#[cfg(target_os = "macos")]
extern crate core_graphics;
#[cfg(target_os = "macos")]
#[macro_use]
extern crate objc;
#[cfg(windows)]
extern crate winapi;
#[cfg(feature = "winit")]
extern crate winit;
#[cfg(all(
feature = "x11",
unix,
not(target_os = "android"),
not(target_os = "macos")
))]
extern crate x11;
#[cfg(all(
feature = "xcb",
unix,
not(target_os = "android"),
not(target_os = "macos")
))]
extern crate xcb;
use ash::extensions::{self, ext::DebugUtils};
use ash::version::{DeviceV1_0, EntryV1_0, InstanceV1_0};
use ash::vk;
#[cfg(not(feature = "use-rtld-next"))]
use ash::{Entry, LoadingError};
use crate::hal::adapter::DeviceType;
use crate::hal::device::{DeviceLost, OutOfMemory, SurfaceLost};
use crate::hal::error::{DeviceCreationError, HostExecutionError};
use crate::hal::pso::PipelineStage;
use crate::hal::{
format,
image,
memory,
queue,
window::{PresentError, Suboptimal},
};
use crate::hal::{Features, Limits, PatchSize, QueueType, SwapImageIndex};
use std::borrow::{Borrow, Cow};
use std::ffi::{CStr, CString};
use std::sync::Arc;
use std::{fmt, mem, ptr, slice};
#[cfg(feature = "use-rtld-next")]
use ash::{EntryCustom, LoadingError};
#[cfg(feature = "use-rtld-next")]
use shared_library::dynamic_library::{DynamicLibrary, SpecialHandles};
mod command;
mod conv;
mod device;
mod info;
mod native;
mod pool;
mod result;
mod window;
// CStr's cannot be constant yet; until `const fn` lands we build these
// name lists lazily via lazy_static.
lazy_static! {
    // Validation layers: the per-component LunarG/Google set on Android
    // debug builds, the single meta-layer on other debug builds, nothing
    // in release builds.
    static ref LAYERS: Vec<&'static CStr> = if cfg!(all(target_os = "android", debug_assertions)) {
        vec![
            CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_core_validation\0").unwrap(),
            CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_object_tracker\0").unwrap(),
            CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_parameter_validation\0").unwrap(),
            CStr::from_bytes_with_nul(b"VK_LAYER_GOOGLE_threading\0").unwrap(),
            CStr::from_bytes_with_nul(b"VK_LAYER_GOOGLE_unique_objects\0").unwrap(),
        ]
    } else if cfg!(debug_assertions) {
        vec![CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_standard_validation\0").unwrap()]
    } else {
        vec![]
    };
    // Instance-level extensions requested in addition to the surface ones;
    // debug-utils only on debug builds.
    static ref EXTENSIONS: Vec<&'static CStr> = vec![#[cfg(debug_assertions)] CStr::from_bytes_with_nul(b"VK_EXT_debug_utils\0").unwrap()];
    // Device-level extensions: swapchain support is always requested.
    static ref DEVICE_EXTENSIONS: Vec<&'static CStr> = vec![extensions::khr::Swapchain::name()];
    static ref SURFACE_EXTENSIONS: Vec<&'static CStr> = vec![
        extensions::khr::Surface::name(),
        // Platform-specific WSI extensions
        #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
        extensions::khr::XlibSurface::name(),
        #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
        extensions::khr::XcbSurface::name(),
        #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
        extensions::khr::WaylandSurface::name(),
        #[cfg(target_os = "android")]
        extensions::khr::AndroidSurface::name(),
        #[cfg(target_os = "windows")]
        extensions::khr::Win32Surface::name(),
        #[cfg(target_os = "macos")]
        extensions::mvk::MacOSSurface::name(),
    ];
}
#[cfg(not(feature = "use-rtld-next"))]
lazy_static! {
    // Entry function pointers, loaded from the Vulkan shared library on
    // first access; an Err here means the loader could not be found.
    pub static ref VK_ENTRY: Result<Entry, LoadingError> = Entry::new();
}
#[cfg(feature = "use-rtld-next")]
lazy_static! {
    // Entry function pointers, resolved through RTLD_NEXT instead of
    // loading the Vulkan library ourselves; unresolved symbols become null.
    // NOTE(review): `V1_0` is not among the imports visible in this file —
    // confirm this feature-gated path still compiles.
    pub static ref VK_ENTRY: Result<EntryCustom<V1_0, ()>, LoadingError>
        = EntryCustom::new_custom(
            || Ok(()),
            |_, name| unsafe {
                DynamicLibrary::symbol_special(SpecialHandles::Next, &*name.to_string_lossy())
                    .unwrap_or(ptr::null_mut())
            }
        );
}
/// Owned Vulkan instance handle together with the optional debug-utils
/// messenger created alongside it (debug builds only); both are destroyed
/// in the `Drop` impl.
pub struct RawInstance(
    pub ash::Instance,
    Option<(DebugUtils, vk::DebugUtilsMessengerEXT)>,
);
impl Drop for RawInstance {
    fn drop(&mut self) {
        unsafe {
            // The messenger must be destroyed before the instance it was
            // created from. The cfg mirrors the creation site: in release
            // builds the field is always None.
            #[cfg(debug_assertions)]
            {
                if let Some((ref ext, callback)) = self.1 {
                    ext.destroy_debug_utils_messenger(callback, None);
                }
            }
            self.0.destroy_instance(None);
        }
    }
}
/// gfx-hal instance backed by a reference-counted raw Vulkan instance.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct Instance {
    // Shared so devices/queues created from it can keep it alive.
    #[derivative(Debug = "ignore")]
    pub raw: Arc<RawInstance>,
    /// Supported extensions of this instance.
    pub extensions: Vec<&'static CStr>,
}
/// Translate Vulkan queue capability flags into the matching HAL queue type.
///
/// `TRANSFER` support is implied (and therefore ignored) for queues that
/// already expose graphics or compute capability.
fn map_queue_type(flags: vk::QueueFlags) -> QueueType {
    let graphics = flags.contains(vk::QueueFlags::GRAPHICS);
    let compute = flags.contains(vk::QueueFlags::COMPUTE);
    let transfer = flags.contains(vk::QueueFlags::TRANSFER);
    match (graphics, compute, transfer) {
        (true, true, _) => QueueType::General,
        (true, false, _) => QueueType::Graphics,
        (false, true, _) => QueueType::Compute,
        (false, false, true) => QueueType::Transfer,
        // TODO: present-only queues?
        (false, false, false) => unimplemented!(),
    }
}
/// Render an array of `VkDebugUtilsLabelEXT` structs as a comma-separated
/// list of their label names; labels with a null name pointer are skipped.
///
/// Returns `None` when `count` is zero so callers can omit empty sections.
///
/// # Safety
/// `label_structs` must point to at least `count` valid label structs.
unsafe fn display_debug_utils_label_ext(
    label_structs: *mut vk::DebugUtilsLabelEXT,
    count: usize,
) -> Option<String> {
    if count == 0 {
        return None;
    }
    let labels = slice::from_raw_parts::<vk::DebugUtilsLabelEXT>(label_structs, count);
    let names = labels
        .iter()
        .filter_map(|label| {
            let name_ptr = label.p_label_name.as_ref()?;
            Some(CStr::from_ptr(name_ptr).to_string_lossy().into_owned())
        })
        .collect::<Vec<String>>();
    Some(names.join(", "))
}
/// Render an array of `VkDebugUtilsObjectNameInfoEXT` structs as a
/// comma-separated list of `(type, handle[, name])` tuples.
///
/// Returns `None` when `count` is zero so callers can omit empty sections.
///
/// # Safety
/// `info_structs` must point to at least `count` valid info structs.
unsafe fn display_debug_utils_object_name_info_ext(
    info_structs: *mut vk::DebugUtilsObjectNameInfoEXT,
    count: usize,
) -> Option<String> {
    if count == 0 {
        return None;
    }
    //TODO: use color field of vk::DebugUtilsLabelsExt in a meaningful way?
    let infos = slice::from_raw_parts::<vk::DebugUtilsObjectNameInfoEXT>(info_structs, count);
    let mut rendered = Vec::with_capacity(infos.len());
    for obj_info in infos {
        let object_name = obj_info
            .p_object_name
            .as_ref()
            .map(|name| CStr::from_ptr(name).to_string_lossy().into_owned());
        let entry = if let Some(name) = object_name {
            format!(
                "(type: {:?}, hndl: {}, name: {})",
                obj_info.object_type,
                &obj_info.object_handle.to_string(),
                name
            )
        } else {
            format!(
                "(type: {:?}, hndl: {})",
                obj_info.object_type,
                &obj_info.object_handle.to_string()
            )
        };
        rendered.push(entry);
    }
    Some(rendered.join(", "))
}
/// Callback installed with `VK_EXT_debug_utils`; formats validation-layer
/// messages (including queue/command-buffer/object context) and forwards
/// them to the `log` crate.
///
/// # Safety
/// Invoked by the Vulkan loader; `p_callback_data` must point to a valid
/// callback-data struct for the duration of the call.
unsafe extern "system" fn debug_utils_messenger_callback(
    message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
    message_type: vk::DebugUtilsMessageTypeFlagsEXT,
    p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT,
    _user_data: *mut std::os::raw::c_void,
) -> vk::Bool32 {
    let callback_data = *p_callback_data;
    // Map Vulkan severity bits onto log levels; unknown bits degrade to Warn.
    let message_severity = match message_severity {
        vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => log::Level::Error,
        vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => log::Level::Warn,
        vk::DebugUtilsMessageSeverityFlagsEXT::INFO => log::Level::Info,
        vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => log::Level::Trace,
        _ => log::Level::Warn,
    };
    let message_type = &format!("{:?}", message_type);
    let message_id_number: i32 = callback_data.message_id_number as i32;
    // Both name and message pointers may be null; substitute empty strings.
    let message_id_name = if callback_data.p_message_id_name.is_null() {
        Cow::from("")
    } else {
        CStr::from_ptr(callback_data.p_message_id_name).to_string_lossy()
    };
    let message = if callback_data.p_message.is_null() {
        Cow::from("")
    } else {
        CStr::from_ptr(callback_data.p_message).to_string_lossy()
    };
    // Optional context sections; each is None when its count is zero.
    let additional_info: [(&str, Option<String>); 3] = [
        (
            "queue info",
            display_debug_utils_label_ext(
                callback_data.p_queue_labels as *mut _,
                callback_data.queue_label_count as usize,
            ),
        ),
        (
            "cmd buf info",
            display_debug_utils_label_ext(
                callback_data.p_cmd_buf_labels as *mut _,
                callback_data.cmd_buf_label_count as usize,
            ),
        ),
        (
            "object info",
            display_debug_utils_object_name_info_ext(
                callback_data.p_objects as *mut _,
                callback_data.object_count as usize,
            ),
        ),
    ];
    log!(message_severity, "{}\n", {
        let mut msg = format!(
            "\n{} [{} ({})] : {}",
            message_type,
            message_id_name,
            &message_id_number.to_string(),
            message
        );
        // Append only the sections that carried data.
        for (info_label, info) in additional_info.into_iter() {
            match info {
                Some(data) => {
                    msg = format!("{}\n{}: {}", msg, info_label, data);
                }
                None => {}
            }
        }
        msg
    });
    // FALSE tells the loader not to abort the Vulkan call that triggered us.
    vk::FALSE
}
impl Instance {
    /// Creates a Vulkan instance named `name` with the given application
    /// `version`, enabling whichever of the requested layers/extensions the
    /// loader actually provides, and (on debug builds) installing a
    /// debug-utils messenger.
    ///
    /// Panics if the Vulkan entry points cannot be loaded or instance
    /// creation fails.
    pub fn create(name: &str, version: u32) -> Self {
        // TODO: return errors instead of panic
        let entry = VK_ENTRY
            .as_ref()
            .expect("Unable to load Vulkan entry points");
        let app_name = CString::new(name).unwrap();
        let app_info = vk::ApplicationInfo {
            s_type: vk::StructureType::APPLICATION_INFO,
            p_next: ptr::null(),
            p_application_name: app_name.as_ptr(),
            application_version: version,
            p_engine_name: b"gfx-rs\0".as_ptr() as *const _,
            engine_version: 1,
            api_version: vk_make_version!(1, 0, 0),
        };
        let instance_extensions = entry
            .enumerate_instance_extension_properties()
            .expect("Unable to enumerate instance extensions");
        let instance_layers = entry
            .enumerate_instance_layer_properties()
            .expect("Unable to enumerate instance layers");
        // Check our extensions against the available extensions; missing
        // ones are dropped with a warning rather than failing creation.
        let extensions = SURFACE_EXTENSIONS
            .iter()
            .chain(EXTENSIONS.iter())
            .filter_map(|&ext| {
                instance_extensions
                    .iter()
                    .find(|inst_ext| unsafe {
                        CStr::from_ptr(inst_ext.extension_name.as_ptr()).to_bytes()
                            == ext.to_bytes()
                    })
                    .map(|_| ext)
                    .or_else(|| {
                        warn!("Unable to find extension: {}", ext.to_string_lossy());
                        None
                    })
            })
            .collect::<Vec<&CStr>>();
        // Check requested layers against the available layers
        let layers = LAYERS
            .iter()
            .filter_map(|&layer| {
                instance_layers
                    .iter()
                    .find(|inst_layer| unsafe {
                        CStr::from_ptr(inst_layer.layer_name.as_ptr()).to_bytes()
                            == layer.to_bytes()
                    })
                    .map(|_| layer)
                    .or_else(|| {
                        warn!("Unable to find layer: {}", layer.to_string_lossy());
                        None
                    })
            })
            .collect::<Vec<&CStr>>();
        let instance = {
            // One owned CString buffer holds layers first, then extensions;
            // `str_pointers` must stay alive until create_instance returns.
            let cstrings = layers
                .iter()
                .chain(extensions.iter())
                .map(|&s| CString::from(s))
                .collect::<Vec<_>>();
            let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::<Vec<_>>();
            let create_info = vk::InstanceCreateInfo {
                s_type: vk::StructureType::INSTANCE_CREATE_INFO,
                p_next: ptr::null(),
                flags: vk::InstanceCreateFlags::empty(),
                p_application_info: &app_info,
                enabled_layer_count: layers.len() as _,
                pp_enabled_layer_names: str_pointers.as_ptr(),
                enabled_extension_count: extensions.len() as _,
                // Extensions start after the layer names in the shared buffer.
                pp_enabled_extension_names: str_pointers[layers.len() ..].as_ptr(),
            };
            unsafe { entry.create_instance(&create_info, None) }
                .expect("Unable to create Vulkan instance")
        };
        #[cfg(debug_assertions)]
        let debug_messenger = {
            // make sure VK_EXT_debug_utils is available
            if instance_extensions.iter().any(|props| unsafe {
                CStr::from_ptr(props.extension_name.as_ptr()) == DebugUtils::name()
            }) {
                let ext = DebugUtils::new(entry, &instance);
                let info = vk::DebugUtilsMessengerCreateInfoEXT {
                    s_type: vk::StructureType::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
                    p_next: ptr::null(),
                    flags: vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
                    message_severity: vk::DebugUtilsMessageSeverityFlagsEXT::all(),
                    message_type: vk::DebugUtilsMessageTypeFlagsEXT::all(),
                    pfn_user_callback: Some(debug_utils_messenger_callback),
                    p_user_data: ptr::null_mut(),
                };
                let handle = unsafe { ext.create_debug_utils_messenger(&info, None) }.unwrap();
                Some((ext, handle))
            } else {
                None
            }
        };
        #[cfg(not(debug_assertions))]
        let debug_messenger = None;
        Instance {
            raw: Arc::new(RawInstance(instance, debug_messenger)),
            extensions,
        }
    }
}
impl hal::Instance for Instance {
    type Backend = Backend;
    /// Enumerates the physical devices exposed by this instance and wraps
    /// each one, together with its queue families, into a HAL adapter.
    /// Enumeration failure is logged and yields an empty list.
    fn enumerate_adapters(&self) -> Vec<hal::Adapter<Backend>> {
        let devices = match unsafe { self.raw.0.enumerate_physical_devices() } {
            Ok(devices) => devices,
            Err(err) => {
                error!("Could not enumerate physical devices! {}", err);
                vec![]
            }
        };
        devices
            .into_iter()
            .map(|device| {
                let properties = unsafe { self.raw.0.get_physical_device_properties(device) };
                let info = hal::AdapterInfo {
                    // Device name is a fixed-size C buffer; fall back on
                    // invalid UTF-8.
                    name: unsafe {
                        CStr::from_ptr(properties.device_name.as_ptr())
                            .to_str()
                            .unwrap_or("Unknown")
                            .to_owned()
                    },
                    vendor: properties.vendor_id as usize,
                    device: properties.device_id as usize,
                    device_type: match properties.device_type {
                        ash::vk::PhysicalDeviceType::OTHER => DeviceType::Other,
                        ash::vk::PhysicalDeviceType::INTEGRATED_GPU => DeviceType::IntegratedGpu,
                        ash::vk::PhysicalDeviceType::DISCRETE_GPU => DeviceType::DiscreteGpu,
                        ash::vk::PhysicalDeviceType::VIRTUAL_GPU => DeviceType::VirtualGpu,
                        ash::vk::PhysicalDeviceType::CPU => DeviceType::Cpu,
                        _ => DeviceType::Other,
                    },
                };
                let physical_device = PhysicalDevice {
                    instance: self.raw.clone(),
                    handle: device,
                    properties,
                };
                let queue_families = unsafe {
                    self.raw
                        .0
                        .get_physical_device_queue_family_properties(device)
                        .into_iter()
                        .enumerate()
                        .map(|(i, properties)| QueueFamily {
                            properties,
                            device,
                            index: i as u32,
                        })
                        .collect()
                };
                hal::Adapter {
                    info,
                    physical_device,
                    queue_families,
                }
            })
            .collect()
    }
}
/// A queue family of a physical device, identified by its index.
#[derive(Debug, Clone)]
pub struct QueueFamily {
    properties: vk::QueueFamilyProperties,
    device: vk::PhysicalDevice,
    // Queue family index as reported by Vulkan enumeration order.
    index: u32,
}
impl hal::queue::QueueFamily for QueueFamily {
    /// HAL queue type derived from the Vulkan capability flags.
    fn queue_type(&self) -> QueueType {
        map_queue_type(self.properties.queue_flags)
    }
    /// Number of queues creatable in this family.
    fn max_queues(&self) -> usize {
        self.properties.queue_count as _
    }
    /// Stable identifier: the Vulkan queue family index.
    fn id(&self) -> queue::QueueFamilyId {
        queue::QueueFamilyId(self.index as _)
    }
}
/// A Vulkan physical device, keeping its owning instance alive and caching
/// the device properties queried at enumeration time.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct PhysicalDevice {
    #[derivative(Debug = "ignore")]
    instance: Arc<RawInstance>,
    handle: vk::PhysicalDevice,
    properties: vk::PhysicalDeviceProperties,
}
impl hal::PhysicalDevice<Backend> for PhysicalDevice {
/// Opens a logical device with the requested queue `families` and
/// `requested_features`, returning a HAL `Gpu` (device + queue groups).
///
/// Fails with `MissingFeature` when the hardware does not support all
/// requested features, or propagates the Vulkan device-creation error.
unsafe fn open(
    &self,
    families: &[(&QueueFamily, &[hal::QueuePriority])],
    requested_features: Features,
) -> Result<hal::Gpu<Backend>, DeviceCreationError> {
    // One create-info per family; queue count follows the priority list.
    let family_infos = families
        .iter()
        .map(|&(family, priorities)| vk::DeviceQueueCreateInfo {
            s_type: vk::StructureType::DEVICE_QUEUE_CREATE_INFO,
            p_next: ptr::null(),
            flags: vk::DeviceQueueCreateFlags::empty(),
            queue_family_index: family.index,
            queue_count: priorities.len() as _,
            p_queue_priorities: priorities.as_ptr(),
        })
        .collect::<Vec<_>>();
    if !self.features().contains(requested_features) {
        return Err(DeviceCreationError::MissingFeature);
    }
    let enabled_features = conv::map_device_features(requested_features);
    // Create device
    let device_raw = {
        // Owned CStrings (and their pointer array) must outlive create_device.
        let cstrings = DEVICE_EXTENSIONS
            .iter()
            .map(|&s| CString::from(s))
            .collect::<Vec<_>>();
        let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::<Vec<_>>();
        let info = vk::DeviceCreateInfo {
            s_type: vk::StructureType::DEVICE_CREATE_INFO,
            p_next: ptr::null(),
            flags: vk::DeviceCreateFlags::empty(),
            queue_create_info_count: family_infos.len() as u32,
            p_queue_create_infos: family_infos.as_ptr(),
            enabled_layer_count: 0,
            pp_enabled_layer_names: ptr::null(),
            enabled_extension_count: str_pointers.len() as u32,
            pp_enabled_extension_names: str_pointers.as_ptr(),
            p_enabled_features: &enabled_features,
        };
        self.instance
            .0
            .create_device(self.handle, &info, None)
            .map_err(Into::<result::Error>::into)
            .map_err(Into::<DeviceCreationError>::into)?
    };
    // Load the swapchain function pointers for this specific device.
    let swapchain_fn = vk::KhrSwapchainFn::load(|name| {
        mem::transmute(
            self.instance
                .0
                .get_device_proc_addr(device_raw.handle(), name.as_ptr()),
        )
    });
    let device = Device {
        raw: Arc::new(RawDevice(device_raw, requested_features)),
    };
    let device_arc = device.raw.clone();
    // Fetch each created queue (one per priority entry) per family.
    let queues = families
        .into_iter()
        .map(|&(family, ref priorities)| {
            let family_index = family.index;
            let mut family_raw = hal::backend::RawQueueGroup::new(family.clone());
            for id in 0 .. priorities.len() {
                let queue_raw = device_arc.0.get_device_queue(family_index, id as _);
                family_raw.add_queue(CommandQueue {
                    raw: Arc::new(queue_raw),
                    device: device_arc.clone(),
                    swapchain_fn: swapchain_fn.clone(),
                });
            }
            family_raw
        })
        .collect();
    Ok(hal::Gpu {
        device,
        queues: queue::Queues::new(queues),
    })
}
/// Queries tiling/buffer capabilities for `format`; `None` maps to
/// `VK_FORMAT_UNDEFINED`.
fn format_properties(&self, format: Option<format::Format>) -> format::Properties {
    let properties = unsafe {
        self.instance.0.get_physical_device_format_properties(
            self.handle,
            format.map_or(vk::Format::UNDEFINED, conv::map_format),
        )
    };
    format::Properties {
        linear_tiling: conv::map_image_features(properties.linear_tiling_features),
        optimal_tiling: conv::map_image_features(properties.optimal_tiling_features),
        buffer_features: conv::map_buffer_features(properties.buffer_features),
    }
}
/// Queries the limits for creating an image with the given parameters.
///
/// Returns `None` when the combination is unsupported
/// (`VK_ERROR_FORMAT_NOT_SUPPORTED`) or on any other (logged) error.
///
/// Panics if `dimensions` is not 1, 2 or 3.
fn image_format_properties(
    &self,
    format: format::Format,
    dimensions: u8,
    tiling: image::Tiling,
    usage: image::Usage,
    view_caps: image::ViewCapabilities,
) -> Option<image::FormatProperties> {
    let format_properties = unsafe {
        self.instance.0.get_physical_device_image_format_properties(
            self.handle,
            conv::map_format(format),
            match dimensions {
                1 => vk::ImageType::TYPE_1D,
                2 => vk::ImageType::TYPE_2D,
                3 => vk::ImageType::TYPE_3D,
                _ => panic!("Unexpected image dimensionality: {}", dimensions),
            },
            conv::map_tiling(tiling),
            conv::map_image_usage(usage),
            conv::map_view_capabilities(view_caps),
        )
    };
    match format_properties {
        Ok(props) => Some(image::FormatProperties {
            max_extent: image::Extent {
                width: props.max_extent.width,
                height: props.max_extent.height,
                depth: props.max_extent.depth,
            },
            max_levels: props.max_mip_levels as _,
            max_layers: props.max_array_layers as _,
            sample_count_mask: props.sample_counts.as_raw() as _,
            max_resource_size: props.max_resource_size as _,
        }),
        // Unsupported combinations are an expected, non-error outcome.
        Err(vk::Result::ERROR_FORMAT_NOT_SUPPORTED) => None,
        Err(other) => {
            error!("Unexpected error in `image_format_properties`: {:?}", other);
            None
        }
    }
}
/// Translates the device's Vulkan memory heaps and memory types into the
/// HAL representation. Only the first `memory_heap_count` /
/// `memory_type_count` entries of the fixed-size Vulkan arrays are valid.
fn memory_properties(&self) -> hal::MemoryProperties {
    let mem_properties = unsafe {
        self.instance
            .0
            .get_physical_device_memory_properties(self.handle)
    };
    let memory_heaps = mem_properties.memory_heaps
        [.. mem_properties.memory_heap_count as usize]
        .iter()
        .map(|mem| mem.size)
        .collect();
    let memory_types = mem_properties.memory_types
        [.. mem_properties.memory_type_count as usize]
        .iter()
        .map(|mem| {
            use crate::memory::Properties;
            // Translate each Vulkan property bit to its HAL counterpart.
            let mut type_flags = Properties::empty();
            if mem
                .property_flags
                .intersects(vk::MemoryPropertyFlags::DEVICE_LOCAL)
            {
                type_flags |= Properties::DEVICE_LOCAL;
            }
            if mem
                .property_flags
                .intersects(vk::MemoryPropertyFlags::HOST_VISIBLE)
            {
                type_flags |= Properties::CPU_VISIBLE;
            }
            if mem
                .property_flags
                .intersects(vk::MemoryPropertyFlags::HOST_COHERENT)
            {
                type_flags |= Properties::COHERENT;
            }
            if mem
                .property_flags
                .intersects(vk::MemoryPropertyFlags::HOST_CACHED)
            {
                type_flags |= Properties::CPU_CACHED;
            }
            if mem
                .property_flags
                .intersects(vk::MemoryPropertyFlags::LAZILY_ALLOCATED)
            {
                type_flags |= Properties::LAZILY_ALLOCATED;
            }
            hal::MemoryType {
                properties: type_flags,
                heap_index: mem.heap_index as usize,
            }
        })
        .collect();
    hal::MemoryProperties {
        memory_heaps,
        memory_types,
    }
}
/// Maps the device's Vulkan feature booleans onto HAL `Features` bits.
/// Dual-source blending is withheld on the affected Windows/Intel parts
/// (Sky Lake / Kaby Lake) as a driver-bug workaround.
fn features(&self) -> Features {
    // see https://github.com/gfx-rs/gfx/issues/1930
    let is_windows_intel_dual_src_bug = cfg!(windows)
        && self.properties.vendor_id == info::intel::VENDOR
        && (self.properties.device_id & info::intel::DEVICE_KABY_LAKE_MASK
            == info::intel::DEVICE_KABY_LAKE_MASK
            || self.properties.device_id & info::intel::DEVICE_SKY_LAKE_MASK
                == info::intel::DEVICE_SKY_LAKE_MASK);
    let features = unsafe { self.instance.0.get_physical_device_features(self.handle) };
    // These three are always available under Vulkan.
    let mut bits = Features::TRIANGLE_FAN
        | Features::SEPARATE_STENCIL_REF_VALUES
        | Features::SAMPLER_MIP_LOD_BIAS;
    if features.robust_buffer_access != 0 {
        bits |= Features::ROBUST_BUFFER_ACCESS;
    }
    if features.full_draw_index_uint32 != 0 {
        bits |= Features::FULL_DRAW_INDEX_U32;
    }
    if features.image_cube_array != 0 {
        bits |= Features::IMAGE_CUBE_ARRAY;
    }
    if features.independent_blend != 0 {
        bits |= Features::INDEPENDENT_BLENDING;
    }
    if features.geometry_shader != 0 {
        bits |= Features::GEOMETRY_SHADER;
    }
    if features.tessellation_shader != 0 {
        bits |= Features::TESSELLATION_SHADER;
    }
    if features.sample_rate_shading != 0 {
        bits |= Features::SAMPLE_RATE_SHADING;
    }
    if features.dual_src_blend != 0 && !is_windows_intel_dual_src_bug {
        bits |= Features::DUAL_SRC_BLENDING;
    }
    if features.logic_op != 0 {
        bits |= Features::LOGIC_OP;
    }
    if features.multi_draw_indirect != 0 {
        bits |= Features::MULTI_DRAW_INDIRECT;
    }
    if features.draw_indirect_first_instance != 0 {
        bits |= Features::DRAW_INDIRECT_FIRST_INSTANCE;
    }
    if features.depth_clamp != 0 {
        bits |= Features::DEPTH_CLAMP;
    }
    if features.depth_bias_clamp != 0 {
        bits |= Features::DEPTH_BIAS_CLAMP;
    }
    if features.fill_mode_non_solid != 0 {
        bits |= Features::NON_FILL_POLYGON_MODE;
    }
    if features.depth_bounds != 0 {
        bits |= Features::DEPTH_BOUNDS;
    }
    if features.wide_lines != 0 {
        bits |= Features::LINE_WIDTH;
    }
    if features.large_points != 0 {
        bits |= Features::POINT_SIZE;
    }
    if features.alpha_to_one != 0 {
        bits |= Features::ALPHA_TO_ONE;
    }
    if features.multi_viewport != 0 {
        bits |= Features::MULTI_VIEWPORTS;
    }
    if features.sampler_anisotropy != 0 {
        bits |= Features::SAMPLER_ANISOTROPY;
    }
    if features.texture_compression_etc2 != 0 {
        bits |= Features::FORMAT_ETC2;
    }
    if features.texture_compression_astc_ldr != 0 {
        bits |= Features::FORMAT_ASTC_LDR;
    }
    if features.texture_compression_bc != 0 {
        bits |= Features::FORMAT_BC;
    }
    if features.occlusion_query_precise != 0 {
        bits |= Features::PRECISE_OCCLUSION_QUERY;
    }
    if features.pipeline_statistics_query != 0 {
        bits |= Features::PIPELINE_STATISTICS_QUERY;
    }
    if features.vertex_pipeline_stores_and_atomics != 0 {
        bits |= Features::VERTEX_STORES_AND_ATOMICS;
    }
    if features.fragment_stores_and_atomics != 0 {
        bits |= Features::FRAGMENT_STORES_AND_ATOMICS;
    }
    //TODO: cover more features
    bits
}
/// Translates the cached `VkPhysicalDeviceLimits` into HAL `Limits`,
/// field by field. Mostly straight casts; no device queries are made here.
fn limits(&self) -> Limits {
    let limits = &self.properties.limits;
    let max_group_count = limits.max_compute_work_group_count;
    let max_group_size = limits.max_compute_work_group_size;
    Limits {
        max_image_1d_size: limits.max_image_dimension1_d,
        max_image_2d_size: limits.max_image_dimension2_d,
        max_image_3d_size: limits.max_image_dimension3_d,
        max_image_cube_size: limits.max_image_dimension_cube,
        max_image_array_layers: limits.max_image_array_layers as _,
        max_texel_elements: limits.max_texel_buffer_elements as _,
        max_patch_size: limits.max_tessellation_patch_size as PatchSize,
        max_viewports: limits.max_viewports as _,
        max_viewport_dimensions: limits.max_viewport_dimensions,
        max_framebuffer_extent: image::Extent {
            width: limits.max_framebuffer_width,
            height: limits.max_framebuffer_height,
            depth: limits.max_framebuffer_layers,
        },
        max_compute_work_group_count: [
            max_group_count[0] as _,
            max_group_count[1] as _,
            max_group_count[2] as _,
        ],
        max_compute_work_group_size: [
            max_group_size[0] as _,
            max_group_size[1] as _,
            max_group_size[2] as _,
        ],
        max_vertex_input_attributes: limits.max_vertex_input_attributes as _,
        max_vertex_input_bindings: limits.max_vertex_input_bindings as _,
        max_vertex_input_attribute_offset: limits.max_vertex_input_attribute_offset as _,
        max_vertex_input_binding_stride: limits.max_vertex_input_binding_stride as _,
        max_vertex_output_components: limits.max_vertex_output_components as _,
        optimal_buffer_copy_offset_alignment: limits.optimal_buffer_copy_offset_alignment as _,
        optimal_buffer_copy_pitch_alignment: limits.optimal_buffer_copy_row_pitch_alignment
            as _,
        min_texel_buffer_offset_alignment: limits.min_texel_buffer_offset_alignment as _,
        min_uniform_buffer_offset_alignment: limits.min_uniform_buffer_offset_alignment as _,
        min_storage_buffer_offset_alignment: limits.min_storage_buffer_offset_alignment as _,
        framebuffer_color_samples_count: limits.framebuffer_color_sample_counts.as_raw() as _,
        framebuffer_depth_samples_count: limits.framebuffer_depth_sample_counts.as_raw() as _,
        framebuffer_stencil_samples_count: limits.framebuffer_stencil_sample_counts.as_raw()
            as _,
        max_color_attachments: limits.max_color_attachments as _,
        buffer_image_granularity: limits.buffer_image_granularity,
        non_coherent_atom_size: limits.non_coherent_atom_size as _,
        max_sampler_anisotropy: limits.max_sampler_anisotropy,
        // Vulkan imposes no vertex-binding stride alignment requirement.
        min_vertex_input_binding_stride_alignment: 1,
        max_bound_descriptor_sets: limits.max_bound_descriptor_sets as _,
        max_compute_shared_memory_size: limits.max_compute_shared_memory_size as _,
        max_compute_work_group_invocations: limits.max_compute_work_group_invocations as _,
        max_descriptor_set_input_attachments: limits.max_descriptor_set_input_attachments as _,
        max_descriptor_set_sampled_images: limits.max_descriptor_set_sampled_images as _,
        max_descriptor_set_samplers: limits.max_descriptor_set_samplers as _,
        max_descriptor_set_storage_buffers: limits.max_descriptor_set_storage_buffers as _,
        max_descriptor_set_storage_buffers_dynamic: limits
            .max_descriptor_set_storage_buffers_dynamic
            as _,
        max_descriptor_set_storage_images: limits.max_descriptor_set_storage_images as _,
        max_descriptor_set_uniform_buffers: limits.max_descriptor_set_uniform_buffers as _,
        max_descriptor_set_uniform_buffers_dynamic: limits
            .max_descriptor_set_uniform_buffers_dynamic
            as _,
        max_draw_indexed_index_value: limits.max_draw_indexed_index_value,
        max_draw_indirect_count: limits.max_draw_indirect_count,
        max_fragment_combined_output_resources: limits.max_fragment_combined_output_resources
            as _,
        max_fragment_dual_source_attachments: limits.max_fragment_dual_src_attachments as _,
        max_fragment_input_components: limits.max_fragment_input_components as _,
        max_fragment_output_attachments: limits.max_fragment_output_attachments as _,
        max_framebuffer_layers: limits.max_framebuffer_layers as _,
        max_geometry_input_components: limits.max_geometry_input_components as _,
        max_geometry_output_components: limits.max_geometry_output_components as _,
        max_geometry_output_vertices: limits.max_geometry_output_vertices as _,
        max_geometry_shader_invocations: limits.max_geometry_shader_invocations as _,
        max_geometry_total_output_components: limits.max_geometry_total_output_components as _,
        max_memory_allocation_count: limits.max_memory_allocation_count as _,
        max_per_stage_descriptor_input_attachments: limits
            .max_per_stage_descriptor_input_attachments
            as _,
        max_per_stage_descriptor_sampled_images: limits.max_per_stage_descriptor_sampled_images
            as _,
        max_per_stage_descriptor_samplers: limits.max_per_stage_descriptor_samplers as _,
        max_per_stage_descriptor_storage_buffers: limits
            .max_per_stage_descriptor_storage_buffers
            as _,
        max_per_stage_descriptor_storage_images: limits.max_per_stage_descriptor_storage_images
            as _,
        max_per_stage_descriptor_uniform_buffers: limits
            .max_per_stage_descriptor_uniform_buffers
            as _,
        max_per_stage_resources: limits.max_per_stage_resources as _,
        max_push_constants_size: limits.max_push_constants_size as _,
        max_sampler_allocation_count: limits.max_sampler_allocation_count as _,
        max_sampler_lod_bias: limits.max_sampler_lod_bias as _,
        max_storage_buffer_range: limits.max_storage_buffer_range as _,
        max_uniform_buffer_range: limits.max_uniform_buffer_range as _,
        min_memory_map_alignment: limits.min_memory_map_alignment,
        standard_sample_locations: limits.standard_sample_locations == ash::vk::TRUE,
    }
}
/// Validates a serialized pipeline-cache blob against this device.
///
/// Checks, in order: minimum length, the little-endian header fields
/// (length, header version, vendor id, device id), and the pipeline-cache
/// UUID. Every mismatch is logged and yields `false`.
fn is_valid_cache(&self, cache: &[u8]) -> bool {
    // Vulkan cache header: 4 u32 fields followed by the device UUID.
    const HEADER_SIZE: usize = 16 + vk::UUID_SIZE;
    if cache.len() < HEADER_SIZE {
        warn!("Bad cache data length {:?}", cache.len());
        return false;
    }
    let header_len = u32::from_le_bytes([cache[0], cache[1], cache[2], cache[3]]);
    let header_version = u32::from_le_bytes([cache[4], cache[5], cache[6], cache[7]]);
    let vendor_id = u32::from_le_bytes([cache[8], cache[9], cache[10], cache[11]]);
    let device_id = u32::from_le_bytes([cache[12], cache[13], cache[14], cache[15]]);
    // header length
    if (header_len as usize) < HEADER_SIZE {
        warn!("Bad header length {:?}", header_len);
        return false;
    }
    // cache header version
    if header_version != vk::PipelineCacheHeaderVersion::ONE.as_raw() as u32 {
        warn!("Unsupported cache header version: {:?}", header_version);
        return false;
    }
    // vendor id
    if vendor_id != self.properties.vendor_id {
        warn!(
            "Vendor ID mismatch. Device: {:?}, cache: {:?}.",
            self.properties.vendor_id, vendor_id,
        );
        return false;
    }
    // device id
    if device_id != self.properties.device_id {
        warn!(
            "Device ID mismatch. Device: {:?}, cache: {:?}.",
            self.properties.device_id, device_id,
        );
        return false;
    }
    // pipeline cache UUID must match the one reported by this device
    if self.properties.pipeline_cache_uuid != cache[16 .. 16 + vk::UUID_SIZE] {
        warn!(
            "Pipeline cache UUID mismatch. Device: {:?}, cache: {:?}.",
            self.properties.pipeline_cache_uuid,
            &cache[16 .. 16 + vk::UUID_SIZE],
        );
        return false;
    }
    true
}
}
// Owned logical device plus the Features it was opened with; destroyed in Drop.
#[doc(hidden)]
pub struct RawDevice(pub ash::Device, Features);
impl fmt::Debug for RawDevice {
    // TODO: Real Debug impl
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Placeholder: emit just the type name.
        f.write_str("RawDevice")
    }
}
impl Drop for RawDevice {
    fn drop(&mut self) {
        // Destroy the logical device when the last Arc reference is gone.
        unsafe {
            self.0.destroy_device(None);
        }
    }
}
// Need to explicitly synchronize on submission and present: the raw
// vk::Queue is shared via Arc and carries no interior locking.
pub type RawCommandQueue = Arc<vk::Queue>;
/// A device queue handle bundled with its owning device and the loaded
/// swapchain function pointers used for presentation.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct CommandQueue {
    raw: RawCommandQueue,
    // Keeps the logical device alive for as long as the queue exists.
    device: Arc<RawDevice>,
    #[derivative(Debug = "ignore")]
    swapchain_fn: vk::KhrSwapchainFn,
}
impl hal::queue::RawCommandQueue<Backend> for CommandQueue {
/// Submits command buffers with their wait/signal semaphores to this
/// queue, optionally signalling `fence` on completion.
///
/// Collects all raw handles into temporary vectors before building the
/// single `VkSubmitInfo`. Panics (via assert) if `vkQueueSubmit` fails.
unsafe fn submit<'a, T, Ic, S, Iw, Is>(
    &mut self,
    submission: hal::queue::Submission<Ic, Iw, Is>,
    fence: Option<&native::Fence>,
) where
    T: 'a + Borrow<command::CommandBuffer>,
    Ic: IntoIterator<Item = &'a T>,
    S: 'a + Borrow<native::Semaphore>,
    Iw: IntoIterator<Item = (&'a S, PipelineStage)>,
    Is: IntoIterator<Item = &'a S>,
{
    //TODO: avoid heap allocations
    let mut waits = Vec::new();
    let mut stages = Vec::new();
    let buffers = submission
        .command_buffers
        .into_iter()
        .map(|cmd| cmd.borrow().raw)
        .collect::<Vec<_>>();
    // waits[i] pairs with stages[i] in the submit info below.
    for (semaphore, stage) in submission.wait_semaphores {
        waits.push(semaphore.borrow().0);
        stages.push(conv::map_pipeline_stage(stage));
    }
    let signals = submission
        .signal_semaphores
        .into_iter()
        .map(|semaphore| semaphore.borrow().0)
        .collect::<Vec<_>>();
    let info = vk::SubmitInfo {
        s_type: vk::StructureType::SUBMIT_INFO,
        p_next: ptr::null(),
        wait_semaphore_count: waits.len() as u32,
        p_wait_semaphores: waits.as_ptr(),
        // If count is zero, AMD driver crashes if nullptr is not set for stage masks
        p_wait_dst_stage_mask: if stages.is_empty() {
            ptr::null()
        } else {
            stages.as_ptr()
        },
        command_buffer_count: buffers.len() as u32,
        p_command_buffers: buffers.as_ptr(),
        signal_semaphore_count: signals.len() as u32,
        p_signal_semaphores: signals.as_ptr(),
    };
    // A null fence is valid and means "no fence to signal".
    let fence_raw = fence.map(|fence| fence.0).unwrap_or(vk::Fence::null());
    let result = self.device.0.queue_submit(*self.raw, &[info], fence_raw);
    assert_eq!(Ok(()), result);
}
unsafe fn present<'a, W, Is, S, Iw>(
&mut self,
swapchains: Is,
wait_semaphores: Iw,
) -> Result<Option<Suboptimal>, PresentError>
where
W: 'a + Borrow<window::Swapchain>,
Is: IntoIterator<Item = (&'a W, SwapImageIndex)>,
S: 'a + Borrow<native::Semaphore>,
Iw: IntoIterator<Item = &'a S>,
{
let semaphores = wait_semaphores
.into_iter()
.map(|sem| sem.borrow().0)
.collect::<Vec<_>>();
let mut frames = Vec::new();
let mut vk_swapchains = Vec::new();
for (swapchain, index) in swapchains {
vk_swapchains.push(swapchain.borrow().raw);
frames.push(index);
}
let info = vk::PresentInfoKHR {
s_type: vk::StructureType::PRESENT_INFO_KHR,
p_next: ptr::null(),
wait_semaphore_count: semaphores.len() as _,
p_wait_semaphores: semaphores.as_ptr(),
swapchain_count: vk_swapchains.len() as _,
p_swapchains: vk_swapchains.as_ptr(),
p_image_indices: frames.as_ptr(),
p_results: ptr::null_mut(),
};
match self.swapchain_fn.queue_present_khr(*self.raw, &info) {
vk::Result::SUCCESS => Ok(None),
vk::Result::SUBOPTIMAL_KHR => Ok(Some(Suboptimal)),
vk::Result::ERROR_OUT_OF_HOST_MEMORY => {
Err(PresentError::OutOfMemory(OutOfMemory::OutOfHostMemory))
}
vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
Err(PresentError::OutOfMemory(OutOfMemory::OutOfDeviceMemory))
}
vk::Result::ERROR_DEVICE_LOST => Err(PresentError::DeviceLost(DeviceLost)),
vk::Result::ERROR_OUT_OF_DATE_KHR => Err(PresentError::OutOfDate),
vk::Result::ERROR_SURFACE_LOST_KHR => Err(PresentError::SurfaceLost(SurfaceLost)),
_ => panic!("Failed to present frame"),
}
}
fn wait_idle(&self) -> Result<(), HostExecutionError> {
unsafe {
self.device
.0
.queue_wait_idle(*self.raw)
.map_err(From::from)
.map_err(From::<result::Error>::from) // HostExecutionError
}
}
}
/// Logical device. Shares the underlying `RawDevice` with every queue and
/// resource created from it via `Arc`.
#[derive(Debug)]
pub struct Device {
    raw: Arc<RawDevice>,
}
/// The gfx-hal backend marker type for Vulkan. Uninhabited — it exists only
/// to tie all the backend's associated types together.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum Backend {}
impl hal::Backend for Backend {
    type PhysicalDevice = PhysicalDevice;
    type Device = Device;
    type Surface = window::Surface;
    type Swapchain = window::Swapchain;
    type QueueFamily = QueueFamily;
    type CommandQueue = CommandQueue;
    type CommandBuffer = command::CommandBuffer;
    type Memory = native::Memory;
    type CommandPool = pool::RawCommandPool;
    type ShaderModule = native::ShaderModule;
    type RenderPass = native::RenderPass;
    type Framebuffer = native::Framebuffer;
    type Buffer = native::Buffer;
    type BufferView = native::BufferView;
    type Image = native::Image;
    type ImageView = native::ImageView;
    type Sampler = native::Sampler;
    type ComputePipeline = native::ComputePipeline;
    type GraphicsPipeline = native::GraphicsPipeline;
    type PipelineLayout = native::PipelineLayout;
    type PipelineCache = native::PipelineCache;
    type DescriptorSetLayout = native::DescriptorSetLayout;
    type DescriptorPool = native::DescriptorPool;
    type DescriptorSet = native::DescriptorSet;
    type Fence = native::Fence;
    type Semaphore = native::Semaphore;
    type Event = native::Event;
    type QueryPool = native::QueryPool;
}
//! Use all device features provided by Vulkan.
#![allow(non_snake_case)]
#[macro_use]
extern crate log;
#[macro_use]
extern crate ash;
extern crate byteorder;
#[macro_use]
extern crate derivative;
extern crate gfx_hal as hal;
#[macro_use]
extern crate lazy_static;
#[cfg(feature = "use-rtld-next")]
extern crate shared_library;
extern crate smallvec;
#[cfg(target_os = "macos")]
extern crate core_graphics;
#[cfg(target_os = "macos")]
#[macro_use]
extern crate objc;
#[cfg(windows)]
extern crate winapi;
#[cfg(feature = "winit")]
extern crate winit;
#[cfg(all(
feature = "x11",
unix,
not(target_os = "android"),
not(target_os = "macos")
))]
extern crate x11;
#[cfg(all(
feature = "xcb",
unix,
not(target_os = "android"),
not(target_os = "macos")
))]
extern crate xcb;
use ash::extensions::{self, ext::DebugUtils};
use ash::version::{DeviceV1_0, EntryV1_0, InstanceV1_0};
use ash::vk;
#[cfg(not(feature = "use-rtld-next"))]
use ash::{Entry, LoadingError};
use crate::hal::adapter::DeviceType;
use crate::hal::device::{DeviceLost, OutOfMemory, SurfaceLost};
use crate::hal::error::{DeviceCreationError, HostExecutionError};
use crate::hal::pso::PipelineStage;
use crate::hal::{
format,
image,
memory,
queue,
window::{PresentError, Suboptimal},
};
use crate::hal::{Features, Limits, PatchSize, QueueType, SwapImageIndex};
use std::borrow::{Borrow, Cow};
use std::ffi::{CStr, CString};
use std::sync::Arc;
use std::{fmt, mem, ptr, slice};
#[cfg(feature = "use-rtld-next")]
use ash::{EntryCustom, LoadingError};
#[cfg(feature = "use-rtld-next")]
use shared_library::dynamic_library::{DynamicLibrary, SpecialHandles};
mod command;
mod conv;
mod device;
mod info;
mod native;
mod pool;
mod result;
mod window;
// CStr's cannot be constant yet, until const fn lands we need to use a lazy_static
lazy_static! {
    // Validation layers requested in debug builds (per-component layers on
    // Android, the standard meta-layer elsewhere); none in release builds.
    static ref LAYERS: Vec<&'static CStr> = if cfg!(all(target_os = "android", debug_assertions)) {
        vec![
            CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_core_validation\0").unwrap(),
            CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_object_tracker\0").unwrap(),
            CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_parameter_validation\0").unwrap(),
            CStr::from_bytes_with_nul(b"VK_LAYER_GOOGLE_threading\0").unwrap(),
            CStr::from_bytes_with_nul(b"VK_LAYER_GOOGLE_unique_objects\0").unwrap(),
        ]
    } else if cfg!(debug_assertions) {
        vec![CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_standard_validation\0").unwrap()]
    } else {
        vec![]
    };
    // Extra instance extensions: debug-utils in debug builds only.
    static ref EXTENSIONS: Vec<&'static CStr> = vec![#[cfg(debug_assertions)] CStr::from_bytes_with_nul(b"VK_EXT_debug_utils\0").unwrap()];
    // Device-level extensions required by the backend.
    static ref DEVICE_EXTENSIONS: Vec<&'static CStr> = vec![extensions::khr::Swapchain::name()];
    // Window-system-integration instance extensions for the current target.
    static ref SURFACE_EXTENSIONS: Vec<&'static CStr> = vec![
        extensions::khr::Surface::name(),
        // Platform-specific WSI extensions
        #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
        extensions::khr::XlibSurface::name(),
        #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
        extensions::khr::XcbSurface::name(),
        #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
        extensions::khr::WaylandSurface::name(),
        #[cfg(target_os = "android")]
        extensions::khr::AndroidSurface::name(),
        #[cfg(target_os = "windows")]
        extensions::khr::Win32Surface::name(),
        #[cfg(target_os = "macos")]
        extensions::mvk::MacOSSurface::name(),
    ];
}
#[cfg(not(feature = "use-rtld-next"))]
lazy_static! {
    // Entry function pointers
    pub static ref VK_ENTRY: Result<Entry, LoadingError> = Entry::new();
}
#[cfg(feature = "use-rtld-next")]
lazy_static! {
    // Entry function pointers
    pub static ref VK_ENTRY: Result<EntryCustom<V1_0, ()>, LoadingError>
        = EntryCustom::new_custom(
            || Ok(()),
            |_, name| unsafe {
                // Resolve Vulkan entry points from the already-loaded
                // process image via RTLD_NEXT.
                DynamicLibrary::symbol_special(SpecialHandles::Next, &*name.to_string_lossy())
                    .unwrap_or(ptr::null_mut())
            }
        );
}
/// Owned `ash` instance handle plus the debug-utils messenger installed in
/// debug builds (when the extension was available).
pub struct RawInstance(
    pub ash::Instance,
    Option<(DebugUtils, vk::DebugUtilsMessengerEXT)>,
);
impl Drop for RawInstance {
    fn drop(&mut self) {
        unsafe {
            // The messenger is only created under debug_assertions (see
            // `Instance::create`); destroy it before the instance itself.
            #[cfg(debug_assertions)]
            {
                if let Some((ref ext, callback)) = self.1 {
                    ext.destroy_debug_utils_messenger(callback, None);
                }
            }
            self.0.destroy_instance(None);
        }
    }
}
/// A created Vulkan instance together with the instance extensions that were
/// actually enabled for it.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct Instance {
    #[derivative(Debug = "ignore")]
    pub raw: Arc<RawInstance>,
    /// Supported extensions of this instance.
    pub extensions: Vec<&'static CStr>,
}
/// Classify a Vulkan queue family by its capability flags.
///
/// Graphics+compute maps to `General`, graphics-only to `Graphics`,
/// compute-only to `Compute`, transfer-only to `Transfer`. TRANSFER_BIT is
/// implied (and therefore ignored) whenever graphics or compute is present.
fn map_queue_type(flags: vk::QueueFlags) -> QueueType {
    let graphics = flags.contains(vk::QueueFlags::GRAPHICS);
    let compute = flags.contains(vk::QueueFlags::COMPUTE);
    let transfer = flags.contains(vk::QueueFlags::TRANSFER);
    match (graphics, compute, transfer) {
        (true, true, _) => QueueType::General,
        (true, false, _) => QueueType::Graphics,
        (false, true, _) => QueueType::Compute,
        (false, false, true) => QueueType::Transfer,
        // TODO: present only queues?
        (false, false, false) => unimplemented!(),
    }
}
/// Render an array of `VkDebugUtilsLabelEXT` into a comma-separated list of
/// label names; returns `None` when `count` is zero. Labels with a null
/// `pLabelName` are skipped.
///
/// # Safety
/// `label_structs` must point to at least `count` valid label structures.
unsafe fn display_debug_utils_label_ext(
    label_structs: *mut vk::DebugUtilsLabelEXT,
    count: usize,
) -> Option<String> {
    if count == 0 {
        return None;
    }
    let labels = slice::from_raw_parts::<vk::DebugUtilsLabelEXT>(label_structs, count);
    let names = labels
        .iter()
        .filter_map(|label| {
            label
                .p_label_name
                .as_ref()
                .map(|name| CStr::from_ptr(name).to_string_lossy().into_owned())
        })
        .collect::<Vec<String>>();
    Some(names.join(", "))
}
/// Render an array of `VkDebugUtilsObjectNameInfoEXT` into a comma-separated
/// list of "(type, handle[, name])" entries; returns `None` when `count` is
/// zero.
///
/// # Safety
/// `info_structs` must point to at least `count` valid structures.
unsafe fn display_debug_utils_object_name_info_ext(
    info_structs: *mut vk::DebugUtilsObjectNameInfoEXT,
    count: usize,
) -> Option<String> {
    if count == 0 {
        return None;
    }
    //TODO: use color field of vk::DebugUtilsLabelsExt in a meaningful way?
    let infos = slice::from_raw_parts::<vk::DebugUtilsObjectNameInfoEXT>(info_structs, count);
    let mut entries = Vec::with_capacity(count);
    for obj_info in infos {
        // Object names are optional; only include one when present.
        let object_name = obj_info
            .p_object_name
            .as_ref()
            .map(|name| CStr::from_ptr(name).to_string_lossy().into_owned());
        let entry = if let Some(name) = object_name {
            format!(
                "(type: {:?}, hndl: {}, name: {})",
                obj_info.object_type,
                &obj_info.object_handle.to_string(),
                name
            )
        } else {
            format!(
                "(type: {:?}, hndl: {})",
                obj_info.object_type,
                &obj_info.object_handle.to_string()
            )
        };
        entries.push(entry);
    }
    Some(entries.join(", "))
}
/// Callback registered with the `VK_EXT_debug_utils` messenger; forwards
/// Vulkan validation messages to the `log` crate at a matching level.
/// Always returns `VK_FALSE` so the triggering Vulkan call is not aborted.
unsafe extern "system" fn debug_utils_messenger_callback(
    message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
    message_type: vk::DebugUtilsMessageTypeFlagsEXT,
    p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT,
    _user_data: *mut std::os::raw::c_void,
) -> vk::Bool32 {
    let callback_data = *p_callback_data;
    // Map Vulkan severity onto a log level; unknown severities become Warn.
    let message_severity = match message_severity {
        vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => log::Level::Error,
        vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => log::Level::Warn,
        vk::DebugUtilsMessageSeverityFlagsEXT::INFO => log::Level::Info,
        vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => log::Level::Trace,
        _ => log::Level::Warn,
    };
    let message_type = &format!("{:?}", message_type);
    let message_id_number: i32 = callback_data.message_id_number as i32;
    // Both strings are optional on the Vulkan side; fall back to "".
    let message_id_name = if callback_data.p_message_id_name.is_null() {
        Cow::from("")
    } else {
        CStr::from_ptr(callback_data.p_message_id_name).to_string_lossy()
    };
    let message = if callback_data.p_message.is_null() {
        Cow::from("")
    } else {
        CStr::from_ptr(callback_data.p_message).to_string_lossy()
    };
    // Optional context attached to the message: active queue labels, command
    // buffer labels, and named objects.
    let additional_info: [(&str, Option<String>); 3] = [
        (
            "queue info",
            display_debug_utils_label_ext(
                callback_data.p_queue_labels as *mut _,
                callback_data.queue_label_count as usize,
            ),
        ),
        (
            "cmd buf info",
            display_debug_utils_label_ext(
                callback_data.p_cmd_buf_labels as *mut _,
                callback_data.cmd_buf_label_count as usize,
            ),
        ),
        (
            "object info",
            display_debug_utils_object_name_info_ext(
                callback_data.p_objects as *mut _,
                callback_data.object_count as usize,
            ),
        ),
    ];
    log!(message_severity, "{}\n", {
        let mut msg = format!(
            "\n{} [{} ({})] : {}",
            message_type,
            message_id_name,
            &message_id_number.to_string(),
            message
        );
        for (info_label, info) in additional_info.into_iter() {
            match info {
                Some(data) => {
                    msg = format!("{}\n{}: {}", msg, info_label, data);
                }
                None => {}
            }
        }
        msg
    });
    vk::FALSE
}
impl Instance {
    /// Create a Vulkan instance for an application called `name` at the
    /// given application `version`.
    ///
    /// Requested layers and extensions are filtered down to those the
    /// runtime actually provides (missing ones are logged and skipped). In
    /// debug builds, a `VK_EXT_debug_utils` messenger is installed when the
    /// extension is available.
    pub fn create(name: &str, version: u32) -> Self {
        // TODO: return errors instead of panic
        let entry = VK_ENTRY
            .as_ref()
            .expect("Unable to load Vulkan entry points");
        let app_name = CString::new(name).unwrap();
        let app_info = vk::ApplicationInfo {
            s_type: vk::StructureType::APPLICATION_INFO,
            p_next: ptr::null(),
            p_application_name: app_name.as_ptr(),
            application_version: version,
            p_engine_name: b"gfx-rs\0".as_ptr() as *const _,
            engine_version: 1,
            api_version: vk_make_version!(1, 0, 0),
        };
        let instance_extensions = entry
            .enumerate_instance_extension_properties()
            .expect("Unable to enumerate instance extensions");
        let instance_layers = entry
            .enumerate_instance_layer_properties()
            .expect("Unable to enumerate instance layers");
        // Check our extensions against the available extensions
        let extensions = SURFACE_EXTENSIONS
            .iter()
            .chain(EXTENSIONS.iter())
            .filter_map(|&ext| {
                instance_extensions
                    .iter()
                    .find(|inst_ext| unsafe {
                        CStr::from_ptr(inst_ext.extension_name.as_ptr()).to_bytes()
                            == ext.to_bytes()
                    })
                    .map(|_| ext)
                    .or_else(|| {
                        warn!("Unable to find extension: {}", ext.to_string_lossy());
                        None
                    })
            })
            .collect::<Vec<&CStr>>();
        // Check requested layers against the available layers
        let layers = LAYERS
            .iter()
            .filter_map(|&layer| {
                instance_layers
                    .iter()
                    .find(|inst_layer| unsafe {
                        CStr::from_ptr(inst_layer.layer_name.as_ptr()).to_bytes()
                            == layer.to_bytes()
                    })
                    .map(|_| layer)
                    .or_else(|| {
                        warn!("Unable to find layer: {}", layer.to_string_lossy());
                        None
                    })
            })
            .collect::<Vec<&CStr>>();
        let instance = {
            // Layer names come first, extension names after — `str_pointers`
            // is split at `layers.len()` below on that assumption.
            let cstrings = layers
                .iter()
                .chain(extensions.iter())
                .map(|&s| CString::from(s))
                .collect::<Vec<_>>();
            let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::<Vec<_>>();
            let create_info = vk::InstanceCreateInfo {
                s_type: vk::StructureType::INSTANCE_CREATE_INFO,
                p_next: ptr::null(),
                flags: vk::InstanceCreateFlags::empty(),
                p_application_info: &app_info,
                enabled_layer_count: layers.len() as _,
                pp_enabled_layer_names: str_pointers.as_ptr(),
                enabled_extension_count: extensions.len() as _,
                pp_enabled_extension_names: str_pointers[layers.len() ..].as_ptr(),
            };
            unsafe { entry.create_instance(&create_info, None) }
                .expect("Unable to create Vulkan instance")
        };
        #[cfg(debug_assertions)]
        let debug_messenger = {
            // make sure VK_EXT_debug_utils is available
            if instance_extensions.iter().any(|props| unsafe {
                CStr::from_ptr(props.extension_name.as_ptr()) == DebugUtils::name()
            }) {
                let ext = DebugUtils::new(entry, &instance);
                let info = vk::DebugUtilsMessengerCreateInfoEXT {
                    s_type: vk::StructureType::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
                    p_next: ptr::null(),
                    flags: vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
                    message_severity: vk::DebugUtilsMessageSeverityFlagsEXT::all(),
                    message_type: vk::DebugUtilsMessageTypeFlagsEXT::all(),
                    pfn_user_callback: Some(debug_utils_messenger_callback),
                    p_user_data: ptr::null_mut(),
                };
                let handle = unsafe { ext.create_debug_utils_messenger(&info, None) }.unwrap();
                Some((ext, handle))
            } else {
                None
            }
        };
        #[cfg(not(debug_assertions))]
        let debug_messenger = None;
        Instance {
            raw: Arc::new(RawInstance(instance, debug_messenger)),
            extensions,
        }
    }
}
impl hal::Instance for Instance {
    type Backend = Backend;
    /// Enumerate the physical devices exposed by this instance, converting
    /// each into a HAL `Adapter` (device info, the physical device handle,
    /// and its queue family list).
    fn enumerate_adapters(&self) -> Vec<hal::Adapter<Backend>> {
        let devices = match unsafe { self.raw.0.enumerate_physical_devices() } {
            Ok(devices) => devices,
            Err(err) => {
                // Enumeration failure is non-fatal: log it and report no
                // adapters.
                error!("Could not enumerate physical devices! {}", err);
                vec![]
            }
        };
        devices
            .into_iter()
            .map(|device| {
                let properties = unsafe { self.raw.0.get_physical_device_properties(device) };
                let info = hal::AdapterInfo {
                    name: unsafe {
                        CStr::from_ptr(properties.device_name.as_ptr())
                            .to_str()
                            .unwrap_or("Unknown")
                            .to_owned()
                    },
                    vendor: properties.vendor_id as usize,
                    device: properties.device_id as usize,
                    device_type: match properties.device_type {
                        ash::vk::PhysicalDeviceType::OTHER => DeviceType::Other,
                        ash::vk::PhysicalDeviceType::INTEGRATED_GPU => DeviceType::IntegratedGpu,
                        ash::vk::PhysicalDeviceType::DISCRETE_GPU => DeviceType::DiscreteGpu,
                        ash::vk::PhysicalDeviceType::VIRTUAL_GPU => DeviceType::VirtualGpu,
                        ash::vk::PhysicalDeviceType::CPU => DeviceType::Cpu,
                        _ => DeviceType::Other,
                    },
                };
                let physical_device = PhysicalDevice {
                    instance: self.raw.clone(),
                    handle: device,
                    properties,
                };
                let queue_families = unsafe {
                    self.raw
                        .0
                        .get_physical_device_queue_family_properties(device)
                        .into_iter()
                        .enumerate()
                        .map(|(i, properties)| QueueFamily {
                            properties,
                            device,
                            index: i as u32,
                        })
                        .collect()
                };
                hal::Adapter {
                    info,
                    physical_device,
                    queue_families,
                }
            })
            .collect()
    }
}
/// A queue family of a physical device: its capability properties, the
/// owning device handle, and the family's index on that device.
#[derive(Debug, Clone)]
pub struct QueueFamily {
    properties: vk::QueueFamilyProperties,
    device: vk::PhysicalDevice,
    index: u32,
}
impl hal::queue::QueueFamily for QueueFamily {
    /// Classify this family (general/graphics/compute/transfer) from its
    /// capability flags.
    fn queue_type(&self) -> QueueType {
        map_queue_type(self.properties.queue_flags)
    }
    /// Maximum number of queues that can be created in this family.
    fn max_queues(&self) -> usize {
        self.properties.queue_count as _
    }
    /// Stable identifier: the family's index on the physical device.
    fn id(&self) -> queue::QueueFamilyId {
        queue::QueueFamilyId(self.index as _)
    }
}
/// A physical device (GPU) handle, its cached properties, and a reference to
/// the instance that keeps it alive.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct PhysicalDevice {
    #[derivative(Debug = "ignore")]
    instance: Arc<RawInstance>,
    handle: vk::PhysicalDevice,
    properties: vk::PhysicalDeviceProperties,
}
impl hal::PhysicalDevice<Backend> for PhysicalDevice {
    /// Open a logical device with the requested queue families and features.
    ///
    /// Returns `DeviceCreationError::MissingFeature` when
    /// `requested_features` is not a subset of what `features()` reports.
    unsafe fn open(
        &self,
        families: &[(&QueueFamily, &[hal::QueuePriority])],
        requested_features: Features,
    ) -> Result<hal::Gpu<Backend>, DeviceCreationError> {
        // One VkDeviceQueueCreateInfo per family; the priority slice length
        // determines how many queues that family gets.
        let family_infos = families
            .iter()
            .map(|&(family, priorities)| vk::DeviceQueueCreateInfo {
                s_type: vk::StructureType::DEVICE_QUEUE_CREATE_INFO,
                p_next: ptr::null(),
                flags: vk::DeviceQueueCreateFlags::empty(),
                queue_family_index: family.index,
                queue_count: priorities.len() as _,
                p_queue_priorities: priorities.as_ptr(),
            })
            .collect::<Vec<_>>();
        if !self.features().contains(requested_features) {
            return Err(DeviceCreationError::MissingFeature);
        }
        let enabled_features = conv::map_device_features(requested_features);
        // Create device
        let device_raw = {
            let cstrings = DEVICE_EXTENSIONS
                .iter()
                .map(|&s| CString::from(s))
                .collect::<Vec<_>>();
            let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::<Vec<_>>();
            let info = vk::DeviceCreateInfo {
                s_type: vk::StructureType::DEVICE_CREATE_INFO,
                p_next: ptr::null(),
                flags: vk::DeviceCreateFlags::empty(),
                queue_create_info_count: family_infos.len() as u32,
                p_queue_create_infos: family_infos.as_ptr(),
                enabled_layer_count: 0,
                pp_enabled_layer_names: ptr::null(),
                enabled_extension_count: str_pointers.len() as u32,
                pp_enabled_extension_names: str_pointers.as_ptr(),
                p_enabled_features: &enabled_features,
            };
            self.instance
                .0
                .create_device(self.handle, &info, None)
                .map_err(Into::<result::Error>::into)
                .map_err(Into::<DeviceCreationError>::into)?
        };
        // Load the VK_KHR_swapchain entry points for this specific device.
        let swapchain_fn = vk::KhrSwapchainFn::load(|name| {
            mem::transmute(
                self.instance
                    .0
                    .get_device_proc_addr(device_raw.handle(), name.as_ptr()),
            )
        });
        let device = Device {
            raw: Arc::new(RawDevice(device_raw, requested_features)),
        };
        let device_arc = device.raw.clone();
        // Retrieve the created queues and group them per family.
        let queues = families
            .into_iter()
            .map(|&(family, ref priorities)| {
                let family_index = family.index;
                let mut family_raw = hal::backend::RawQueueGroup::new(family.clone());
                for id in 0 .. priorities.len() {
                    let queue_raw = device_arc.0.get_device_queue(family_index, id as _);
                    family_raw.add_queue(CommandQueue {
                        raw: Arc::new(queue_raw),
                        device: device_arc.clone(),
                        swapchain_fn: swapchain_fn.clone(),
                    });
                }
                family_raw
            })
            .collect();
        Ok(hal::Gpu {
            device,
            queues: queue::Queues::new(queues),
        })
    }
fn format_properties(&self, format: Option<format::Format>) -> format::Properties {
let properties = unsafe {
self.instance.0.get_physical_device_format_properties(
self.handle,
format.map_or(vk::Format::UNDEFINED, conv::map_format),
)
};
format::Properties {
linear_tiling: conv::map_image_features(properties.linear_tiling_features),
optimal_tiling: conv::map_image_features(properties.optimal_tiling_features),
buffer_features: conv::map_buffer_features(properties.buffer_features),
}
}
fn image_format_properties(
&self,
format: format::Format,
dimensions: u8,
tiling: image::Tiling,
usage: image::Usage,
view_caps: image::ViewCapabilities,
) -> Option<image::FormatProperties> {
let format_properties = unsafe {
self.instance.0.get_physical_device_image_format_properties(
self.handle,
conv::map_format(format),
match dimensions {
1 => vk::ImageType::TYPE_1D,
2 => vk::ImageType::TYPE_2D,
3 => vk::ImageType::TYPE_3D,
_ => panic!("Unexpected image dimensionality: {}", dimensions),
},
conv::map_tiling(tiling),
conv::map_image_usage(usage),
conv::map_view_capabilities(view_caps),
)
};
match format_properties {
Ok(props) => Some(image::FormatProperties {
max_extent: image::Extent {
width: props.max_extent.width,
height: props.max_extent.height,
depth: props.max_extent.depth,
},
max_levels: props.max_mip_levels as _,
max_layers: props.max_array_layers as _,
sample_count_mask: props.sample_counts.as_raw() as _,
max_resource_size: props.max_resource_size as _,
}),
Err(vk::Result::ERROR_FORMAT_NOT_SUPPORTED) => None,
Err(other) => {
error!("Unexpected error in `image_format_properties`: {:?}", other);
None
}
}
}
fn memory_properties(&self) -> hal::MemoryProperties {
let mem_properties = unsafe {
self.instance
.0
.get_physical_device_memory_properties(self.handle)
};
let memory_heaps = mem_properties.memory_heaps
[.. mem_properties.memory_heap_count as usize]
.iter()
.map(|mem| mem.size)
.collect();
let memory_types = mem_properties.memory_types
[.. mem_properties.memory_type_count as usize]
.iter()
.map(|mem| {
use crate::memory::Properties;
let mut type_flags = Properties::empty();
if mem
.property_flags
.intersects(vk::MemoryPropertyFlags::DEVICE_LOCAL)
{
type_flags |= Properties::DEVICE_LOCAL;
}
if mem
.property_flags
.intersects(vk::MemoryPropertyFlags::HOST_VISIBLE)
{
type_flags |= Properties::CPU_VISIBLE;
}
if mem
.property_flags
.intersects(vk::MemoryPropertyFlags::HOST_COHERENT)
{
type_flags |= Properties::COHERENT;
}
if mem
.property_flags
.intersects(vk::MemoryPropertyFlags::HOST_CACHED)
{
type_flags |= Properties::CPU_CACHED;
}
if mem
.property_flags
.intersects(vk::MemoryPropertyFlags::LAZILY_ALLOCATED)
{
type_flags |= Properties::LAZILY_ALLOCATED;
}
hal::MemoryType {
properties: type_flags,
heap_index: mem.heap_index as usize,
}
})
.collect();
hal::MemoryProperties {
memory_heaps,
memory_types,
}
}
fn features(&self) -> Features {
// see https://github.com/gfx-rs/gfx/issues/1930
let is_windows_intel_dual_src_bug = cfg!(windows)
&& self.properties.vendor_id == info::intel::VENDOR
&& (self.properties.device_id & info::intel::DEVICE_KABY_LAKE_MASK
== info::intel::DEVICE_KABY_LAKE_MASK
|| self.properties.device_id & info::intel::DEVICE_SKY_LAKE_MASK
== info::intel::DEVICE_SKY_LAKE_MASK);
let features = unsafe { self.instance.0.get_physical_device_features(self.handle) };
let mut bits = Features::TRIANGLE_FAN
| Features::SEPARATE_STENCIL_REF_VALUES
| Features::SAMPLER_MIP_LOD_BIAS;
if features.robust_buffer_access != 0 {
bits |= Features::ROBUST_BUFFER_ACCESS;
}
if features.full_draw_index_uint32 != 0 {
bits |= Features::FULL_DRAW_INDEX_U32;
}
if features.image_cube_array != 0 {
bits |= Features::IMAGE_CUBE_ARRAY;
}
if features.independent_blend != 0 {
bits |= Features::INDEPENDENT_BLENDING;
}
if features.geometry_shader != 0 {
bits |= Features::GEOMETRY_SHADER;
}
if features.tessellation_shader != 0 {
bits |= Features::TESSELLATION_SHADER;
}
if features.sample_rate_shading != 0 {
bits |= Features::SAMPLE_RATE_SHADING;
}
if features.dual_src_blend != 0 && !is_windows_intel_dual_src_bug {
bits |= Features::DUAL_SRC_BLENDING;
}
if features.logic_op != 0 {
bits |= Features::LOGIC_OP;
}
if features.multi_draw_indirect != 0 {
bits |= Features::MULTI_DRAW_INDIRECT;
}
if features.draw_indirect_first_instance != 0 {
bits |= Features::DRAW_INDIRECT_FIRST_INSTANCE;
}
if features.depth_clamp != 0 {
bits |= Features::DEPTH_CLAMP;
}
if features.depth_bias_clamp != 0 {
bits |= Features::DEPTH_BIAS_CLAMP;
}
if features.fill_mode_non_solid != 0 {
bits |= Features::NON_FILL_POLYGON_MODE;
}
if features.depth_bounds != 0 {
bits |= Features::DEPTH_BOUNDS;
}
if features.wide_lines != 0 {
bits |= Features::LINE_WIDTH;
}
if features.large_points != 0 {
bits |= Features::POINT_SIZE;
}
if features.alpha_to_one != 0 {
bits |= Features::ALPHA_TO_ONE;
}
if features.multi_viewport != 0 {
bits |= Features::MULTI_VIEWPORTS;
}
if features.sampler_anisotropy != 0 {
bits |= Features::SAMPLER_ANISOTROPY;
}
if features.texture_compression_etc2 != 0 {
bits |= Features::FORMAT_ETC2;
}
if features.texture_compression_astc_ldr != 0 {
bits |= Features::FORMAT_ASTC_LDR;
}
if features.texture_compression_bc != 0 {
bits |= Features::FORMAT_BC;
}
if features.occlusion_query_precise != 0 {
bits |= Features::PRECISE_OCCLUSION_QUERY;
}
if features.pipeline_statistics_query != 0 {
bits |= Features::PIPELINE_STATISTICS_QUERY;
}
if features.vertex_pipeline_stores_and_atomics != 0 {
bits |= Features::VERTEX_STORES_AND_ATOMICS;
}
if features.fragment_stores_and_atomics != 0 {
bits |= Features::FRAGMENT_STORES_AND_ATOMICS;
}
if features.shader_tessellation_and_geometry_point_size != 0 {
bits |= Features::SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE;
}
if features.shader_image_gather_extended != 0 {
bits |= Features::SHADER_IMAGE_GATHER_EXTENDED;
}
if features.shader_storage_image_extended_formats != 0 {
bits |= Features::SHADER_STORAGE_IMAGE_EXTENDED_FORMATS;
}
if features.shader_storage_image_multisample != 0 {
bits |= Features::SHADER_STORAGE_IMAGE_MULTISAMPLE;
}
if features.shader_storage_image_read_without_format != 0 {
bits |= Features::SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT;
}
if features.shader_storage_image_write_without_format != 0 {
bits |= Features::SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT;
}
if features.shader_uniform_buffer_array_dynamic_indexing != 0 {
bits |= Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING;
}
if features.shader_sampled_image_array_dynamic_indexing != 0 {
bits |= Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING;
}
if features.shader_storage_buffer_array_dynamic_indexing != 0 {
bits |= Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING;
}
if features.shader_storage_image_array_dynamic_indexing != 0 {
bits |= Features::SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING;
}
if features.shader_clip_distance != 0 {
bits |= Features::SHADER_CLIP_DISTANCE;
}
if features.shader_cull_distance != 0 {
bits |= Features::SHADER_CULL_DISTANCE;
}
if features.shader_float64 != 0 {
bits |= Features::SHADER_FLOAT64;
}
if features.shader_int64 != 0 {
bits |= Features::SHADER_INT64;
}
if features.shader_int16 != 0 {
bits |= Features::SHADER_INT16;
}
if features.shader_resource_residency != 0 {
bits |= Features::SHADER_RESOURCE_RESIDENCY;
}
if features.shader_resource_min_lod != 0 {
bits |= Features::SHADER_RESOURCE_MIN_LOD;
}
if features.sparse_binding != 0 {
bits |= Features::SPARSE_BINDING;
}
if features.sparse_residency_buffer != 0 {
bits |= Features::SPARSE_RESIDENCY_BUFFER;
}
if features.sparse_residency_image2_d != 0 {
bits |= Features::SPARSE_RESIDENCY_IMAGE_2D;
}
if features.sparse_residency_image3_d != 0 {
bits |= Features::SPARSE_RESIDENCY_IMAGE_3D;
}
if features.sparse_residency2_samples != 0 {
bits |= Features::SPARSE_RESIDENCY_2_SAMPLES;
}
if features.sparse_residency4_samples != 0 {
bits |= Features::SPARSE_RESIDENCY_4_SAMPLES;
}
if features.sparse_residency8_samples != 0 {
bits |= Features::SPARSE_RESIDENCY_8_SAMPLES;
}
if features.sparse_residency16_samples != 0 {
bits |= Features::SPARSE_RESIDENCY_16_SAMPLES;
}
if features.sparse_residency_aliased != 0 {
bits |= Features::SPARSE_RESIDENCY_ALIASED;
}
if features.variable_multisample_rate != 0 {
bits |= Features::VARIABLE_MULTISAMPLE_RATE;
}
if features.inherited_queries != 0 {
bits |= Features::INHERITED_QUERIES;
}
bits
}
    /// Translate the cached `VkPhysicalDeviceLimits` into HAL `Limits`,
    /// field by field (widening/narrowing casts where the HAL type differs).
    fn limits(&self) -> Limits {
        let limits = &self.properties.limits;
        let max_group_count = limits.max_compute_work_group_count;
        let max_group_size = limits.max_compute_work_group_size;
        Limits {
            max_image_1d_size: limits.max_image_dimension1_d,
            max_image_2d_size: limits.max_image_dimension2_d,
            max_image_3d_size: limits.max_image_dimension3_d,
            max_image_cube_size: limits.max_image_dimension_cube,
            max_image_array_layers: limits.max_image_array_layers as _,
            max_texel_elements: limits.max_texel_buffer_elements as _,
            max_patch_size: limits.max_tessellation_patch_size as PatchSize,
            max_viewports: limits.max_viewports as _,
            max_viewport_dimensions: limits.max_viewport_dimensions,
            max_framebuffer_extent: image::Extent {
                width: limits.max_framebuffer_width,
                height: limits.max_framebuffer_height,
                depth: limits.max_framebuffer_layers,
            },
            max_compute_work_group_count: [
                max_group_count[0] as _,
                max_group_count[1] as _,
                max_group_count[2] as _,
            ],
            max_compute_work_group_size: [
                max_group_size[0] as _,
                max_group_size[1] as _,
                max_group_size[2] as _,
            ],
            max_vertex_input_attributes: limits.max_vertex_input_attributes as _,
            max_vertex_input_bindings: limits.max_vertex_input_bindings as _,
            max_vertex_input_attribute_offset: limits.max_vertex_input_attribute_offset as _,
            max_vertex_input_binding_stride: limits.max_vertex_input_binding_stride as _,
            max_vertex_output_components: limits.max_vertex_output_components as _,
            optimal_buffer_copy_offset_alignment: limits.optimal_buffer_copy_offset_alignment as _,
            optimal_buffer_copy_pitch_alignment: limits.optimal_buffer_copy_row_pitch_alignment
                as _,
            min_texel_buffer_offset_alignment: limits.min_texel_buffer_offset_alignment as _,
            min_uniform_buffer_offset_alignment: limits.min_uniform_buffer_offset_alignment as _,
            min_storage_buffer_offset_alignment: limits.min_storage_buffer_offset_alignment as _,
            framebuffer_color_samples_count: limits.framebuffer_color_sample_counts.as_raw() as _,
            framebuffer_depth_samples_count: limits.framebuffer_depth_sample_counts.as_raw() as _,
            framebuffer_stencil_samples_count: limits.framebuffer_stencil_sample_counts.as_raw()
                as _,
            max_color_attachments: limits.max_color_attachments as _,
            buffer_image_granularity: limits.buffer_image_granularity,
            non_coherent_atom_size: limits.non_coherent_atom_size as _,
            max_sampler_anisotropy: limits.max_sampler_anisotropy,
            // Vulkan imposes no extra stride alignment beyond 1 byte.
            min_vertex_input_binding_stride_alignment: 1,
            max_bound_descriptor_sets: limits.max_bound_descriptor_sets as _,
            max_compute_shared_memory_size: limits.max_compute_shared_memory_size as _,
            max_compute_work_group_invocations: limits.max_compute_work_group_invocations as _,
            max_descriptor_set_input_attachments: limits.max_descriptor_set_input_attachments as _,
            max_descriptor_set_sampled_images: limits.max_descriptor_set_sampled_images as _,
            max_descriptor_set_samplers: limits.max_descriptor_set_samplers as _,
            max_descriptor_set_storage_buffers: limits.max_descriptor_set_storage_buffers as _,
            max_descriptor_set_storage_buffers_dynamic: limits
                .max_descriptor_set_storage_buffers_dynamic
                as _,
            max_descriptor_set_storage_images: limits.max_descriptor_set_storage_images as _,
            max_descriptor_set_uniform_buffers: limits.max_descriptor_set_uniform_buffers as _,
            max_descriptor_set_uniform_buffers_dynamic: limits
                .max_descriptor_set_uniform_buffers_dynamic
                as _,
            max_draw_indexed_index_value: limits.max_draw_indexed_index_value,
            max_draw_indirect_count: limits.max_draw_indirect_count,
            max_fragment_combined_output_resources: limits.max_fragment_combined_output_resources
                as _,
            max_fragment_dual_source_attachments: limits.max_fragment_dual_src_attachments as _,
            max_fragment_input_components: limits.max_fragment_input_components as _,
            max_fragment_output_attachments: limits.max_fragment_output_attachments as _,
            max_framebuffer_layers: limits.max_framebuffer_layers as _,
            max_geometry_input_components: limits.max_geometry_input_components as _,
            max_geometry_output_components: limits.max_geometry_output_components as _,
            max_geometry_output_vertices: limits.max_geometry_output_vertices as _,
            max_geometry_shader_invocations: limits.max_geometry_shader_invocations as _,
            max_geometry_total_output_components: limits.max_geometry_total_output_components as _,
            max_memory_allocation_count: limits.max_memory_allocation_count as _,
            max_per_stage_descriptor_input_attachments: limits
                .max_per_stage_descriptor_input_attachments
                as _,
            max_per_stage_descriptor_sampled_images: limits.max_per_stage_descriptor_sampled_images
                as _,
            max_per_stage_descriptor_samplers: limits.max_per_stage_descriptor_samplers as _,
            max_per_stage_descriptor_storage_buffers: limits
                .max_per_stage_descriptor_storage_buffers
                as _,
            max_per_stage_descriptor_storage_images: limits.max_per_stage_descriptor_storage_images
                as _,
            max_per_stage_descriptor_uniform_buffers: limits
                .max_per_stage_descriptor_uniform_buffers
                as _,
            max_per_stage_resources: limits.max_per_stage_resources as _,
            max_push_constants_size: limits.max_push_constants_size as _,
            max_sampler_allocation_count: limits.max_sampler_allocation_count as _,
            max_sampler_lod_bias: limits.max_sampler_lod_bias as _,
            max_storage_buffer_range: limits.max_storage_buffer_range as _,
            max_uniform_buffer_range: limits.max_uniform_buffer_range as _,
            min_memory_map_alignment: limits.min_memory_map_alignment,
            standard_sample_locations: limits.standard_sample_locations == ash::vk::TRUE,
        }
    }
fn is_valid_cache(&self, cache: &[u8]) -> bool {
    // Fixed-size prefix of a Vulkan pipeline cache blob: header length,
    // header version, vendor id, device id (4 LE bytes each), then the
    // pipeline-cache UUID.
    const HEADER_SIZE: usize = 16 + vk::UUID_SIZE;

    // A blob shorter than the fixed header cannot be valid.
    if cache.len() < HEADER_SIZE {
        warn!("Bad cache data length {:?}", cache.len());
        return false;
    }

    // Little-endian u32 reader over the raw header bytes.
    let read_u32 = |at: usize| {
        u32::from_le_bytes([cache[at], cache[at + 1], cache[at + 2], cache[at + 3]])
    };
    let header_len = read_u32(0);
    let header_version = read_u32(4);
    let vendor_id = read_u32(8);
    let device_id = read_u32(12);

    // The recorded header length must cover at least the fixed-size prefix.
    if (header_len as usize) < HEADER_SIZE {
        warn!("Bad header length {:?}", header_len);
        return false;
    }

    // Only version ONE of the cache header layout is understood.
    if header_version != vk::PipelineCacheHeaderVersion::ONE.as_raw() as u32 {
        warn!("Unsupported cache header version: {:?}", header_version);
        return false;
    }

    // The cache must come from the same vendor...
    if vendor_id != self.properties.vendor_id {
        warn!(
            "Vendor ID mismatch. Device: {:?}, cache: {:?}.",
            self.properties.vendor_id, vendor_id,
        );
        return false;
    }

    // ...the same device...
    if device_id != self.properties.device_id {
        warn!(
            "Device ID mismatch. Device: {:?}, cache: {:?}.",
            self.properties.device_id, device_id,
        );
        return false;
    }

    // ...and the same driver build (the UUID changes across driver updates).
    if self.properties.pipeline_cache_uuid != cache[16 .. HEADER_SIZE] {
        warn!(
            "Pipeline cache UUID mismatch. Device: {:?}, cache: {:?}.",
            self.properties.pipeline_cache_uuid,
            &cache[16 .. HEADER_SIZE],
        );
        return false;
    }

    true
}
}
#[doc(hidden)]
// Owning wrapper for the ash logical device plus the `Features` it was
// created with; destroyed in `Drop`.
pub struct RawDevice(pub ash::Device, Features);
impl fmt::Debug for RawDevice {
    /// Placeholder Debug output; the raw device handle carries no useful
    /// printable state yet.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // TODO: Real Debug impl
        f.write_str("RawDevice")
    }
}
impl Drop for RawDevice {
    // Destroys the Vulkan device when the last owner releases it.
    fn drop(&mut self) {
        // SAFETY: Drop runs at most once per value, so the device is destroyed
        // exactly once.
        // NOTE(review): Vulkan requires all child objects to be destroyed
        // before the device — confirm that ownership elsewhere guarantees this.
        unsafe {
            self.0.destroy_device(None);
        }
    }
}
// Need to explicitly synchronize on submission and present.
// Shared handle to a raw Vulkan queue.
pub type RawCommandQueue = Arc<vk::Queue>;
#[derive(Derivative)]
#[derivative(Debug)]
pub struct CommandQueue {
    // The shared raw Vulkan queue handle.
    raw: RawCommandQueue,
    // Keeps the logical device (and its function pointers) alive while the
    // queue is in use.
    device: Arc<RawDevice>,
    // Loaded VK_KHR_swapchain entry points; not printable, hence ignored.
    #[derivative(Debug = "ignore")]
    swapchain_fn: vk::KhrSwapchainFn,
}
impl hal::queue::RawCommandQueue<Backend> for CommandQueue {
    // Submits command buffers with wait/signal semaphores to this queue.
    // Panics if vkQueueSubmit returns an error.
    unsafe fn submit<'a, T, Ic, S, Iw, Is>(
        &mut self,
        submission: hal::queue::Submission<Ic, Iw, Is>,
        fence: Option<&native::Fence>,
    ) where
        T: 'a + Borrow<command::CommandBuffer>,
        Ic: IntoIterator<Item = &'a T>,
        S: 'a + Borrow<native::Semaphore>,
        Iw: IntoIterator<Item = (&'a S, PipelineStage)>,
        Is: IntoIterator<Item = &'a S>,
    {
        //TODO: avoid heap allocations
        // These Vecs own the arrays the SubmitInfo raw pointers refer to, so
        // they must outlive the queue_submit call below.
        let mut waits = Vec::new();
        let mut stages = Vec::new();
        let buffers = submission
            .command_buffers
            .into_iter()
            .map(|cmd| cmd.borrow().raw)
            .collect::<Vec<_>>();
        // waits[i] is paired with stages[i].
        for (semaphore, stage) in submission.wait_semaphores {
            waits.push(semaphore.borrow().0);
            stages.push(conv::map_pipeline_stage(stage));
        }
        let signals = submission
            .signal_semaphores
            .into_iter()
            .map(|semaphore| semaphore.borrow().0)
            .collect::<Vec<_>>();
        let info = vk::SubmitInfo {
            s_type: vk::StructureType::SUBMIT_INFO,
            p_next: ptr::null(),
            wait_semaphore_count: waits.len() as u32,
            p_wait_semaphores: waits.as_ptr(),
            // If count is zero, AMD driver crashes if nullptr is not set for stage masks
            p_wait_dst_stage_mask: if stages.is_empty() {
                ptr::null()
            } else {
                stages.as_ptr()
            },
            command_buffer_count: buffers.len() as u32,
            p_command_buffers: buffers.as_ptr(),
            signal_semaphore_count: signals.len() as u32,
            p_signal_semaphores: signals.as_ptr(),
        };
        // A null fence means "no fence to signal".
        let fence_raw = fence.map(|fence| fence.0).unwrap_or(vk::Fence::null());
        let result = self.device.0.queue_submit(*self.raw, &[info], fence_raw);
        assert_eq!(Ok(()), result);
    }
    // Queues presentation of the given swapchain images after the wait
    // semaphores signal, mapping Vulkan result codes to hal errors.
    unsafe fn present<'a, W, Is, S, Iw>(
        &mut self,
        swapchains: Is,
        wait_semaphores: Iw,
    ) -> Result<Option<Suboptimal>, PresentError>
    where
        W: 'a + Borrow<window::Swapchain>,
        Is: IntoIterator<Item = (&'a W, SwapImageIndex)>,
        S: 'a + Borrow<native::Semaphore>,
        Iw: IntoIterator<Item = &'a S>,
    {
        let semaphores = wait_semaphores
            .into_iter()
            .map(|sem| sem.borrow().0)
            .collect::<Vec<_>>();
        // Parallel arrays: frames[i] is the image index for vk_swapchains[i].
        let mut frames = Vec::new();
        let mut vk_swapchains = Vec::new();
        for (swapchain, index) in swapchains {
            vk_swapchains.push(swapchain.borrow().raw);
            frames.push(index);
        }
        let info = vk::PresentInfoKHR {
            s_type: vk::StructureType::PRESENT_INFO_KHR,
            p_next: ptr::null(),
            wait_semaphore_count: semaphores.len() as _,
            p_wait_semaphores: semaphores.as_ptr(),
            swapchain_count: vk_swapchains.len() as _,
            p_swapchains: vk_swapchains.as_ptr(),
            p_image_indices: frames.as_ptr(),
            // Per-swapchain results are not collected.
            p_results: ptr::null_mut(),
        };
        match self.swapchain_fn.queue_present_khr(*self.raw, &info) {
            vk::Result::SUCCESS => Ok(None),
            vk::Result::SUBOPTIMAL_KHR => Ok(Some(Suboptimal)),
            vk::Result::ERROR_OUT_OF_HOST_MEMORY => {
                Err(PresentError::OutOfMemory(OutOfMemory::OutOfHostMemory))
            }
            vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
                Err(PresentError::OutOfMemory(OutOfMemory::OutOfDeviceMemory))
            }
            vk::Result::ERROR_DEVICE_LOST => Err(PresentError::DeviceLost(DeviceLost)),
            vk::Result::ERROR_OUT_OF_DATE_KHR => Err(PresentError::OutOfDate),
            vk::Result::ERROR_SURFACE_LOST_KHR => Err(PresentError::SurfaceLost(SurfaceLost)),
            // Any other result code is unexpected for vkQueuePresentKHR here.
            _ => panic!("Failed to present frame"),
        }
    }
    // Blocks until this queue has finished all submitted work.
    fn wait_idle(&self) -> Result<(), HostExecutionError> {
        unsafe {
            self.device
                .0
                .queue_wait_idle(*self.raw)
                .map_err(From::from)
                .map_err(From::<result::Error>::from) // HostExecutionError
        }
    }
}
// Logical device wrapper; clones of the inner `Arc` keep the raw device alive.
#[derive(Debug)]
pub struct Device {
    raw: Arc<RawDevice>,
}
// Uninhabited marker type selecting this (ash/Vulkan) backend in gfx-hal.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum Backend {}
// Binds every backend-specific concrete type to the `Backend` marker.
impl hal::Backend for Backend {
    type PhysicalDevice = PhysicalDevice;
    type Device = Device;
    type Surface = window::Surface;
    type Swapchain = window::Swapchain;
    type QueueFamily = QueueFamily;
    type CommandQueue = CommandQueue;
    type CommandBuffer = command::CommandBuffer;
    type Memory = native::Memory;
    type CommandPool = pool::RawCommandPool;
    type ShaderModule = native::ShaderModule;
    type RenderPass = native::RenderPass;
    type Framebuffer = native::Framebuffer;
    type Buffer = native::Buffer;
    type BufferView = native::BufferView;
    type Image = native::Image;
    type ImageView = native::ImageView;
    type Sampler = native::Sampler;
    type ComputePipeline = native::ComputePipeline;
    type GraphicsPipeline = native::GraphicsPipeline;
    type PipelineLayout = native::PipelineLayout;
    type PipelineCache = native::PipelineCache;
    type DescriptorSetLayout = native::DescriptorSetLayout;
    type DescriptorPool = native::DescriptorPool;
    type DescriptorSet = native::DescriptorSet;
    type Fence = native::Fence;
    type Semaphore = native::Semaphore;
    type Event = native::Event;
    type QueryPool = native::QueryPool;
}
|
// Copyright (c) 2019, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.
use crate::api::ContextInner;
use crate::encoder::TEMPORAL_DELIMITER;
use crate::quantize::ac_q;
use crate::quantize::dc_q;
use crate::quantize::select_ac_qi;
use crate::quantize::select_dc_qi;
use crate::util::{clamp, ILog, Pixel};
// The number of frame sub-types for which we track distinct parameters.
// This does not include FRAME_SUBTYPE_SEF, because we don't need to do any
// parameter tracking for Show Existing Frame frames.
pub const FRAME_NSUBTYPES: usize = 4;
// Indices of the individual frame sub-types within the tracked arrays.
pub const FRAME_SUBTYPE_I: usize = 0;
pub const FRAME_SUBTYPE_P: usize = 1;
#[allow(unused)]
pub const FRAME_SUBTYPE_B0: usize = 2;
#[allow(unused)]
pub const FRAME_SUBTYPE_B1: usize = 3;
// Show Existing Frame; deliberately outside the FRAME_NSUBTYPES range.
pub const FRAME_SUBTYPE_SEF: usize = 4;
// Encoding-pass identifiers for 1-pass and 2-pass rate control.
const PASS_SINGLE: i32 = 0;
const PASS_1: i32 = 1;
const PASS_2: i32 = 2;
// Second pass that also re-emits first-pass data.
const PASS_2_PLUS_1: i32 = 3;
// Magic value at the start of the 2-pass stats file
const TWOPASS_MAGIC: i32 = 0x50324156;
// Version number for the 2-pass stats file
const TWOPASS_VERSION: i32 = 1;
// 4 byte magic + 4 byte version + 4 byte TU count + 4 byte SEF frame count
// + FRAME_NSUBTYPES*(4 byte frame count + 1 byte exp + 8 byte scale_sum)
const TWOPASS_HEADER_SZ: usize = 16 + FRAME_NSUBTYPES * (4 + 1 + 8);
// 4 byte frame type (show_frame and fti jointly coded) + 4 byte log_scale_q24
const TWOPASS_PACKET_SZ: usize = 8;
// NOTE(review): presumably the bit cost charged per Show Existing Frame —
// confirm at the use site (not visible in this chunk).
const SEF_BITS: i64 = 24;
// The scale of AV1 quantizer tables (relative to the pixel domain), i.e., Q3.
pub(crate) const QSCALE: i32 = 3;
// We clamp the actual I and B frame delays to a minimum of 10 to work
// within the range of values where later incrementing the delay works as
// designed.
// 10 is not an exact choice, but rather a good working trade-off.
const INTER_DELAY_TARGET_MIN: i32 = 10;
// The base quantizer for a frame is adjusted based on the frame type using the
// formula (log_qp*mqp + dqp), where log_qp is the base-2 logarithm of the
// "linear" quantizer (the actual factor by which coefficients are divided).
// Because log_qp has an implicit offset built in based on the scale of the
// coefficients (which depends on the pixel bit depth and the transform
// scale), we normalize the quantizer to the equivalent for 8-bit pixels with
// orthonormal transforms for the purposes of rate modeling.
// Per-subtype multiplicative factors (Q12), one per FRAME_SUBTYPE_* index.
const MQP_Q12: &[i32; FRAME_NSUBTYPES] = &[
    // TODO: Use a const function once f64 operations in const functions are
    // stable.
    (1.0 * (1 << 12) as f64) as i32,
    (1.0 * (1 << 12) as f64) as i32,
    (1.0 * (1 << 12) as f64) as i32,
    (1.0 * (1 << 12) as f64) as i32,
];
// The ratio 33_810_170.0 / 86_043_287.0 was derived by approximating the median
// of a change of 15 quantizer steps in the quantizer tables.
// Per-subtype additive offsets (Q57) applied on top of the MQP_Q12 factors.
const DQP_Q57: &[i64; FRAME_NSUBTYPES] = &[
    (-(33_810_170.0 / 86_043_287.0) * (1i64 << 57) as f64) as i64,
    (0.0 * (1i64 << 57) as f64) as i64,
    ((33_810_170.0 / 86_043_287.0) * (1i64 << 57) as f64) as i64,
    (2.0 * (33_810_170.0 / 86_043_287.0) * (1i64 << 57) as f64) as i64,
];
// Convert an integer into a Q57 fixed-point fraction.
// The integer must be in the range -64 to 63, inclusive.
// Out-of-range inputs wrap silently (the high bits are shifted out).
pub(crate) const fn q57(v: i32) -> i64 {
    // TODO: Add assert if it ever becomes possible to do in a const function.
    (v as i64) << 57
}
// Fixed-point per-iteration constants consumed by the shift-and-add loops in
// bexp64() and blog64(); entry i is the magnitude subtracted from the residual
// on iteration i (the tail entries have converged to a common value).
#[rustfmt::skip]
const ATANH_LOG2: &[i64; 32] = &[
    0x32B8_0347_3F7A_D0F4, 0x2F2A_71BD_4E25_E916, 0x2E68_B244_BB93_BA06,
    0x2E39_FB91_98CE_62E4, 0x2E2E_683F_6856_5C8F, 0x2E2B_850B_E207_7FC1,
    0x2E2A_CC58_FE7B_78DB, 0x2E2A_9E2D_E52F_D5F2, 0x2E2A_92A3_38D5_3EEC,
    0x2E2A_8FC0_8F5E_19B6, 0x2E2A_8F07_E51A_485E, 0x2E2A_8ED9_BA8A_F388,
    0x2E2A_8ECE_2FE7_384A, 0x2E2A_8ECB_4D3E_4B1A, 0x2E2A_8ECA_9494_0FE8,
    0x2E2A_8ECA_6669_811D, 0x2E2A_8ECA_5ADE_DD6A, 0x2E2A_8ECA_57FC_347E,
    0x2E2A_8ECA_5743_8A43, 0x2E2A_8ECA_5715_5FB4, 0x2E2A_8ECA_5709_D510,
    0x2E2A_8ECA_5706_F267, 0x2E2A_8ECA_5706_39BD, 0x2E2A_8ECA_5706_0B92,
    0x2E2A_8ECA_5706_0008, 0x2E2A_8ECA_5705_FD25, 0x2E2A_8ECA_5705_FC6C,
    0x2E2A_8ECA_5705_FC3E, 0x2E2A_8ECA_5705_FC33, 0x2E2A_8ECA_5705_FC30,
    0x2E2A_8ECA_5705_FC2F, 0x2E2A_8ECA_5705_FC2F
];
// Computes the binary exponential of logq57.
// input: a log base 2 in Q57 format.
// output: a 64 bit integer in Q0 (no fraction).
// TODO: Mark const once we can use local variables in a const function.
pub(crate) fn bexp64(logq57: i64) -> i64 {
    // Integer part of the log; a negative value means the result is < 1,
    // which truncates to 0 in Q0.
    let ipart = (logq57 >> 57) as i32;
    if ipart < 0 {
        return 0;
    }
    // Saturate at i64::MAX for exponents that cannot be represented.
    if ipart >= 63 {
        return 0x7FFF_FFFF_FFFF_FFFF;
    }
    // z is the fractional part of the log in Q62 format.
    // We need 1 bit of headroom since the magnitude can get larger than 1
    // during the iteration, and a sign bit.
    let mut z = logq57 - q57(ipart);
    let mut w: i64;
    if z != 0 {
        // Rust has 128 bit multiplies, so it should be possible to do this
        // faster without losing accuracy.
        z <<= 5;
        // w is the exponential in Q61 format (since it also needs headroom and can
        // get as large as 2.0); we could get another bit if we dropped the sign,
        // but we'll recover that bit later anyway.
        // Ideally this should start out as
        // \lim_{n->\infty} 2^{61}/\product_{i=1}^n \sqrt{1-2^{-2i}}
        // but in order to guarantee convergence we have to repeat iterations 4,
        // 13 (=3*4+1), and 40 (=3*13+1, etc.), so it winds up somewhat larger.
        w = 0x26A3_D0E4_01DD_846D;
        let mut i: i64 = 0;
        loop {
            // mask is all-ones when z is negative; (v + mask) ^ mask below
            // conditionally negates each update term.
            let mask = -((z < 0) as i64);
            w += ((w >> (i + 1)) + mask) ^ mask;
            z -= (ATANH_LOG2[i as usize] + mask) ^ mask;
            // Repeat iteration 4.
            if i >= 3 {
                break;
            }
            z *= 2;
            i += 1;
        }
        loop {
            let mask = -((z < 0) as i64);
            w += ((w >> (i + 1)) + mask) ^ mask;
            z -= (ATANH_LOG2[i as usize] + mask) ^ mask;
            // Repeat iteration 13.
            if i >= 12 {
                break;
            }
            z *= 2;
            i += 1;
        }
        while i < 32 {
            let mask = -((z < 0) as i64);
            w += ((w >> (i + 1)) + mask) ^ mask;
            z = (z - ((ATANH_LOG2[i as usize] + mask) ^ mask)) * 2;
            i += 1;
        }
        // Skip the remaining iterations unless we really require that much
        // precision.
        // We could have bailed out earlier for smaller iparts, but that would
        // require initializing w from a table, as the limit doesn't converge to
        // 61-bit precision until n=30.
        let mut wlo: i32 = 0;
        if ipart > 30 {
            // For these iterations, we just update the low bits, as the high bits
            // can't possibly be affected.
            // OD_ATANH_LOG2 has also converged (it actually did so one iteration
            // earlier, but that's no reason for an extra special case).
            loop {
                let mask = -((z < 0) as i64);
                wlo += (((w >> i) + mask) ^ mask) as i32;
                z -= (ATANH_LOG2[31] + mask) ^ mask;
                // Repeat iteration 40.
                if i >= 39 {
                    break;
                }
                z *= 2;
                i += 1;
            }
            while i < 61 {
                let mask = -((z < 0) as i64);
                wlo += (((w >> i) + mask) ^ mask) as i32;
                z = (z - ((ATANH_LOG2[31] + mask) ^ mask)) * 2;
                i += 1;
            }
        }
        w = (w << 1) + (wlo as i64);
    } else {
        // Exact power of two: 1.0 in Q62.
        w = 1i64 << 62;
    }
    // Round the Q61 result down to Q0 for the requested exponent.
    if ipart < 62 {
        w = ((w >> (61 - ipart)) + 1) >> 1;
    }
    w
}
// Computes the binary log of w.
// input: a 64-bit integer in Q0 (no fraction).
// output: a 64-bit log in Q57.
// TODO: Mark const once we can use local variables in a const function.
fn blog64(w: i64) -> i64 {
    let mut w = w;
    // The log of a non-positive value is undefined; -1 is the sentinel.
    if w <= 0 {
        return -1;
    }
    // Normalize w into [2^61, 2^62), remembering the integer part of the log.
    let ipart = w.ilog() as i32 - 1;
    if ipart > 61 {
        w >>= ipart - 61;
    } else {
        w <<= 61 - ipart;
    }
    // z is the fractional part of the log in Q61 format.
    let mut z: i64 = 0;
    if (w & (w - 1)) != 0 {
        // Rust has 128 bit multiplies, so it should be possible to do this
        // faster without losing accuracy.
        // x and y are the cosh() and sinh(), respectively, in Q61 format.
        // We are computing z = 2*atanh(y/x) = 2*atanh((w - 1)/(w + 1)).
        let mut x = w + (1i64 << 61);
        let mut y = w - (1i64 << 61);
        for i in 0..4 {
            // (v + mask) ^ mask conditionally negates each term to drive y
            // toward zero, accumulating the corresponding angle into z.
            let mask = -((y < 0) as i64);
            z += ((ATANH_LOG2[i as usize] >> i) + mask) ^ mask;
            let u = x >> (i + 1);
            x -= ((y >> (i + 1)) + mask) ^ mask;
            y -= (u + mask) ^ mask;
        }
        // Repeat iteration 4.
        for i in 3..13 {
            let mask = -((y < 0) as i64);
            z += ((ATANH_LOG2[i as usize] >> i) + mask) ^ mask;
            let u = x >> (i + 1);
            x -= ((y >> (i + 1)) + mask) ^ mask;
            y -= (u + mask) ^ mask;
        }
        // Repeat iteration 13.
        for i in 12..32 {
            let mask = -((y < 0) as i64);
            z += ((ATANH_LOG2[i as usize] >> i) + mask) ^ mask;
            let u = x >> (i + 1);
            x -= ((y >> (i + 1)) + mask) ^ mask;
            y -= (u + mask) ^ mask;
        }
        // OD_ATANH_LOG2 has converged.
        for i in 32..40 {
            let mask = -((y < 0) as i64);
            z += ((ATANH_LOG2[31] >> i) + mask) ^ mask;
            let u = x >> (i + 1);
            x -= ((y >> (i + 1)) + mask) ^ mask;
            y -= (u + mask) ^ mask;
        }
        // Repeat iteration 40.
        for i in 39..62 {
            let mask = -((y < 0) as i64);
            z += ((ATANH_LOG2[31] >> i) + mask) ^ mask;
            let u = x >> (i + 1);
            x -= ((y >> (i + 1)) + mask) ^ mask;
            y -= (u + mask) ^ mask;
        }
        // Round the accumulated fraction down to Q57.
        z = (z + 8) >> 4;
    }
    q57(ipart) + z
}
// Converts a Q57 fixed-point fraction to Q24 by rounding.
const fn q57_to_q24(v: i64) -> i32 {
    // Drop 32 of the 33 excess fraction bits, then round off the last one.
    (((v >> 32) + 1) >> 1) as i32
}
// Converts a Q24 fixed-point fraction to Q57.
const fn q24_to_q57(v: i32) -> i64 {
    // 57 - 24 == 33 additional fraction bits.
    (v as i64) << 33
}
// Binary exponentiation of a log_scale with 24-bit fractional precision and
// saturation.
// log_scale: A binary logarithm in Q24 format.
// Return: The binary exponential in Q24 format, saturated to 2**47 - 1 if
//  log_scale was too large.
fn bexp_q24(log_scale: i32) -> i64 {
    // Largest representable Q24 result.
    const MAX_Q24: i64 = (1i64 << 47) - 1;
    // An integer part of 23 or more cannot fit in the Q24 result.
    if log_scale >= 23 << 24 {
        return MAX_Q24;
    }
    // Lift the Q24 log to Q57 and add 24 to the exponent, so the Q0 output of
    // bexp64() lands directly in Q24; clamp in case of rounding overflow.
    bexp64(((log_scale as i64) << 33) + q57(24)).min(MAX_Q24)
}
// Coarse tangent table used by warp_alpha() for piecewise-linear
// interpolation; 18 entries spanning one half-turn in 36 steps.
// NOTE(review): values appear to be Q12 (entry 9 is 4096, i.e. 1.0) —
// confirm against the filter-warping derivation.
#[rustfmt::skip]
const ROUGH_TAN_LOOKUP: &[u16; 18] = &[
    0, 358, 722, 1098, 1491, 1910,
    2365, 2868, 3437, 4096, 4881, 5850,
    7094, 8784, 11254, 15286, 23230, 46817
];
// A digital approximation of a 2nd-order low-pass Bessel follower.
// We use this for rate control because it has fast reaction time, but is
// critically damped.
pub struct IIRBessel2 {
    // Feedback coefficients (Q24), applied to the two previous outputs.
    c: [i32; 2],
    // Input gain (Q24).
    g: i32,
    // The two most recent inputs (newest first).
    x: [i32; 2],
    // The two most recent outputs (newest first).
    y: [i32; 2],
}
// alpha is Q24 in the range [0,0.5).
// The return value is 5.12.
// TODO: Mark const once we can use local variables in a const function.
fn warp_alpha(alpha: i32) -> i32 {
    // Table index; clamped so that i + 1 stays within the 18-entry table.
    let i = ((alpha * 36) >> 24).min(16);
    let t0 = ROUGH_TAN_LOOKUP[i as usize];
    let t1 = ROUGH_TAN_LOOKUP[i as usize + 1];
    // Q24 fractional position between table entries i and i + 1.
    let d = alpha * 36 - (i << 24);
    // Linear interpolation: t0 + (t1 - t0)*d/2^24.
    // FIX(review): the slope must be widened to i64 *before* the shift. The
    // previous `((t1 - t0) << 8) as i64` performed the shift in u16, silently
    // discarding the high bits (Rust's `<<` truncates without panicking) for
    // any table step >= 256 — which is every step in ROUGH_TAN_LOOKUP — so the
    // interpolated value was corrupted whenever d != 0.
    let slope = ((t1 - t0) as i64) << 8;
    ((((t0 as i64) << 32) + slope * (d as i64)) >> 32) as i32
}
// Compute Bessel filter coefficients with the specified delay.
// Return: Filter parameters (c[0], c[1], g).
// NOTE(review): delay must be positive (it is used as a divisor); visible
// callers pass constants >= 4.
fn iir_bessel2_get_parameters(delay: i32) -> (i32, i32, i32) {
    // This borrows some code from an unreleased version of Postfish.
    // See the recipe at http://unicorn.us.com/alex/2polefilters.html for details
    // on deriving the filter coefficients.
    // alpha is Q24
    let alpha = (1 << 24) / delay;
    // warp is 7.12 (5.12? the max value is 70386 in Q12).
    let warp = warp_alpha(alpha).max(1) as i64;
    // k1 is 9.12 (6.12?)
    let k1 = 3 * warp;
    // k2 is 16.24 (11.24?)
    let k2 = k1 * warp;
    // d is 16.15 (10.15?)
    let d = ((((1 << 12) + k1) << 12) + k2 + 256) >> 9;
    // a is 0.32, since d is larger than both 1.0 and k2
    let a = (k2 << 23) / d;
    // ik2 is 25.24
    let ik2 = (1i64 << 48) / k2;
    // b1 is Q56; in practice, the integer ranges between -2 and 2.
    let b1 = 2 * a * (ik2 - (1i64 << 24));
    // b2 is Q56; in practice, the integer ranges between -2 and 2.
    let b2 = (1i64 << 56) - ((4 * a) << 24) - b1;
    // All of the filter parameters are Q24.
    (
        ((b1 + (1i64 << 31)) >> 32) as i32,
        ((b2 + (1i64 << 31)) >> 32) as i32,
        ((a + 128) >> 8) as i32,
    )
}
impl IIRBessel2 {
    /// Creates a filter with the given reaction delay, with both the input
    /// and output history primed to `value` (i.e. already at steady state).
    pub fn new(delay: i32, value: i32) -> IIRBessel2 {
        let (c0, c1, g) = iir_bessel2_get_parameters(delay);
        IIRBessel2 {
            c: [c0, c1],
            g,
            x: [value; 2],
            y: [value; 2],
        }
    }
    // Re-initialize Bessel filter coefficients with the specified delay.
    // This does not alter the x/y state, but changes the reaction time of the
    // filter.
    // Altering the time constant of a reactive filter without altering internal
    // state is something that has to be done carefuly, but our design operates
    // at high enough delays and with small enough time constant changes to make
    // it safe.
    pub fn reinit(&mut self, delay: i32) {
        let (c0, c1, g) = iir_bessel2_get_parameters(delay);
        self.c = [c0, c1];
        self.g = g;
    }
    /// Feeds one input sample through the filter and returns the new output.
    /// All accumulation is done in Q24 with 64-bit intermediates.
    pub fn update(&mut self, x: i32) -> i32 {
        // Weighted input history: x[n] + 2*x[n-1] + x[n-2].
        let input_sum =
            (x as i64) + 2 * (self.x[0] as i64) + (self.x[1] as i64);
        // Feedback contribution from the two previous outputs.
        let feedback = (self.y[0] as i64) * (self.c[0] as i64)
            + (self.y[1] as i64) * (self.c[1] as i64);
        // Apply the gain, round (the 1 << 23 term), and drop the Q24 scaling.
        let ya = ((input_sum * (self.g as i64) + feedback + (1i64 << 23))
            >> 24) as i32;
        // Shift the delay lines (newest sample first).
        self.x = [x, self.x[0]];
        self.y = [ya, self.y[0]];
        ya
    }
}
// Per-frame metrics recorded during pass 1 and consumed during pass 2.
#[derive(Copy, Clone)]
struct RCFrameMetrics {
    // The log base 2 of the scale factor for this frame in Q24 format.
    log_scale_q24: i32,
    // The frame type from pass 1
    fti: usize,
    // Whether or not the frame was hidden in pass 1
    show_frame: bool,
    // TODO: The input frame number corresponding to this frame in the input.
    // input_frameno: u32
    // TODO vfr: PTS
}
impl RCFrameMetrics {
fn new() -> RCFrameMetrics {
RCFrameMetrics { log_scale_q24: 0, fti: 0, show_frame: false }
}
}
// Rate-control state: the leaky-bucket reservoir, per-frame-subtype rate
// models, and the buffers/counters used by 2-pass encoding.
pub struct RCState {
    // The target bit-rate in bits per second.
    target_bitrate: i32,
    // The number of TUs over which to distribute the reservoir usage.
    // We use TUs because in our leaky bucket model, we only add bits to the
    // reservoir on TU boundaries.
    reservoir_frame_delay: i32,
    // Whether or not the reservoir_frame_delay was explicitly specified by the
    // user, or is the default value.
    reservoir_frame_delay_is_set: bool,
    // The maximum quantizer index to allow (for the luma AC coefficients, other
    // quantizers will still be adjusted to match).
    maybe_ac_qi_max: Option<u8>,
    // The minimum quantizer index to allow (for the luma AC coefficients).
    ac_qi_min: u8,
    // Will we drop frames to meet bitrate requirements?
    drop_frames: bool,
    // Do we respect the maximum reservoir fullness?
    cap_overflow: bool,
    // Can the reservoir go negative?
    cap_underflow: bool,
    // The log of the first-pass base quantizer.
    pass1_log_base_q: i64,
    // Two-pass mode state.
    // PASS_SINGLE => 1-pass encoding.
    // PASS_1 => 1st pass of 2-pass encoding.
    // PASS_2 => 2nd pass of 2-pass encoding.
    // PASS_2_PLUS_1 => 2nd pass of 2-pass encoding, but also emitting pass 1
    //  data again.
    twopass_state: i32,
    // The log of the number of pixels in a frame in Q57 format.
    log_npixels: i64,
    // The target average bits per Temporal Unit (input frame).
    bits_per_tu: i64,
    // The current bit reservoir fullness (bits available to be used).
    reservoir_fullness: i64,
    // The target buffer fullness.
    // This is where we'd like to be by the last keyframe that appears in the
    // next reservoir_frame_delay frames.
    reservoir_target: i64,
    // The maximum buffer fullness (total size of the buffer).
    reservoir_max: i64,
    // The log of estimated scale factor for the rate model in Q57 format.
    //
    // TODO: Convert to Q23 or figure out a better way to avoid overflow
    // once 2-pass mode is introduced, if required.
    log_scale: [i64; FRAME_NSUBTYPES],
    // The exponent used in the rate model in Q6 format.
    exp: [u8; FRAME_NSUBTYPES],
    // The log of an estimated scale factor used to obtain the real framerate,
    // for VFR sources or, e.g., 12 fps content doubled to 24 fps, etc.
    // TODO vfr: log_vfr_scale: i64,
    // Second-order lowpass filters to track scale and VFR.
    scalefilter: [IIRBessel2; FRAME_NSUBTYPES],
    // TODO vfr: vfrfilter: IIRBessel2,
    // The number of frames of each type we have seen, for filter adaptation
    // purposes.
    // These are only 32 bits to guarantee that we can sum the scales over the
    // whole file without overflow in a 64-bit int.
    // That limits us to 2.268 years at 60 fps (minus 33% with re-ordering).
    nframes: [i32; FRAME_NSUBTYPES + 1],
    inter_delay: [i32; FRAME_NSUBTYPES - 1],
    inter_delay_target: i32,
    // The total accumulated estimation bias.
    rate_bias: i64,
    // The number of (non-Show Existing Frame) frames that have been encoded.
    nencoded_frames: i64,
    // The number of Show Existing Frames that have been emitted.
    nsef_frames: i64,
    // Buffer for current frame metrics in pass 1.
    pass1_buffer: [u8; TWOPASS_HEADER_SZ],
    // Whether or not the user has retrieved the pass 1 data for the last frame.
    // For PASS_1 or PASS_2_PLUS_1 encoding, this is set to false after each
    // frame is encoded, and must be set to true by calling twopass_out() before
    // the next frame can be encoded.
    pass1_data_retrieved: bool,
    // Marks whether or not the user has retrieved the summary data at the end of
    // the encode.
    pass1_summary_retrieved: bool,
    // Buffer for current frame metrics in pass 2.
    pass2_buffer: [u8; TWOPASS_HEADER_SZ],
    // Whether or not the user has provided enough data to encode in the second
    // pass.
    // For PASS_2 or PASS_2_PLUS_1 encoding, this is set to false after each
    // frame, and must be set to true by calling twopass_in() before the next
    // frame can be encoded.
    pass2_data_ready: bool,
    // The current byte position in the frame metrics buffer.
    pass2_buffer_pos: usize,
    // In pass 2, this represents the number of bytes that are available in the
    // input buffer.
    pass2_buffer_fill: usize,
    // TODO: Add a way to force the next frame to be a keyframe in 2-pass mode.
    // Right now we are relying on keyframe detection to detect the same
    // keyframes.
    // The metrics for the previous frame.
    prev_metrics: RCFrameMetrics,
    // The metrics for the current frame.
    cur_metrics: RCFrameMetrics,
    // The buffered metrics for future frames.
    frame_metrics: Vec<RCFrameMetrics>,
    // The total number of frames still in use in the circular metric buffer.
    nframe_metrics: usize,
    // The index of the current frame in the circular metric buffer.
    frame_metrics_head: usize,
    // The TU count encoded so far.
    ntus: i32,
    // The TU count for the whole file.
    ntus_total: i32,
    // The remaining TU count.
    ntus_left: i32,
    // The frame count of each frame subtype in the whole file.
    nframes_total: [i32; FRAME_NSUBTYPES + 1],
    // The sum of those counts.
    nframes_total_total: i32,
    // The number of frames of each subtype yet to be processed.
    nframes_left: [i32; FRAME_NSUBTYPES + 1],
    // The sum of the scale values for each frame subtype.
    scale_sum: [i64; FRAME_NSUBTYPES],
    // The number of TUs represented by the current scale sums.
    scale_window_ntus: i32,
    // The frame count of each frame subtype in the current scale window.
    scale_window_nframes: [i32; FRAME_NSUBTYPES + 1],
    // The sum of the scale values for each frame subtype in the current window.
    scale_window_sum: [i64; FRAME_NSUBTYPES],
}
// TODO: Separate qi values for each color plane.
pub struct QuantizerParameters {
    // The full-precision, unmodulated log quantizer upon which our modulated
    // quantizer indices are based.
    // This is only used to limit sudden quality changes from frame to frame, and
    // as such is not adjusted when we encounter buffer overrun or underrun.
    pub log_base_q: i64,
    // The full-precision log quantizer modulated by the current frame type upon
    // which our quantizer indices are based (including any adjustments to
    // prevent buffer overrun or underrun).
    // This is used when estimating the scale parameter once we know the actual
    // bit usage of a frame.
    pub log_target_q: i64,
    // DC quantizer indices, one per plane (luma, chroma u, chroma v).
    pub dc_qi: [u8; 3],
    // AC quantizer indices, one per plane (luma, chroma u, chroma v).
    pub ac_qi: [u8; 3],
    // The RD lambda derived from log_target_q (see new_from_log_q).
    pub lambda: f64,
}
// Multiplying a Q57 base-2 log by this and applying exp() yields the squared
// linear value: exp((x/2^57) * 2*ln 2) == 2^(2 * x/2^57).
const Q57_SQUARE_EXP_SCALE: f64 =
    (2.0 * ::std::f64::consts::LN_2) / ((1i64 << 57) as f64);
// Daala style log-offset for chroma quantizers
fn chroma_offset(log_target_q: i64) -> (i64, i64) {
    // Clamp negative log-quantizers to zero before scaling.
    let q = log_target_q.max(0);
    // Gradient 0.266 optimized for CIEDE2000+PSNR on subset3;
    // approximated here as 1/4 + 1/64.
    let shrink = (q >> 2) + (q >> 6);
    // Base offsets: blog64(7) - blog64(4) and blog64(5) - blog64(4).
    let offset_u = 0x19D_5D9F_D501_0B37 - shrink;
    let offset_v = 0xA4_D3C2_5E68_DC58 - shrink;
    (offset_u, offset_v)
}
impl QuantizerParameters {
    /// Builds the full parameter set from the base and frame-type-modulated
    /// target log quantizers (both Q57) for the given bit depth.
    fn new_from_log_q(
        log_base_q: i64, log_target_q: i64, bit_depth: usize,
    ) -> QuantizerParameters {
        // Undo the 8-bit/orthonormal normalization for the coded bit depth.
        let scale = q57(QSCALE + bit_depth as i32 - 8);
        let (offset_u, offset_v) = chroma_offset(log_target_q);
        // Linear quantizer values for luma and the two chroma planes.
        let quantizer = bexp64(log_target_q + scale);
        let quantizer_u = bexp64(log_target_q + offset_u + scale);
        let quantizer_v = bexp64(log_target_q + offset_v + scale);
        // TODO: Allow lossless mode; i.e. qi == 0.
        // The .max(1) calls keep every index out of the lossless slot.
        let dc_qi = [
            select_dc_qi(quantizer, bit_depth).max(1),
            select_dc_qi(quantizer_u, bit_depth).max(1),
            select_dc_qi(quantizer_v, bit_depth).max(1),
        ];
        let ac_qi = [
            select_ac_qi(quantizer, bit_depth).max(1),
            select_ac_qi(quantizer_u, bit_depth).max(1),
            select_ac_qi(quantizer_v, bit_depth).max(1),
        ];
        // RD lambda, computed from the squared target quantizer.
        let lambda = (::std::f64::consts::LN_2 / 6.0)
            * ((log_target_q as f64) * Q57_SQUARE_EXP_SCALE).exp();
        QuantizerParameters { log_base_q, log_target_q, dc_qi, ac_qi, lambda }
    }
}
// The parameters that are required by twopass_out().
// We need a reference to the enclosing ContextInner to compute these, but
// twopass_out() cannot take such a reference, since it needs a &mut self
// reference to do its job, and RCState is contained inside ContextInner.
// In practice we don't modify anything in RCState until after we're finished
// reading from ContextInner, but Rust's borrow checker does not have a way to
// express that.
// There's probably a cleaner way to do this, but going with something simple
// for now, since this is not exposed in the public API.
pub(crate) struct TwoPassOutParams {
    // The log of the first-pass base quantizer (mirrors RCState).
    pass1_log_base_q: i64,
    // NOTE(review): presumably true once all frames have been processed,
    // triggering summary emission — confirm against twopass_out().
    done_processing: bool,
}
impl RCState {
pub fn new(
    frame_width: i32, frame_height: i32, framerate_num: i64,
    framerate_den: i64, target_bitrate: i32, maybe_ac_qi_max: Option<u8>,
    ac_qi_min: u8, max_key_frame_interval: i32,
    maybe_reservoir_frame_delay: Option<i32>,
) -> Option<RCState> {
    // Returns None on arithmetic overflow of any derived quantity (the
    // checked_* calls below); otherwise the fully initialized state.
    // The default buffer size is set equal to 1.5x the keyframe interval.
    // We enforce a minimum of 12 and a maximum of 240.
    // The interval is short enough to allow reaction, but long enough to allow
    // looking into the next GOP (avoiding the case where the last frames
    // before an I-frame get starved), in most cases.
    // The 12 frame minimum gives us some chance to distribute bit estimation
    // errors in the worst case.
    let reservoir_frame_delay = maybe_reservoir_frame_delay
        .unwrap_or(max_key_frame_interval.checked_mul(3)? >> 1)
        .max(12)
        .min(240);
    // TODO: What are the limits on these?
    let npixels = (frame_width as i64) * (frame_height as i64);
    // Insane framerates or frame sizes mean insane bitrates.
    // Let's not get carried away.
    // We also subtract 16 bits from each temporal unit to account for the
    // temporal delimeter, whose bits are not included in the frame sizes
    // reported to update_state().
    // TODO: Support constraints imposed by levels.
    let bits_per_tu = clamp(
        (target_bitrate as i64)
            .checked_mul(framerate_den)?
            .checked_div(framerate_num)?,
        40,
        0x4000_0000_0000,
    ) - (TEMPORAL_DELIMITER.len() * 8) as i64;
    let reservoir_max =
        bits_per_tu.checked_mul(reservoir_frame_delay as i64)?;
    // Start with a buffer fullness and fullness target of 50%.
    let reservoir_target = (reservoir_max + 1) >> 1;
    // Pick exponents and initial scales for quantizer selection.
    // ibpp is the integer number of pixels per bit of the TU budget.
    let ibpp = npixels / bits_per_tu;
    // These have been derived by encoding many clips at every quantizer
    // and running a piecewise-linear regression in binary log space.
    let (i_exp, i_log_scale) = if ibpp < 1 {
        (48u8, blog64(36) - q57(QSCALE))
    } else if ibpp < 4 {
        (61u8, blog64(55) - q57(QSCALE))
    } else {
        (77u8, blog64(129) - q57(QSCALE))
    };
    let (p_exp, p_log_scale) = if ibpp < 2 {
        (69u8, blog64(32) - q57(QSCALE))
    } else if ibpp < 139 {
        (104u8, blog64(84) - q57(QSCALE))
    } else {
        (83u8, blog64(19) - q57(QSCALE))
    };
    let (b0_exp, b0_log_scale) = if ibpp < 2 {
        (84u8, blog64(30) - q57(QSCALE))
    } else if ibpp < 92 {
        (120u8, blog64(68) - q57(QSCALE))
    } else {
        (68u8, blog64(4) - q57(QSCALE))
    };
    let (b1_exp, b1_log_scale) = if ibpp < 2 {
        (87u8, blog64(27) - q57(QSCALE))
    } else if ibpp < 126 {
        (139u8, blog64(84) - q57(QSCALE))
    } else {
        (61u8, blog64(1) - q57(QSCALE))
    };
    // TODO: Add support for "golden" P frames.
    Some(RCState {
        target_bitrate,
        reservoir_frame_delay,
        reservoir_frame_delay_is_set: maybe_reservoir_frame_delay.is_some(),
        maybe_ac_qi_max,
        ac_qi_min,
        drop_frames: false,
        cap_overflow: true,
        cap_underflow: false,
        pass1_log_base_q: 0,
        twopass_state: PASS_SINGLE,
        log_npixels: blog64(npixels),
        bits_per_tu,
        reservoir_fullness: reservoir_target,
        reservoir_target,
        reservoir_max,
        log_scale: [i_log_scale, p_log_scale, b0_log_scale, b1_log_scale],
        exp: [i_exp, p_exp, b0_exp, b1_exp],
        // Scale filters start primed to the model's initial scales (Q24).
        scalefilter: [
            IIRBessel2::new(4, q57_to_q24(i_log_scale)),
            IIRBessel2::new(INTER_DELAY_TARGET_MIN, q57_to_q24(p_log_scale)),
            IIRBessel2::new(INTER_DELAY_TARGET_MIN, q57_to_q24(b0_log_scale)),
            IIRBessel2::new(INTER_DELAY_TARGET_MIN, q57_to_q24(b1_log_scale)),
        ],
        // TODO VFR
        nframes: [0; FRAME_NSUBTYPES + 1],
        inter_delay: [INTER_DELAY_TARGET_MIN; FRAME_NSUBTYPES - 1],
        inter_delay_target: reservoir_frame_delay >> 1,
        rate_bias: 0,
        nencoded_frames: 0,
        nsef_frames: 0,
        pass1_buffer: [0; TWOPASS_HEADER_SZ],
        pass1_data_retrieved: false,
        pass1_summary_retrieved: false,
        pass2_buffer: [0; TWOPASS_HEADER_SZ],
        pass2_data_ready: false,
        pass2_buffer_pos: 0,
        pass2_buffer_fill: 0,
        prev_metrics: RCFrameMetrics::new(),
        cur_metrics: RCFrameMetrics::new(),
        frame_metrics: Vec::new(),
        nframe_metrics: 0,
        frame_metrics_head: 0,
        ntus: 0,
        ntus_total: 0,
        ntus_left: 0,
        nframes_total: [0; FRAME_NSUBTYPES + 1],
        nframes_total_total: 0,
        nframes_left: [0; FRAME_NSUBTYPES + 1],
        scale_sum: [0; FRAME_NSUBTYPES],
        scale_window_ntus: 0,
        scale_window_nframes: [0; FRAME_NSUBTYPES + 1],
        scale_window_sum: [0; FRAME_NSUBTYPES],
    })
}
// TODO: Separate quantizers for Cb and Cr.

/// Selects the quantizer parameters for the next frame to encode.
///
/// * `ctx`: the encoder context (configuration and GOP bookkeeping).
/// * `output_frameno`: the output frame number being encoded.
/// * `fti`: the frame sub-type index (`FRAME_SUBTYPE_*`).
/// * `maybe_prev_log_base_q`: base-2 log (Q57) of the previous frame's base
///   quantizer, if any; used to bound frame-to-frame quantizer swings.
///
/// Returns the `QuantizerParameters` the frame should be encoded with.
/// When rate control is inactive (`target_bitrate <= 0`) the quantizer is
/// derived directly from the configured quantizer index; otherwise a target
/// quantizer is found by bisection on the log-domain rate model
/// `rate = scale * quantizer^-exp`.
pub(crate) fn select_qi<T: Pixel>(
  &self, ctx: &ContextInner<T>, output_frameno: u64, fti: usize,
  maybe_prev_log_base_q: Option<i64>,
) -> QuantizerParameters {
  // Is rate control active?
  if self.target_bitrate <= 0 {
    // Rate control is not active.
    // Derive quantizer directly from frame type.
    // TODO: Rename "quantizer" something that indicates it is a quantizer
    // index, and move it somewhere more sensible (or choose a better way to
    // parameterize a "quality" configuration parameter).
    let base_qi = ctx.config.quantizer;
    let bit_depth = ctx.config.bit_depth;
    // We use the AC quantizer as the source quantizer since its quantizer
    // tables have unique entries, while the DC tables do not.
    let ac_quantizer = ac_q(base_qi as u8, 0, bit_depth) as i64;
    // Pick the nearest DC entry since an exact match may be unavailable.
    let dc_qi = select_dc_qi(ac_quantizer, bit_depth);
    let dc_quantizer = dc_q(dc_qi as u8, 0, bit_depth) as i64;
    // Get the log quantizers as Q57.
    let log_ac_q = blog64(ac_quantizer) - q57(QSCALE + bit_depth as i32 - 8);
    let log_dc_q = blog64(dc_quantizer) - q57(QSCALE + bit_depth as i32 - 8);
    // Target the midpoint of the chosen entries.
    let log_base_q = (log_ac_q + log_dc_q + 1) >> 1;
    // Adjust the quantizer for the frame type, result is Q57:
    let log_q = ((log_base_q + (1i64 << 11)) >> 12) * (MQP_Q12[fti] as i64)
      + DQP_Q57[fti];
    QuantizerParameters::new_from_log_q(log_base_q, log_q, bit_depth)
  } else {
    let mut nframes: [i32; FRAME_NSUBTYPES + 1] = [0; FRAME_NSUBTYPES + 1];
    let mut log_scale: [i64; FRAME_NSUBTYPES] = self.log_scale;
    let mut reservoir_tus = self.reservoir_frame_delay.min(self.ntus_left);
    let mut reservoir_frames = 0;
    // Scale estimate for the current frame, Q24 widened to Q57 (<< 33).
    let mut log_cur_scale = (self.scalefilter[fti].y[0] as i64) << 33;
    match self.twopass_state {
      // First pass of 2-pass mode: use a fixed base quantizer.
      PASS_1 => {
        // Adjust the quantizer for the frame type, result is Q57:
        let log_q = ((self.pass1_log_base_q + (1i64 << 11)) >> 12)
          * (MQP_Q12[fti] as i64)
          + DQP_Q57[fti];
        return QuantizerParameters::new_from_log_q(
          self.pass1_log_base_q,
          log_q,
          ctx.config.bit_depth,
        );
      }
      // Second pass of 2-pass mode: we know exactly how much of each frame
      // type there is in the current buffer window, and have estimates for
      // the scales.
      PASS_2 | PASS_2_PLUS_1 => {
        let mut scale_window_sum: [i64; FRAME_NSUBTYPES] =
          self.scale_window_sum;
        let mut scale_window_nframes: [i32; FRAME_NSUBTYPES + 1] =
          self.scale_window_nframes;
        // Intentionally exclude Show Existing Frame frames from this.
        for ftj in 0..FRAME_NSUBTYPES {
          reservoir_frames += scale_window_nframes[ftj];
        }
        // If we're approaching the end of the file, add some slack to keep
        // us from slamming into a rail.
        // Our rate accuracy goes down, but it keeps the result sensible.
        // We position the target where the first forced keyframe beyond the
        // end of the file would be (for consistency with 1-pass mode).
        // TODO: let mut buf_pad = self.reservoir_frame_delay.min(...);
        // if buf_delay < buf_pad {
        //   buf_pad -= buf_delay;
        // }
        // else ...
        // Otherwise, search for the last keyframe in the buffer window and
        // target that.
        // Currently we only do this when using a finite buffer.
        // We could save the position of the last keyframe in the stream in
        // the summary data and do it with a whole-file buffer as well, but
        // it isn't likely to make a difference.
        if !self.frame_metrics.is_empty() {
          // frame_metrics is a circular buffer; compute the tail index.
          let mut fm_tail = self.frame_metrics_head + self.nframe_metrics;
          if fm_tail >= self.frame_metrics.len() {
            fm_tail -= self.frame_metrics.len();
          }
          let mut fmi = fm_tail;
          loop {
            if fmi == 0 {
              fmi += self.frame_metrics.len();
            }
            fmi -= 1;
            // Stop before we remove the first frame.
            if fmi == self.frame_metrics_head {
              break;
            }
            // If we find a keyframe, remove it and everything past it.
            if self.frame_metrics[fmi].fti == FRAME_SUBTYPE_I {
              while fmi != fm_tail {
                let m = &self.frame_metrics[fmi];
                let ftj = m.fti;
                scale_window_nframes[ftj] -= 1;
                if ftj < FRAME_NSUBTYPES {
                  scale_window_sum[ftj] -= bexp_q24(m.log_scale_q24);
                  reservoir_frames -= 1;
                }
                if m.show_frame {
                  reservoir_tus -= 1;
                }
                fmi += 1;
                if fmi >= self.frame_metrics.len() {
                  fmi = 0;
                }
              }
              // And stop scanning backwards.
              break;
            }
          }
        }
        nframes = scale_window_nframes;
        // If we're not using the same frame type as in pass 1 (because
        // someone changed some encoding parameters), remove that scale
        // estimate.
        // We'll add a replacement for the correct frame type below.
        if self.cur_metrics.fti != fti {
          scale_window_nframes[self.cur_metrics.fti] -= 1;
          if self.cur_metrics.fti != FRAME_SUBTYPE_SEF {
            scale_window_sum[self.cur_metrics.fti] -=
              bexp_q24(self.cur_metrics.log_scale_q24);
          }
        } else {
          log_cur_scale = (self.cur_metrics.log_scale_q24 as i64) << 33;
        }
        // If we're approaching the end of the file, add some slack to keep
        // us from slamming into a rail.
        // Our rate accuracy goes down, but it keeps the result sensible.
        // We position the target where the first forced keyframe beyond the
        // end of the file would be (for consistency with 1-pass mode).
        if reservoir_tus >= self.ntus_left
          && self.ntus_total as u64
            > ctx.gop_input_frameno_start[&output_frameno]
        {
          let nfinal_gop_tus = self.ntus_total
            - (ctx.gop_input_frameno_start[&output_frameno] as i32);
          if ctx.config.max_key_frame_interval as i32 > nfinal_gop_tus {
            let reservoir_pad = (ctx.config.max_key_frame_interval as i32
              - nfinal_gop_tus)
              .min(self.reservoir_frame_delay - reservoir_tus);
            let (guessed_reservoir_frames, guessed_reservoir_tus) = ctx
              .guess_frame_subtypes(
                &mut nframes,
                reservoir_tus + reservoir_pad,
              );
            reservoir_frames = guessed_reservoir_frames;
            reservoir_tus = guessed_reservoir_tus;
          }
        }
        // Blend in the low-pass filtered scale according to how many
        // frames of each type we need to add compared to the actual sums in
        // our window.
        for ftj in 0..FRAME_NSUBTYPES {
          let scale = scale_window_sum[ftj]
            + bexp_q24(self.scalefilter[ftj].y[0])
              * (nframes[ftj] - scale_window_nframes[ftj]) as i64;
          log_scale[ftj] = if nframes[ftj] > 0 {
            blog64(scale) - blog64(nframes[ftj] as i64) - q57(24)
          } else {
            -self.log_npixels
          };
        }
      }
      // Single pass.
      _ => {
        // Figure out how to re-distribute bits so that we hit our fullness
        // target before the last keyframe in our current buffer window
        // (after the current frame), or the end of the buffer window,
        // whichever comes first.
        // Count the various types and classes of frames.
        let (guessed_reservoir_frames, guessed_reservoir_tus) =
          ctx.guess_frame_subtypes(&mut nframes, self.reservoir_frame_delay);
        reservoir_frames = guessed_reservoir_frames;
        reservoir_tus = guessed_reservoir_tus;
        // TODO: Scale for VFR.
      }
    }
    // If we've been missing our target, add a penalty term.
    let rate_bias = (self.rate_bias / (self.nencoded_frames as i64 + 100))
      * (reservoir_frames as i64);
    // rate_total is the total bits available over the next
    // reservoir_tus TUs.
    let rate_total = self.reservoir_fullness - self.reservoir_target
      + rate_bias
      + (reservoir_tus as i64) * self.bits_per_tu;
    // Find a target quantizer that meets our rate target for the
    // specific mix of frame types we'll have over the next
    // reservoir_frame frames.
    // We model the rate<->quantizer relationship as
    //  rate = scale*(quantizer**-exp)
    // In this case, we have our desired rate, an exponent selected in
    // setup, and a scale that's been measured over our frame history,
    // so we're solving for the quantizer.
    // Exponentiation with arbitrary exponents is expensive, so we work
    // in the binary log domain (binary exp and log aren't too bad):
    //  rate = exp2(log2(scale) - log2(quantizer)*exp)
    // There's no easy closed form solution, so we bisection search for it.
    let bit_depth = ctx.config.bit_depth;
    // TODO: Proper handling of lossless.
    let mut log_qlo = blog64(ac_q(self.ac_qi_min, 0, bit_depth) as i64)
      - q57(QSCALE + bit_depth as i32 - 8);
    // The AC quantizer tables map to values larger than the DC quantizer
    // tables, so we use that as the upper bound to make sure we can use
    // the full table if needed.
    let mut log_qhi =
      blog64(ac_q(self.maybe_ac_qi_max.unwrap_or(255), 0, bit_depth) as i64)
        - q57(QSCALE + bit_depth as i32 - 8);
    let mut log_base_q = (log_qlo + log_qhi) >> 1;
    while log_qlo < log_qhi {
      // Count bits contributed by each frame type using the model.
      let mut bits = 0i64;
      for ftj in 0..FRAME_NSUBTYPES {
        // Modulate base quantizer by frame type.
        let log_q = ((log_base_q + (1i64 << 11)) >> 12)
          * (MQP_Q12[ftj] as i64)
          + DQP_Q57[ftj];
        // All the fields here are Q57 except for the exponent, which is
        // Q6.
        bits += (nframes[ftj] as i64)
          * bexp64(
            log_scale[ftj] + self.log_npixels
              - ((log_q + 32) >> 6) * (self.exp[ftj] as i64),
          );
      }
      // The number of bits for Show Existing Frame frames is constant.
      bits += (nframes[FRAME_SUBTYPE_SEF] as i64) * SEF_BITS;
      let diff = bits - rate_total;
      if diff > 0 {
        log_qlo = log_base_q + 1;
      } else if diff < 0 {
        log_qhi = log_base_q - 1;
      } else {
        break;
      }
      log_base_q = (log_qlo + log_qhi) >> 1;
    }
    // If this was not one of the initial frames, limit the change in
    // base quantizer to within [0.8*Q, 1.2*Q] where Q is the previous
    // frame's base quantizer.
    if let Some(prev_log_base_q) = maybe_prev_log_base_q {
      log_base_q = clamp(
        log_base_q,
        prev_log_base_q - 0xA4_D3C2_5E68_DC58,
        prev_log_base_q + 0xA4_D3C2_5E68_DC58,
      );
    }
    // Modulate base quantizer by frame type.
    let mut log_q = ((log_base_q + (1i64 << 11)) >> 12)
      * (MQP_Q12[fti] as i64)
      + DQP_Q57[fti];
    // The above allocation looks only at the total rate we'll accumulate
    // in the next reservoir_frame_delay frames.
    // However, we could overflow the bit reservoir on the very next
    // frame.
    // Check for that here if we're not using a soft target.
    if self.cap_overflow {
      // Allow 3% of the buffer for prediction error.
      // This should be plenty, and we don't mind if we go a bit over.
      // We only want to keep these bits from being completely wasted.
      let margin = (self.reservoir_max + 31) >> 5;
      // We want to use at least this many bits next frame.
      let soft_limit = self.reservoir_fullness + self.bits_per_tu
        - (self.reservoir_max - margin);
      if soft_limit > 0 {
        let log_soft_limit = blog64(soft_limit);
        // If we're predicting we won't use that many bits...
        // TODO: When using frame re-ordering, we should include the rate
        // for all of the frames in the current TU.
        // When there is more than one frame, there will be no direct
        // solution for the required adjustment, however.
        let log_scale_pixels = log_cur_scale + self.log_npixels;
        let exp = self.exp[fti] as i64;
        let mut log_q_exp = ((log_q + 32) >> 6) * exp;
        if log_scale_pixels - log_q_exp < log_soft_limit {
          // Scale the adjustment based on how far into the margin we are.
          log_q_exp += ((log_scale_pixels - log_soft_limit - log_q_exp)
            >> 32)
            * ((margin.min(soft_limit) << 32) / margin);
          log_q = ((log_q_exp + (exp >> 1)) / exp) << 6;
        }
      }
    }
    // We just checked we don't overflow the reservoir next frame, now
    // check we don't underflow and bust the budget (when not using a
    // soft target).
    if self.maybe_ac_qi_max.is_none() {
      // Compute the maximum number of bits we can use in the next frame.
      // Allow 50% of the rate for a single frame for prediction error.
      // This may not be enough for keyframes or sudden changes in
      // complexity.
      let log_hard_limit =
        blog64(self.reservoir_fullness + (self.bits_per_tu >> 1));
      // If we're predicting we'll use more than this...
      // TODO: When using frame re-ordering, we should include the rate
      // for all of the frames in the current TU.
      // When there is more than one frame, there will be no direct
      // solution for the required adjustment, however.
      let log_scale_pixels = log_cur_scale + self.log_npixels;
      let exp = self.exp[fti] as i64;
      let mut log_q_exp = ((log_q + 32) >> 6) * exp;
      if log_scale_pixels - log_q_exp > log_hard_limit {
        // Force the target to hit our limit exactly.
        log_q_exp = log_scale_pixels - log_hard_limit;
        log_q = ((log_q_exp + (exp >> 1)) / exp) << 6;
        // If that target is unreasonable, oh well; we'll have to drop.
      }
    }
    QuantizerParameters::new_from_log_q(log_base_q, log_q, bit_depth)
  }
}
/// Updates the rate-control state after encoding (or trial-encoding) a frame.
///
/// * `bits`: bits actually produced by the frame; `<= 0` means no blocks
///   were coded and the frame is treated as dropped.
/// * `fti`: the frame sub-type index of the frame.
/// * `show_frame`: whether the frame is shown (i.e. completes a TU).
/// * `log_target_q`: base-2 log (Q57) of the quantizer the frame used.
/// * `trial`: whether this was a trial encode used only to seed the scale
///   estimate for this frame type (the frame will be re-encoded).
/// * `droppable`: whether the caller permits dropping this frame.
///
/// Returns `true` if the frame was (or must be) dropped.
pub fn update_state(
  &mut self, bits: i64, fti: usize, show_frame: bool, log_target_q: i64,
  trial: bool, droppable: bool,
) -> bool {
  if trial {
    assert!(self.needs_trial_encode(fti));
    assert!(bits > 0);
  }
  let mut dropped = false;
  // Update rate control only if rate control is active.
  if self.target_bitrate > 0 {
    let mut estimated_bits = 0;
    let mut bits = bits;
    let mut droppable = droppable;
    // Sentinel "no scale" value (most negative representable Q57 log).
    let mut log_scale = q57(-64);
    // Drop frames is also disabled for now in the case of infinite-buffer
    // two-pass mode.
    if !self.drop_frames
      || fti == FRAME_SUBTYPE_SEF
      || (self.twopass_state == PASS_2
        || self.twopass_state == PASS_2_PLUS_1)
        && !self.frame_metrics.is_empty()
    {
      droppable = false;
    }
    if fti == FRAME_SUBTYPE_SEF {
      debug_assert!(bits == SEF_BITS);
      debug_assert!(show_frame);
      // Please don't make trial encodes of a SEF.
      debug_assert!(!trial);
      estimated_bits = SEF_BITS;
      self.nsef_frames += 1;
    } else {
      // Convert the Q57 log quantizer to Q6 and scale by the model exponent.
      let log_q_exp = ((log_target_q + 32) >> 6) * (self.exp[fti] as i64);
      let prev_log_scale = self.log_scale[fti];
      if bits <= 0 {
        // We didn't code any blocks in this frame.
        bits = 0;
        dropped = true;
        // TODO: Adjust VFR rate based on drop count.
      } else {
        // Compute the estimated scale factor for this frame type.
        let log_bits = blog64(bits);
        log_scale = (log_bits - self.log_npixels + log_q_exp).min(q57(16));
        estimated_bits =
          bexp64(prev_log_scale + self.log_npixels - log_q_exp);
        if !trial {
          self.nencoded_frames += 1;
        }
      }
    }
    let log_scale_q24 = q57_to_q24(log_scale);
    // Special two-pass processing.
    if self.twopass_state == PASS_2 || self.twopass_state == PASS_2_PLUS_1 {
      // Pass 2 mode:
      if !trial {
        // Move the current metrics back one frame.
        self.prev_metrics = self.cur_metrics;
        // Back out the last frame's statistics from the sliding window.
        let ftj = self.prev_metrics.fti;
        self.nframes_left[ftj] -= 1;
        self.scale_window_nframes[ftj] -= 1;
        if ftj < FRAME_NSUBTYPES {
          self.scale_window_sum[ftj] -=
            bexp_q24(self.prev_metrics.log_scale_q24);
        }
        if self.prev_metrics.show_frame {
          self.ntus_left -= 1;
          self.scale_window_ntus -= 1;
        }
        // Free the corresponding entry in the circular buffer.
        if !self.frame_metrics.is_empty() {
          self.nframe_metrics -= 1;
          self.frame_metrics_head += 1;
          if self.frame_metrics_head >= self.frame_metrics.len() {
            self.frame_metrics_head = 0;
          }
        }
        // Mark us ready for the next 2-pass packet.
        self.pass2_data_ready = false;
        // Update state, so the user doesn't have to keep calling
        // twopass_in() after they've fed in all the data when we're using
        // a finite buffer.
        self.twopass_in(None).unwrap_or(0);
      }
    }
    if self.twopass_state == PASS_1 || self.twopass_state == PASS_2_PLUS_1 {
      // Pass 1 mode: save the metrics for this frame.
      self.prev_metrics.log_scale_q24 = log_scale_q24;
      self.prev_metrics.fti = fti;
      self.prev_metrics.show_frame = show_frame;
      self.pass1_data_retrieved = false;
    }
    // Common to all passes:
    if fti != FRAME_SUBTYPE_SEF && bits > 0 {
      // If this is the first example of the given frame type we've seen,
      // we immediately replace the default scale factor guess with the
      // estimate we just computed using the first frame.
      if trial || self.nframes[fti] <= 0 {
        let f = &mut self.scalefilter[fti];
        let x = log_scale_q24;
        f.x[0] = x;
        f.x[1] = x;
        f.y[0] = x;
        f.y[1] = x;
        self.log_scale[fti] = log_scale;
        // TODO: Duplicate regular P frame state for first golden P frame.
      } else {
        // Lengthen the time constant for the inter filters as we collect
        // more frame statistics, until we reach our target.
        if fti > 0
          && self.inter_delay[fti - 1] < self.inter_delay_target
          && self.nframes[fti] >= self.inter_delay[fti - 1]
        {
          self.inter_delay[fti - 1] += 1;
          self.scalefilter[fti].reinit(self.inter_delay[fti - 1]);
        }
        // Update the low-pass scale filter for this frame type regardless
        // of whether or not we will ultimately drop this frame.
        self.log_scale[fti] =
          q24_to_q57(self.scalefilter[fti].update(log_scale_q24));
      }
      // If this frame busts our budget, it must be dropped.
      if droppable && self.reservoir_fullness + self.bits_per_tu < bits {
        // TODO: Adjust VFR rate based on drop count.
        bits = 0;
        dropped = true;
      } else {
        // TODO: Update a low-pass filter to estimate the "real" frame rate
        // taking timestamps and drops into account.
        // This is only done if the frame is coded, as it needs the final
        // count of dropped frames.
      }
    }
    if !trial {
      // Increment the frame count for filter adaptation purposes.
      // NOTE(review): the `!trial` test below is redundant — this code is
      // already inside an `if !trial` block.
      if !trial && self.nframes[fti] < ::std::i32::MAX {
        self.nframes[fti] += 1;
      }
      self.reservoir_fullness -= bits;
      if show_frame {
        self.reservoir_fullness += self.bits_per_tu;
        // TODO: Properly account for temporal delimiter bits.
      }
      // If we're too quick filling the buffer and overflow is capped, that
      // rate is lost forever.
      if self.cap_overflow {
        self.reservoir_fullness =
          self.reservoir_fullness.min(self.reservoir_max);
      }
      // If we're too quick draining the buffer and underflow is capped,
      // don't try to make up that rate later.
      if self.cap_underflow {
        self.reservoir_fullness = self.reservoir_fullness.max(0);
      }
      // Adjust the bias for the real bits we've used.
      self.rate_bias += estimated_bits - bits;
    }
  }
  dropped
}
/// Reports whether a trial encode is required to seed the scale estimate
/// for frame sub-type `fti`: rate control must be active and no frame of
/// that type may have been counted yet.
pub fn needs_trial_encode(&self, fti: usize) -> bool {
  let rc_active = self.target_bitrate > 0;
  let first_of_type = self.nframes[fti] == 0;
  rc_active && first_of_type
}
pub(crate) fn ready(&self) -> bool {
match self.twopass_state {
PASS_SINGLE => true,
PASS_1 => self.pass1_data_retrieved,
PASS_2 => self.pass2_data_ready,
_ => self.pass1_data_retrieved && self.pass2_data_ready,
}
}
/// Serializes the low `bytes` bytes of `val` into `pass1_buffer` starting at
/// `cur_pos`, least-significant byte first.
/// Returns the buffer position just past the written bytes.
fn buffer_val(&mut self, val: i64, bytes: usize, cur_pos: usize) -> usize {
  let mut remaining = val;
  for i in 0..bytes {
    // Emit the current low byte, then shift the next one into place.
    self.pass1_buffer[cur_pos + i] = remaining as u8;
    remaining >>= 8;
  }
  cur_pos + bytes
}
/// Computes the parameters `twopass_out()` needs for the next call.
///
/// Before the first pass is initialized this selects the fixed base
/// quantizer pass 1 will use; once the current frame's pass-1 data has been
/// retrieved it instead reports whether the encoder is done processing.
pub(crate) fn get_twopass_out_params<T: Pixel>(
  &self, ctx: &ContextInner<T>, output_frameno: u64,
) -> TwoPassOutParams {
  let mut pass1_log_base_q = 0;
  let mut done_processing = false;
  if self.pass1_data_retrieved {
    // Data for this frame was already handed out; only the completion
    // status remains to be reported.
    done_processing = ctx.done_processing();
  } else if self.twopass_state == PASS_SINGLE {
    // Not yet initialized: pick the first-pass base quantizer used for
    // scale calculations.
    pass1_log_base_q =
      self.select_qi(ctx, output_frameno, FRAME_SUBTYPE_I, None).log_base_q;
  }
  TwoPassOutParams { pass1_log_base_q, done_processing }
}
/// Produces the next chunk of pass-1 statistics output.
///
/// Returns `Some(bytes)` containing, in order over successive calls: the
/// initial dummy summary header (on first-pass initialization), one
/// per-frame packet per encoded frame, and the final summary header once
/// `params.done_processing` is set. Returns `None` if the current data has
/// already been retrieved, or if the frame count limit has been reached.
pub(crate) fn twopass_out(
  &mut self, params: TwoPassOutParams,
) -> Option<&[u8]> {
  let mut cur_pos = 0;
  if !self.pass1_data_retrieved {
    if self.twopass_state != PASS_1 && self.twopass_state != PASS_2_PLUS_1 {
      // Initialize the first pass.
      if self.twopass_state == PASS_SINGLE {
        // Pick first-pass qi for scale calculations.
        self.pass1_log_base_q = params.pass1_log_base_q;
      } else {
        debug_assert!(self.twopass_state == PASS_2);
      }
      // The pass states are additive: PASS_2 + PASS_1 == PASS_2_PLUS_1.
      self.twopass_state += PASS_1;
      // Fill in dummy summary values.
      cur_pos = self.buffer_val(TWOPASS_MAGIC as i64, 4, cur_pos);
      cur_pos = self.buffer_val(TWOPASS_VERSION as i64, 4, cur_pos);
      cur_pos = self.buffer_val(0, TWOPASS_HEADER_SZ - 8, cur_pos);
      debug_assert!(cur_pos == TWOPASS_HEADER_SZ);
    } else {
      // Accumulate running totals for the summary header from the metrics
      // of the frame just encoded.
      let fti = self.prev_metrics.fti;
      if fti < FRAME_NSUBTYPES {
        self.scale_sum[fti] += bexp_q24(self.prev_metrics.log_scale_q24);
      }
      if self.prev_metrics.show_frame {
        self.ntus += 1;
      }
      // If we have encoded too many frames, prevent us from reaching the
      // ready state required to encode more.
      if self.nencoded_frames + self.nsef_frames >= std::i32::MAX as i64 {
        None?
      }
      // Per-frame packet: show_frame in bit 31, fti in the low bits,
      // followed by the Q24 log scale.
      cur_pos = self.buffer_val(
        (self.prev_metrics.show_frame as i64) << 31
          | self.prev_metrics.fti as i64,
        4,
        cur_pos,
      );
      cur_pos =
        self.buffer_val(self.prev_metrics.log_scale_q24 as i64, 4, cur_pos);
      debug_assert!(cur_pos == TWOPASS_PACKET_SZ);
    }
    self.pass1_data_retrieved = true;
  } else if params.done_processing && !self.pass1_summary_retrieved {
    // Emit the real summary header now that all frames have been seen.
    cur_pos = self.buffer_val(TWOPASS_MAGIC as i64, 4, cur_pos);
    cur_pos = self.buffer_val(TWOPASS_VERSION as i64, 4, cur_pos);
    cur_pos = self.buffer_val(self.ntus as i64, 4, cur_pos);
    for fti in 0..=FRAME_NSUBTYPES {
      cur_pos = self.buffer_val(self.nframes[fti] as i64, 4, cur_pos);
    }
    for fti in 0..FRAME_NSUBTYPES {
      cur_pos = self.buffer_val(self.exp[fti] as i64, 1, cur_pos);
    }
    for fti in 0..FRAME_NSUBTYPES {
      cur_pos = self.buffer_val(self.scale_sum[fti], 8, cur_pos);
    }
    debug_assert!(cur_pos == TWOPASS_HEADER_SZ);
    self.pass1_summary_retrieved = true;
  } else {
    // The data for this frame has already been retrieved.
    return None;
  }
  Some(&self.pass1_buffer[..cur_pos])
}
/// Copies bytes from `buf` (starting at offset `consumed`) into the pass-2
/// input buffer until it holds `goal` bytes or `buf` is exhausted.
/// Returns the updated count of bytes consumed from `buf`.
fn buffer_fill(
  &mut self, buf: &[u8], consumed: usize, goal: usize,
) -> usize {
  let mut idx = consumed;
  while self.pass2_buffer_fill < goal {
    match buf.get(idx) {
      Some(&byte) => {
        self.pass2_buffer[self.pass2_buffer_fill] = byte;
        self.pass2_buffer_fill += 1;
        idx += 1;
      }
      // The caller's buffer ran dry before reaching the goal.
      None => break,
    }
  }
  idx
}
/// Deserializes `bytes` bytes from the pass-2 buffer at the current read
/// position, least-significant byte first, advancing the position.
fn unbuffer_val(&mut self, bytes: usize) -> i64 {
  let mut ret = 0i64;
  for i in 0..bytes {
    // Each successive byte lands 8 bits higher.
    let byte = self.pass2_buffer[self.pass2_buffer_pos] as i64;
    ret |= byte << (8 * i);
    self.pass2_buffer_pos += 1;
  }
  ret
}
// Read metrics for the next frame.
/// Decodes one per-frame metrics packet from the pass-2 buffer.
/// Returns `Err(())` if the packet encodes an out-of-range frame sub-type.
fn parse_metrics(&mut self) -> Result<RCFrameMetrics, ()> {
  debug_assert!(self.pass2_buffer_fill >= TWOPASS_PACKET_SZ);
  // The first word packs show_frame into bit 31 and fti into the rest.
  let packed = self.unbuffer_val(4);
  let show_frame = (packed >> 31) != 0;
  let fti = (packed & 0x7FFFFFFF) as usize;
  // Make sure the frame type is valid (FRAME_NSUBTYPES itself is the SEF
  // sub-type, so it is allowed).
  if fti > FRAME_NSUBTYPES {
    return Err(());
  }
  let log_scale_q24 = self.unbuffer_val(4) as i32;
  Ok(RCFrameMetrics { log_scale_q24, fti, show_frame })
}
/// Feeds pass-1 statistics into the rate controller for the second pass.
///
/// * `maybe_buf`: the next chunk of pass-1 data, or `None` to query how
///   many more bytes are needed (and to let internal bookkeeping advance).
///
/// Returns `Ok(n)` — either the number of bytes consumed from `buf`, or,
/// when called with `None` while more data is required, the number of bytes
/// still needed. Returns `Err(())` on malformed input (bad magic/version,
/// inconsistent counts, or overflowing totals).
pub(crate) fn twopass_in(
  &mut self, maybe_buf: Option<&[u8]>,
) -> Result<usize, ()> {
  let mut consumed = 0;
  if self.twopass_state == PASS_SINGLE || self.twopass_state == PASS_1 {
    // Initialize the second pass.
    // The pass states are additive: PASS_1 + PASS_2 == PASS_2_PLUS_1.
    self.twopass_state += PASS_2;
    // If the user requested a finite buffer, reserve the space required for
    // it.
    if self.reservoir_frame_delay_is_set {
      debug_assert!(self.reservoir_frame_delay > 0);
      // reservoir_frame_delay counts in TUs, but RCFrameMetrics are stored
      // per frame (including Show Existing Frame frames).
      // When re-ordering, we will have more frames than TUs.
      // How many more?
      // That depends on the re-ordering scheme used.
      // Doubling the number of TUs and adding a fixed latency equal to the
      // maximum number of reference frames we can store should be
      // sufficient for any reasonable scheme, and keeps this code from
      // depending too closely on the details of the scheme currently used
      // by rav1e.
      let nmetrics = (self.reservoir_frame_delay as usize) * 2 + 8;
      self.frame_metrics.reserve_exact(nmetrics);
      self.frame_metrics.resize(nmetrics, RCFrameMetrics::new());
    }
  }
  // If we haven't got a valid summary header yet, try to parse one.
  // (Every valid stream contains at least one keyframe, so a zero I-frame
  // count doubles as the "header not read yet" sentinel.)
  if self.nframes_total[FRAME_SUBTYPE_I] == 0 {
    self.pass2_data_ready = false;
    if let Some(buf) = maybe_buf {
      consumed = self.buffer_fill(buf, consumed, TWOPASS_HEADER_SZ);
      if self.pass2_buffer_fill >= TWOPASS_HEADER_SZ {
        self.pass2_buffer_pos = 0;
        // Read the summary header data.
        // check the magic value and version number.
        if self.unbuffer_val(4) != TWOPASS_MAGIC as i64
          || self.unbuffer_val(4) != TWOPASS_VERSION as i64
        {
          Err(())?;
        }
        let ntus_total = self.unbuffer_val(4) as i32;
        // Make sure the file claims to have at least one TU.
        // Otherwise we probably got the placeholder data from an aborted
        // pass 1.
        if ntus_total < 1 {
          Err(())?;
        }
        // Accumulate the per-type frame counts with overflow checking;
        // `None` indicates the total overflowed an i32.
        let mut maybe_nframes_total_total: Option<i32> = Some(0);
        let mut nframes_total: [i32; FRAME_NSUBTYPES + 1] =
          [0; FRAME_NSUBTYPES + 1];
        for fti in 0..=FRAME_NSUBTYPES {
          nframes_total[fti] = self.unbuffer_val(4) as i32;
          if nframes_total[fti] < 0 {
            Err(())?;
          }
          maybe_nframes_total_total = maybe_nframes_total_total
            .and_then(|n| n.checked_add(nframes_total[fti]));
        }
        if let Some(nframes_total_total) = maybe_nframes_total_total {
          // We can't have more TUs than frames.
          if ntus_total > nframes_total_total {
            Err(())?;
          }
          let mut exp: [u8; FRAME_NSUBTYPES] = [0; FRAME_NSUBTYPES];
          for fti in 0..FRAME_NSUBTYPES {
            exp[fti] = self.unbuffer_val(1) as u8;
          }
          let mut scale_sum: [i64; FRAME_NSUBTYPES] = [0; FRAME_NSUBTYPES];
          for fti in 0..FRAME_NSUBTYPES {
            scale_sum[fti] = self.unbuffer_val(8);
            if scale_sum[fti] < 0 {
              Err(())?;
            }
          }
          // Got a valid header.
          // Set up pass 2.
          self.ntus_total = ntus_total;
          self.ntus_left = ntus_total;
          self.nframes_total = nframes_total;
          self.nframes_left = nframes_total;
          self.nframes_total_total = nframes_total_total;
          if self.frame_metrics.is_empty() {
            // Whole-file buffering: the reservoir spans the entire stream.
            self.reservoir_frame_delay = ntus_total;
            self.scale_window_nframes = self.nframes_total;
            self.scale_window_sum = scale_sum;
            self.reservoir_max =
              self.bits_per_tu * (self.reservoir_frame_delay as i64);
            self.reservoir_target = (self.reservoir_max + 1) >> 1;
            self.reservoir_fullness = self.reservoir_target;
          } else {
            self.reservoir_frame_delay =
              self.reservoir_frame_delay.min(ntus_total);
          }
          self.exp = exp;
          // Clear the header data from the buffer to make room for the
          // packet data.
          self.pass2_buffer_fill = 0;
        } else {
          // The sum of the frame counts for each type overflowed a 32-bit
          // integer.
          Err(())?;
        }
      }
    } else {
      // No data supplied: report how many bytes the caller should provide.
      let frames_needed = if !self.frame_metrics.is_empty() {
        // If we're not using whole-file buffering, we need at least one
        // frame per buffer slot.
        self.reservoir_frame_delay as usize
      } else {
        // Otherwise we need just one.
        1
      };
      return Ok(TWOPASS_HEADER_SZ + frames_needed * TWOPASS_PACKET_SZ);
    }
  }
  // With a valid header in hand, try to buffer per-frame packets.
  if self.nframes_total[FRAME_SUBTYPE_I] > 0 {
    if self.nencoded_frames + self.nsef_frames
      >= self.nframes_total_total as i64
    {
      // We don't want any more data after the last frame, and we don't want
      // to allow any more frames to be encoded.
      self.pass2_data_ready = false;
    } else if !self.pass2_data_ready {
      if self.frame_metrics.is_empty() {
        // We're using a whole-file buffer.
        if let Some(buf) = maybe_buf {
          consumed = self.buffer_fill(buf, consumed, TWOPASS_PACKET_SZ);
          if self.pass2_buffer_fill >= TWOPASS_PACKET_SZ {
            self.pass2_buffer_pos = 0;
            // Read metrics for the next frame.
            self.cur_metrics = self.parse_metrics()?;
            // Clear the buffer for the next frame.
            self.pass2_buffer_fill = 0;
            self.pass2_data_ready = true;
          }
        } else {
          return Ok(TWOPASS_PACKET_SZ - self.pass2_buffer_fill);
        }
      } else {
        // We're using a finite buffer.
        let mut cur_scale_window_nframes = 0;
        let mut cur_nframes_left = 0;
        for fti in 0..=FRAME_NSUBTYPES {
          cur_scale_window_nframes += self.scale_window_nframes[fti];
          cur_nframes_left += self.nframes_left[fti];
        }
        // Frames still needed to cover reservoir_frame_delay TUs, capped by
        // how many frames actually remain in the stream.
        let mut frames_needed = (self.reservoir_frame_delay
          - self.scale_window_ntus)
          .max(0)
          .min(cur_nframes_left - cur_scale_window_nframes);
        while frames_needed > 0 {
          if let Some(buf) = maybe_buf {
            consumed = self.buffer_fill(buf, consumed, TWOPASS_PACKET_SZ);
            if self.pass2_buffer_fill >= TWOPASS_PACKET_SZ {
              self.pass2_buffer_pos = 0;
              // Read the metrics for the next frame.
              let m = self.parse_metrics()?;
              // Add them to the circular buffer.
              if self.nframe_metrics >= self.frame_metrics.len() {
                // We read too many frames without finding enough TUs.
                Err(())?;
              }
              let mut fmi = self.frame_metrics_head + self.nframe_metrics;
              if fmi >= self.frame_metrics.len() {
                fmi -= self.frame_metrics.len();
              }
              self.nframe_metrics += 1;
              self.frame_metrics[fmi] = m;
              // And accumulate the statistics over the window.
              self.scale_window_nframes[m.fti] += 1;
              cur_scale_window_nframes += 1;
              if m.fti < FRAME_NSUBTYPES {
                self.scale_window_sum[m.fti] += bexp_q24(m.log_scale_q24);
              }
              if m.show_frame {
                self.scale_window_ntus += 1;
              }
              frames_needed = (self.reservoir_frame_delay
                - self.scale_window_ntus)
                .max(0)
                .min(cur_nframes_left - cur_scale_window_nframes);
              // Clear the buffer for the next frame.
              self.pass2_buffer_fill = 0;
            } else {
              // Go back for more data.
              break;
            }
          } else {
            return Ok(
              TWOPASS_PACKET_SZ * (frames_needed as usize)
                - self.pass2_buffer_fill,
            );
          }
        }
        // If we've got all the frames we need, fill in the current metrics.
        // We're ready to go.
        if frames_needed <= 0 {
          self.cur_metrics = self.frame_metrics[self.frame_metrics_head];
          // Mark us ready for the next frame.
          self.pass2_data_ready = true;
        }
      }
    }
  }
  Ok(consumed)
}
}
#[cfg(test)]
mod test {
  use super::{bexp64, blog64};

  // Known input/output pairs for the Q57 binary log.
  #[test]
  fn blog64_vectors() {
    assert_eq!(blog64(1793), 0x159dc71e24d32daf);
    assert_eq!(blog64(0x678dde6e5fd29f05), 0x7d6373ad151ca685);
  }

  // Known input/output pairs for the Q57 binary exponential.
  // The large-value case is only accurate to within a small absolute error.
  #[test]
  fn bexp64_vectors() {
    assert_eq!(bexp64(0x159dc71e24d32daf), 1793);
    assert!((bexp64(0x7d6373ad151ca685) - 0x678dde6e5fd29f05).abs() < 29);
  }

  // log/exp must round-trip across the full i64 range: for every 16-bit a,
  // check a, i64::MAX / a, and their product, verifying both the additive
  // log identity and the exp inverse within small error bounds.
  #[test]
  fn blog64_bexp64_round_trip() {
    for a in 1..=std::u16::MAX as i64 {
      let b = std::i64::MAX / a;
      let (log_a, log_b, log_ab) = (blog64(a), blog64(b), blog64(a * b));
      assert!((log_a + log_b - log_ab).abs() < 4);
      assert_eq!(bexp64(log_a), a);
      assert!((bexp64(log_b) - b).abs() < 128);
      assert!((bexp64(log_ab) - a * b).abs() < 128);
    }
  }
}
Revert "Fix reservoir constraints"
This broke the actual intent and several use cases of the reservoir.
This reverts commit 444b89470c099fb885ee6945c070411a3a182698.
// Copyright (c) 2019, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.
use crate::api::ContextInner;
use crate::encoder::TEMPORAL_DELIMITER;
use crate::quantize::ac_q;
use crate::quantize::dc_q;
use crate::quantize::select_ac_qi;
use crate::quantize::select_dc_qi;
use crate::util::{clamp, ILog, Pixel};
// The number of frame sub-types for which we track distinct parameters.
// This does not include FRAME_SUBTYPE_SEF, because we don't need to do any
// parameter tracking for Show Existing Frame frames.
pub const FRAME_NSUBTYPES: usize = 4;
// Indices of the tracked frame sub-types.
pub const FRAME_SUBTYPE_I: usize = 0;
pub const FRAME_SUBTYPE_P: usize = 1;
#[allow(unused)]
pub const FRAME_SUBTYPE_B0: usize = 2;
#[allow(unused)]
pub const FRAME_SUBTYPE_B1: usize = 3;
// Show Existing Frame frames use the extra slot past the tracked sub-types.
pub const FRAME_SUBTYPE_SEF: usize = 4;
// Two-pass state machine values.
// These are additive: the state is advanced with `+= PASS_1` / `+= PASS_2`,
// so PASS_2_PLUS_1 == PASS_1 + PASS_2.
const PASS_SINGLE: i32 = 0;
const PASS_1: i32 = 1;
const PASS_2: i32 = 2;
const PASS_2_PLUS_1: i32 = 3;
// Magic value at the start of the 2-pass stats file
const TWOPASS_MAGIC: i32 = 0x50324156;
// Version number for the 2-pass stats file
const TWOPASS_VERSION: i32 = 1;
// 4 byte magic + 4 byte version + 4 byte TU count + 4 byte SEF frame count
// + FRAME_NSUBTYPES*(4 byte frame count + 1 byte exp + 8 byte scale_sum)
const TWOPASS_HEADER_SZ: usize = 16 + FRAME_NSUBTYPES * (4 + 1 + 8);
// 4 byte frame type (show_frame and fti jointly coded) + 4 byte log_scale_q24
const TWOPASS_PACKET_SZ: usize = 8;
// Fixed number of bits charged for each Show Existing Frame frame.
const SEF_BITS: i64 = 24;
// The scale of AV1 quantizer tables (relative to the pixel domain), i.e., Q3.
pub(crate) const QSCALE: i32 = 3;
// We clamp the actual I and B frame delays to a minimum of 10 to work
// within the range of values where later incrementing the delay works as
// designed.
// 10 is not an exact choice, but rather a good working trade-off.
const INTER_DELAY_TARGET_MIN: i32 = 10;
// The base quantizer for a frame is adjusted based on the frame type using the
// formula (log_qp*mqp + dqp), where log_qp is the base-2 logarithm of the
// "linear" quantizer (the actual factor by which coefficients are divided).
// Because log_qp has an implicit offset built in based on the scale of the
// coefficients (which depends on the pixel bit depth and the transform
// scale), we normalize the quantizer to the equivalent for 8-bit pixels with
// orthonormal transforms for the purposes of rate modeling.
// Per-frame-type multiplicative quantizer adjustment, Q12 (currently all 1.0).
const MQP_Q12: &[i32; FRAME_NSUBTYPES] = &[
  // TODO: Use a const function once f64 operations in const functions are
  // stable.
  (1.0 * (1 << 12) as f64) as i32,
  (1.0 * (1 << 12) as f64) as i32,
  (1.0 * (1 << 12) as f64) as i32,
  (1.0 * (1 << 12) as f64) as i32,
];
// Per-frame-type additive quantizer adjustment, Q57 (I frames lower, B frames
// progressively higher).
// The ratio 33_810_170.0 / 86_043_287.0 was derived by approximating the median
// of a change of 15 quantizer steps in the quantizer tables.
const DQP_Q57: &[i64; FRAME_NSUBTYPES] = &[
  (-(33_810_170.0 / 86_043_287.0) * (1i64 << 57) as f64) as i64,
  (0.0 * (1i64 << 57) as f64) as i64,
  ((33_810_170.0 / 86_043_287.0) * (1i64 << 57) as f64) as i64,
  (2.0 * (33_810_170.0 / 86_043_287.0) * (1i64 << 57) as f64) as i64,
];
// Convert an integer into a Q57 fixed-point fraction.
// The integer must be in the range -64 to 63, inclusive.
// (Values outside that range would overflow the i64 result.)
pub(crate) const fn q57(v: i32) -> i64 {
  // TODO: Add assert if it ever becomes possible to do in a const function.
  (v as i64) << 57
}
// Fixed-point constant table used by the iterative log/exp routines below.
// The name indicates these are atanh-based log2 coefficients.
// NOTE(review): the exact Q-format and iteration scheme are established by
// the consumers (blog64/bexp64) — confirm there before relying on details.
#[rustfmt::skip]
const ATANH_LOG2: &[i64; 32] = &[
  0x32B8_0347_3F7A_D0F4, 0x2F2A_71BD_4E25_E916, 0x2E68_B244_BB93_BA06,
  0x2E39_FB91_98CE_62E4, 0x2E2E_683F_6856_5C8F, 0x2E2B_850B_E207_7FC1,
  0x2E2A_CC58_FE7B_78DB, 0x2E2A_9E2D_E52F_D5F2, 0x2E2A_92A3_38D5_3EEC,
  0x2E2A_8FC0_8F5E_19B6, 0x2E2A_8F07_E51A_485E, 0x2E2A_8ED9_BA8A_F388,
  0x2E2A_8ECE_2FE7_384A, 0x2E2A_8ECB_4D3E_4B1A, 0x2E2A_8ECA_9494_0FE8,
  0x2E2A_8ECA_6669_811D, 0x2E2A_8ECA_5ADE_DD6A, 0x2E2A_8ECA_57FC_347E,
  0x2E2A_8ECA_5743_8A43, 0x2E2A_8ECA_5715_5FB4, 0x2E2A_8ECA_5709_D510,
  0x2E2A_8ECA_5706_F267, 0x2E2A_8ECA_5706_39BD, 0x2E2A_8ECA_5706_0B92,
  0x2E2A_8ECA_5706_0008, 0x2E2A_8ECA_5705_FD25, 0x2E2A_8ECA_5705_FC6C,
  0x2E2A_8ECA_5705_FC3E, 0x2E2A_8ECA_5705_FC33, 0x2E2A_8ECA_5705_FC30,
  0x2E2A_8ECA_5705_FC2F, 0x2E2A_8ECA_5705_FC2F
];
// Computes the binary exponential of logq57.
// input: a log base 2 in Q57 format.
// output: a 64 bit integer in Q0 (no fraction).
// Saturates on both ends: negative logs return 0, and integer parts of 63
// or more return i64::MAX.
// TODO: Mark const once we can use local variables in a const function.
pub(crate) fn bexp64(logq57: i64) -> i64 {
  // Integer part of the log (arithmetic shift preserves the sign).
  let ipart = (logq57 >> 57) as i32;
  if ipart < 0 {
    // 2^x for x < 0 rounds to zero in Q0.
    return 0;
  }
  if ipart >= 63 {
    // Result would not fit in an i64; saturate.
    return 0x7FFF_FFFF_FFFF_FFFF;
  }
  // z is the fractional part of the log in Q62 format.
  // We need 1 bit of headroom since the magnitude can get larger than 1
  // during the iteration, and a sign bit.
  let mut z = logq57 - q57(ipart);
  let mut w: i64;
  if z != 0 {
    // Rust has 128 bit multiplies, so it should be possible to do this
    // faster without losing accuracy.
    z <<= 5;
    // w is the exponential in Q61 format (since it also needs headroom and can
    // get as large as 2.0); we could get another bit if we dropped the sign,
    // but we'll recover that bit later anyway.
    // Ideally this should start out as
    //   \lim_{n->\infty} 2^{61}/\product_{i=1}^n \sqrt{1-2^{-2i}}
    // but in order to guarantee convergence we have to repeat iterations 4,
    // 13 (=3*4+1), and 40 (=3*13+1, etc.), so it winds up somewhat larger.
    w = 0x26A3_D0E4_01DD_846D;
    let mut i: i64 = 0;
    // First CORDIC-style phase: iterations 0..=3, with iteration 4's
    // repetition handled by not doubling z on the final pass.
    loop {
      // mask is 0 when z >= 0 and -1 when z < 0; the (v + mask) ^ mask
      // pattern conditionally negates v without a branch.
      let mask = -((z < 0) as i64);
      w += ((w >> (i + 1)) + mask) ^ mask;
      z -= (ATANH_LOG2[i as usize] + mask) ^ mask;
      // Repeat iteration 4.
      if i >= 3 {
        break;
      }
      z *= 2;
      i += 1;
    }
    // Second phase: iterations up to 12, repeating iteration 13.
    loop {
      let mask = -((z < 0) as i64);
      w += ((w >> (i + 1)) + mask) ^ mask;
      z -= (ATANH_LOG2[i as usize] + mask) ^ mask;
      // Repeat iteration 13.
      if i >= 12 {
        break;
      }
      z *= 2;
      i += 1;
    }
    // Third phase: iterations 13..31, where the table still varies.
    while i < 32 {
      let mask = -((z < 0) as i64);
      w += ((w >> (i + 1)) + mask) ^ mask;
      z = (z - ((ATANH_LOG2[i as usize] + mask) ^ mask)) * 2;
      i += 1;
    }
    // Skip the remaining iterations unless we really require that much
    // precision.
    // We could have bailed out earlier for smaller iparts, but that would
    // require initializing w from a table, as the limit doesn't converge to
    // 61-bit precision until n=30.
    let mut wlo: i32 = 0;
    if ipart > 30 {
      // For these iterations, we just update the low bits, as the high bits
      // can't possibly be affected.
      // OD_ATANH_LOG2 has also converged (it actually did so one iteration
      // earlier, but that's no reason for an extra special case).
      loop {
        let mask = -((z < 0) as i64);
        wlo += (((w >> i) + mask) ^ mask) as i32;
        z -= (ATANH_LOG2[31] + mask) ^ mask;
        // Repeat iteration 40.
        if i >= 39 {
          break;
        }
        z *= 2;
        i += 1;
      }
      while i < 61 {
        let mask = -((z < 0) as i64);
        wlo += (((w >> i) + mask) ^ mask) as i32;
        z = (z - ((ATANH_LOG2[31] + mask) ^ mask)) * 2;
        i += 1;
      }
    }
    // Recombine the low-precision tail with the Q61 accumulator.
    w = (w << 1) + (wlo as i64);
  } else {
    // Exact power of two: 1.0 in Q62.
    w = 1i64 << 62;
  }
  if ipart < 62 {
    // Shift down to Q0 with rounding.
    w = ((w >> (61 - ipart)) + 1) >> 1;
  }
  w
}
// Computes the binary log of w.
// input: a 64-bit integer in Q0 (no fraction).
// output: a 64-bit log in Q57.
// TODO: Mark const once we can use local variables in a const function.
fn blog64(w: i64) -> i64 {
let mut w = w;
if w <= 0 {
return -1;
}
let ipart = w.ilog() as i32 - 1;
if ipart > 61 {
w >>= ipart - 61;
} else {
w <<= 61 - ipart;
}
// z is the fractional part of the log in Q61 format.
let mut z: i64 = 0;
if (w & (w - 1)) != 0 {
// Rust has 128 bit multiplies, so it should be possible to do this
// faster without losing accuracy.
// x and y are the cosh() and sinh(), respectively, in Q61 format.
// We are computing z = 2*atanh(y/x) = 2*atanh((w - 1)/(w + 1)).
let mut x = w + (1i64 << 61);
let mut y = w - (1i64 << 61);
for i in 0..4 {
let mask = -((y < 0) as i64);
z += ((ATANH_LOG2[i as usize] >> i) + mask) ^ mask;
let u = x >> (i + 1);
x -= ((y >> (i + 1)) + mask) ^ mask;
y -= (u + mask) ^ mask;
}
// Repeat iteration 4.
for i in 3..13 {
let mask = -((y < 0) as i64);
z += ((ATANH_LOG2[i as usize] >> i) + mask) ^ mask;
let u = x >> (i + 1);
x -= ((y >> (i + 1)) + mask) ^ mask;
y -= (u + mask) ^ mask;
}
// Repeat iteration 13.
for i in 12..32 {
let mask = -((y < 0) as i64);
z += ((ATANH_LOG2[i as usize] >> i) + mask) ^ mask;
let u = x >> (i + 1);
x -= ((y >> (i + 1)) + mask) ^ mask;
y -= (u + mask) ^ mask;
}
// OD_ATANH_LOG2 has converged.
for i in 32..40 {
let mask = -((y < 0) as i64);
z += ((ATANH_LOG2[31] >> i) + mask) ^ mask;
let u = x >> (i + 1);
x -= ((y >> (i + 1)) + mask) ^ mask;
y -= (u + mask) ^ mask;
}
// Repeat iteration 40.
for i in 39..62 {
let mask = -((y < 0) as i64);
z += ((ATANH_LOG2[31] >> i) + mask) ^ mask;
let u = x >> (i + 1);
x -= ((y >> (i + 1)) + mask) ^ mask;
y -= (u + mask) ^ mask;
}
z = (z + 8) >> 4;
}
q57(ipart) + z
}
// Converts a Q57 fixed-point fraction to Q24 by rounding.
// Drops 33 fractional bits, rounding half away from the lower value.
const fn q57_to_q24(v: i64) -> i32 {
  (((v >> 32) + 1) >> 1) as i32
}
// Converts a Q24 fixed-point fraction to Q57.
// Exact: simply appends 33 zero fractional bits.
const fn q24_to_q57(v: i32) -> i64 {
  (v as i64) << 33
}
// Binary exponentiation of a log_scale with 24-bit fractional precision and
// saturation.
// log_scale: A binary logarithm in Q24 format.
// Return: The binary exponential in Q24 format, saturated to 2**47 - 1 if
//  log_scale was too large.
fn bexp_q24(log_scale: i32) -> i64 {
  // Anything with an integer part of 23 or more cannot be represented in
  // the 47 bits available.
  const SATURATED: i64 = (1i64 << 47) - 1;
  if log_scale >= 23 << 24 {
    return SATURATED;
  }
  // Promote the Q24 log to Q57 and bias by 24 so the Q0 output of bexp64()
  // is effectively a Q24 value.
  let exp_q24 = bexp64(((log_scale as i64) << 33) + q57(24));
  exp_q24.min(SATURATED)
}
// Coarse tangent lookup table in Q12 format, indexed by warp_alpha() below.
// Entry 9 is 4096 (1.0 in Q12), consistent with tan() sampled at uniform
// steps up to nearly 90 degrees — presumably tan(i*pi/36); confirm against
// the Daala original.
#[rustfmt::skip]
const ROUGH_TAN_LOOKUP: &[u16; 18] = &[
     0,   358,   722,  1098,  1491,  1910,
  2365,  2868,  3437,  4096,  4881,  5850,
  7094,  8784, 11254, 15286, 23230, 46817
];
// A digital approximation of a 2nd-order low-pass Bessel follower.
// We use this for rate control because it has fast reaction time, but is
// critically damped.
pub struct IIRBessel2 {
  // Feedback coefficients applied to the two previous outputs (Q24).
  c: [i32; 2],
  // Input gain (Q24).
  g: i32,
  // The two most recent inputs (most recent first).
  x: [i32; 2],
  // The two most recent outputs (most recent first).
  y: [i32; 2],
}
// alpha is Q24 in the range [0,0.5).
// The return value is 5.12.
// TODO: Mark const once we can use local variables in a const function.
fn warp_alpha(alpha: i32) -> i32 {
  // Select the lookup-table segment; the clamp keeps i + 1 within the
  // 18-entry table.
  let i = ((alpha * 36) >> 24).min(16);
  // Widen the endpoints to i64 *before* any arithmetic. The table deltas
  // are all larger than 255, so the previous u16 expression
  // `(t1 - t0) << 8` silently truncated the interpolation slope
  // (e.g. 358 << 8 = 91648, which wraps to 26112 in u16).
  let t0 = ROUGH_TAN_LOOKUP[i as usize] as i64;
  let t1 = ROUGH_TAN_LOOKUP[i as usize + 1] as i64;
  // d is the Q24 distance from the segment's left endpoint.
  let d = (alpha * 36 - (i << 24)) as i64;
  // Linear interpolation between t0 and t1, truncated back to 5.12.
  (((t0 << 32) + ((t1 - t0) << 8) * d) >> 32) as i32
}
// Compute Bessel filter coefficients with the specified delay.
// delay: the desired reaction time in frames; must be positive.
// Return: Filter parameters (c[0], c[1], g), all in Q24 format.
fn iir_bessel2_get_parameters(delay: i32) -> (i32, i32, i32) {
  // This borrows some code from an unreleased version of Postfish.
  // See the recipe at http://unicorn.us.com/alex/2polefilters.html for details
  // on deriving the filter coefficients.
  // alpha is Q24
  let alpha = (1 << 24) / delay;
  // warp is 7.12 (5.12? the max value is 70386 in Q12).
  // The .max(1) guards against a zero warp, which would make k2 zero and
  // the ik2 division below divide by zero.
  let warp = warp_alpha(alpha).max(1) as i64;
  // k1 is 9.12 (6.12?)
  let k1 = 3 * warp;
  // k2 is 16.24 (11.24?)
  let k2 = k1 * warp;
  // d is 16.15 (10.15?)
  let d = ((((1 << 12) + k1) << 12) + k2 + 256) >> 9;
  // a is 0.32, since d is larger than both 1.0 and k2
  let a = (k2 << 23) / d;
  // ik2 is 25.24
  let ik2 = (1i64 << 48) / k2;
  // b1 is Q56; in practice, the integer ranges between -2 and 2.
  let b1 = 2 * a * (ik2 - (1i64 << 24));
  // b2 is Q56; in practice, the integer ranges between -2 and 2.
  let b2 = (1i64 << 56) - ((4 * a) << 24) - b1;
  // All of the filter parameters are Q24.
  (
    ((b1 + (1i64 << 31)) >> 32) as i32,
    ((b2 + (1i64 << 31)) >> 32) as i32,
    ((a + 128) >> 8) as i32,
  )
}
impl IIRBessel2 {
  /// Creates a filter with the given reaction delay, with both the input
  /// and output histories primed to `value` (a steady-state start).
  pub fn new(delay: i32, value: i32) -> IIRBessel2 {
    let (c0, c1, g) = iir_bessel2_get_parameters(delay);
    IIRBessel2 { c: [c0, c1], g, x: [value, value], y: [value, value] }
  }

  /// Re-initializes the filter coefficients for the specified delay.
  ///
  /// The x/y state is deliberately left untouched; only the reaction time
  /// of the filter changes. Altering the time constant of a reactive
  /// filter without altering internal state is something that has to be
  /// done carefully, but our design operates at high enough delays and
  /// with small enough time constant changes to make it safe.
  pub fn reinit(&mut self, delay: i32) {
    let (c0, c1, g) = iir_bessel2_get_parameters(delay);
    self.c = [c0, c1];
    self.g = g;
  }

  /// Feeds one sample through the filter and returns the new output.
  pub fn update(&mut self, x: i32) -> i32 {
    // Widen everything to 64 bits so the Q24 multiply-accumulate cannot
    // overflow.
    let feed = (x as i64 + 2 * self.x[0] as i64 + self.x[1] as i64)
      * self.g as i64;
    let back = self.y[0] as i64 * self.c[0] as i64
      + self.y[1] as i64 * self.c[1] as i64;
    // Round the Q24 accumulator back down to an integer output.
    let out = ((feed + back + (1i64 << 23)) >> 24) as i32;
    // Shift the new sample and output into the two-tap histories.
    self.x = [x, self.x[0]];
    self.y = [out, self.y[0]];
    out
  }
}
// Per-frame metrics recorded in pass 1 and consumed in pass 2.
#[derive(Copy, Clone)]
struct RCFrameMetrics {
  // The log base 2 of the scale factor for this frame in Q24 format.
  log_scale_q24: i32,
  // The frame type from pass 1
  fti: usize,
  // Whether or not the frame was hidden in pass 1
  show_frame: bool,
  // TODO: The input frame number corresponding to this frame in the input.
  // input_frameno: u32
  // TODO vfr: PTS
}
impl RCFrameMetrics {
  /// Returns a zeroed metrics record: scale 0, subtype 0, frame not shown.
  fn new() -> RCFrameMetrics {
    RCFrameMetrics { show_frame: false, fti: 0, log_scale_q24: 0 }
  }
}
// The complete rate-control state, covering both 1-pass and 2-pass modes.
pub struct RCState {
  // The target bit-rate in bits per second.
  target_bitrate: i32,
  // The number of TUs over which to distribute the reservoir usage.
  // We use TUs because in our leaky bucket model, we only add bits to the
  // reservoir on TU boundaries.
  reservoir_frame_delay: i32,
  // Whether or not the reservoir_frame_delay was explicitly specified by the
  // user, or is the default value.
  reservoir_frame_delay_is_set: bool,
  // The maximum quantizer index to allow (for the luma AC coefficients, other
  // quantizers will still be adjusted to match).
  maybe_ac_qi_max: Option<u8>,
  // The minimum quantizer index to allow (for the luma AC coefficients).
  ac_qi_min: u8,
  // Will we drop frames to meet bitrate requirements?
  drop_frames: bool,
  // Do we respect the maximum reservoir fullness?
  cap_overflow: bool,
  // Can the reservoir go negative?
  cap_underflow: bool,
  // The log of the first-pass base quantizer.
  pass1_log_base_q: i64,
  // Two-pass mode state.
  // PASS_SINGLE => 1-pass encoding.
  // PASS_1 => 1st pass of 2-pass encoding.
  // PASS_2 => 2nd pass of 2-pass encoding.
  // PASS_2_PLUS_1 => 2nd pass of 2-pass encoding, but also emitting pass 1
  //  data again.
  twopass_state: i32,
  // The log of the number of pixels in a frame in Q57 format.
  log_npixels: i64,
  // The target average bits per Temporal Unit (input frame).
  bits_per_tu: i64,
  // The current bit reservoir fullness (bits available to be used).
  reservoir_fullness: i64,
  // The target buffer fullness.
  // This is where we'd like to be by the last keyframe that appears in the
  // next reservoir_frame_delay frames.
  reservoir_target: i64,
  // The maximum buffer fullness (total size of the buffer).
  reservoir_max: i64,
  // The log of estimated scale factor for the rate model in Q57 format.
  //
  // TODO: Convert to Q23 or figure out a better way to avoid overflow
  // once 2-pass mode is introduced, if required.
  log_scale: [i64; FRAME_NSUBTYPES],
  // The exponent used in the rate model in Q6 format.
  exp: [u8; FRAME_NSUBTYPES],
  // The log of an estimated scale factor used to obtain the real framerate,
  // for VFR sources or, e.g., 12 fps content doubled to 24 fps, etc.
  // TODO vfr: log_vfr_scale: i64,
  // Second-order lowpass filters to track scale and VFR.
  scalefilter: [IIRBessel2; FRAME_NSUBTYPES],
  // TODO vfr: vfrfilter: IIRBessel2,
  // The number of frames of each type we have seen, for filter adaptation
  // purposes.
  // These are only 32 bits to guarantee that we can sum the scales over the
  // whole file without overflow in a 64-bit int.
  // That limits us to 2.268 years at 60 fps (minus 33% with re-ordering).
  nframes: [i32; FRAME_NSUBTYPES + 1],
  // Per-inter-subtype filter delays (the keyframe filter delay is fixed).
  inter_delay: [i32; FRAME_NSUBTYPES - 1],
  inter_delay_target: i32,
  // The total accumulated estimation bias.
  rate_bias: i64,
  // The number of (non-Show Existing Frame) frames that have been encoded.
  nencoded_frames: i64,
  // The number of Show Existing Frames that have been emitted.
  nsef_frames: i64,
  // Buffer for current frame metrics in pass 1.
  pass1_buffer: [u8; TWOPASS_HEADER_SZ],
  // Whether or not the user has retrieved the pass 1 data for the last frame.
  // For PASS_1 or PASS_2_PLUS_1 encoding, this is set to false after each
  // frame is encoded, and must be set to true by calling twopass_out() before
  // the next frame can be encoded.
  pass1_data_retrieved: bool,
  // Marks whether or not the user has retrieved the summary data at the end of
  // the encode.
  pass1_summary_retrieved: bool,
  // Buffer for current frame metrics in pass 2.
  pass2_buffer: [u8; TWOPASS_HEADER_SZ],
  // Whether or not the user has provided enough data to encode in the second
  // pass.
  // For PASS_2 or PASS_2_PLUS_1 encoding, this is set to false after each
  // frame, and must be set to true by calling twopass_in() before the next
  // frame can be encoded.
  pass2_data_ready: bool,
  // The current byte position in the frame metrics buffer.
  pass2_buffer_pos: usize,
  // In pass 2, this represents the number of bytes that are available in the
  // input buffer.
  pass2_buffer_fill: usize,
  // TODO: Add a way to force the next frame to be a keyframe in 2-pass mode.
  // Right now we are relying on keyframe detection to detect the same
  // keyframes.
  // The metrics for the previous frame.
  prev_metrics: RCFrameMetrics,
  // The metrics for the current frame.
  cur_metrics: RCFrameMetrics,
  // The buffered metrics for future frames.
  frame_metrics: Vec<RCFrameMetrics>,
  // The total number of frames still in use in the circular metric buffer.
  nframe_metrics: usize,
  // The index of the current frame in the circular metric buffer.
  frame_metrics_head: usize,
  // The TU count encoded so far.
  ntus: i32,
  // The TU count for the whole file.
  ntus_total: i32,
  // The remaining TU count.
  ntus_left: i32,
  // The frame count of each frame subtype in the whole file.
  nframes_total: [i32; FRAME_NSUBTYPES + 1],
  // The sum of those counts.
  nframes_total_total: i32,
  // The number of frames of each subtype yet to be processed.
  nframes_left: [i32; FRAME_NSUBTYPES + 1],
  // The sum of the scale values for each frame subtype.
  scale_sum: [i64; FRAME_NSUBTYPES],
  // The number of TUs represented by the current scale sums.
  scale_window_ntus: i32,
  // The frame count of each frame subtype in the current scale window.
  scale_window_nframes: [i32; FRAME_NSUBTYPES + 1],
  // The sum of the scale values for each frame subtype in the current window.
  scale_window_sum: [i64; FRAME_NSUBTYPES],
}
// TODO: Separate qi values for each color plane.
pub struct QuantizerParameters {
  // The full-precision, unmodulated log quantizer upon which our modulated
  // quantizer indices are based.
  // This is only used to limit sudden quality changes from frame to frame, and
  // as such is not adjusted when we encounter buffer overrun or underrun.
  pub log_base_q: i64,
  // The full-precision log quantizer modulated by the current frame type upon
  // which our quantizer indices are based (including any adjustments to
  // prevent buffer overrun or underrun).
  // This is used when estimating the scale parameter once we know the actual
  // bit usage of a frame.
  pub log_target_q: i64,
  // Quantizer indices for the DC coefficients, one per plane (Y, U, V).
  pub dc_qi: [u8; 3],
  // Quantizer indices for the AC coefficients, one per plane (Y, U, V).
  pub ac_qi: [u8; 3],
  // The Lagrange multiplier derived from log_target_q (see
  // new_from_log_q() below).
  pub lambda: f64,
}
// Converts a Q57 base-2 log into a natural-log exponent with an extra
// factor of 2, so that exp(x * Q57_SQUARE_EXP_SCALE) == 2^(2*x/2^57),
// i.e. the square of the linear quantizer.
const Q57_SQUARE_EXP_SCALE: f64 =
  (2.0 * ::std::f64::consts::LN_2) / ((1i64 << 57) as f64);
// Daala style log-offset for chroma quantizers.
// Returns the Q57 log-domain offsets (Cb, Cr) to add to the luma target.
fn chroma_offset(log_target_q: i64) -> (i64, i64) {
  // Base offsets: blog64(7) - blog64(4) and blog64(5) - blog64(4).
  const CB_BASE: i64 = 0x19D_5D9F_D501_0B37;
  const CR_BASE: i64 = 0xA4_D3C2_5E68_DC58;
  // Clamp negative logs to zero so the offsets never exceed their bases.
  let q = log_target_q.max(0);
  // Shrink both offsets with a gradient of ~0.266 (1/4 + 1/64), optimized
  // for CIEDE2000+PSNR on subset3.
  let shrink = (q >> 2) + (q >> 6);
  (CB_BASE - shrink, CR_BASE - shrink)
}
impl QuantizerParameters {
  /// Builds the full parameter set from base and target log quantizers.
  ///
  /// `log_base_q` and `log_target_q` are Q57 logs normalized to 8-bit
  /// pixels; `bit_depth` is the actual pixel depth of the encode.
  fn new_from_log_q(
    log_base_q: i64, log_target_q: i64, bit_depth: usize,
  ) -> QuantizerParameters {
    // Convert back from the 8-bit-normalized log domain to this bit depth.
    let scale = q57(QSCALE + bit_depth as i32 - 8);
    // Daala-style log-domain offsets for the chroma planes.
    let (offset_u, offset_v) = chroma_offset(log_target_q);
    // Per-plane "linear" quantizers: luma, then the two chroma planes.
    let quantizer = bexp64(log_target_q + scale);
    let quantizer_u = bexp64(log_target_q + offset_u + scale);
    let quantizer_v = bexp64(log_target_q + offset_v + scale);
    // TODO: Allow lossless mode; i.e. qi == 0.
    let dc_qi = [
      select_dc_qi(quantizer, bit_depth).max(1),
      select_dc_qi(quantizer_u, bit_depth).max(1),
      select_dc_qi(quantizer_v, bit_depth).max(1),
    ];
    let ac_qi = [
      select_ac_qi(quantizer, bit_depth).max(1),
      select_ac_qi(quantizer_u, bit_depth).max(1),
      select_ac_qi(quantizer_v, bit_depth).max(1),
    ];
    // Lagrange multiplier proportional to the squared linear quantizer.
    let lambda = (::std::f64::consts::LN_2 / 6.0)
      * ((log_target_q as f64) * Q57_SQUARE_EXP_SCALE).exp();
    QuantizerParameters { log_base_q, log_target_q, dc_qi, ac_qi, lambda }
  }
}
// The parameters that are required by twopass_out().
// We need a reference to the enclosing ContextInner to compute these, but
// twopass_out() cannot take such a reference, since it needs a &mut self
// reference to do its job, and RCState is contained inside ContextInner.
// In practice we don't modify anything in RCState until after we're finished
// reading from ContextInner, but Rust's borrow checker does not have a way to
// express that.
// There's probably a cleaner way to do this, but going with something simple
// for now, since this is not exposed in the public API.
pub(crate) struct TwoPassOutParams {
  // The fixed base quantizer used during the first pass.
  pass1_log_base_q: i64,
  // Presumably set once the encoder has no more frames to process —
  // confirm at the twopass_out() call site.
  done_processing: bool,
}
impl RCState {
pub fn new(
frame_width: i32, frame_height: i32, framerate_num: i64,
framerate_den: i64, target_bitrate: i32, maybe_ac_qi_max: Option<u8>,
ac_qi_min: u8, max_key_frame_interval: i32,
maybe_reservoir_frame_delay: Option<i32>,
) -> Option<RCState> {
// The default buffer size is set equal to 1.5x the keyframe interval, or 240
// frames; whichsever is smaller.
// For user set values, we enforce a minimum of 12.
// The interval is short enough to allow reaction, but long enough to allow
// looking into the next GOP (avoiding the case where the last frames
// before an I-frame get starved), in most cases.
// The 12 frame minimum gives us some chance to distribute bit estimation
// errors in the worst case.
let reservoir_frame_delay = if maybe_reservoir_frame_delay.is_some() {
maybe_reservoir_frame_delay.unwrap().max(12)
} else {
((max_key_frame_interval.checked_mul(3)?) >> 1).max(240)
};
// TODO: What are the limits on these?
let npixels = (frame_width as i64) * (frame_height as i64);
// Insane framerates or frame sizes mean insane bitrates.
// Let's not get carried away.
// We also subtract 16 bits from each temporal unit to account for the
// temporal delimeter, whose bits are not included in the frame sizes
// reported to update_state().
// TODO: Support constraints imposed by levels.
let bits_per_tu = clamp(
(target_bitrate as i64)
.checked_mul(framerate_den)?
.checked_div(framerate_num)?,
40,
0x4000_0000_0000,
) - (TEMPORAL_DELIMITER.len() * 8) as i64;
let reservoir_max =
bits_per_tu.checked_mul(reservoir_frame_delay as i64)?;
// Start with a buffer fullness and fullness target of 50%.
let reservoir_target = (reservoir_max + 1) >> 1;
// Pick exponents and initial scales for quantizer selection.
let ibpp = npixels / bits_per_tu;
// These have been derived by encoding many clips at every quantizer
// and running a piecewise-linear regression in binary log space.
let (i_exp, i_log_scale) = if ibpp < 1 {
(48u8, blog64(36) - q57(QSCALE))
} else if ibpp < 4 {
(61u8, blog64(55) - q57(QSCALE))
} else {
(77u8, blog64(129) - q57(QSCALE))
};
let (p_exp, p_log_scale) = if ibpp < 2 {
(69u8, blog64(32) - q57(QSCALE))
} else if ibpp < 139 {
(104u8, blog64(84) - q57(QSCALE))
} else {
(83u8, blog64(19) - q57(QSCALE))
};
let (b0_exp, b0_log_scale) = if ibpp < 2 {
(84u8, blog64(30) - q57(QSCALE))
} else if ibpp < 92 {
(120u8, blog64(68) - q57(QSCALE))
} else {
(68u8, blog64(4) - q57(QSCALE))
};
let (b1_exp, b1_log_scale) = if ibpp < 2 {
(87u8, blog64(27) - q57(QSCALE))
} else if ibpp < 126 {
(139u8, blog64(84) - q57(QSCALE))
} else {
(61u8, blog64(1) - q57(QSCALE))
};
// TODO: Add support for "golden" P frames.
Some(RCState {
target_bitrate,
reservoir_frame_delay,
reservoir_frame_delay_is_set: maybe_reservoir_frame_delay.is_some(),
maybe_ac_qi_max,
ac_qi_min,
drop_frames: false,
cap_overflow: true,
cap_underflow: false,
pass1_log_base_q: 0,
twopass_state: PASS_SINGLE,
log_npixels: blog64(npixels),
bits_per_tu,
reservoir_fullness: reservoir_target,
reservoir_target,
reservoir_max,
log_scale: [i_log_scale, p_log_scale, b0_log_scale, b1_log_scale],
exp: [i_exp, p_exp, b0_exp, b1_exp],
scalefilter: [
IIRBessel2::new(4, q57_to_q24(i_log_scale)),
IIRBessel2::new(INTER_DELAY_TARGET_MIN, q57_to_q24(p_log_scale)),
IIRBessel2::new(INTER_DELAY_TARGET_MIN, q57_to_q24(b0_log_scale)),
IIRBessel2::new(INTER_DELAY_TARGET_MIN, q57_to_q24(b1_log_scale)),
],
// TODO VFR
nframes: [0; FRAME_NSUBTYPES + 1],
inter_delay: [INTER_DELAY_TARGET_MIN; FRAME_NSUBTYPES - 1],
inter_delay_target: reservoir_frame_delay >> 1,
rate_bias: 0,
nencoded_frames: 0,
nsef_frames: 0,
pass1_buffer: [0; TWOPASS_HEADER_SZ],
pass1_data_retrieved: false,
pass1_summary_retrieved: false,
pass2_buffer: [0; TWOPASS_HEADER_SZ],
pass2_data_ready: false,
pass2_buffer_pos: 0,
pass2_buffer_fill: 0,
prev_metrics: RCFrameMetrics::new(),
cur_metrics: RCFrameMetrics::new(),
frame_metrics: Vec::new(),
nframe_metrics: 0,
frame_metrics_head: 0,
ntus: 0,
ntus_total: 0,
ntus_left: 0,
nframes_total: [0; FRAME_NSUBTYPES + 1],
nframes_total_total: 0,
nframes_left: [0; FRAME_NSUBTYPES + 1],
scale_sum: [0; FRAME_NSUBTYPES],
scale_window_ntus: 0,
scale_window_nframes: [0; FRAME_NSUBTYPES + 1],
scale_window_sum: [0; FRAME_NSUBTYPES],
})
}
// TODO: Separate quantizers for Cb and Cr.
// Selects the quantizer parameters for the frame at output_frameno.
// fti: the frame subtype index.
// maybe_prev_log_base_q: the previous frame's base quantizer (Q57), used to
//  limit sudden quality changes; None for the initial frames.
// Takes &self only: no rate-control state is modified here.
pub(crate) fn select_qi<T: Pixel>(
  &self, ctx: &ContextInner<T>, output_frameno: u64, fti: usize,
  maybe_prev_log_base_q: Option<i64>,
) -> QuantizerParameters {
  // Is rate control active?
  if self.target_bitrate <= 0 {
    // Rate control is not active.
    // Derive quantizer directly from frame type.
    // TODO: Rename "quantizer" something that indicates it is a quantizer
    //  index, and move it somewhere more sensible (or choose a better way to
    //  parameterize a "quality" configuration parameter).
    let base_qi = ctx.config.quantizer;
    let bit_depth = ctx.config.bit_depth;
    // We use the AC quantizer as the source quantizer since its quantizer
    // tables have unique entries, while the DC tables do not.
    let ac_quantizer = ac_q(base_qi as u8, 0, bit_depth) as i64;
    // Pick the nearest DC entry since an exact match may be unavailable.
    let dc_qi = select_dc_qi(ac_quantizer, bit_depth);
    let dc_quantizer = dc_q(dc_qi as u8, 0, bit_depth) as i64;
    // Get the log quantizers as Q57.
    let log_ac_q = blog64(ac_quantizer) - q57(QSCALE + bit_depth as i32 - 8);
    let log_dc_q = blog64(dc_quantizer) - q57(QSCALE + bit_depth as i32 - 8);
    // Target the midpoint of the chosen entries.
    let log_base_q = (log_ac_q + log_dc_q + 1) >> 1;
    // Adjust the quantizer for the frame type, result is Q57:
    let log_q = ((log_base_q + (1i64 << 11)) >> 12) * (MQP_Q12[fti] as i64)
      + DQP_Q57[fti];
    QuantizerParameters::new_from_log_q(log_base_q, log_q, bit_depth)
  } else {
    let mut nframes: [i32; FRAME_NSUBTYPES + 1] = [0; FRAME_NSUBTYPES + 1];
    let mut log_scale: [i64; FRAME_NSUBTYPES] = self.log_scale;
    let mut reservoir_tus = self.reservoir_frame_delay.min(self.ntus_left);
    let mut reservoir_frames = 0;
    // Start from the low-pass filtered scale estimate (Q24 shifted to Q57).
    let mut log_cur_scale = (self.scalefilter[fti].y[0] as i64) << 33;
    match self.twopass_state {
      // First pass of 2-pass mode: use a fixed base quantizer.
      PASS_1 => {
        // Adjust the quantizer for the frame type, result is Q57:
        let log_q = ((self.pass1_log_base_q + (1i64 << 11)) >> 12)
          * (MQP_Q12[fti] as i64)
          + DQP_Q57[fti];
        return QuantizerParameters::new_from_log_q(
          self.pass1_log_base_q,
          log_q,
          ctx.config.bit_depth,
        );
      }
      // Second pass of 2-pass mode: we know exactly how much of each frame
      // type there is in the current buffer window, and have estimates for
      // the scales.
      PASS_2 | PASS_2_PLUS_1 => {
        let mut scale_window_sum: [i64; FRAME_NSUBTYPES] =
          self.scale_window_sum;
        let mut scale_window_nframes: [i32; FRAME_NSUBTYPES + 1] =
          self.scale_window_nframes;
        // Intentionally exclude Show Existing Frame frames from this.
        for ftj in 0..FRAME_NSUBTYPES {
          reservoir_frames += scale_window_nframes[ftj];
        }
        // If we're approaching the end of the file, add some slack to keep
        // us from slamming into a rail.
        // Our rate accuracy goes down, but it keeps the result sensible.
        // We position the target where the first forced keyframe beyond the
        // end of the file would be (for consistency with 1-pass mode).
        // TODO: let mut buf_pad = self.reservoir_frame_delay.min(...);
        // if buf_delay < buf_pad {
        //   buf_pad -= buf_delay;
        // }
        // else ...
        // Otherwise, search for the last keyframe in the buffer window and
        // target that.
        // Currently we only do this when using a finite buffer.
        // We could save the position of the last keyframe in the stream in
        // the summary data and do it with a whole-file buffer as well, but
        // it isn't likely to make a difference.
        if !self.frame_metrics.is_empty() {
          // fm_tail is one past the last used slot in the circular buffer.
          let mut fm_tail = self.frame_metrics_head + self.nframe_metrics;
          if fm_tail >= self.frame_metrics.len() {
            fm_tail -= self.frame_metrics.len();
          }
          // Scan backwards from the tail looking for the last keyframe.
          let mut fmi = fm_tail;
          loop {
            // Step back one slot, wrapping around the circular buffer.
            if fmi == 0 {
              fmi += self.frame_metrics.len();
            }
            fmi -= 1;
            // Stop before we remove the first frame.
            if fmi == self.frame_metrics_head {
              break;
            }
            // If we find a keyframe, remove it and everything past it.
            if self.frame_metrics[fmi].fti == FRAME_SUBTYPE_I {
              while fmi != fm_tail {
                let m = &self.frame_metrics[fmi];
                let ftj = m.fti;
                scale_window_nframes[ftj] -= 1;
                if ftj < FRAME_NSUBTYPES {
                  scale_window_sum[ftj] -= bexp_q24(m.log_scale_q24);
                  reservoir_frames -= 1;
                }
                if m.show_frame {
                  reservoir_tus -= 1;
                }
                fmi += 1;
                if fmi >= self.frame_metrics.len() {
                  fmi = 0;
                }
              }
              // And stop scanning backwards.
              break;
            }
          }
        }
        nframes = scale_window_nframes;
        // If we're not using the same frame type as in pass 1 (because
        // someone changed some encoding parameters), remove that scale
        // estimate.
        // We'll add a replacement for the correct frame type below.
        if self.cur_metrics.fti != fti {
          scale_window_nframes[self.cur_metrics.fti] -= 1;
          if self.cur_metrics.fti != FRAME_SUBTYPE_SEF {
            scale_window_sum[self.cur_metrics.fti] -=
              bexp_q24(self.cur_metrics.log_scale_q24);
          }
        } else {
          log_cur_scale = (self.cur_metrics.log_scale_q24 as i64) << 33;
        }
        // If we're approaching the end of the file, add some slack to keep
        // us from slamming into a rail.
        // Our rate accuracy goes down, but it keeps the result sensible.
        // We position the target where the first forced keyframe beyond the
        // end of the file would be (for consistency with 1-pass mode).
        if reservoir_tus >= self.ntus_left
          && self.ntus_total as u64
            > ctx.gop_input_frameno_start[&output_frameno]
        {
          let nfinal_gop_tus = self.ntus_total
            - (ctx.gop_input_frameno_start[&output_frameno] as i32);
          if ctx.config.max_key_frame_interval as i32 > nfinal_gop_tus {
            let reservoir_pad = (ctx.config.max_key_frame_interval as i32
              - nfinal_gop_tus)
              .min(self.reservoir_frame_delay - reservoir_tus);
            let (guessed_reservoir_frames, guessed_reservoir_tus) = ctx
              .guess_frame_subtypes(
                &mut nframes,
                reservoir_tus + reservoir_pad,
              );
            reservoir_frames = guessed_reservoir_frames;
            reservoir_tus = guessed_reservoir_tus;
          }
        }
        // Blend in the low-pass filtered scale according to how many
        // frames of each type we need to add compared to the actual sums in
        // our window.
        for ftj in 0..FRAME_NSUBTYPES {
          let scale = scale_window_sum[ftj]
            + bexp_q24(self.scalefilter[ftj].y[0])
              * (nframes[ftj] - scale_window_nframes[ftj]) as i64;
          log_scale[ftj] = if nframes[ftj] > 0 {
            blog64(scale) - blog64(nframes[ftj] as i64) - q57(24)
          } else {
            -self.log_npixels
          };
        }
      }
      // Single pass.
      _ => {
        // Figure out how to re-distribute bits so that we hit our fullness
        // target before the last keyframe in our current buffer window
        // (after the current frame), or the end of the buffer window,
        // whichever comes first.
        // Count the various types and classes of frames.
        let (guessed_reservoir_frames, guessed_reservoir_tus) =
          ctx.guess_frame_subtypes(&mut nframes, self.reservoir_frame_delay);
        reservoir_frames = guessed_reservoir_frames;
        reservoir_tus = guessed_reservoir_tus;
        // TODO: Scale for VFR.
      }
    }
    // If we've been missing our target, add a penalty term.
    let rate_bias = (self.rate_bias / (self.nencoded_frames as i64 + 100))
      * (reservoir_frames as i64);
    // rate_total is the total bits available over the next
    //  reservoir_tus TUs.
    let rate_total = self.reservoir_fullness - self.reservoir_target
      + rate_bias
      + (reservoir_tus as i64) * self.bits_per_tu;
    // Find a target quantizer that meets our rate target for the
    //  specific mix of frame types we'll have over the next
    //  reservoir_frame frames.
    // We model the rate<->quantizer relationship as
    //  rate = scale*(quantizer**-exp)
    // In this case, we have our desired rate, an exponent selected in
    //  setup, and a scale that's been measured over our frame history,
    //  so we're solving for the quantizer.
    // Exponentiation with arbitrary exponents is expensive, so we work
    //  in the binary log domain (binary exp and log aren't too bad):
    //  rate = exp2(log2(scale) - log2(quantizer)*exp)
    // There's no easy closed form solution, so we bisection searh for it.
    let bit_depth = ctx.config.bit_depth;
    // TODO: Proper handling of lossless.
    let mut log_qlo = blog64(ac_q(self.ac_qi_min, 0, bit_depth) as i64)
      - q57(QSCALE + bit_depth as i32 - 8);
    // The AC quantizer tables map to values larger than the DC quantizer
    //  tables, so we use that as the upper bound to make sure we can use
    //  the full table if needed.
    let mut log_qhi =
      blog64(ac_q(self.maybe_ac_qi_max.unwrap_or(255), 0, bit_depth) as i64)
        - q57(QSCALE + bit_depth as i32 - 8);
    let mut log_base_q = (log_qlo + log_qhi) >> 1;
    // Bisection search: narrow [log_qlo, log_qhi] until the predicted bit
    // usage matches rate_total (or the interval collapses).
    while log_qlo < log_qhi {
      // Count bits contributed by each frame type using the model.
      let mut bits = 0i64;
      for ftj in 0..FRAME_NSUBTYPES {
        // Modulate base quantizer by frame type.
        let log_q = ((log_base_q + (1i64 << 11)) >> 12)
          * (MQP_Q12[ftj] as i64)
          + DQP_Q57[ftj];
        // All the fields here are Q57 except for the exponent, which is
        //  Q6.
        bits += (nframes[ftj] as i64)
          * bexp64(
            log_scale[ftj] + self.log_npixels
              - ((log_q + 32) >> 6) * (self.exp[ftj] as i64),
          );
      }
      // The number of bits for Show Existing Frame frames is constant.
      bits += (nframes[FRAME_SUBTYPE_SEF] as i64) * SEF_BITS;
      let diff = bits - rate_total;
      if diff > 0 {
        log_qlo = log_base_q + 1;
      } else if diff < 0 {
        log_qhi = log_base_q - 1;
      } else {
        break;
      }
      log_base_q = (log_qlo + log_qhi) >> 1;
    }
    // If this was not one of the initial frames, limit the change in
    //  base quantizer to within [0.8*Q, 1.2*Q] where Q is the previous
    //  frame's base quantizer.
    if let Some(prev_log_base_q) = maybe_prev_log_base_q {
      log_base_q = clamp(
        log_base_q,
        prev_log_base_q - 0xA4_D3C2_5E68_DC58,
        prev_log_base_q + 0xA4_D3C2_5E68_DC58,
      );
    }
    // Modulate base quantizer by frame type.
    let mut log_q = ((log_base_q + (1i64 << 11)) >> 12)
      * (MQP_Q12[fti] as i64)
      + DQP_Q57[fti];
    // The above allocation looks only at the total rate we'll accumulate
    //  in the next reservoir_frame_delay frames.
    // However, we could overflow the bit reservoir on the very next
    //  frame.
    // Check for that here if we're not using a soft target.
    if self.cap_overflow {
      // Allow 3% of the buffer for prediction error.
      // This should be plenty, and we don't mind if we go a bit over.
      // We only want to keep these bits from being completely wasted.
      let margin = (self.reservoir_max + 31) >> 5;
      // We want to use at least this many bits next frame.
      let soft_limit = self.reservoir_fullness + self.bits_per_tu
        - (self.reservoir_max - margin);
      if soft_limit > 0 {
        let log_soft_limit = blog64(soft_limit);
        // If we're predicting we won't use that many bits...
        // TODO: When using frame re-ordering, we should include the rate
        //  for all of the frames in the current TU.
        // When there is more than one frame, there will be no direct
        //  solution for the required adjustment, however.
        let log_scale_pixels = log_cur_scale + self.log_npixels;
        let exp = self.exp[fti] as i64;
        let mut log_q_exp = ((log_q + 32) >> 6) * exp;
        if log_scale_pixels - log_q_exp < log_soft_limit {
          // Scale the adjustment based on how far into the margin we are.
          log_q_exp += ((log_scale_pixels - log_soft_limit - log_q_exp)
            >> 32)
            * ((margin.min(soft_limit) << 32) / margin);
          log_q = ((log_q_exp + (exp >> 1)) / exp) << 6;
        }
      }
    }
    // We just checked we don't overflow the reservoir next frame, now
    //  check we don't underflow and bust the budget (when not using a
    //  soft target).
    if self.maybe_ac_qi_max.is_none() {
      // Compute the maximum number of bits we can use in the next frame.
      // Allow 50% of the rate for a single frame for prediction error.
      // This may not be enough for keyframes or sudden changes in
      //  complexity.
      let log_hard_limit =
        blog64(self.reservoir_fullness + (self.bits_per_tu >> 1));
      // If we're predicting we'll use more than this...
      // TODO: When using frame re-ordering, we should include the rate
      //  for all of the frames in the current TU.
      // When there is more than one frame, there will be no direct
      //  solution for the required adjustment, however.
      let log_scale_pixels = log_cur_scale + self.log_npixels;
      let exp = self.exp[fti] as i64;
      let mut log_q_exp = ((log_q + 32) >> 6) * exp;
      if log_scale_pixels - log_q_exp > log_hard_limit {
        // Force the target to hit our limit exactly.
        log_q_exp = log_scale_pixels - log_hard_limit;
        log_q = ((log_q_exp + (exp >> 1)) / exp) << 6;
        // If that target is unreasonable, oh well; we'll have to drop.
      }
    }
    QuantizerParameters::new_from_log_q(log_base_q, log_q, bit_depth)
  }
}
pub fn update_state(
&mut self, bits: i64, fti: usize, show_frame: bool, log_target_q: i64,
trial: bool, droppable: bool,
) -> bool {
if trial {
assert!(self.needs_trial_encode(fti));
assert!(bits > 0);
}
let mut dropped = false;
// Update rate control only if rate control is active.
if self.target_bitrate > 0 {
let mut estimated_bits = 0;
let mut bits = bits;
let mut droppable = droppable;
let mut log_scale = q57(-64);
// Drop frames is also disabled for now in the case of infinite-buffer
// two-pass mode.
if !self.drop_frames
|| fti == FRAME_SUBTYPE_SEF
|| (self.twopass_state == PASS_2
|| self.twopass_state == PASS_2_PLUS_1)
&& !self.frame_metrics.is_empty()
{
droppable = false;
}
if fti == FRAME_SUBTYPE_SEF {
debug_assert!(bits == SEF_BITS);
debug_assert!(show_frame);
// Please don't make trial encodes of a SEF.
debug_assert!(!trial);
estimated_bits = SEF_BITS;
self.nsef_frames += 1;
} else {
let log_q_exp = ((log_target_q + 32) >> 6) * (self.exp[fti] as i64);
let prev_log_scale = self.log_scale[fti];
if bits <= 0 {
// We didn't code any blocks in this frame.
bits = 0;
dropped = true;
// TODO: Adjust VFR rate based on drop count.
} else {
// Compute the estimated scale factor for this frame type.
let log_bits = blog64(bits);
log_scale = (log_bits - self.log_npixels + log_q_exp).min(q57(16));
estimated_bits =
bexp64(prev_log_scale + self.log_npixels - log_q_exp);
if !trial {
self.nencoded_frames += 1;
}
}
}
let log_scale_q24 = q57_to_q24(log_scale);
// Special two-pass processing.
if self.twopass_state == PASS_2 || self.twopass_state == PASS_2_PLUS_1 {
// Pass 2 mode:
if !trial {
// Move the current metrics back one frame.
self.prev_metrics = self.cur_metrics;
// Back out the last frame's statistics from the sliding window.
let ftj = self.prev_metrics.fti;
self.nframes_left[ftj] -= 1;
self.scale_window_nframes[ftj] -= 1;
if ftj < FRAME_NSUBTYPES {
self.scale_window_sum[ftj] -=
bexp_q24(self.prev_metrics.log_scale_q24);
}
if self.prev_metrics.show_frame {
self.ntus_left -= 1;
self.scale_window_ntus -= 1;
}
// Free the corresponding entry in the circular buffer.
if !self.frame_metrics.is_empty() {
self.nframe_metrics -= 1;
self.frame_metrics_head += 1;
if self.frame_metrics_head >= self.frame_metrics.len() {
self.frame_metrics_head = 0;
}
}
// Mark us ready for the next 2-pass packet.
self.pass2_data_ready = false;
// Update state, so the user doesn't have to keep calling
// twopass_in() after they've fed in all the data when we're using
// a finite buffer.
self.twopass_in(None).unwrap_or(0);
}
}
if self.twopass_state == PASS_1 || self.twopass_state == PASS_2_PLUS_1 {
// Pass 1 mode: save the metrics for this frame.
self.prev_metrics.log_scale_q24 = log_scale_q24;
self.prev_metrics.fti = fti;
self.prev_metrics.show_frame = show_frame;
self.pass1_data_retrieved = false;
}
// Common to all passes:
if fti != FRAME_SUBTYPE_SEF && bits > 0 {
// If this is the first example of the given frame type we've seen,
// we immediately replace the default scale factor guess with the
// estimate we just computed using the first frame.
if trial || self.nframes[fti] <= 0 {
let f = &mut self.scalefilter[fti];
let x = log_scale_q24;
f.x[0] = x;
f.x[1] = x;
f.y[0] = x;
f.y[1] = x;
self.log_scale[fti] = log_scale;
// TODO: Duplicate regular P frame state for first golden P frame.
} else {
// Lengthen the time constant for the inter filters as we collect
// more frame statistics, until we reach our target.
if fti > 0
&& self.inter_delay[fti - 1] < self.inter_delay_target
&& self.nframes[fti] >= self.inter_delay[fti - 1]
{
self.inter_delay[fti - 1] += 1;
self.scalefilter[fti].reinit(self.inter_delay[fti - 1]);
}
// Update the low-pass scale filter for this frame type regardless
// of whether or not we will ultimately drop this frame.
self.log_scale[fti] =
q24_to_q57(self.scalefilter[fti].update(log_scale_q24));
}
// If this frame busts our budget, it must be dropped.
if droppable && self.reservoir_fullness + self.bits_per_tu < bits {
// TODO: Adjust VFR rate based on drop count.
bits = 0;
dropped = true;
} else {
// TODO: Update a low-pass filter to estimate the "real" frame rate
// taking timestamps and drops into account.
// This is only done if the frame is coded, as it needs the final
// count of dropped frames.
}
}
if !trial {
// Increment the frame count for filter adaptation purposes.
if !trial && self.nframes[fti] < ::std::i32::MAX {
self.nframes[fti] += 1;
}
self.reservoir_fullness -= bits;
if show_frame {
self.reservoir_fullness += self.bits_per_tu;
// TODO: Properly account for temporal delimeter bits.
}
// If we're too quick filling the buffer and overflow is capped, that
// rate is lost forever.
if self.cap_overflow {
self.reservoir_fullness =
self.reservoir_fullness.min(self.reservoir_max);
}
// If we're too quick draining the buffer and underflow is capped,
// don't try to make up that rate later.
if self.cap_underflow {
self.reservoir_fullness = self.reservoir_fullness.max(0);
}
// Adjust the bias for the real bits we've used.
self.rate_bias += estimated_bits - bits;
}
}
dropped
}
pub fn needs_trial_encode(&self, fti: usize) -> bool {
self.target_bitrate > 0 && self.nframes[fti] == 0
}
pub(crate) fn ready(&self) -> bool {
match self.twopass_state {
PASS_SINGLE => true,
PASS_1 => self.pass1_data_retrieved,
PASS_2 => self.pass2_data_ready,
_ => self.pass1_data_retrieved && self.pass2_data_ready,
}
}
fn buffer_val(&mut self, val: i64, bytes: usize, cur_pos: usize) -> usize {
let mut val = val;
let mut bytes = bytes;
let mut cur_pos = cur_pos;
while bytes > 0 {
bytes -= 1;
self.pass1_buffer[cur_pos] = val as u8;
cur_pos += 1;
val >>= 8;
}
cur_pos
}
pub(crate) fn get_twopass_out_params<T: Pixel>(
&self, ctx: &ContextInner<T>, output_frameno: u64,
) -> TwoPassOutParams {
let mut pass1_log_base_q = 0;
let mut done_processing = false;
if !self.pass1_data_retrieved {
if self.twopass_state == PASS_SINGLE {
pass1_log_base_q = self
.select_qi(ctx, output_frameno, FRAME_SUBTYPE_I, None)
.log_base_q;
}
} else {
done_processing = ctx.done_processing();
}
TwoPassOutParams { pass1_log_base_q, done_processing }
}
  /// Emits first-pass statistics: a placeholder summary header on the first
  /// call, then one per-frame metrics packet per frame, and finally the
  /// real summary header once `params.done_processing` is set.
  ///
  /// Returns `None` when there is nothing new to hand out (the current
  /// frame's data was already retrieved, or the frame counters would
  /// overflow an `i32`).
  pub(crate) fn twopass_out(
    &mut self, params: TwoPassOutParams,
  ) -> Option<&[u8]> {
    let mut cur_pos = 0;
    if !self.pass1_data_retrieved {
      if self.twopass_state != PASS_1 && self.twopass_state != PASS_2_PLUS_1 {
        // Initialize the first pass.
        if self.twopass_state == PASS_SINGLE {
          // Pick first-pass qi for scale calculations.
          self.pass1_log_base_q = params.pass1_log_base_q;
        } else {
          debug_assert!(self.twopass_state == PASS_2);
        }
        self.twopass_state += PASS_1;
        // Fill in dummy summary values.
        // The real totals are not known until the end of the pass; a
        // second call with `done_processing` set rewrites this header.
        cur_pos = self.buffer_val(TWOPASS_MAGIC as i64, 4, cur_pos);
        cur_pos = self.buffer_val(TWOPASS_VERSION as i64, 4, cur_pos);
        cur_pos = self.buffer_val(0, TWOPASS_HEADER_SZ - 8, cur_pos);
        debug_assert!(cur_pos == TWOPASS_HEADER_SZ);
      } else {
        let fti = self.prev_metrics.fti;
        // Accumulate the running totals reported in the final summary.
        if fti < FRAME_NSUBTYPES {
          self.scale_sum[fti] += bexp_q24(self.prev_metrics.log_scale_q24);
        }
        if self.prev_metrics.show_frame {
          self.ntus += 1;
        }
        // If we have encoded too many frames, prevent us from reaching the
        // ready state required to encode more.
        if self.nencoded_frames + self.nsef_frames >= std::i32::MAX as i64 {
          None?
        }
        // Pack the show-frame flag (bit 31) together with the frame
        // subtype (low 31 bits) into one 4-byte field.
        cur_pos = self.buffer_val(
          (self.prev_metrics.show_frame as i64) << 31
            | self.prev_metrics.fti as i64,
          4,
          cur_pos,
        );
        cur_pos =
          self.buffer_val(self.prev_metrics.log_scale_q24 as i64, 4, cur_pos);
        debug_assert!(cur_pos == TWOPASS_PACKET_SZ);
      }
      self.pass1_data_retrieved = true;
    } else if params.done_processing && !self.pass1_summary_retrieved {
      // All frames done: emit the real summary header with final totals.
      cur_pos = self.buffer_val(TWOPASS_MAGIC as i64, 4, cur_pos);
      cur_pos = self.buffer_val(TWOPASS_VERSION as i64, 4, cur_pos);
      cur_pos = self.buffer_val(self.ntus as i64, 4, cur_pos);
      for fti in 0..=FRAME_NSUBTYPES {
        cur_pos = self.buffer_val(self.nframes[fti] as i64, 4, cur_pos);
      }
      for fti in 0..FRAME_NSUBTYPES {
        cur_pos = self.buffer_val(self.exp[fti] as i64, 1, cur_pos);
      }
      for fti in 0..FRAME_NSUBTYPES {
        cur_pos = self.buffer_val(self.scale_sum[fti], 8, cur_pos);
      }
      debug_assert!(cur_pos == TWOPASS_HEADER_SZ);
      self.pass1_summary_retrieved = true;
    } else {
      // The data for this frame has already been retrieved.
      return None;
    }
    Some(&self.pass1_buffer[..cur_pos])
  }
fn buffer_fill(
&mut self, buf: &[u8], consumed: usize, goal: usize,
) -> usize {
let mut consumed = consumed;
while self.pass2_buffer_fill < goal && consumed < buf.len() {
self.pass2_buffer[self.pass2_buffer_fill] = buf[consumed];
self.pass2_buffer_fill += 1;
consumed += 1;
}
consumed
}
fn unbuffer_val(&mut self, bytes: usize) -> i64 {
let mut bytes = bytes;
let mut ret = 0;
let mut shift = 0;
while bytes > 0 {
bytes -= 1;
ret |= (self.pass2_buffer[self.pass2_buffer_pos] as i64) << shift;
self.pass2_buffer_pos += 1;
shift += 8;
}
ret
}
// Read metrics for the next frame.
fn parse_metrics(&mut self) -> Result<RCFrameMetrics, ()> {
debug_assert!(self.pass2_buffer_fill >= TWOPASS_PACKET_SZ);
let ft_val = self.unbuffer_val(4);
let show_frame = (ft_val >> 31) != 0;
let fti = (ft_val & 0x7FFFFFFF) as usize;
// Make sure the frame type is valid.
if fti > FRAME_NSUBTYPES {
Err(())?;
}
let log_scale_q24 = self.unbuffer_val(4) as i32;
Ok(RCFrameMetrics { log_scale_q24, fti, show_frame })
}
  /// Feeds pass-1 statistics into the second pass.
  ///
  /// `maybe_buf` carries bytes of pass-1 output; `None` polls for how much
  /// data is needed. On success returns either the number of bytes consumed
  /// from the buffer, or (when polling) the number of bytes required before
  /// the next frame can be encoded. Returns `Err(())` on malformed data.
  pub(crate) fn twopass_in(
    &mut self, maybe_buf: Option<&[u8]>,
  ) -> Result<usize, ()> {
    let mut consumed = 0;
    if self.twopass_state == PASS_SINGLE || self.twopass_state == PASS_1 {
      // Initialize the second pass.
      self.twopass_state += PASS_2;
      // If the user requested a finite buffer, reserve the space required for
      // it.
      if self.reservoir_frame_delay_is_set {
        debug_assert!(self.reservoir_frame_delay > 0);
        // reservoir_frame_delay counts in TUs, but RCFrameMetrics are stored
        // per frame (including Show Existing Frame frames).
        // When re-ordering, we will have more frames than TUs.
        // How many more?
        // That depends on the re-ordering scheme used.
        // Doubling the number of TUs and adding a fixed latency equal to the
        // maximum number of reference frames we can store should be
        // sufficient for any reasonable scheme, and keeps this code from
        // depending too closely on the details of the scheme currently used
        // by rav1e.
        let nmetrics = (self.reservoir_frame_delay as usize) * 2 + 8;
        self.frame_metrics.reserve_exact(nmetrics);
        self.frame_metrics.resize(nmetrics, RCFrameMetrics::new());
      }
    }
    // If we haven't got a valid summary header yet, try to parse one.
    if self.nframes_total[FRAME_SUBTYPE_I] == 0 {
      self.pass2_data_ready = false;
      if let Some(buf) = maybe_buf {
        consumed = self.buffer_fill(buf, consumed, TWOPASS_HEADER_SZ);
        if self.pass2_buffer_fill >= TWOPASS_HEADER_SZ {
          self.pass2_buffer_pos = 0;
          // Read the summary header data.
          // check the magic value and version number.
          if self.unbuffer_val(4) != TWOPASS_MAGIC as i64
            || self.unbuffer_val(4) != TWOPASS_VERSION as i64
          {
            Err(())?;
          }
          let ntus_total = self.unbuffer_val(4) as i32;
          // Make sure the file claims to have at least one TU.
          // Otherwise we probably got the placeholder data from an aborted
          // pass 1.
          if ntus_total < 1 {
            Err(())?;
          }
          let mut maybe_nframes_total_total: Option<i32> = Some(0);
          let mut nframes_total: [i32; FRAME_NSUBTYPES + 1] =
            [0; FRAME_NSUBTYPES + 1];
          for fti in 0..=FRAME_NSUBTYPES {
            nframes_total[fti] = self.unbuffer_val(4) as i32;
            if nframes_total[fti] < 0 {
              Err(())?;
            }
            // Sum with overflow detection; `None` marks an overflow.
            maybe_nframes_total_total = maybe_nframes_total_total
              .and_then(|n| n.checked_add(nframes_total[fti]));
          }
          if let Some(nframes_total_total) = maybe_nframes_total_total {
            // We can't have more TUs than frames.
            if ntus_total > nframes_total_total {
              Err(())?;
            }
            let mut exp: [u8; FRAME_NSUBTYPES] = [0; FRAME_NSUBTYPES];
            for fti in 0..FRAME_NSUBTYPES {
              exp[fti] = self.unbuffer_val(1) as u8;
            }
            let mut scale_sum: [i64; FRAME_NSUBTYPES] = [0; FRAME_NSUBTYPES];
            for fti in 0..FRAME_NSUBTYPES {
              scale_sum[fti] = self.unbuffer_val(8);
              if scale_sum[fti] < 0 {
                Err(())?;
              }
            }
            // Got a valid header.
            // Set up pass 2.
            self.ntus_total = ntus_total;
            self.ntus_left = ntus_total;
            self.nframes_total = nframes_total;
            self.nframes_left = nframes_total;
            self.nframes_total_total = nframes_total_total;
            if self.frame_metrics.is_empty() {
              // Whole-file buffering: size the reservoir from the totals.
              self.reservoir_frame_delay = ntus_total;
              self.scale_window_nframes = self.nframes_total;
              self.scale_window_sum = scale_sum;
              self.reservoir_max =
                self.bits_per_tu * (self.reservoir_frame_delay as i64);
              self.reservoir_target = (self.reservoir_max + 1) >> 1;
              self.reservoir_fullness = self.reservoir_target;
            } else {
              // Finite buffer: never look further ahead than the file.
              self.reservoir_frame_delay =
                self.reservoir_frame_delay.min(ntus_total);
            }
            self.exp = exp;
            // Clear the header data from the buffer to make room for the
            // packet data.
            self.pass2_buffer_fill = 0;
          } else {
            // The sum of the frame counts for each type overflowed a 32-bit
            // integer.
            Err(())?;
          }
        }
      } else {
        let frames_needed = if !self.frame_metrics.is_empty() {
          // If we're not using whole-file buffering, we need at least one
          // frame per buffer slot.
          self.reservoir_frame_delay as usize
        } else {
          // Otherwise we need just one.
          1
        };
        return Ok(TWOPASS_HEADER_SZ + frames_needed * TWOPASS_PACKET_SZ);
      }
    }
    if self.nframes_total[FRAME_SUBTYPE_I] > 0 {
      if self.nencoded_frames + self.nsef_frames
        >= self.nframes_total_total as i64
      {
        // We don't want any more data after the last frame, and we don't want
        // to allow any more frames to be encoded.
        self.pass2_data_ready = false;
      } else if !self.pass2_data_ready {
        if self.frame_metrics.is_empty() {
          // We're using a whole-file buffer.
          if let Some(buf) = maybe_buf {
            consumed = self.buffer_fill(buf, consumed, TWOPASS_PACKET_SZ);
            if self.pass2_buffer_fill >= TWOPASS_PACKET_SZ {
              self.pass2_buffer_pos = 0;
              // Read metrics for the next frame.
              self.cur_metrics = self.parse_metrics()?;
              // Clear the buffer for the next frame.
              self.pass2_buffer_fill = 0;
              self.pass2_data_ready = true;
            }
          } else {
            return Ok(TWOPASS_PACKET_SZ - self.pass2_buffer_fill);
          }
        } else {
          // We're using a finite buffer.
          let mut cur_scale_window_nframes = 0;
          let mut cur_nframes_left = 0;
          for fti in 0..=FRAME_NSUBTYPES {
            cur_scale_window_nframes += self.scale_window_nframes[fti];
            cur_nframes_left += self.nframes_left[fti];
          }
          // How many more frames of metrics are needed to fill the
          // scaling window (bounded by the frames actually remaining).
          let mut frames_needed = (self.reservoir_frame_delay
            - self.scale_window_ntus)
            .max(0)
            .min(cur_nframes_left - cur_scale_window_nframes);
          while frames_needed > 0 {
            if let Some(buf) = maybe_buf {
              consumed = self.buffer_fill(buf, consumed, TWOPASS_PACKET_SZ);
              if self.pass2_buffer_fill >= TWOPASS_PACKET_SZ {
                self.pass2_buffer_pos = 0;
                // Read the metrics for the next frame.
                let m = self.parse_metrics()?;
                // Add them to the circular buffer.
                if self.nframe_metrics >= self.frame_metrics.len() {
                  // We read too many frames without finding enough TUs.
                  Err(())?;
                }
                let mut fmi = self.frame_metrics_head + self.nframe_metrics;
                if fmi >= self.frame_metrics.len() {
                  fmi -= self.frame_metrics.len();
                }
                self.nframe_metrics += 1;
                self.frame_metrics[fmi] = m;
                // And accumulate the statistics over the window.
                self.scale_window_nframes[m.fti] += 1;
                cur_scale_window_nframes += 1;
                if m.fti < FRAME_NSUBTYPES {
                  self.scale_window_sum[m.fti] += bexp_q24(m.log_scale_q24);
                }
                if m.show_frame {
                  self.scale_window_ntus += 1;
                }
                frames_needed = (self.reservoir_frame_delay
                  - self.scale_window_ntus)
                  .max(0)
                  .min(cur_nframes_left - cur_scale_window_nframes);
                // Clear the buffer for the next frame.
                self.pass2_buffer_fill = 0;
              } else {
                // Go back for more data.
                break;
              }
            } else {
              return Ok(
                TWOPASS_PACKET_SZ * (frames_needed as usize)
                  - self.pass2_buffer_fill,
              );
            }
          }
          // If we've got all the frames we need, fill in the current metrics.
          // We're ready to go.
          if frames_needed <= 0 {
            self.cur_metrics = self.frame_metrics[self.frame_metrics_head];
            // Mark us ready for the next frame.
            self.pass2_data_ready = true;
          }
        }
      }
    }
    Ok(consumed)
  }
}
#[cfg(test)]
mod test {
  use super::{bexp64, blog64};

  // Use `assert_eq!` (not `assert!(a == b)`) so failures report the actual
  // values; the non-idiomatic `-> ()` return types are dropped as well.

  #[test]
  fn blog64_vectors() {
    assert_eq!(blog64(1793), 0x159dc71e24d32daf);
    assert_eq!(blog64(0x678dde6e5fd29f05), 0x7d6373ad151ca685);
  }

  #[test]
  fn bexp64_vectors() {
    assert_eq!(bexp64(0x159dc71e24d32daf), 1793);
    // bexp64 is only an approximate inverse at this magnitude; allow a
    // small absolute error.
    assert!((bexp64(0x7d6373ad151ca685) - 0x678dde6e5fd29f05).abs() < 29);
  }

  #[test]
  fn blog64_bexp64_round_trip() {
    for a in 1..=std::u16::MAX as i64 {
      let b = std::i64::MAX / a;
      let (log_a, log_b, log_ab) = (blog64(a), blog64(b), blog64(a * b));
      // log is additive up to rounding error.
      assert!((log_a + log_b - log_ab).abs() < 4);
      assert_eq!(bexp64(log_a), a);
      assert!((bexp64(log_b) - b).abs() < 128);
      assert!((bexp64(log_ab) - a * b).abs() < 128);
    }
  }
}
|
// Hound -- A WAV encoding and decoding library in Rust
// Copyright (C) 2015 Ruud van Asseldonk
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License, version 3,
// as published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::fs;
use std::io;
use std::marker;
use std::path;
use super::{Error, Result, Sample, WavSpec};
// TODO: Can this be unified among Hound and Claxon? Copy + Paste is bad, but
// I refuse to use an external crate just to read into an array of bytes, or
// to read an integer. Such functionality should really be in the standard
// library. Practically _every_ program that does IO will need more high-level
// functionality than what the standard library currently provides.
/// Extends the functionality of `io::Read` with additional methods.
///
/// The methods may be used on any type that implements `io::Read`.
pub trait ReadExt: io::Read {
    /// Reads as many bytes as `buf` is long.
    ///
    /// This may issue multiple `read` calls internally. An error is returned
    /// if `read` read 0 bytes before the buffer is full.
    ///
    /// On error, the contents of `buf` are unspecified: some bytes may
    /// already have been overwritten with partial data.
    fn read_into(&mut self, buf: &mut [u8]) -> io::Result<()>;
    /// Reads `n` bytes and returns them in a vector.
    fn read_bytes(&mut self, n: usize) -> io::Result<Vec<u8>>;
    /// Reads two bytes and interprets them as a little-endian 16-bit signed integer.
    fn read_le_i16(&mut self) -> io::Result<i16>;
    /// Reads two bytes and interprets them as a little-endian 16-bit unsigned integer.
    fn read_le_u16(&mut self) -> io::Result<u16>;
    /// Reads four bytes and interprets them as a little-endian 32-bit unsigned integer.
    fn read_le_u32(&mut self) -> io::Result<u32>;
}
impl<R> ReadExt for R where R: io::Read {
    /// Fills `buf` completely, issuing as many `read` calls as needed.
    fn read_into(&mut self, buf: &mut [u8]) -> io::Result<()> {
        let mut n = 0;
        while n < buf.len() {
            let progress = try!(self.read(&mut buf[n ..]));
            if progress > 0 {
                n += progress;
            } else {
                // A zero-byte read means end of stream before the buffer
                // was full.
                return Err(io::Error::new(io::ErrorKind::Other,
                                          "Failed to read enough bytes."));
            }
        }
        Ok(())
    }

    fn read_bytes(&mut self, n: usize) -> io::Result<Vec<u8>> {
        // A zero-initialised buffer avoids the undefined behaviour of the
        // previous version, which used `set_len` on a freshly allocated
        // `Vec` and handed uninitialised memory to `read` (and could leak
        // stale heap contents if `read_into` failed part-way). The zeroing
        // cost is negligible for the small reads done here.
        let mut buf = vec![0u8; n];
        try!(self.read_into(&mut buf[..]));
        Ok(buf)
    }

    fn read_le_i16(&mut self) -> io::Result<i16> {
        // Reinterpret the unsigned bits as two's-complement signed.
        self.read_le_u16().map(|x| x as i16)
    }

    fn read_le_u16(&mut self) -> io::Result<u16> {
        let mut buf = [0u8; 2];
        try!(self.read_into(&mut buf));
        Ok((buf[1] as u16) << 8 | (buf[0] as u16))
    }

    fn read_le_u32(&mut self) -> io::Result<u32> {
        let mut buf = [0u8; 4];
        try!(self.read_into(&mut buf));
        // Little-endian: buf[0] is the least significant byte.
        Ok((buf[3] as u32) << 24 | (buf[2] as u32) << 16 |
           (buf[1] as u32) << 8 | (buf[0] as u32))
    }
}
/// The different chunks that a WAVE file can contain.
enum ChunkKind {
    /// The "fmt " chunk, which describes the sample format.
    Fmt,
    /// The "data" chunk, which contains the actual samples.
    Data,
    /// A chunk with an unrecognised tag; its contents are skipped.
    Unknown
}
/// Describes the structure of a chunk in the WAVE file.
struct ChunkHeader {
    /// The kind of chunk, determined from its four-byte tag.
    pub kind: ChunkKind,
    /// The length in bytes of the chunk contents, excluding this header.
    pub len: u32
}
/// A reader that reads the WAVE format from the underlying reader.
///
/// A `WavReader` is a streaming reader. It reads data from the underlying
/// reader on demand, and it reads no more than strictly necessary. No internal
/// buffering is performed on the underlying reader.
pub struct WavReader<R> {
    /// Specification of the file as found in the fmt chunk.
    spec: WavSpec,
    /// The number of samples in the data chunk.
    ///
    /// The data chunk is limited to a 4 GiB length because its header has a
    /// 32-bit length field. A sample takes at least one byte to store, so the
    /// number of samples is always less than 2^32.
    num_samples: u32,
    /// The number of samples read so far.
    ///
    /// Counted per value across all channels, not per inter-channel frame.
    samples_read: u32,
    /// The reader from which the WAVE format is read.
    reader: R
}
/// An iterator that yields samples of type `S` read from a `WavReader`.
pub struct WavSamples<'wr, R, S> where R: 'wr {
    /// The reader to pull samples from; iteration advances its position.
    reader: &'wr mut WavReader<R>,
    /// Zero-sized marker tying the iterator to the sample type `S`.
    phantom_sample: marker::PhantomData<S>
}
impl<R> WavReader<R> where R: io::Read {
    /// Reads the RIFF WAVE header, returns the supposed file size.
    ///
    /// Fails with a `FormatError` if the "RIFF" or "WAVE" tags are absent.
    fn read_wave_header(reader: &mut R) -> Result<u32> {
        // Every WAVE file starts with the four bytes 'RIFF' and a file length.
        // TODO: the old approach of having a slice on the stack and reading
        // into it is more cumbersome, but also avoids a heap allocation. Is
        // the compiler smart enough to avoid the heap allocation anyway? I
        // would not expect it to be.
        if "RIFF".as_bytes() != &try!(reader.read_bytes(4))[..] {
            return Err(Error::FormatError("no RIFF tag found"));
        }
        // TODO: would this be useful anywhere? Probably not, except for
        // validating files, but do we need to be so strict?
        let file_len = try!(reader.read_le_u32());
        // Next four bytes indicate the file type, which should be WAVE.
        if "WAVE".as_bytes() != &try!(reader.read_bytes(4))[..] {
            // TODO: use custom error type
            return Err(Error::FormatError("no WAVE tag found"));
        }
        Ok(file_len)
    }

    /// Attempts to read an 8-byte chunk header.
    fn read_chunk_header(reader: &mut R) -> Result<ChunkHeader> {
        // A chunk header is a four-byte tag followed by a little-endian
        // 32-bit content length.
        let mut kind_str = [0; 4];
        try!(reader.read_into(&mut kind_str));
        let len = try!(reader.read_le_u32());
        let kind = match &kind_str[..] {
            b"fmt " => ChunkKind::Fmt,
            b"data" => ChunkKind::Data,
            _ => ChunkKind::Unknown
        };
        Ok(ChunkHeader { kind: kind, len: len })
    }

    /// Reads the fmt chunk of the file, returns the information it provides.
    fn read_fmt_chunk(reader: &mut R, chunk_len: u32) -> Result<WavSpec> {
        // A minimum chunk length of at least 16 is assumed. Note: actually,
        // the first 14 bytes contain enough information to fully specify the
        // file. I have not encountered a file with a 14-byte fmt section
        // though. If you ever encounter such file, please contact me.
        if chunk_len < 16 {
            return Err(Error::FormatError("invalid fmt chunk size"));
        }
        // Read the WAVEFORMAT struct, as defined at
        // https://msdn.microsoft.com/en-us/library/ms713498.aspx.
        // ```
        // typedef struct {
        //     WORD wFormatTag;
        //     WORD nChannels;
        //     DWORD nSamplesPerSec;
        //     DWORD nAvgBytesPerSec;
        //     WORD nBlockAlign;
        // } WAVEFORMAT;
        // ```
        // The WAVEFORMATEX struct has two more members, as defined at
        // https://msdn.microsoft.com/en-us/library/ms713497.aspx
        // ```
        // typedef struct {
        //     WORD wFormatTag;
        //     WORD nChannels;
        //     DWORD nSamplesPerSec;
        //     DWORD nAvgBytesPerSec;
        //     WORD nBlockAlign;
        //     WORD wBitsPerSample;
        //     WORD cbSize;
        // } WAVEFORMATEX;
        // ```
        // It appears that in either case, the minimal length of the fmt
        // section is 16 bytes, meaning that it does include the
        // `wBitsPerSample` field. (The name is misleading though, because it
        // is the number of bits used to store a sample, not all of the bits
        // need to be valid for all versions of the WAVE format.)
        let format_tag = try!(reader.read_le_u16());
        let n_channels = try!(reader.read_le_u16());
        let n_samples_per_sec = try!(reader.read_le_u32());
        let n_bytes_per_sec = try!(reader.read_le_u32());
        let block_align = try!(reader.read_le_u16());
        let bits_per_sample = try!(reader.read_le_u16());
        // Two of the stored fields are redundant, and may be ignored. We do
        // validate them to fail early for ill-formed files.
        // NOTE(review): if `n_channels` is 0 the division below panics on a
        // malformed file — consider validating and returning a FormatError
        // first.
        if (bits_per_sample != block_align / n_channels * 8)
           || (n_bytes_per_sec != block_align as u32 * n_samples_per_sec) {
            return Err(Error::FormatError("inconsistent fmt chunk"));
        }
        if format_tag != 1 {
            // TODO: detect the actual tag, and switch to reading WAVEFORMATEX
            // or WAVEFORMATEXTENSIBLE if indicated by the tag.
            return Err(Error::FormatError("invalid or unsupported format tag"));
        }
        // We have read 16 bytes so far. If the fmt chunk is longer, then we
        // could be dealing with WAVEFORMATEX or WAVEFORMATEXTENSIBLE. This is
        // not supported at this point.
        if chunk_len > 16 {
            panic!("wave format type not implemented yet");
        }
        let spec = WavSpec {
            channels: n_channels,
            sample_rate: n_samples_per_sec,
            bits_per_sample: bits_per_sample as u32
        };
        Ok(spec)
    }

    /// Reads chunks until a data chunk is encountered.
    ///
    /// Returns the information from the fmt chunk and the length of the data
    /// chunk in bytes. Afterwards, the reader will be positioned at the first
    /// content byte of the data chunk.
    fn read_until_data(mut reader: R) -> Result<(WavSpec, u32)> {
        let mut spec_opt = None;
        loop {
            let header = try!(WavReader::read_chunk_header(&mut reader));
            match header.kind {
                ChunkKind::Fmt => {
                    let spec = try!(WavReader::read_fmt_chunk(&mut reader,
                                                              header.len));
                    spec_opt = Some(spec);
                },
                ChunkKind::Data => {
                    // The "fmt" chunk must precede the "data" chunk. Any
                    // chunks that come after the data chunk will be ignored.
                    if let Some(spec) = spec_opt {
                        return Ok((spec, header.len));
                    } else {
                        return Err(Error::FormatError("missing fmt chunk"));
                    }
                },
                ChunkKind::Unknown => {
                    // Ignore the chunk; skip all of its bytes.
                    // TODO: this could be more efficient by not allocating
                    // space on the heap, reading into it and then dropping it
                    // without use. For now, this solution is simplest. If Seek
                    // is supported we could skip, but that is a stronger bound
                    // than what is required ...
                    try!(reader.read_bytes(header.len as usize));
                }
            }
            // If no data chunk is ever encountered, the function will return
            // via one of the try! macros that return an Err on end of file.
        }
    }

    /// Attempts to create a reader that reads the WAVE format.
    ///
    /// The header is read immediately. Reading the data will be done on
    /// demand.
    pub fn new(mut reader: R) -> Result<WavReader<R>> {
        try!(WavReader::read_wave_header(&mut reader));
        let (spec, data_len) = try!(WavReader::read_until_data(&mut reader));
        // NOTE(review): if `bits_per_sample` is less than 8 this division
        // panics with a divide-by-zero on a malformed file — TODO confirm
        // whether such files should instead yield a FormatError.
        let num_samples = data_len / (spec.bits_per_sample / 8);
        // The number of samples must be a multiple of the number of channels,
        // otherwise the last inter-channel sample would not have data for all
        // channels.
        if num_samples % spec.channels as u32 != 0 {
            return Err(Error::FormatError("invalid data chunk length"));
        }
        let wav_reader = WavReader {
            spec: spec,
            num_samples: num_samples,
            samples_read: 0,
            reader: reader
        };
        Ok(wav_reader)
    }

    // TODO: Should this return by value instead? A reference is more consistent
    // with Claxon, but the type is only 80 bits, barely larger than a pointer.
    // Is it worth the extra indirection? On the other hand, the indirection
    // is probably optimised away.
    /// Returns information about the WAVE file.
    pub fn spec(&self) -> &WavSpec {
        &self.spec
    }

    /// Returns an iterator over all samples.
    ///
    /// The channel data is is interleaved. The iterator is streaming. That is,
    /// if you call this method once, read a few samples, and call this method
    /// again, the second iterator will not start again from the beginning of
    /// the file, it will continue where the first iterator stopped.
    pub fn samples<'wr, S: Sample>(&'wr mut self) -> WavSamples<'wr, R, S> {
        WavSamples {
            reader: self,
            phantom_sample: marker::PhantomData
        }
    }

    /// Returns the duration of the file in samples.
    ///
    /// The duration is independent of the number of channels. It is expressed
    /// in units of samples. The duration in seconds can be obtained by
    /// dividing this number by the sample rate. The duration is independent of
    /// how many samples have been read already.
    pub fn duration(&self) -> u32 {
        self.num_samples / self.spec.channels as u32
    }

    /// Returns the number of values that the sample iterator will yield.
    ///
    /// The length of the file is its duration (in samples) times the number of
    /// channels. The length is independent of how many samples have been read
    /// already.
    pub fn len(&self) -> u32 {
        self.num_samples
    }
}
impl WavReader<io::BufReader<fs::File>> {
    /// Attempts to create a reader that reads from the specified file.
    ///
    /// Convenience constructor: opens the file, wraps it in a `BufReader`
    /// for buffered reads, and hands it to `WavReader::new`.
    pub fn open<P: AsRef<path::Path>>(filename: P)
                -> Result<WavReader<io::BufReader<fs::File>>> {
        let buffered = io::BufReader::new(try!(fs::File::open(filename)));
        WavReader::new(buffered)
    }
}
impl<'wr, R, S> Iterator for WavSamples<'wr, R, S>
where R: io::Read,
S: Sample {
type Item = Result<S>;
fn next(&mut self) -> Option<Result<S>> {
let reader = &mut self.reader;
if reader.samples_read < reader.num_samples {
reader.samples_read += 1;
let sample = Sample::read(&mut reader.reader,
reader.spec.bits_per_sample);
Some(sample.map_err(Error::from))
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let samples_left = self.reader.num_samples - self.reader.samples_read;
(samples_left as usize, Some(samples_left as usize))
}
}
#[test]
fn duration_and_len_agree() {
    // `len()` must equal duration (in inter-channel frames) times channels.
    // TODO: add test samples with more channels.
    for fname in &["testsamples/waveformat-16bit-44100Hz-mono.wav"] {
        let reader = WavReader::open(fname).unwrap();
        let channels = reader.spec().channels as u32;
        assert_eq!(channels * reader.duration(), reader.len());
    }
}
/// Tests reading the most basic wav file, one with only a WAVEFORMAT struct.
#[test]
fn read_wav_waveformat() {
    use std::fs;
    let file = fs::File::open("testsamples/waveformat-16bit-44100Hz-mono.wav")
                  .ok().expect("failed to open file");
    let mut wav_reader = WavReader::new(io::BufReader::new(file))
                            .ok().expect("failed to read header");
    // The header must describe 16-bit mono audio at 44.1 kHz.
    assert_eq!(wav_reader.spec().channels, 1);
    assert_eq!(wav_reader.spec().sample_rate, 44100);
    assert_eq!(wav_reader.spec().bits_per_sample, 16);
    let samples: Vec<i16> = wav_reader.samples()
                                      .map(|r| r.ok().unwrap())
                                      .collect();
    // The test file has been prepared with these exact four samples.
    assert_eq!(&samples[..], &[2, -3, 5, -7]);
}
#[test]
fn read_wav_waveformat_ex() {
    // Placeholder: no WAVEFORMATEX test sample exists yet.
    // TODO: add a test sample that uses WAVEFORMATEX and verify that it can be
    // read properly.
}
#[test]
fn read_wav_waveformat_extensible() {
    // Placeholder: no WAVEFORMATEXTENSIBLE test sample exists yet.
    // TODO: add a test sample that uses WAVEFORMATEXTENSIBLE (as produced by
    // Hound itself actually, so this should not be too hard), and verify that
    // it can be read properly.
}
// use byte literal instead of as_bytes
// Hound -- A WAV encoding and decoding library in Rust
// Copyright (C) 2015 Ruud van Asseldonk
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License, version 3,
// as published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::fs;
use std::io;
use std::marker;
use std::path;
use super::{Error, Result, Sample, WavSpec};
// TODO: Can this be unified among Hound and Claxon? Copy + Paste is bad, but
// I refuse to use an external crate just to read into an array of bytes, or
// to read an integer. Such functionality should really be in the standard
// library. Practically _every_ program that does IO will need more high-level
// functionality than what the standard library currently provides.
/// Extends the functionality of `io::Read` with additional methods.
///
/// The methods may be used on any type that implements `io::Read`.
///
/// All multi-byte reads are little-endian, as used by the WAVE format.
pub trait ReadExt: io::Read {
/// Reads as many bytes as `buf` is long.
///
/// This may issue multiple `read` calls internally. An error is returned
/// if `read` read 0 bytes before the buffer is full.
fn read_into(&mut self, buf: &mut [u8]) -> io::Result<()>;
/// Reads `n` bytes and returns them in a vector.
fn read_bytes(&mut self, n: usize) -> io::Result<Vec<u8>>;
/// Reads two bytes and interprets them as a little-endian 16-bit signed integer.
fn read_le_i16(&mut self) -> io::Result<i16>;
/// Reads two bytes and interprets them as a little-endian 16-bit unsigned integer.
fn read_le_u16(&mut self) -> io::Result<u16>;
/// Reads four bytes and interprets them as a little-endian 32-bit unsigned integer.
fn read_le_u32(&mut self) -> io::Result<u32>;
}
impl<R> ReadExt for R where R: io::Read {
    fn read_into(&mut self, buf: &mut [u8]) -> io::Result<()> {
        // A single `read` call may fill only part of the buffer, so keep
        // reading until it is full or the stream ends.
        let mut n = 0;
        while n < buf.len() {
            let progress = try!(self.read(&mut buf[n ..]));
            if progress > 0 {
                n += progress;
            } else {
                // A zero-byte read signals end of stream: `buf` cannot be
                // filled completely.
                return Err(io::Error::new(io::ErrorKind::Other,
                                          "Failed to read enough bytes."));
            }
        }
        Ok(())
    }

    fn read_bytes(&mut self, n: usize) -> io::Result<Vec<u8>> {
        // Zero-initialise the buffer instead of exposing uninitialised
        // memory via `set_len`: passing uninitialised bytes to an arbitrary
        // `Read` implementation is undefined behaviour, and zeroing `n`
        // bytes is negligible compared to the I/O that follows.
        let mut buf = vec![0u8; n];
        try!(self.read_into(&mut buf[..]));
        Ok(buf)
    }

    fn read_le_i16(&mut self) -> io::Result<i16> {
        // Reinterpret the unsigned bits as two's complement.
        self.read_le_u16().map(|x| x as i16)
    }

    fn read_le_u16(&mut self) -> io::Result<u16> {
        let mut buf = [0u8; 2];
        try!(self.read_into(&mut buf));
        Ok((buf[1] as u16) << 8 | (buf[0] as u16))
    }

    fn read_le_u32(&mut self) -> io::Result<u32> {
        let mut buf = [0u8; 4];
        try!(self.read_into(&mut buf));
        Ok((buf[3] as u32) << 24 | (buf[2] as u32) << 16 |
           (buf[1] as u32) << 8  | (buf[0] as u32) << 0)
    }
}
/// The different chunks that a WAVE file can contain.
enum ChunkKind {
/// The "fmt " chunk, which describes the sample format.
Fmt,
/// The "data" chunk, which contains the actual samples.
Data,
/// Any other chunk; its contents are skipped over.
Unknown
}
/// Describes the structure of a chunk in the WAVE file.
struct ChunkHeader {
/// The kind of chunk that this header introduces.
pub kind: ChunkKind,
/// The length of the chunk contents in bytes, not counting the 8-byte header.
pub len: u32
}
/// A reader that reads the WAVE format from the underlying reader.
///
/// A `WavReader` is a streaming reader. It reads data from the underlying
/// reader on demand, and it reads no more than strictly necessary. No internal
/// buffering is performed on the underlying reader.
pub struct WavReader<R> {
/// Specification of the file as found in the fmt chunk.
spec: WavSpec,
/// The number of samples in the data chunk.
///
/// The data chunk is limited to a 4 GiB length because its header has a
/// 32-bit length field. A sample takes at least one byte to store, so the
/// number of samples is always less than 2^32.
num_samples: u32,
/// The number of samples read so far.
///
/// Advanced by the `WavSamples` iterator; never exceeds `num_samples`.
samples_read: u32,
/// The reader from which the WAVE format is read.
reader: R
}
/// An iterator that yields samples of type `S` read from a `WavReader`.
pub struct WavSamples<'wr, R, S> where R: 'wr {
/// The reader that samples are pulled from; it also tracks progress.
reader: &'wr mut WavReader<R>,
/// Zero-sized marker that ties the iterator to the sample type `S`.
phantom_sample: marker::PhantomData<S>
}
impl<R> WavReader<R> where R: io::Read {
/// Reads the RIFF WAVE header, returns the supposed file size.
fn read_wave_header(reader: &mut R) -> Result<u32> {
// Every WAVE file starts with the four bytes 'RIFF' and a file length.
// TODO: the old approach of having a slice on the stack and reading
// into it is more cumbersome, but also avoids a heap allocation. Is
// the compiler smart enough to avoid the heap allocation anyway? I
// would not expect it to be.
if b"RIFF" != &try!(reader.read_bytes(4))[..] {
return Err(Error::FormatError("no RIFF tag found"));
}
// TODO: would this be useful anywhere? Probably not, except for
// validating files, but do we need to be so strict?
let file_len = try!(reader.read_le_u32());
// Next four bytes indicate the file type, which should be WAVE.
if b"WAVE" != &try!(reader.read_bytes(4))[..] {
// TODO: use custom error type
return Err(Error::FormatError("no WAVE tag found"));
}
Ok(file_len)
}
/// Attempts to read an 8-byte chunk header.
fn read_chunk_header(reader: &mut R) -> Result<ChunkHeader> {
let mut kind_str = [0; 4];
try!(reader.read_into(&mut kind_str));
let len = try!(reader.read_le_u32());
let kind = match &kind_str[..] {
b"fmt " => ChunkKind::Fmt,
b"data" => ChunkKind::Data,
_ => ChunkKind::Unknown
};
Ok(ChunkHeader { kind: kind, len: len })
}
/// Reads the fmt chunk of the file, returns the information it provides.
fn read_fmt_chunk(reader: &mut R, chunk_len: u32) -> Result<WavSpec> {
// A minimum chunk length of at least 16 is assumed. Note: actually,
// the first 14 bytes contain enough information to fully specify the
// file. I have not encountered a file with a 14-byte fmt section
// though. If you ever encounter such file, please contact me.
if chunk_len < 16 {
return Err(Error::FormatError("invalid fmt chunk size"));
}
// Read the WAVEFORMAT struct, as defined at
// https://msdn.microsoft.com/en-us/library/ms713498.aspx.
// ```
// typedef struct {
// WORD wFormatTag;
// WORD nChannels;
// DWORD nSamplesPerSec;
// DWORD nAvgBytesPerSec;
// WORD nBlockAlign;
// } WAVEFORMAT;
// ```
// The WAVEFORMATEX struct has two more members, as defined at
// https://msdn.microsoft.com/en-us/library/ms713497.aspx
// ```
// typedef struct {
// WORD wFormatTag;
// WORD nChannels;
// DWORD nSamplesPerSec;
// DWORD nAvgBytesPerSec;
// WORD nBlockAlign;
// WORD wBitsPerSample;
// WORD cbSize;
// } WAVEFORMATEX;
// ```
// It appears that in either case, the minimal length of the fmt
// section is 16 bytes, meaning that it does include the
// `wBitsPerSample` field. (The name is misleading though, because it
// is the number of bits used to store a sample, not all of the bits
// need to be valid for all versions of the WAVE format.)
let format_tag = try!(reader.read_le_u16());
let n_channels = try!(reader.read_le_u16());
let n_samples_per_sec = try!(reader.read_le_u32());
let n_bytes_per_sec = try!(reader.read_le_u32());
let block_align = try!(reader.read_le_u16());
let bits_per_sample = try!(reader.read_le_u16());
// Two of the stored fields are redundant, and may be ignored. We do
// validate them to fail early for ill-formed files.
if (bits_per_sample != block_align / n_channels * 8)
|| (n_bytes_per_sec != block_align as u32 * n_samples_per_sec) {
return Err(Error::FormatError("inconsistent fmt chunk"));
}
if format_tag != 1 {
// TODO: detect the actual tag, and switch to reading WAVEFORMATEX
// or WAVEFORMATEXTENSIBLE if indicated by the tag.
return Err(Error::FormatError("invalid or unsupported format tag"));
}
// We have read 16 bytes so far. If the fmt chunk is longer, then we
// could be dealing with WAVEFORMATEX or WAVEFORMATEXTENSIBLE. This is
// not supported at this point.
if chunk_len > 16 {
panic!("wave format type not implemented yet");
}
let spec = WavSpec {
channels: n_channels,
sample_rate: n_samples_per_sec,
bits_per_sample: bits_per_sample as u32
};
Ok(spec)
}
/// Reads chunks until a data chunk is encountered.
///
/// Returns the information from the fmt chunk and the length of the data
/// chunk in bytes. Afterwards, the reader will be positioned at the first
/// content byte of the data chunk.
fn read_until_data(mut reader: R) -> Result<(WavSpec, u32)> {
let mut spec_opt = None;
loop {
let header = try!(WavReader::read_chunk_header(&mut reader));
match header.kind {
ChunkKind::Fmt => {
let spec = try!(WavReader::read_fmt_chunk(&mut reader,
header.len));
spec_opt = Some(spec);
},
ChunkKind::Data => {
// The "fmt" chunk must precede the "data" chunk. Any
// chunks that come after the data chunk will be ignored.
if let Some(spec) = spec_opt {
return Ok((spec, header.len));
} else {
return Err(Error::FormatError("missing fmt chunk"));
}
},
ChunkKind::Unknown => {
// Ignore the chunk; skip all of its bytes.
// TODO: this could be more efficient by not allocating
// space on the heap, reading into it and then dropping it
// without use. For now, this solution is simplest. If Seek
// is supported we could skip, but that is a stronger bound
// than what is required ...
try!(reader.read_bytes(header.len as usize));
}
}
// If no data chunk is ever encountered, the function will return
// via one of the try! macros that return an Err on end of file.
}
}
/// Attempts to create a reader that reads the WAVE format.
///
/// The header is read immediately. Reading the data will be done on
/// demand.
pub fn new(mut reader: R) -> Result<WavReader<R>> {
try!(WavReader::read_wave_header(&mut reader));
let (spec, data_len) = try!(WavReader::read_until_data(&mut reader));
let num_samples = data_len / (spec.bits_per_sample / 8);
// The number of samples must be a multiple of the number of channels,
// otherwise the last inter-channel sample would not have data for all
// channels.
if num_samples % spec.channels as u32 != 0 {
return Err(Error::FormatError("invalid data chunk length"));
}
let wav_reader = WavReader {
spec: spec,
num_samples: num_samples,
samples_read: 0,
reader: reader
};
Ok(wav_reader)
}
// TODO: Should this return by value instead? A reference is more consistent
// with Claxon, but the type is only 80 bits, barely larger than a pointer.
// Is it worth the extra indirection? On the other hand, the indirection
// is probably optimised away.
/// Returns information about the WAVE file.
pub fn spec(&self) -> &WavSpec {
&self.spec
}
/// Returns an iterator over all samples.
///
/// The channel data is is interleaved. The iterator is streaming. That is,
/// if you call this method once, read a few samples, and call this method
/// again, the second iterator will not start again from the beginning of
/// the file, it will continue where the first iterator stopped.
pub fn samples<'wr, S: Sample>(&'wr mut self) -> WavSamples<'wr, R, S> {
WavSamples {
reader: self,
phantom_sample: marker::PhantomData
}
}
/// Returns the duration of the file in samples.
///
/// The duration is independent of the number of channels. It is expressed
/// in units of samples. The duration in seconds can be obtained by
/// dividing this number by the sample rate. The duration is independent of
/// how many samples have been read already.
pub fn duration(&self) -> u32 {
self.num_samples / self.spec.channels as u32
}
/// Returns the number of values that the sample iterator will yield.
///
/// The length of the file is its duration (in samples) times the number of
/// channels. The length is independent of how many samples have been read
/// already.
pub fn len(&self) -> u32 {
self.num_samples
}
}
impl WavReader<io::BufReader<fs::File>> {
/// Attempts to create a reader that reads from the specified file.
///
/// This is a convenience constructor that opens a `File`, wraps it in a
/// `BufReader` and then constructs a `WavReader` from it.
pub fn open<P: AsRef<path::Path>>(filename: P)
-> Result<WavReader<io::BufReader<fs::File>>> {
let file = try!(fs::File::open(filename));
let buf_reader = io::BufReader::new(file);
WavReader::new(buf_reader)
}
}
impl<'wr, R, S> Iterator for WavSamples<'wr, R, S>
    where R: io::Read,
          S: Sample
{
    type Item = Result<S>;

    /// Yields the next decoded sample, or `None` once all were consumed.
    fn next(&mut self) -> Option<Result<S>> {
        let rdr = &mut self.reader;
        if rdr.samples_read == rdr.num_samples {
            None
        } else {
            rdr.samples_read += 1;
            let sample = Sample::read(&mut rdr.reader, rdr.spec.bits_per_sample);
            Some(sample.map_err(Error::from))
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = (self.reader.num_samples - self.reader.samples_read) as usize;
        (remaining, Some(remaining))
    }
}
#[test]
fn duration_and_len_agree() {
    // TODO: add test samples with more channels.
    let files = ["testsamples/waveformat-16bit-44100Hz-mono.wav"];
    for fname in files.iter() {
        let reader = WavReader::open(fname).unwrap();
        // Total value count equals frames times channels.
        let total = reader.spec().channels as u32 * reader.duration();
        assert_eq!(total, reader.len());
    }
}
/// Tests reading the most basic wav file, one with only a WAVEFORMAT struct.
#[test]
fn read_wav_waveformat() {
    use std::fs;
    let file = fs::File::open("testsamples/waveformat-16bit-44100Hz-mono.wav")
                  .ok().expect("failed to open file");
    let mut wav_reader = WavReader::new(io::BufReader::new(file))
                            .ok().expect("failed to read header");
    {
        let spec = wav_reader.spec();
        assert_eq!(spec.channels, 1);
        assert_eq!(spec.sample_rate, 44100);
        assert_eq!(spec.bits_per_sample, 16);
    }
    let samples: Vec<i16> = wav_reader.samples()
                                      .map(|r| r.ok().unwrap())
                                      .collect();
    // The test file has been prepared with these exact four samples.
    assert_eq!(&samples[..], &[2, -3, 5, -7]);
}
#[test]
fn read_wav_waveformat_ex() {
// TODO: add a test sample that uses WAVEFORMATEX and verify that it can be
// read properly.
// Intentionally empty until such a sample file is available.
}
#[test]
fn read_wav_waveformat_extensible() {
// TODO: add a test sample that uses WAVEFORMATEXTENSIBLE (as produced by
// Hound itself actually, so this should not be too hard), and verify that
// it can be read properly.
// Intentionally empty until such a sample file is available.
}
// (removed stray file-concatenation artifact)
// Hound -- A wav encoding and decoding library in Rust
// Copyright (C) 2015 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp;
use std::fs;
use std::io;
use std::marker;
use std::mem;
use std::path;
use super::{Error, Result, Sample, SampleFormat, WavSpec};
/// Extends the functionality of `io::Read` with additional methods.
///
/// The methods may be used on any type that implements `io::Read`.
///
/// All multi-byte reads are little-endian, as used by the WAVE format.
pub trait ReadExt: io::Read {
/// Reads as many bytes as `buf` is long.
///
/// This may issue multiple `read` calls internally. An error is returned
/// if `read` read 0 bytes before the buffer is full.
// TODO: There is an RFC proposing a method like this for the standard library.
fn read_into(&mut self, buf: &mut [u8]) -> io::Result<()>;
/// Reads `n` bytes and returns them in a vector.
fn read_bytes(&mut self, n: usize) -> io::Result<Vec<u8>>;
/// Skip over `n` bytes.
///
/// Implemented by reading and discarding, so `io::Seek` is not required.
fn skip_bytes(&mut self, n: usize) -> io::Result<()>;
/// Reads a single byte and interprets it as an 8-bit signed integer.
fn read_i8(&mut self) -> io::Result<i8>;
/// Reads a single byte and interprets it as an 8-bit unsigned integer.
fn read_u8(&mut self) -> io::Result<u8>;
/// Reads two bytes and interprets them as a little-endian 16-bit signed integer.
fn read_le_i16(&mut self) -> io::Result<i16>;
/// Reads two bytes and interprets them as a little-endian 16-bit unsigned integer.
fn read_le_u16(&mut self) -> io::Result<u16>;
/// Reads three bytes and interprets them as a little-endian 24-bit signed integer.
///
/// The sign bit will be extended into the most significant byte.
fn read_le_i24(&mut self) -> io::Result<i32>;
/// Reads three bytes and interprets them as a little-endian 24-bit unsigned integer.
///
/// The most significant byte will be 0.
fn read_le_u24(&mut self) -> io::Result<u32>;
/// Reads four bytes and interprets them as a little-endian 32-bit signed integer.
fn read_le_i32(&mut self) -> io::Result<i32>;
/// Reads four bytes and interprets them as a little-endian 32-bit unsigned integer.
fn read_le_u32(&mut self) -> io::Result<u32>;
/// Reads four bytes and interprets them as a little-endian 32-bit IEEE float.
fn read_le_f32(&mut self) -> io::Result<f32>;
}
impl<R> ReadExt for R
    where R: io::Read
{
    #[inline(always)]
    fn read_into(&mut self, buf: &mut [u8]) -> io::Result<()> {
        // A single `read` call may fill only part of the buffer, so keep
        // reading until it is full or the stream ends.
        let mut n = 0;
        while n < buf.len() {
            let progress = try!(self.read(&mut buf[n..]));
            if progress > 0 {
                n += progress;
            } else {
                return Err(io::Error::new(io::ErrorKind::Other, "Failed to read enough bytes."));
            }
        }
        Ok(())
    }
    #[inline(always)]
    fn skip_bytes(&mut self, n: usize) -> io::Result<()> {
        // Read from the input in chunks of 1024 bytes at a time, and discard
        // the result. 1024 is a tradeoff between doing a lot of calls, and
        // using too much stack space. This method is not in a hot path, so it
        // can afford to do this.
        let mut n_read = 0;
        let mut buf = [0u8; 1024];
        while n_read < n {
            let end = cmp::min(n - n_read, 1024);
            let progress = try!(self.read(&mut buf[0..end]));
            if progress > 0 {
                n_read += progress;
            } else {
                return Err(io::Error::new(io::ErrorKind::Other, "Failed to read enough bytes."));
            }
        }
        Ok(())
    }
    #[inline(always)]
    fn read_bytes(&mut self, n: usize) -> io::Result<Vec<u8>> {
        // Zero-initialise the buffer instead of exposing uninitialised
        // memory via `set_len`: passing uninitialised bytes to an arbitrary
        // `Read` implementation is undefined behaviour, and zeroing `n`
        // bytes is negligible compared to the I/O that follows.
        let mut buf = vec![0u8; n];
        try!(self.read_into(&mut buf[..]));
        Ok(buf)
    }
    #[inline(always)]
    fn read_i8(&mut self) -> io::Result<i8> {
        self.read_u8().map(|x| x as i8)
    }
    #[inline(always)]
    fn read_u8(&mut self) -> io::Result<u8> {
        let mut buf = [0u8; 1];
        try!(self.read_into(&mut buf));
        Ok(buf[0])
    }
    #[inline(always)]
    fn read_le_i16(&mut self) -> io::Result<i16> {
        self.read_le_u16().map(|x| x as i16)
    }
    #[inline(always)]
    fn read_le_u16(&mut self) -> io::Result<u16> {
        let mut buf = [0u8; 2];
        try!(self.read_into(&mut buf));
        Ok((buf[1] as u16) << 8 | (buf[0] as u16))
    }
    #[inline(always)]
    fn read_le_i24(&mut self) -> io::Result<i32> {
        self.read_le_u24().map(|x|
            // Test the sign bit, if it is set, extend the sign bit into the
            // most significant byte.
            if x & (1 << 23) == 0 {
                x as i32
            } else {
                (x | 0xff_00_00_00) as i32
            }
        )
    }
    #[inline(always)]
    fn read_le_u24(&mut self) -> io::Result<u32> {
        let mut buf = [0u8; 3];
        try!(self.read_into(&mut buf));
        Ok((buf[2] as u32) << 16 | (buf[1] as u32) << 8 | (buf[0] as u32))
    }
    #[inline(always)]
    fn read_le_i32(&mut self) -> io::Result<i32> {
        self.read_le_u32().map(|x| x as i32)
    }
    #[inline(always)]
    fn read_le_u32(&mut self) -> io::Result<u32> {
        let mut buf = [0u8; 4];
        try!(self.read_into(&mut buf));
        Ok((buf[3] as u32) << 24 | (buf[2] as u32) << 16 |
           (buf[1] as u32) << 8 | (buf[0] as u32) << 0)
    }
    #[inline(always)]
    fn read_le_f32(&mut self) -> io::Result<f32> {
        // SAFETY: reinterpreting a u32 bit pattern as f32 is sound; every
        // 32-bit pattern is a valid (possibly NaN) IEEE 754 single.
        self.read_le_u32().map(|u| unsafe { mem::transmute(u) })
    }
}
/// A reader for safe Unknown chunks access.
///
/// This reader borrows the underlying low-level reader from
/// the ChunkReader, and enforces chunk boundaries.
pub struct EmbeddedReader<'r, R: 'r + io::Read> {
/// low-level reader
reader: &'r mut R,
/// how long the chunk is, in bytes
pub len: i64,
/// how many bytes remain to be read
pub remaining: i64,
}
/// On drop, the EmbeddedReader will skip the remaining chunk bytes
/// to reposition the underlying reader to the next chunk.
impl<'r, R: 'r + io::Read> Drop for EmbeddedReader<'r, R> {
    fn drop(&mut self) {
        // An odd-length chunk is followed by one pad byte (`len % 2`);
        // skip it too. Errors cannot be reported from drop, so ignore them.
        let pad = self.len % 2;
        let _ = self.reader.skip_bytes((self.remaining + pad) as usize);
    }
}
impl<'r, R: io::Read> io::Read for EmbeddedReader<'r, R> {
fn read(&mut self, buffer: &mut[u8]) -> io::Result<usize> {
let max = buffer.len().min(self.remaining as usize);
let read = try!(self.reader.read(&mut buffer[0..max]));
self.remaining -= read as i64;
Ok(read)
}
}
impl<'r, R: io::Read + io::Seek> io::Seek for EmbeddedReader<'r, R> {
fn seek(&mut self, seek: io::SeekFrom) -> io::Result<u64> {
if let io::SeekFrom::Current(offset) = seek {
let current_in_chunk = self.len as i64 - self.remaining as i64;
let wanted_in_chunk = current_in_chunk as i64 + offset;
if wanted_in_chunk < 0 {
Err(io::Error::new(io::ErrorKind::Other, "Seeking befoer begin of chunk"))
} else {
self.reader.seek(io::SeekFrom::Current(offset))
}
} else {
Err(io::Error::new(io::ErrorKind::Other, "Only relative seek is supported."))
}
}
}
/// A chunk in a Riff Wave file.
pub enum Chunk<'r, R: 'r + io::Read> {
/// format chunk, fully parsed into a WavSpecEx
Fmt(WavSpecEx),
/// fact chunk, used by non-pcm encoding but redundant
Fact,
/// data chunk, where the samples are actually stored
Data,
/// any other riff chunk: its four-byte identifier plus a bounded reader
/// over the chunk contents
Unknown([u8; 4], EmbeddedReader<'r, R>),
}
/// A Riff chunk Wave reader, giving access to all chunks in the file.
///
/// For simple wave file decoding, prefer the `WavReader` facade.
/// ChunksReader should only be used when one needs to access chunks
/// not specified by the Wave format.
pub struct ChunksReader<R: io::Read> {
/// the underlying reader
reader: R,
/// the Wave format specification, if it has been read already
pub spec_ex: Option<WavSpecEx>,
/// when inside the main data state, keeps track of decoding and chunk
/// boundaries
pub data_state: Option<DataReadingState>,
}
/// This struct helps represent the inner state of the ChunksReader
/// during data chunk parsing.
#[derive(Copy, Clone)]
pub struct DataReadingState {
/// the format specification for the file
pub spec_ex: WavSpecEx,
/// total length of the data chunk, in bytes
pub len: i64,
/// number of remaining bytes to be read in the data chunk
pub remaining: i64,
}
impl<R: io::Read> ChunksReader<R> {
/// Builds a ChunksReader from a std Reader.
///
/// This function will only read the Riff header from the file
/// in order to position the stream to the first chunk.
pub fn new(mut reader: R) -> Result<ChunksReader<R>> {
    // Validate the RIFF/WAVE header up front; afterwards the stream is
    // positioned at the first chunk header.
    try!(read_wave_header(&mut reader));
    let chunks_reader = ChunksReader {
        reader: reader,
        spec_ex: None,
        data_state: None,
    };
    Ok(chunks_reader)
}
/// Returns an iterator over all samples.
///
/// The channel data is interleaved. The iterator is streaming. That is,
/// if you call this method once, read a few samples, and call this method
/// again, the second iterator will not start again from the beginning of
/// the file, it will continue where the first iterator stopped.
///
/// The type `S` must have at least `spec().bits_per_sample` bits,
/// otherwise every iteration will return an error. All bit depths up to
/// 32 bits per sample can be decoded into an `i32`, but if you know
/// beforehand that you will be reading a file with 16 bits per sample, you
/// can save memory by decoding into an `i16`.
///
/// The type of `S` (int or float) must match `spec().sample_format`,
/// otherwise every iteration will return an error.
///
/// This function will panic if it is called while the reader is not in
/// the data chunk, or if the format has not been parsed.
pub fn samples<'wr, S: Sample>(&'wr mut self) -> WavSamples<'wr, R, S> {
// Checking the precondition eagerly makes the panic happen here rather
// than on the first iteration.
let _data_state = self.data_state.expect("Not in the data chunk.");
WavSamples {
reader: self,
phantom_sample: marker::PhantomData,
}
}
/// Same as `samples`, but takes ownership of the reader.
///
/// See `samples()` for more info.
pub fn into_samples<S: Sample>(self) -> WavIntoSamples<R, S> {
// Same eager precondition check as in `samples`.
let _data_state = self.data_state.expect("Not in the data chunk.");
WavIntoSamples {
reader: self,
phantom_sample: marker::PhantomData,
}
}
/// Returns the duration of the file in samples.
///
/// The duration is independent of the number of channels. It is expressed
/// in units of samples. The duration in seconds can be obtained by
/// dividing this number by the sample rate. The duration is independent of
/// how many samples have been read already.
///
/// This function will panic if it is called while the reader is not in
/// the data chunk, or if the format has not been parsed.
pub fn duration(&self) -> u32 {
    let state = self.data_state.expect("Not in the data chunk.");
    let channels = state.spec_ex.spec.channels as u32;
    self.len() / channels
}
/// Returns the number of values that the sample iterator will yield.
///
/// The length of the file is its duration (in samples) times the number of
/// channels. The length is independent of how many samples have been read
/// already. To get the number of samples left, use `len()` on the
/// `samples()` iterator.
///
/// This function will panic if it is called while the reader is not in
/// the data chunk, or if the format has not been parsed.
pub fn len(&self) -> u32 {
    let state = self.data_state.expect("Not in the data chunk.");
    let total_bytes = state.len as u32;
    total_bytes / state.spec_ex.bytes_per_sample as u32
}
/// Parse the next chunk from the reader.
///
/// Returns None at end of file, or a `Chunk` instance depending
/// on the chunk kind.
///
/// For fmt and fact kinds, the function will actually parse the
/// chunk, returns it, and update `spec_ex`.
///
/// For Data, the underlying reader will be left at the beginning
/// of the first sample, and `data_state` will be created to allow
/// keep track of the audio samples parsing.
pub fn next(&mut self) -> Result<Option<Chunk<R>>> {
// If we were still positioned inside the data chunk, skip whatever is
// left of it before looking for the next chunk header.
if let Some(data) = self.data_state {
try!(self.reader.skip_bytes(data.remaining as usize));
self.data_state = None
}
let mut kind_str = [0; 4];
if let Err(_) = self.reader.read_into(&mut kind_str) {
// assumes EOF
return Ok(None);
}
let len = try!(self.reader.read_le_u32());
match &kind_str {
b"fmt " => {
let spec_ex = try!(self.read_fmt_chunk(len));
self.spec_ex = Some(spec_ex);
return Ok(Some(Chunk::Fmt(spec_ex)))
}
b"fact" => {
// All (compressed) non-PCM formats must have a fact chunk
// (Rev. 3 documentation). The chunk contains at least one
// value, the number of samples in the file.
//
// The number of samples field is redundant for sampled
// data, since the Data chunk indicates the length of the
// data. The number of samples can be determined from the
// length of the data and the container size as determined
// from the Format chunk.
// http://www-mmsp.ece.mcgill.ca/documents/audioformats/wave/wave.html
// NOTE(review): this read's Result is discarded, so a truncated
// fact chunk goes unreported; also, any fact chunk longer than
// 4 bytes leaves its extra bytes unskipped, which would
// desynchronise the following chunk parse — verify.
let _samples_per_channel = self.reader.read_le_u32();
return Ok(Some(Chunk::Fact))
}
b"data" => {
if let Some(spec_ex) = self.spec_ex {
self.data_state = Some(DataReadingState {
spec_ex: spec_ex,
len: len as i64,
remaining: len as i64,
});
return Ok(Some(Chunk::Data));
} else {
return Err(Error::FormatError("missing fmt chunk"))
}
}
_ => {
// Hand out a bounded reader; its Drop skips any unread bytes
// so the stream lands on the next chunk header.
let reader = EmbeddedReader {
reader: &mut self.reader,
len: len as i64,
remaining: len as i64,
};
return Ok(Some(Chunk::Unknown(kind_str, reader)));
}
}
// If no data chunk is ever encountered, the function will return
// via one of the try! macros that return an Err on end of file.
}
/// Reads chunks until a data chunk is encountered.
///
/// Returns true if a data chunk has been found. Afterwards, the reader
/// will be positioned at the first content byte of the data chunk.
pub fn read_until_data(&mut self) -> Result<bool> {
    loop {
        match try!(self.next()) {
            Some(Chunk::Data) => return Ok(true),
            Some(_) => {} // keep scanning past non-data chunks
            None => return Ok(false),
        }
    }
}
/// Reads the fmt chunk of the file, returns the information it provides.
///
/// `chunk_len` is the declared length of the fmt chunk; it decides which
/// of the WAVEFORMAT variants is expected.
fn read_fmt_chunk(&mut self, chunk_len: u32) -> Result<WavSpecEx> {
// A minimum chunk length of at least 16 is assumed. Note: actually,
// the first 14 bytes contain enough information to fully specify the
// file. I have not encountered a file with a 14-byte fmt section
// though. If you ever encounter such file, please contact me.
if chunk_len < 16 {
return Err(Error::FormatError("invalid fmt chunk size"));
}
// Read the WAVEFORMAT struct, as defined at
// https://msdn.microsoft.com/en-us/library/ms713498.aspx.
// ```
// typedef struct {
// WORD wFormatTag;
// WORD nChannels;
// DWORD nSamplesPerSec;
// DWORD nAvgBytesPerSec;
// WORD nBlockAlign;
// } WAVEFORMAT;
// ```
// The WAVEFORMATEX struct has two more members, as defined at
// https://msdn.microsoft.com/en-us/library/ms713497.aspx
// ```
// typedef struct {
// WORD wFormatTag;
// WORD nChannels;
// DWORD nSamplesPerSec;
// DWORD nAvgBytesPerSec;
// WORD nBlockAlign;
// WORD wBitsPerSample;
// WORD cbSize;
// } WAVEFORMATEX;
// ```
// There is also PCMWAVEFORMAT as defined at
// https://msdn.microsoft.com/en-us/library/dd743663.aspx.
// ```
// typedef struct {
// WAVEFORMAT wf;
// WORD wBitsPerSample;
// } PCMWAVEFORMAT;
// ```
// In either case, the minimal length of the fmt section is 16 bytes,
// meaning that it does include the `wBitsPerSample` field. (The name
// is misleading though, because it is the number of bits used to store
// a sample, not all of the bits need to be valid for all versions of
// the WAVE format.)
let format_tag = try!(self.reader.read_le_u16());
let n_channels = try!(self.reader.read_le_u16());
let n_samples_per_sec = try!(self.reader.read_le_u32());
let n_bytes_per_sec = try!(self.reader.read_le_u32());
let block_align = try!(self.reader.read_le_u16());
let bits_per_sample = try!(self.reader.read_le_u16());
// Zero channels would make the consistency check below divide by zero.
if n_channels == 0 {
return Err(Error::FormatError("file contains zero channels"));
}
// Two of the stored fields are redundant, and may be ignored. We do
// validate them to fail early for ill-formed files.
if (Some(bits_per_sample) != (block_align / n_channels).checked_mul(8)) ||
(Some(n_bytes_per_sec) != (block_align as u32).checked_mul(n_samples_per_sec)) {
return Err(Error::FormatError("inconsistent fmt chunk"));
}
// The bits per sample for a WAVEFORMAT struct is the number of bits
// used to store a sample. Therefore, it must be a multiple of 8.
if bits_per_sample % 8 != 0 {
return Err(Error::FormatError("bits per sample is not a multiple of 8"));
}
if bits_per_sample == 0 {
return Err(Error::FormatError("bits per sample is 0"));
}
let spec = WavSpec {
channels: n_channels,
sample_rate: n_samples_per_sec,
bits_per_sample: bits_per_sample,
sample_format: SampleFormat::Int,
};
// The different format tag definitions can be found in mmreg.h that is
// part of the Windows SDK. The vast majority are esoteric vendor-
// specific formats. We handle only a few. The following values could
// be of interest:
const PCM: u16 = 0x0001;
const ADPCM: u16 = 0x0002;
const IEEE_FLOAT: u16 = 0x0003;
const EXTENSIBLE: u16 = 0xfffe;
match format_tag {
PCM => self.read_wave_format_pcm(chunk_len, spec),
ADPCM => Err(Error::Unsupported),
IEEE_FLOAT => self.read_wave_format_ieee_float(chunk_len, spec),
EXTENSIBLE => self.read_wave_format_extensible(chunk_len, spec),
_ => Err(Error::Unsupported),
}
}
fn read_wave_format_pcm(&mut self, chunk_len: u32, spec: WavSpec) -> Result<WavSpecEx> {
// When there is a PCMWAVEFORMAT struct, the chunk is 16 bytes long.
// The WAVEFORMATEX structs includes two extra bytes, `cbSize`.
let is_wave_format_ex = match chunk_len {
16 => false,
18 => true,
// Other sizes are unexpected, but such files do occur in the wild,
// and reading these files is still possible, so we allow this.
40 => true,
_ => return Err(Error::FormatError("unexpected fmt chunk size")),
};
if is_wave_format_ex {
// `cbSize` can be used for non-PCM formats to specify the size of
// additional data. However, for WAVE_FORMAT_PCM, the member should
// be ignored, see https://msdn.microsoft.com/en-us/library/ms713497.aspx.
// Nonzero values do in fact occur in practice.
let _cb_size = try!(self.reader.read_le_u16());
// For WAVE_FORMAT_PCM in WAVEFORMATEX, only 8 or 16 bits per
// sample are valid according to
// https://msdn.microsoft.com/en-us/library/ms713497.aspx.
// 24 bits per sample is explicitly not valid inside a WAVEFORMATEX
// structure, but such files do occur in the wild nonetheless, and
// there is no good reason why we couldn't read them.
match spec.bits_per_sample {
8 => {}
16 => {}
24 => {}
_ => return Err(Error::FormatError("bits per sample is not 8 or 16")),
}
}
// If the chunk len was longer than expected, ignore the additional bytes.
if chunk_len == 40 {
try!(self.reader.skip_bytes(22));
}
let spec_ex = WavSpecEx {
spec: spec,
bytes_per_sample: spec.bits_per_sample / 8,
};
Ok(spec_ex)
}
fn read_wave_format_ieee_float(&mut self, chunk_len: u32, spec: WavSpec)
-> Result<WavSpecEx> {
// When there is a PCMWAVEFORMAT struct, the chunk is 16 bytes long.
// The WAVEFORMATEX structs includes two extra bytes, `cbSize`.
let is_wave_format_ex = chunk_len == 18;
if !is_wave_format_ex && chunk_len != 16 {
return Err(Error::FormatError("unexpected fmt chunk size"));
}
if is_wave_format_ex {
// For WAVE_FORMAT_IEEE_FLOAT which we are reading, there should
// be no extra data, so `cbSize` should be 0.
let cb_size = try!(self.reader.read_le_u16());
if cb_size != 0 {
return Err(Error::FormatError("unexpected WAVEFORMATEX size"));
}
}
// For WAVE_FORMAT_IEEE_FLOAT, the bits_per_sample field should be
// set to `32` according to
// https://msdn.microsoft.com/en-us/library/windows/hardware/ff538799(v=vs.85).aspx.
//
// Note that some applications support 64 bits per sample. This is
// not yet supported by hound.
if spec.bits_per_sample != 32 {
return Err(Error::FormatError("bits per sample is not 32"));
}
let spec_ex = WavSpecEx {
spec: WavSpec {
sample_format: SampleFormat::Float,
..spec
},
bytes_per_sample: spec.bits_per_sample / 8,
};
Ok(spec_ex)
}
fn read_wave_format_extensible(&mut self, chunk_len: u32, spec: WavSpec)
                               -> Result<WavSpecEx> {
    // Parses the WAVEFORMATEXTENSIBLE variant of the fmt chunk; the shared
    // 16-byte WAVEFORMAT prefix was already consumed by the caller.
    // 16 bytes were read already, there must be two more for the `cbSize`
    // field, and `cbSize` itself must be at least 22, so the chunk length
    // must be at least 40.
    if chunk_len < 40 {
        return Err(Error::FormatError("unexpected fmt chunk size"));
    }
    // `cbSize` is the last field of the WAVEFORMATEX struct.
    let cb_size = try!(self.reader.read_le_u16());
    // `cbSize` must be at least 22, but in this case we assume that it is
    // 22, because we would not know how to handle extra data anyway.
    if cb_size != 22 {
        return Err(Error::FormatError("unexpected WAVEFORMATEXTENSIBLE size"));
    }
    // What follows is the rest of the `WAVEFORMATEXTENSIBLE` struct, as
    // defined at https://msdn.microsoft.com/en-us/library/ms713496.aspx.
    // ```
    // typedef struct {
    //   WAVEFORMATEX  Format;
    //   union {
    //     WORD  wValidBitsPerSample;
    //     WORD  wSamplesPerBlock;
    //     WORD  wReserved;
    //   } Samples;
    //   DWORD   dwChannelMask;
    //   GUID    SubFormat;
    // } WAVEFORMATEXTENSIBLE, *PWAVEFORMATEXTENSIBLE;
    // ```
    let valid_bits_per_sample = try!(self.reader.read_le_u16());
    let _channel_mask = try!(self.reader.read_le_u32()); // Not used for now.
    let mut subformat = [0u8; 16];
    try!(self.reader.read_into(&mut subformat));
    // Several GUIDS are defined. At the moment, only the following are supported:
    //
    // * KSDATAFORMAT_SUBTYPE_PCM (PCM audio with integer samples).
    // * KSDATAFORMAT_SUBTYPE_IEEE_FLOAT (PCM audio with floating point samples).
    let sample_format = match subformat {
        super::KSDATAFORMAT_SUBTYPE_PCM => SampleFormat::Int,
        super::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT => SampleFormat::Float,
        _ => return Err(Error::Unsupported),
    };
    let spec_ex = WavSpecEx {
        spec: WavSpec {
            // The spec exposes the number of *valid* (used) bits per sample,
            // taken from `wValidBitsPerSample` ...
            bits_per_sample: valid_bits_per_sample,
            sample_format: sample_format,
            ..spec
        },
        // ... while the container size (`spec.bits_per_sample`, as read from
        // the WAVEFORMATEX prefix) determines how many bytes store a sample.
        bytes_per_sample: spec.bits_per_sample / 8,
    };
    Ok(spec_ex)
}
/// Destroys the `ChunksReader` and returns the underlying reader.
pub fn into_inner(self) -> R {
    self.reader
}
/// Seek to the given time within the file.
///
/// The given time is measured in number of samples (independent of the
/// number of channels) since the beginning of the audio data. To seek to
/// a particular time in seconds, multiply the number of seconds with
/// `WavSpec::sample_rate`. The given time should not exceed the duration of
/// the file (returned by `duration()`). The behavior when seeking beyond
/// `duration()` depends on the reader's `Seek` implementation.
///
/// This method requires that the inner reader `R` implements `Seek`.
pub fn seek(&mut self, time: u32) -> io::Result<()>
    where R: io::Seek,
{
    // Seeking is only meaningful while positioned inside the data chunk.
    let data = self.data_state.expect("Not in the data chunk.");
    // One "time" unit is one sample *per channel*, so the target value
    // index is the time multiplied by the channel count.
    let wanted_sample = time as i64 * data.spec_ex.spec.channels as i64;
    let wanted_byte = wanted_sample * data.spec_ex.bytes_per_sample as i64;
    // The position within the chunk is the number of bytes consumed so far.
    let current_byte = data.len - data.remaining;
    let offset = wanted_byte - current_byte;
    try!(self.reader.seek(io::SeekFrom::Current(offset)));
    // Keep the bookkeeping in sync with the underlying reader's position;
    // `remaining - offset` equals `len - wanted_byte`.
    self.data_state.as_mut().unwrap().remaining = data.remaining - offset;
    Ok(())
}
}
impl<R: io::Read> io::Read for ChunksReader<R> {
    /// Reads from the data chunk, never past the end of the chunk.
    fn read(&mut self, buffer: &mut[u8]) -> io::Result<usize> {
        let state = self.data_state.expect("Not in the data chunk.");
        if state.remaining <= 0 {
            return Ok(0)
        }
        // Clamp the read to the chunk boundary.
        let limit = if (state.remaining as usize) < buffer.len() {
            state.remaining as usize
        } else {
            buffer.len()
        };
        let n_read = try!(self.reader.read(&mut buffer[..limit]));
        self.data_state.as_mut().unwrap().remaining -= n_read as i64;
        Ok(n_read)
    }
}
/// Specifies properties of the audio data, as well as the layout of the stream.
#[derive(Clone, Copy, Debug)]
pub struct WavSpecEx {
    /// The normal information about the audio data.
    ///
    /// Bits per sample here is the number of _used_ bits per sample, not the
    /// number of bits used to _store_ a sample.
    pub spec: WavSpec,
    /// The number of bytes used to store a sample.
    ///
    /// This is the container size, which can exceed `spec.bits_per_sample / 8`
    /// for WAVEFORMATEXTENSIBLE files where not all stored bits are valid.
    pub bytes_per_sample: u16,
}
/// A reader that reads the WAVE format from the underlying reader.
///
/// A `WavReader` is a streaming reader. It reads data from the underlying
/// reader on demand, and it reads no more than strictly necessary. No internal
/// buffering is performed on the underlying reader, but this can easily be
/// added by wrapping the reader in an `io::BufReader`. The `open` constructor
/// takes care of this for you.
///
/// `WavReader` is a wrapper around `ChunksReader`; construct it with
/// `WavReader::new` or the `WavReader::open` convenience constructor.
pub struct WavReader<R: io::Read> {
    /// The chunk reader from which the WAVE file is read.
    reader: ChunksReader<R>,
}
/// An iterator that yields samples of type `S` read from a `WavReader`.
///
/// The type `S` must have at least as many bits as the bits per sample of the
/// file, otherwise every iteration will return an error.
///
/// This variant borrows the reader; see `WavIntoSamples` for an owning one.
pub struct WavSamples<'wr, R, S>
    where R: io::Read + 'wr
{
    reader: &'wr mut ChunksReader<R>,
    // Marks the sample type `S` without storing a value of it.
    phantom_sample: marker::PhantomData<S>,
}
/// An iterator that yields samples of type `S` read from a `WavReader`.
///
/// The type `S` must have at least as many bits as the bits per sample of the
/// file, otherwise every iteration will return an error.
///
/// This variant owns the reader; it is created by `into_samples`.
pub struct WavIntoSamples<R: io::Read, S> {
    reader: ChunksReader<R>,
    // Marks the sample type `S` without storing a value of it.
    phantom_sample: marker::PhantomData<S>,
}
/// Reads the RIFF WAVE header, returns the supposed file size.
///
/// This function can be used to quickly check if the file could be a wav file
/// by reading 12 bytes of the header. If an `Ok` is returned, the file is
/// likely a wav file. If an `Err` is returned, it is definitely not a wav
/// file.
///
/// The returned file size cannot be larger than 2<sup>32</sup> + 7 bytes.
pub fn read_wave_header<R: io::Read>(reader: &mut R) -> Result<u64> {
    // A WAVE file begins with the magic bytes 'RIFF', a 32-bit file length,
    // and the form type 'WAVE'.
    // TODO: the old approach of having a slice on the stack and reading
    // into it is more cumbersome, but also avoids a heap allocation. Is
    // the compiler smart enough to avoid the heap allocation anyway? I
    // would not expect it to be.
    let riff_tag = try!(reader.read_bytes(4));
    if &riff_tag[..] != b"RIFF" {
        return Err(Error::FormatError("no RIFF tag found"));
    }
    let file_len = try!(reader.read_le_u32());
    // Next four bytes indicate the file type, which should be WAVE.
    let wave_tag = try!(reader.read_bytes(4));
    if &wave_tag[..] != b"WAVE" {
        return Err(Error::FormatError("no WAVE tag found"));
    }
    // The stored length excludes the "RIFF" magic and the 4-byte length
    // field itself, so the total file size is 8 bytes more.
    Ok(8 + file_len as u64)
}
impl<R> WavReader<R>
    where R: io::Read
{
    /// Attempts to create a reader that reads the WAVE format.
    ///
    /// The header is read immediately. Reading the data will be done on
    /// demand.
    pub fn new(reader: R) -> Result<WavReader<R>> {
        let mut reader = try!(ChunksReader::new(reader));
        // Advance through the chunks until the data chunk; this parses the
        // fmt chunk (filling `spec_ex`) along the way.
        try!(reader.read_until_data());
        if reader.spec_ex.is_none() {
            return Err(Error::FormatError("Wave file with no fmt header"))
        }
        Ok(WavReader {
            reader: reader,
        })
    }
    /// Returns information about the WAVE file.
    pub fn spec(&self) -> WavSpec {
        self.reader.spec_ex
            .expect("Using a WavReader wrapping a ChunkReader with no spec")
            .spec
    }
    /// Returns an iterator over all samples.
    ///
    /// The channel data is interleaved. The iterator is streaming. That is,
    /// if you call this method once, read a few samples, and call this method
    /// again, the second iterator will not start again from the beginning of
    /// the file, it will continue where the first iterator stopped.
    ///
    /// The type `S` must have at least `spec().bits_per_sample` bits,
    /// otherwise every iteration will return an error. All bit depths up to
    /// 32 bits per sample can be decoded into an `i32`, but if you know
    /// beforehand that you will be reading a file with 16 bits per sample, you
    /// can save memory by decoding into an `i16`.
    ///
    /// The type of `S` (int or float) must match `spec().sample_format`,
    /// otherwise every iteration will return an error.
    pub fn samples<'wr, S: Sample>(&'wr mut self) -> WavSamples<'wr, R, S> {
        self.reader.samples()
    }
    /// Same as `samples`, but takes ownership of the `WavReader`.
    ///
    /// See `samples()` for more info.
    pub fn into_samples<S: Sample>(self) -> WavIntoSamples<R, S> {
        self.reader.into_samples()
    }
    /// Returns the duration of the file in samples.
    ///
    /// The duration is independent of the number of channels. It is expressed
    /// in units of samples. The duration in seconds can be obtained by
    /// dividing this number by the sample rate. The duration is independent of
    /// how many samples have been read already.
    pub fn duration(&self) -> u32 {
        self.reader.duration()
    }
    /// Returns the number of values that the sample iterator will yield.
    ///
    /// The length of the file is its duration (in samples) times the number of
    /// channels. The length is independent of how many samples have been read
    /// already. To get the number of samples left, use `len()` on the
    /// `samples()` iterator.
    pub fn len(&self) -> u32 {
        self.reader.len()
    }
    /// Destroys the `WavReader` and returns the underlying reader.
    pub fn into_inner(self) -> R {
        self.reader.into_inner()
    }
    /// Seek to the given time within the file.
    ///
    /// The given time is measured in number of samples (independent of the
    /// number of channels) since the beginning of the audio data. To seek to
    /// a particular time in seconds, multiply the number of seconds with
    /// `WavSpec::sample_rate`. The given time should not exceed the duration of
    /// the file (returned by `duration()`). The behavior when seeking beyond
    /// `duration()` depends on the reader's `Seek` implementation.
    ///
    /// This method requires that the inner reader `R` implements `Seek`.
    pub fn seek(&mut self, time: u32) -> io::Result<()>
        where R: io::Seek,
    {
        self.reader.seek(time)
    }
}
impl WavReader<io::BufReader<fs::File>> {
    /// Attempts to create a reader that reads from the file at the given path.
    ///
    /// Convenience constructor: the file is opened and wrapped in an
    /// `io::BufReader` before the `WavReader` is constructed from it.
    pub fn open<P: AsRef<path::Path>>(filename: P) -> Result<WavReader<io::BufReader<fs::File>>> {
        let fh = try!(fs::File::open(filename));
        WavReader::new(io::BufReader::new(fh))
    }
}
/// Shared `next()` implementation for both sample iterator flavours: yields
/// the next sample, or `None` once the data chunk has been consumed.
fn iter_next<R, S>(reader: &mut ChunksReader<R>) -> Option<Result<S>>
    where R: io::Read,
          S: Sample
{
    let state = reader.data_state.expect("reader not in data chunk");
    if state.remaining <= 0 {
        return None;
    }
    let sample = Sample::read(reader,
                              state.spec_ex.spec.sample_format,
                              state.spec_ex.bytes_per_sample,
                              state.spec_ex.spec.bits_per_sample);
    Some(sample.map_err(Error::from))
}
/// Shared `size_hint()` implementation for both sample iterator flavours.
fn iter_size_hint<R: io::Read>(reader: &ChunksReader<R>) -> (usize, Option<usize>) {
    let state = reader.data_state.expect("reader not in data chunk");
    // Every sample occupies `bytes_per_sample` bytes, so the exact number of
    // samples left is the remaining byte count divided by the sample size.
    let n_left = (state.remaining / state.spec_ex.bytes_per_sample as i64) as usize;
    (n_left, Some(n_left))
}
impl<'wr, R, S> Iterator for WavSamples<'wr, R, S>
    where R: io::Read,
          S: Sample
{
    type Item = Result<S>;
    fn next(&mut self) -> Option<Result<S>> {
        // Delegates to the shared `iter_next` helper.
        iter_next(&mut self.reader)
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Delegates to the shared `iter_size_hint` helper.
        iter_size_hint(&self.reader)
    }
}
// `size_hint` is exact (computed from the remaining byte count), so
// `ExactSizeIterator` can be implemented without any extra methods.
impl<'wr, R, S> ExactSizeIterator for WavSamples<'wr, R, S>
    where R: io::Read,
          S: Sample
{
}
impl<R, S> Iterator for WavIntoSamples<R, S>
    where R: io::Read,
          S: Sample
{
    type Item = Result<S>;
    fn next(&mut self) -> Option<Result<S>> {
        // Delegates to the shared `iter_next` helper.
        iter_next(&mut self.reader)
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Delegates to the shared `iter_size_hint` helper.
        iter_size_hint(&self.reader)
    }
}
// As for `WavSamples`, the size hint is exact, so no extra methods are needed.
impl<R, S> ExactSizeIterator for WavIntoSamples<R, S>
    where R: io::Read,
          S: Sample
{
}
#[test]
fn duration_and_len_agree() {
    // `len()` must always equal `duration()` times the channel count.
    let files = &["testsamples/pcmwaveformat-16bit-44100Hz-mono.wav",
                  "testsamples/waveformatex-16bit-44100Hz-stereo.wav",
                  "testsamples/waveformatextensible-32bit-48kHz-stereo.wav"];
    for fname in files {
        let reader = WavReader::open(fname).unwrap();
        let n_channels = reader.spec().channels as u32;
        assert_eq!(reader.len(), n_channels * reader.duration());
    }
}
/// Tests reading a wave file with the PCMWAVEFORMAT struct.
#[test]
fn read_wav_pcm_wave_format_pcm() {
    let mut reader = WavReader::open("testsamples/pcmwaveformat-16bit-44100Hz-mono.wav")
        .unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.sample_rate, 44100);
    assert_eq!(spec.bits_per_sample, 16);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    // The test file has been prepared with these exact four samples.
    let samples = reader.samples::<i16>()
                        .map(|r| r.unwrap())
                        .collect::<Vec<_>>();
    assert_eq!(samples, vec![2, -3, 5, -7]);
}
#[test]
fn read_wav_skips_unknown_chunks() {
    // The test samples are the same as without the -extra suffix, but ffmpeg
    // has kindly added some useless chunks in between the fmt and data chunk.
    let files = ["testsamples/pcmwaveformat-16bit-44100Hz-mono-extra.wav",
                 "testsamples/waveformatex-16bit-44100Hz-mono-extra.wav"];
    for file in &files {
        let mut reader = WavReader::open(file).unwrap();
        let spec = reader.spec();
        assert_eq!(spec.channels, 1);
        assert_eq!(spec.sample_rate, 44100);
        assert_eq!(spec.bits_per_sample, 16);
        assert_eq!(spec.sample_format, SampleFormat::Int);
        // Decoding must still locate the data chunk and yield the first sample.
        let first = reader.samples::<i16>().next().unwrap().unwrap();
        assert_eq!(first, 2);
    }
}
#[test]
fn len_and_size_hint_are_correct() {
    let mut reader = WavReader::open("testsamples/pcmwaveformat-16bit-44100Hz-mono.wav")
        .unwrap();
    assert_eq!(reader.len(), 4);
    {
        let mut iter = reader.samples::<i16>();
        assert_eq!(iter.size_hint(), (4, Some(4)));
        iter.next();
        assert_eq!(iter.size_hint(), (3, Some(3)));
    }
    // Reading should not affect the initial length.
    assert_eq!(reader.len(), 4);
    // Creating a new iterator resumes where the previous iterator stopped.
    {
        let mut iter = reader.samples::<i16>();
        assert_eq!(iter.size_hint(), (3, Some(3)));
        iter.next();
        assert_eq!(iter.size_hint(), (2, Some(2)));
    }
}
#[test]
fn size_hint_is_exact() {
    let files = &["testsamples/pcmwaveformat-16bit-44100Hz-mono.wav",
                  "testsamples/waveformatex-16bit-44100Hz-stereo.wav",
                  "testsamples/waveformatextensible-32bit-48kHz-stereo.wav"];
    for fname in files {
        let mut reader = WavReader::open(fname).unwrap();
        let total = reader.len();
        let mut samples = reader.samples::<i32>();
        for consumed in 0..total {
            // Before each read, exactly `total - consumed` values are left.
            let left = (total - consumed) as usize;
            assert_eq!(samples.size_hint(), (left, Some(left)));
            assert!(samples.next().is_some());
        }
        assert!(samples.next().is_none());
    }
}
#[test]
fn samples_equals_into_samples() {
    // Both iterator flavours (borrowing and owning) must decode identically.
    let reader_owned = WavReader::open("testsamples/pcmwaveformat-8bit-44100Hz-mono.wav").unwrap();
    let mut reader_borrowed = WavReader::open("testsamples/pcmwaveformat-8bit-44100Hz-mono.wav").unwrap();
    let from_owned: Vec<i16> = reader_owned.into_samples()
                                           .map(|r| r.unwrap())
                                           .collect();
    let from_borrowed: Vec<i16> = reader_borrowed.samples()
                                                 .map(|r| r.unwrap())
                                                 .collect();
    assert_eq!(from_owned, from_borrowed);
}
/// Tests reading a wave file with the WAVEFORMATEX struct.
#[test]
fn read_wav_wave_format_ex_pcm() {
    let mut reader = WavReader::open("testsamples/waveformatex-16bit-44100Hz-mono.wav")
        .unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.sample_rate, 44100);
    assert_eq!(spec.bits_per_sample, 16);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    // The test file has been prepared with these exact four samples.
    let samples = reader.samples::<i16>()
                        .map(|r| r.unwrap())
                        .collect::<Vec<_>>();
    assert_eq!(samples, vec![2, -3, 5, -7]);
}
#[test]
fn read_wav_wave_format_ex_ieee_float() {
    let mut reader = WavReader::open("testsamples/waveformatex-ieeefloat-44100Hz-mono.wav")
        .unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.sample_rate, 44100);
    assert_eq!(spec.bits_per_sample, 32);
    assert_eq!(spec.sample_format, SampleFormat::Float);
    // The test file has been prepared with these exact four samples.
    let samples = reader.samples::<f32>()
                        .map(|r| r.unwrap())
                        .collect::<Vec<_>>();
    assert_eq!(samples, vec![2.0, 3.0, -16411.0, 1019.0]);
}
#[test]
fn read_wav_stereo() {
    let mut reader = WavReader::open("testsamples/waveformatex-16bit-44100Hz-stereo.wav")
        .unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 2);
    assert_eq!(spec.sample_rate, 44100);
    assert_eq!(spec.bits_per_sample, 16);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    // The test file has been prepared with these exact eight samples.
    let samples = reader.samples::<i16>()
                        .map(|r| r.unwrap())
                        .collect::<Vec<_>>();
    assert_eq!(samples, vec![2, -3, 5, -7, 11, -13, 17, -19]);
}
#[test]
fn read_wav_pcm_wave_format_8bit() {
    let mut reader = WavReader::open("testsamples/pcmwaveformat-8bit-44100Hz-mono.wav")
        .unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.bits_per_sample, 8);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    // The test file has been prepared with these exact four samples.
    let samples = reader.samples::<i16>()
                        .map(|r| r.unwrap())
                        .collect::<Vec<_>>();
    assert_eq!(samples, vec![19, -53, 89, -127]);
}
/// Regression test for a real-world wav file encountered in Quake.
#[test]
fn read_wav_wave_format_ex_8bit() {
    let mut reader = WavReader::open("testsamples/waveformatex-8bit-11025Hz-mono.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.bits_per_sample, 8);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    let samples = reader.samples::<i32>()
                        .map(|r| r.unwrap())
                        .collect::<Vec<_>>();
    // The audio data has been zeroed out, but for 8-bit files, a zero means a
    // sample value of 128.
    assert_eq!(samples, vec![-128, -128, -128, -128]);
}
/// This test sample tests both reading the WAVEFORMATEXTENSIBLE header, and 24-bit samples.
#[test]
fn read_wav_wave_format_extensible_pcm_24bit() {
    let mut reader = WavReader::open("testsamples/waveformatextensible-24bit-192kHz-mono.wav")
        .unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.sample_rate, 192_000);
    assert_eq!(spec.bits_per_sample, 24);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    // The test file has been prepared with these exact four samples.
    let samples = reader.samples::<i32>()
                        .map(|r| r.unwrap())
                        .collect::<Vec<_>>();
    assert_eq!(samples, vec![-17, 4_194_319, -6_291_437, 8_355_817]);
}
#[test]
fn read_wav_32bit() {
    let mut reader = WavReader::open("testsamples/waveformatextensible-32bit-48kHz-stereo.wav")
        .unwrap();
    let spec = reader.spec();
    assert_eq!(spec.bits_per_sample, 32);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    // The test file has been prepared with these exact four samples.
    let samples = reader.samples::<i32>()
                        .map(|r| r.unwrap())
                        .collect::<Vec<_>>();
    assert_eq!(samples, vec![19, -229_373, 33_587_161, -2_147_483_497]);
}
#[test]
fn read_wav_wave_format_extensible_ieee_float() {
    let mut reader =
        WavReader::open("testsamples/waveformatextensible-ieeefloat-44100Hz-mono.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.sample_rate, 44100);
    assert_eq!(spec.bits_per_sample, 32);
    assert_eq!(spec.sample_format, SampleFormat::Float);
    // The test file has been prepared with these exact four samples.
    let samples = reader.samples::<f32>()
                        .map(|r| r.unwrap())
                        .collect::<Vec<_>>();
    assert_eq!(samples, vec![2.0, 3.0, -16411.0, 1019.0]);
}
#[test]
fn read_wav_nonstandard_01() {
    // The test sample here is adapted from a file encountered in the wild (data
    // chunk replaced with two zero samples, some metadata dropped, and the file
    // length in the header fixed). It is not a valid file according to the
    // standard, but many players can deal with it nonetheless. (The file even
    // contains some metadata; open it in a hex editor if you would like to know
    // which program created it.) The file contains a regular PCM format tag,
    // but the size of the fmt chunk is one that would be expected of a
    // WAVEFORMATEXTENSIBLE chunk. The bits per sample is 24, which is invalid
    // for WAVEFORMATEX, but we can read it nonetheless.
    let mut reader = WavReader::open("testsamples/nonstandard-01.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.bits_per_sample, 24);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    let samples = reader.samples::<i32>()
                        .map(|r| r.unwrap())
                        .collect::<Vec<_>>();
    assert_eq!(samples, vec![0, 0]);
}
#[test]
fn wide_read_should_signal_error() {
    let mut wav24 = WavReader::open("testsamples/waveformatextensible-24bit-192kHz-mono.wav")
        .unwrap();
    // Even though we know the first value is 17, and it should fit in an `i8`,
    // a general 24-bit sample will not fit in an `i8`, so this should fail.
    // 16-bit is still not wide enough, but 32-bit should do the trick.
    assert!(wav24.samples::<i8>().next().unwrap().is_err());
    assert!(wav24.samples::<i16>().next().unwrap().is_err());
    assert!(wav24.samples::<i32>().next().unwrap().is_ok());
    let mut wav32 = WavReader::open("testsamples/waveformatextensible-32bit-48kHz-stereo.wav")
        .unwrap();
    // In general, 32-bit samples will not fit in anything but an `i32`.
    assert!(wav32.samples::<i8>().next().unwrap().is_err());
    assert!(wav32.samples::<i16>().next().unwrap().is_err());
    assert!(wav32.samples::<i32>().next().unwrap().is_ok());
}
#[test]
fn sample_format_mismatch_should_signal_error() {
    // A float file must only decode into `f32` ...
    let mut float_reader = WavReader::open("testsamples/waveformatex-ieeefloat-44100Hz-mono.wav")
        .unwrap();
    assert!(float_reader.samples::<i8>().next().unwrap().is_err());
    assert!(float_reader.samples::<i16>().next().unwrap().is_err());
    assert!(float_reader.samples::<i32>().next().unwrap().is_err());
    assert!(float_reader.samples::<f32>().next().unwrap().is_ok());
    // ... and an integer file into any integer type that is wide enough.
    let mut int_reader = WavReader::open("testsamples/pcmwaveformat-8bit-44100Hz-mono.wav").unwrap();
    assert!(int_reader.samples::<i8>().next().unwrap().is_ok());
    assert!(int_reader.samples::<i16>().next().unwrap().is_ok());
    assert!(int_reader.samples::<i32>().next().unwrap().is_ok());
    assert!(int_reader.samples::<f32>().next().unwrap().is_err());
}
#[test]
fn fuzz_crashes_should_be_fixed() {
    use std::fs;
    use std::ffi::OsStr;
    // This is a regression test: all crashes and other issues found through
    // fuzzing should not cause a crash.
    let dir = fs::read_dir("testsamples/fuzz").ok()
        .expect("failed to enumerate fuzz test corpus");
    for entry in dir {
        let path = entry.ok().expect("failed to obtain path info").path();
        let is_file = fs::metadata(&path).unwrap().file_type().is_file();
        if !(is_file && path.extension() == Some(OsStr::new("wav"))) {
            continue;
        }
        println!(" testing {} ...", path.to_str()
            .expect("unsupported filename"));
        let mut reader = match WavReader::open(path) {
            Ok(r) => r,
            Err(..) => continue,
        };
        // Decode until the first error (errors are fine, crashes are not).
        match reader.spec().sample_format {
            SampleFormat::Int => {
                for sample in reader.samples::<i32>() {
                    if sample.is_err() {
                        break;
                    }
                }
            }
            SampleFormat::Float => {
                for sample in reader.samples::<f32>() {
                    if sample.is_err() {
                        break;
                    }
                }
            }
        }
    }
}
#[test]
fn seek_is_consistent() {
    let files = &["testsamples/pcmwaveformat-16bit-44100Hz-mono.wav",
                  "testsamples/waveformatex-16bit-44100Hz-stereo.wav",
                  "testsamples/waveformatextensible-32bit-48kHz-stereo.wav"];
    for fname in files {
        let mut wav = WavReader::open(fname).unwrap();
        // Seeking back to the start should "reset" the reader.
        let total = wav.samples::<i32>().count();
        wav.seek(0).unwrap();
        assert_eq!(wav.samples::<i32>().count(), total);
        // Seek to the last sample time: one value per channel must be left.
        let last_time = wav.duration() - 1;
        let n_channels = wav.spec().channels;
        wav.seek(last_time).unwrap();
        {
            let mut iter = wav.samples::<i32>();
            for _ in 0..n_channels {
                assert!(iter.next().is_some());
            }
            assert!(iter.next().is_none());
        }
        // Seeking beyond the audio data produces no samples.
        let n_values = wav.len();
        wav.seek(n_values).unwrap();
        assert!(wav.samples::<i32>().next().is_none());
        wav.seek(::std::u32::MAX / n_channels as u32).unwrap();
        assert!(wav.samples::<i32>().next().is_none());
    }
}
// Pacify Rust 1.4
// Hound -- A wav encoding and decoding library in Rust
// Copyright (C) 2015 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp;
use std::fs;
use std::io;
use std::marker;
use std::mem;
use std::path;
use super::{Error, Result, Sample, SampleFormat, WavSpec};
/// Extends the functionality of `io::Read` with additional methods.
///
/// The methods may be used on any type that implements `io::Read`.
pub trait ReadExt: io::Read {
    /// Reads as many bytes as `buf` is long.
    ///
    /// This may issue multiple `read` calls internally. An error is returned
    /// if `read` read 0 bytes before the buffer is full.
    // TODO: There is an RFC proposing a method like this for the standard library.
    fn read_into(&mut self, buf: &mut [u8]) -> io::Result<()>;
    /// Reads `n` bytes and returns them in a vector.
    fn read_bytes(&mut self, n: usize) -> io::Result<Vec<u8>>;
    /// Skip over `n` bytes.
    ///
    /// This reads and discards, so it also works on non-seekable readers.
    fn skip_bytes(&mut self, n: usize) -> io::Result<()>;
    /// Reads a single byte and interprets it as an 8-bit signed integer.
    fn read_i8(&mut self) -> io::Result<i8>;
    /// Reads a single byte and interprets it as an 8-bit unsigned integer.
    fn read_u8(&mut self) -> io::Result<u8>;
    /// Reads two bytes and interprets them as a little-endian 16-bit signed integer.
    fn read_le_i16(&mut self) -> io::Result<i16>;
    /// Reads two bytes and interprets them as a little-endian 16-bit unsigned integer.
    fn read_le_u16(&mut self) -> io::Result<u16>;
    /// Reads three bytes and interprets them as a little-endian 24-bit signed integer.
    ///
    /// The sign bit will be extended into the most significant byte.
    fn read_le_i24(&mut self) -> io::Result<i32>;
    /// Reads three bytes and interprets them as a little-endian 24-bit unsigned integer.
    ///
    /// The most significant byte will be 0.
    fn read_le_u24(&mut self) -> io::Result<u32>;
    /// Reads four bytes and interprets them as a little-endian 32-bit signed integer.
    fn read_le_i32(&mut self) -> io::Result<i32>;
    /// Reads four bytes and interprets them as a little-endian 32-bit unsigned integer.
    fn read_le_u32(&mut self) -> io::Result<u32>;
    /// Reads four bytes and interprets them as a little-endian 32-bit IEEE float.
    fn read_le_f32(&mut self) -> io::Result<f32>;
}
impl<R> ReadExt for R
    where R: io::Read
{
    #[inline(always)]
    fn read_into(&mut self, buf: &mut [u8]) -> io::Result<()> {
        // `read` may return fewer bytes than requested, so loop until the
        // buffer is full; a zero-byte read means the stream ended early.
        let mut n = 0;
        while n < buf.len() {
            let progress = try!(self.read(&mut buf[n..]));
            if progress > 0 {
                n += progress;
            } else {
                return Err(io::Error::new(io::ErrorKind::Other, "Failed to read enough bytes."));
            }
        }
        Ok(())
    }
    #[inline(always)]
    fn skip_bytes(&mut self, n: usize) -> io::Result<()> {
        // Read from the input in chunks of 1024 bytes at a time, and discard
        // the result. 1024 is a tradeoff between doing a lot of calls, and
        // using too much stack space. This method is not in a hot path, so it
        // can afford to do this.
        let mut n_read = 0;
        let mut buf = [0u8; 1024];
        while n_read < n {
            let end = cmp::min(n - n_read, 1024);
            let progress = try!(self.read(&mut buf[0..end]));
            if progress > 0 {
                n_read += progress;
            } else {
                return Err(io::Error::new(io::ErrorKind::Other, "Failed to read enough bytes."));
            }
        }
        Ok(())
    }
    #[inline(always)]
    fn read_bytes(&mut self, n: usize) -> io::Result<Vec<u8>> {
        // We allocate a runtime fixed size buffer, and we are going to read
        // into it, so zeroing or filling the buffer is a waste. This method
        // is safe, because the contents of the buffer are only exposed when
        // they have been overwritten completely by the read.
        let mut buf = Vec::with_capacity(n);
        // SAFETY: `read_into` below either fills the buffer completely or
        // returns an error; on the error path the vector is dropped without
        // its contents being read, and `u8` has no drop glue.
        unsafe { buf.set_len(n); }
        try!(self.read_into(&mut buf[..]));
        Ok(buf)
    }
    #[inline(always)]
    fn read_i8(&mut self) -> io::Result<i8> {
        // Reinterpreting the bits of a `u8` as `i8` is lossless.
        self.read_u8().map(|x| x as i8)
    }
    #[inline(always)]
    fn read_u8(&mut self) -> io::Result<u8> {
        let mut buf = [0u8; 1];
        try!(self.read_into(&mut buf));
        Ok(buf[0])
    }
    #[inline(always)]
    fn read_le_i16(&mut self) -> io::Result<i16> {
        self.read_le_u16().map(|x| x as i16)
    }
    #[inline(always)]
    fn read_le_u16(&mut self) -> io::Result<u16> {
        // Little endian: the first byte read is the least significant.
        let mut buf = [0u8; 2];
        try!(self.read_into(&mut buf));
        Ok((buf[1] as u16) << 8 | (buf[0] as u16))
    }
    #[inline(always)]
    fn read_le_i24(&mut self) -> io::Result<i32> {
        self.read_le_u24().map(|x|
            // Test the sign bit, if it is set, extend the sign bit into the
            // most significant byte.
            if x & (1 << 23) == 0 {
                x as i32
            } else {
                (x | 0xff_00_00_00) as i32
            }
        )
    }
    #[inline(always)]
    fn read_le_u24(&mut self) -> io::Result<u32> {
        let mut buf = [0u8; 3];
        try!(self.read_into(&mut buf));
        Ok((buf[2] as u32) << 16 | (buf[1] as u32) << 8 | (buf[0] as u32))
    }
    #[inline(always)]
    fn read_le_i32(&mut self) -> io::Result<i32> {
        self.read_le_u32().map(|x| x as i32)
    }
    #[inline(always)]
    fn read_le_u32(&mut self) -> io::Result<u32> {
        let mut buf = [0u8; 4];
        try!(self.read_into(&mut buf));
        Ok((buf[3] as u32) << 24 | (buf[2] as u32) << 16 |
           (buf[1] as u32) << 8 | (buf[0] as u32) << 0)
    }
    #[inline(always)]
    fn read_le_f32(&mut self) -> io::Result<f32> {
        // Reinterprets the little-endian bit pattern as an IEEE 754 single
        // (equivalent to `f32::from_bits` on newer Rust versions).
        self.read_le_u32().map(|u| unsafe { mem::transmute(u) })
    }
}
/// A reader for safe access to the contents of an unknown chunk.
///
/// This reader borrows the underlying low-level reader from
/// the `ChunksReader`, and enforces chunk boundaries.
pub struct EmbeddedReader<'r, R: 'r + io::Read> {
    /// The borrowed low-level reader.
    reader: &'r mut R,
    /// How long the chunk is, in bytes.
    pub len: i64,
    /// How many bytes remain to be read.
    pub remaining: i64,
}
/// On drop, the EmbeddedReader will skip the remaining chunk bytes
/// to reposition the underlying reader to the next chunk.
impl<'r, R: 'r + io::Read> Drop for EmbeddedReader<'r, R> {
    fn drop(&mut self) {
        // The extra `len % 2` byte accounts for the padding byte that follows
        // an odd-length RIFF chunk. Errors are deliberately ignored here:
        // `drop` has no way to report them.
        let _ = self.reader.skip_bytes((self.remaining + self.len % 2) as usize);
    }
}
impl<'r, R: io::Read> io::Read for EmbeddedReader<'r, R> {
    /// Reads from the underlying reader, but never past the chunk boundary.
    fn read(&mut self, buffer: &mut [u8]) -> io::Result<usize> {
        // Cap the request at whatever is left in the chunk.
        let limit = cmp::min(self.remaining as usize, buffer.len());
        let n_read = try!(self.reader.read(&mut buffer[..limit]));
        self.remaining -= n_read as i64;
        Ok(n_read)
    }
}
impl<'r, R: io::Read + io::Seek> io::Seek for EmbeddedReader<'r, R> {
fn seek(&mut self, seek: io::SeekFrom) -> io::Result<u64> {
if let io::SeekFrom::Current(offset) = seek {
let current_in_chunk = self.len as i64 - self.remaining as i64;
let wanted_in_chunk = current_in_chunk as i64 + offset;
if wanted_in_chunk < 0 {
Err(io::Error::new(io::ErrorKind::Other, "Seeking befoer begin of chunk"))
} else {
self.reader.seek(io::SeekFrom::Current(offset))
}
} else {
Err(io::Error::new(io::ErrorKind::Other, "Only relative seek is supported."))
}
}
}
/// A chunk in a Riff Wave file.
pub enum Chunk<'r, R: 'r + io::Read> {
    /// Format chunk, fully parsed into a WavSpecEx.
    Fmt(WavSpecEx),
    /// Fact chunk, used by non-PCM encodings but redundant.
    Fact,
    /// Data chunk, where the samples are actually stored.
    Data,
    /// Any other riff chunk: the four-byte chunk id plus a bounded reader
    /// over the chunk contents.
    Unknown([u8; 4], EmbeddedReader<'r, R>),
}
/// A Riff chunk Wave reader, giving access to all chunks in the file.
///
/// For simple wave file decoding, prefer the `WavReader` facade.
/// ChunksReader should only be used when one needs to access chunks
/// not specified by the Wave format.
pub struct ChunksReader<R: io::Read> {
    /// The underlying reader.
    reader: R,
    /// The Wave format specification, if it has been read already.
    pub spec_ex: Option<WavSpecEx>,
    /// When inside the data chunk, keeps track of decoding and chunk
    /// boundaries. `None` outside the data chunk.
    pub data_state: Option<DataReadingState>,
}
/// This struct helps represent the inner state of the ChunksReader
/// during data chunk parsing.
#[derive(Copy, Clone)]
pub struct DataReadingState {
    /// The format specification for the file.
    pub spec_ex: WavSpecEx,
    /// Total length of the data chunk, in bytes.
    pub len: i64,
    /// Number of remaining bytes to be read in the data chunk.
    pub remaining: i64,
}
impl<R: io::Read> ChunksReader<R> {
    /// Builds a ChunksReader from a std Reader.
    ///
    /// This function will only read the Riff header from the file
    /// in order to position the stream to the first chunk.
    pub fn new(mut reader: R) -> Result<ChunksReader<R>> {
        try!(read_wave_header(&mut reader));
        Ok(ChunksReader {
            reader: reader,
            spec_ex: None,
            data_state: None,
        })
    }
    /// Returns an iterator over all samples.
    ///
    /// The channel data is interleaved. The iterator is streaming. That is,
    /// if you call this method once, read a few samples, and call this method
    /// again, the second iterator will not start again from the beginning of
    /// the file, it will continue where the first iterator stopped.
    ///
    /// The type `S` must have at least `spec().bits_per_sample` bits,
    /// otherwise every iteration will return an error. All bit depths up to
    /// 32 bits per sample can be decoded into an `i32`, but if you know
    /// beforehand that you will be reading a file with 16 bits per sample, you
    /// can save memory by decoding into an `i16`.
    ///
    /// The type of `S` (int or float) must match `spec().sample_format`,
    /// otherwise every iteration will return an error.
    ///
    /// This function will panic if it is called while the reader is not in
    /// the data chunk, or if the format has not been parsed.
    pub fn samples<'wr, S: Sample>(&'wr mut self) -> WavSamples<'wr, R, S> {
        // Panic eagerly here rather than on the first iteration.
        let _data_state = self.data_state.expect("Not in the data chunk.");
        WavSamples {
            reader: self,
            phantom_sample: marker::PhantomData,
        }
    }
    /// Same as `samples`, but takes ownership of the `WavReader`.
    ///
    /// See `samples()` for more info.
    pub fn into_samples<S: Sample>(self) -> WavIntoSamples<R, S> {
        let _data_state = self.data_state.expect("Not in the data chunk.");
        WavIntoSamples {
            reader: self,
            phantom_sample: marker::PhantomData,
        }
    }
    /// Returns the duration of the file in samples.
    ///
    /// The duration is independent of the number of channels. It is expressed
    /// in units of samples. The duration in seconds can be obtained by
    /// dividing this number by the sample rate. The duration is independent of
    /// how many samples have been read already.
    ///
    /// This function will panic if it is called while the reader is not in
    /// the data chunk, or if the format has not been parsed.
    pub fn duration(&self) -> u32 {
        let data = self.data_state.expect("Not in the data chunk.");
        // Integer division: the sample count per channel.
        self.len() / data.spec_ex.spec.channels as u32
    }
    /// Returns the number of values that the sample iterator will yield.
    ///
    /// The length of the file is its duration (in samples) times the number of
    /// channels. The length is independent of how many samples have been read
    /// already. To get the number of samples left, use `len()` on the
    /// `samples()` iterator.
    ///
    /// This function will panic if it is called while the reader is not in
    /// the data chunk, or if the format has not been parsed.
    pub fn len(&self) -> u32 {
        let data = self.data_state.expect("Not in the data chunk.");
        data.len as u32 / data.spec_ex.bytes_per_sample as u32
    }
    /// Parse the next chunk from the reader.
    ///
    /// Returns None at end of file, or a `Chunk` instance depending
    /// on the chunk kind.
    ///
    /// For fmt and fact kinds, the function will actually parse the
    /// chunk, returns it, and update `spec_ex`.
    ///
    /// For Data, the underlying reader will be left at the beginning
    /// of the first sample, and `data_state` will be created to allow
    /// keeping track of the audio samples parsing.
    pub fn next(&mut self) -> Result<Option<Chunk<R>>> {
        if let Some(data) = self.data_state {
            // We were inside the data chunk: skip whatever was not read.
            // NOTE(review): if the data chunk length is odd, the RIFF pad
            // byte is not skipped here (the Unknown-chunk path handles it
            // via EmbeddedReader::drop) — verify with odd-length data
            // chunks.
            try!(self.reader.skip_bytes(data.remaining as usize));
            self.data_state = None
        }
        let mut kind_str = [0; 4];
        if let Err(_) = self.reader.read_into(&mut kind_str) {
            // assumes EOF
            return Ok(None);
        }
        let len = try!(self.reader.read_le_u32());
        match &kind_str {
            b"fmt " => {
                let spec_ex = try!(self.read_fmt_chunk(len));
                self.spec_ex = Some(spec_ex);
                return Ok(Some(Chunk::Fmt(spec_ex)))
            }
            b"fact" => {
                // All (compressed) non-PCM formats must have a fact chunk
                // (Rev. 3 documentation). The chunk contains at least one
                // value, the number of samples in the file.
                //
                // The number of samples field is redundant for sampled
                // data, since the Data chunk indicates the length of the
                // data. The number of samples can be determined from the
                // length of the data and the container size as determined
                // from the Format chunk.
                // http://www-mmsp.ece.mcgill.ca/documents/audioformats/wave/wave.html
                // NOTE(review): the Result of this read is ignored, and any
                // fact-chunk bytes beyond the first four are not skipped —
                // confirm fact chunks longer than 4 bytes parse correctly.
                let _samples_per_channel = self.reader.read_le_u32();
                return Ok(Some(Chunk::Fact))
            }
            b"data" => {
                if let Some(spec_ex) = self.spec_ex {
                    // Enter the data state; the reader now sits on the
                    // first sample byte.
                    self.data_state = Some(DataReadingState {
                        spec_ex: spec_ex,
                        len: len as i64,
                        remaining: len as i64,
                    });
                    return Ok(Some(Chunk::Data));
                } else {
                    // A data chunk before the fmt chunk is unreadable:
                    // we would not know how to decode the samples.
                    return Err(Error::FormatError("missing fmt chunk"))
                }
            }
            _ => {
                // Hand the caller a bounded reader; its Drop repositions
                // the underlying reader at the next chunk.
                let reader = EmbeddedReader {
                    reader: &mut self.reader,
                    len: len as i64,
                    remaining: len as i64,
                };
                return Ok(Some(Chunk::Unknown(kind_str, reader)));
            }
        }
        // If no data chunk is ever encountered, the function will return
        // via one of the try! macros that return an Err on end of file.
    }
    /// Reads chunks until a data chunk is encountered.
    ///
    /// Returns true if a data chunk has been found. Afterwards, the reader
    /// will be positioned at the first content byte of the data chunk.
    pub fn read_until_data(&mut self) -> Result<bool> {
        while let Some(chunk) = try!(self.next()) {
            if let Chunk::Data = chunk {
                return Ok(true)
            }
        }
        Ok(false)
    }
    /// Reads the fmt chunk of the file, returns the information it provides.
    fn read_fmt_chunk(&mut self, chunk_len: u32) -> Result<WavSpecEx> {
        // A minimum chunk length of at least 16 is assumed. Note: actually,
        // the first 14 bytes contain enough information to fully specify the
        // file. I have not encountered a file with a 14-byte fmt section
        // though. If you ever encounter such file, please contact me.
        if chunk_len < 16 {
            return Err(Error::FormatError("invalid fmt chunk size"));
        }
        // Read the WAVEFORMAT struct, as defined at
        // https://msdn.microsoft.com/en-us/library/ms713498.aspx.
        // ```
        // typedef struct {
        //     WORD  wFormatTag;
        //     WORD  nChannels;
        //     DWORD nSamplesPerSec;
        //     DWORD nAvgBytesPerSec;
        //     WORD  nBlockAlign;
        // } WAVEFORMAT;
        // ```
        // The WAVEFORMATEX struct has two more members, as defined at
        // https://msdn.microsoft.com/en-us/library/ms713497.aspx
        // ```
        // typedef struct {
        //     WORD  wFormatTag;
        //     WORD  nChannels;
        //     DWORD nSamplesPerSec;
        //     DWORD nAvgBytesPerSec;
        //     WORD  nBlockAlign;
        //     WORD  wBitsPerSample;
        //     WORD  cbSize;
        // } WAVEFORMATEX;
        // ```
        // There is also PCMWAVEFORMAT as defined at
        // https://msdn.microsoft.com/en-us/library/dd743663.aspx.
        // ```
        // typedef struct {
        //   WAVEFORMAT wf;
        //   WORD       wBitsPerSample;
        // } PCMWAVEFORMAT;
        // ```
        // In either case, the minimal length of the fmt section is 16 bytes,
        // meaning that it does include the `wBitsPerSample` field. (The name
        // is misleading though, because it is the number of bits used to store
        // a sample, not all of the bits need to be valid for all versions of
        // the WAVE format.)
        let format_tag = try!(self.reader.read_le_u16());
        let n_channels = try!(self.reader.read_le_u16());
        let n_samples_per_sec = try!(self.reader.read_le_u32());
        let n_bytes_per_sec = try!(self.reader.read_le_u32());
        let block_align = try!(self.reader.read_le_u16());
        let bits_per_sample = try!(self.reader.read_le_u16());
        if n_channels == 0 {
            return Err(Error::FormatError("file contains zero channels"));
        }
        // Two of the stored fields are redundant, and may be ignored. We do
        // validate them to fail early for ill-formed files.
        // (checked_mul also rejects values that would overflow.)
        if (Some(bits_per_sample) != (block_align / n_channels).checked_mul(8)) ||
            (Some(n_bytes_per_sec) != (block_align as u32).checked_mul(n_samples_per_sec)) {
            return Err(Error::FormatError("inconsistent fmt chunk"));
        }
        // The bits per sample for a WAVEFORMAT struct is the number of bits
        // used to store a sample. Therefore, it must be a multiple of 8.
        if bits_per_sample % 8 != 0 {
            return Err(Error::FormatError("bits per sample is not a multiple of 8"));
        }
        if bits_per_sample == 0 {
            return Err(Error::FormatError("bits per sample is 0"));
        }
        let spec = WavSpec {
            channels: n_channels,
            sample_rate: n_samples_per_sec,
            bits_per_sample: bits_per_sample,
            sample_format: SampleFormat::Int,
        };
        // The different format tag definitions can be found in mmreg.h that is
        // part of the Windows SDK. The vast majority are esoteric vendor-
        // specific formats. We handle only a few. The following values could
        // be of interest:
        const PCM: u16 = 0x0001;
        const ADPCM: u16 = 0x0002;
        const IEEE_FLOAT: u16 = 0x0003;
        const EXTENSIBLE: u16 = 0xfffe;
        match format_tag {
            PCM => self.read_wave_format_pcm(chunk_len, spec),
            ADPCM => Err(Error::Unsupported),
            IEEE_FLOAT => self.read_wave_format_ieee_float(chunk_len, spec),
            EXTENSIBLE => self.read_wave_format_extensible(chunk_len, spec),
            _ => Err(Error::Unsupported),
        }
    }
    /// Parses the tail of a PCM fmt chunk (format tag 0x0001).
    fn read_wave_format_pcm(&mut self, chunk_len: u32, spec: WavSpec) -> Result<WavSpecEx> {
        // When there is a PCMWAVEFORMAT struct, the chunk is 16 bytes long.
        // The WAVEFORMATEX structs includes two extra bytes, `cbSize`.
        let is_wave_format_ex = match chunk_len {
            16 => false,
            18 => true,
            // Other sizes are unexpected, but such files do occur in the wild,
            // and reading these files is still possible, so we allow this.
            40 => true,
            _ => return Err(Error::FormatError("unexpected fmt chunk size")),
        };
        if is_wave_format_ex {
            // `cbSize` can be used for non-PCM formats to specify the size of
            // additional data. However, for WAVE_FORMAT_PCM, the member should
            // be ignored, see https://msdn.microsoft.com/en-us/library/ms713497.aspx.
            // Nonzero values do in fact occur in practice.
            let _cb_size = try!(self.reader.read_le_u16());
            // For WAVE_FORMAT_PCM in WAVEFORMATEX, only 8 or 16 bits per
            // sample are valid according to
            // https://msdn.microsoft.com/en-us/library/ms713497.aspx.
            // 24 bits per sample is explicitly not valid inside a WAVEFORMATEX
            // structure, but such files do occur in the wild nonetheless, and
            // there is no good reason why we couldn't read them.
            match spec.bits_per_sample {
                8 => {}
                16 => {}
                24 => {}
                _ => return Err(Error::FormatError("bits per sample is not 8 or 16")),
            }
        }
        // If the chunk len was longer than expected, ignore the additional bytes.
        if chunk_len == 40 {
            try!(self.reader.skip_bytes(22));
        }
        let spec_ex = WavSpecEx {
            spec: spec,
            bytes_per_sample: spec.bits_per_sample / 8,
        };
        Ok(spec_ex)
    }
    /// Parses the tail of an IEEE float fmt chunk (format tag 0x0003).
    fn read_wave_format_ieee_float(&mut self, chunk_len: u32, spec: WavSpec)
                                   -> Result<WavSpecEx> {
        // When there is a PCMWAVEFORMAT struct, the chunk is 16 bytes long.
        // The WAVEFORMATEX structs includes two extra bytes, `cbSize`.
        let is_wave_format_ex = chunk_len == 18;
        if !is_wave_format_ex && chunk_len != 16 {
            return Err(Error::FormatError("unexpected fmt chunk size"));
        }
        if is_wave_format_ex {
            // For WAVE_FORMAT_IEEE_FLOAT which we are reading, there should
            // be no extra data, so `cbSize` should be 0.
            let cb_size = try!(self.reader.read_le_u16());
            if cb_size != 0 {
                return Err(Error::FormatError("unexpected WAVEFORMATEX size"));
            }
        }
        // For WAVE_FORMAT_IEEE_FLOAT, the bits_per_sample field should be
        // set to `32` according to
        // https://msdn.microsoft.com/en-us/library/windows/hardware/ff538799(v=vs.85).aspx.
        //
        // Note that some applications support 64 bits per sample. This is
        // not yet supported by hound.
        if spec.bits_per_sample != 32 {
            return Err(Error::FormatError("bits per sample is not 32"));
        }
        let spec_ex = WavSpecEx {
            spec: WavSpec {
                sample_format: SampleFormat::Float,
                ..spec
            },
            bytes_per_sample: spec.bits_per_sample / 8,
        };
        Ok(spec_ex)
    }
    /// Parses the tail of a WAVEFORMATEXTENSIBLE fmt chunk (format tag 0xfffe).
    fn read_wave_format_extensible(&mut self, chunk_len: u32, spec: WavSpec)
                                   -> Result<WavSpecEx> {
        // 16 bytes were read already, there must be two more for the `cbSize`
        // field, and `cbSize` itself must be at least 22, so the chunk length
        // must be at least 40.
        if chunk_len < 40 {
            return Err(Error::FormatError("unexpected fmt chunk size"));
        }
        // `cbSize` is the last field of the WAVEFORMATEX struct.
        let cb_size = try!(self.reader.read_le_u16());
        // `cbSize` must be at least 22, but in this case we assume that it is
        // 22, because we would not know how to handle extra data anyway.
        if cb_size != 22 {
            return Err(Error::FormatError("unexpected WAVEFORMATEXTENSIBLE size"));
        }
        // What follows is the rest of the `WAVEFORMATEXTENSIBLE` struct, as
        // defined at https://msdn.microsoft.com/en-us/library/ms713496.aspx.
        // ```
        // typedef struct {
        //   WAVEFORMATEX  Format;
        //   union {
        //     WORD  wValidBitsPerSample;
        //     WORD  wSamplesPerBlock;
        //     WORD  wReserved;
        //   } Samples;
        //   DWORD   dwChannelMask;
        //   GUID    SubFormat;
        // } WAVEFORMATEXTENSIBLE, *PWAVEFORMATEXTENSIBLE;
        // ```
        let valid_bits_per_sample = try!(self.reader.read_le_u16());
        let _channel_mask = try!(self.reader.read_le_u32()); // Not used for now.
        let mut subformat = [0u8; 16];
        try!(self.reader.read_into(&mut subformat));
        // Several GUIDS are defined. At the moment, only the following are supported:
        //
        // * KSDATAFORMAT_SUBTYPE_PCM (PCM audio with integer samples).
        // * KSDATAFORMAT_SUBTYPE_IEEE_FLOAT (PCM audio with floating point samples).
        let sample_format = match subformat {
            super::KSDATAFORMAT_SUBTYPE_PCM => SampleFormat::Int,
            super::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT => SampleFormat::Float,
            _ => return Err(Error::Unsupported),
        };
        let spec_ex = WavSpecEx {
            spec: WavSpec {
                bits_per_sample: valid_bits_per_sample,
                sample_format: sample_format,
                ..spec
            },
            bytes_per_sample: spec.bits_per_sample / 8,
        };
        Ok(spec_ex)
    }
    /// Destroys the ChunksReader and returns the underlying reader.
    pub fn into_inner(self) -> R {
        self.reader
    }
    /// Seek to the given time within the file.
    ///
    /// The given time is measured in number of samples (independent of the
    /// number of channels) since the beginning of the audio data. To seek to
    /// a particular time in seconds, multiply the number of seconds with
    /// `WavSpec::sample_rate`. The given time should not exceed the duration of
    /// the file (returned by `duration()`). The behavior when seeking beyond
    /// `duration()` depends on the reader's `Seek` implementation.
    ///
    /// This method requires that the inner reader `R` implements `Seek`.
    pub fn seek(&mut self, time: u32) -> io::Result<()>
        where R: io::Seek,
    {
        let data = self.data_state.expect("Not in the data chunk.");
        let wanted_sample = time as i64 * data.spec_ex.spec.channels as i64;
        let wanted_byte = wanted_sample * data.spec_ex.bytes_per_sample as i64;
        let current_byte = data.len - data.remaining;
        let offset = wanted_byte - current_byte;
        try!(self.reader.seek(io::SeekFrom::Current(offset)));
        // Keep `remaining` consistent with the new position.
        self.data_state.as_mut().unwrap().remaining = data.remaining - offset;
        Ok(())
    }
}
impl<R: io::Read> io::Read for ChunksReader<R> {
    /// Reads sample bytes, never past the end of the data chunk.
    ///
    /// Panics if the reader is not positioned inside the data chunk.
    fn read(&mut self, buffer: &mut [u8]) -> io::Result<usize> {
        let remaining = self.data_state.expect("Not in the data chunk.").remaining;
        if remaining <= 0 {
            // The data chunk is exhausted; signal EOF.
            return Ok(0);
        }
        let limit = cmp::min(remaining as usize, buffer.len());
        let n_read = try!(self.reader.read(&mut buffer[..limit]));
        self.data_state.as_mut().unwrap().remaining -= n_read as i64;
        Ok(n_read)
    }
}
/// Specifies properties of the audio data, as well as the layout of the stream.
#[derive(Clone, Copy, Debug)]
pub struct WavSpecEx {
    /// The normal information about the audio data.
    ///
    /// Bits per sample here is the number of _used_ bits per sample, not the
    /// number of bits used to _store_ a sample.
    pub spec: WavSpec,
    /// The number of bytes used to store a sample.
    pub bytes_per_sample: u16,
}
/// A reader that reads the WAVE format from the underlying reader.
///
/// A `WavReader` is a streaming reader. It reads data from the underlying
/// reader on demand, and it reads no more than strictly necessary. No internal
/// buffering is performed on the underlying reader, but this can easily be
/// added by wrapping the reader in an `io::BufReader`. The `open` constructor
/// takes care of this for you.
///
/// `WavReader` is a wrapper around `ChunksReader`.
pub struct WavReader<R: io::Read> {
    /// The chunk reader from which the WAVE file is read.
    reader: ChunksReader<R>,
}
/// An iterator that yields samples of type `S` read from a `WavReader`.
///
/// The type `S` must have at least as many bits as the bits per sample of the
/// file, otherwise every iteration will return an error.
pub struct WavSamples<'wr, R, S>
    where R: io::Read + 'wr
{
    /// Borrowed chunk reader; iteration advances its data-chunk state.
    reader: &'wr mut ChunksReader<R>,
    /// Marks the sample type `S` without storing one.
    phantom_sample: marker::PhantomData<S>,
}
/// An iterator that yields samples of type `S` read from a `WavReader`.
///
/// Unlike `WavSamples`, this iterator owns the underlying reader.
///
/// The type `S` must have at least as many bits as the bits per sample of the
/// file, otherwise every iteration will return an error.
pub struct WavIntoSamples<R: io::Read, S> {
    /// Owned chunk reader; iteration advances its data-chunk state.
    reader: ChunksReader<R>,
    /// Marks the sample type `S` without storing one.
    phantom_sample: marker::PhantomData<S>,
}
/// Reads the RIFF WAVE header, returns the supposed file size.
///
/// This function can be used to quickly check if the file could be a wav file
/// by reading 12 bytes of the header. If an `Ok` is returned, the file is
/// likely a wav file. If an `Err` is returned, it is definitely not a wav
/// file.
///
/// The returned file size cannot be larger than 2<sup>32</sup> + 7 bytes.
pub fn read_wave_header<R: io::Read>(reader: &mut R) -> Result<u64> {
    // Every WAVE file starts with the four bytes 'RIFF' and a file length.
    let riff_tag = try!(reader.read_bytes(4));
    if &riff_tag[..] != b"RIFF" {
        return Err(Error::FormatError("no RIFF tag found"));
    }
    let file_len = try!(reader.read_le_u32());
    // Next four bytes indicate the file type, which should be WAVE.
    let wave_tag = try!(reader.read_bytes(4));
    if &wave_tag[..] != b"WAVE" {
        return Err(Error::FormatError("no WAVE tag found"));
    }
    // The stored file length does not include the "RIFF" magic and 4-byte
    // length field, so the total size is 8 bytes more than what is stored.
    Ok(u64::from(file_len) + 8)
}
impl<R> WavReader<R>
    where R: io::Read
{
    /// Attempts to create a reader that reads the WAVE format.
    ///
    /// The header is read immediately. Reading the data will be done on
    /// demand.
    pub fn new(reader: R) -> Result<WavReader<R>> {
        let mut reader = try!(ChunksReader::new(reader));
        // Position the reader at the first sample; this also parses the
        // fmt chunk along the way.
        try!(reader.read_until_data());
        if reader.spec_ex.is_none() {
            return Err(Error::FormatError("Wave file with no fmt header"))
        }
        Ok(WavReader {
            reader: reader,
        })
    }
    /// Returns information about the WAVE file.
    pub fn spec(&self) -> WavSpec {
        self.reader.spec_ex
            .expect("Using a WavReader wrapping a ChunkReader with no spec")
            .spec
    }
    /// Returns an iterator over all samples.
    ///
    /// The channel data is interleaved. The iterator is streaming. That is,
    /// if you call this method once, read a few samples, and call this method
    /// again, the second iterator will not start again from the beginning of
    /// the file, it will continue where the first iterator stopped.
    ///
    /// The type `S` must have at least `spec().bits_per_sample` bits,
    /// otherwise every iteration will return an error. All bit depths up to
    /// 32 bits per sample can be decoded into an `i32`, but if you know
    /// beforehand that you will be reading a file with 16 bits per sample, you
    /// can save memory by decoding into an `i16`.
    ///
    /// The type of `S` (int or float) must match `spec().sample_format`,
    /// otherwise every iteration will return an error.
    pub fn samples<'wr, S: Sample>(&'wr mut self) -> WavSamples<'wr, R, S> {
        self.reader.samples()
    }
    /// Same as `samples`, but takes ownership of the `WavReader`.
    ///
    /// See `samples()` for more info.
    pub fn into_samples<S: Sample>(self) -> WavIntoSamples<R, S> {
        self.reader.into_samples()
    }
    /// Returns the duration of the file in samples.
    ///
    /// The duration is independent of the number of channels. It is expressed
    /// in units of samples. The duration in seconds can be obtained by
    /// dividing this number by the sample rate. The duration is independent of
    /// how many samples have been read already.
    pub fn duration(&self) -> u32 {
        self.reader.duration()
    }
    /// Returns the number of values that the sample iterator will yield.
    ///
    /// The length of the file is its duration (in samples) times the number of
    /// channels. The length is independent of how many samples have been read
    /// already. To get the number of samples left, use `len()` on the
    /// `samples()` iterator.
    pub fn len(&self) -> u32 {
        self.reader.len()
    }
    /// Destroys the `WavReader` and returns the underlying reader.
    pub fn into_inner(self) -> R {
        self.reader.into_inner()
    }
    /// Seek to the given time within the file.
    ///
    /// The given time is measured in number of samples (independent of the
    /// number of channels) since the beginning of the audio data. To seek to
    /// a particular time in seconds, multiply the number of seconds with
    /// `WavSpec::sample_rate`. The given time should not exceed the duration of
    /// the file (returned by `duration()`). The behavior when seeking beyond
    /// `duration()` depends on the reader's `Seek` implementation.
    ///
    /// This method requires that the inner reader `R` implements `Seek`.
    pub fn seek(&mut self, time: u32) -> io::Result<()>
        where R: io::Seek,
    {
        self.reader.seek(time)
    }
}
impl WavReader<io::BufReader<fs::File>> {
    /// Attempts to create a reader that reads from the specified file.
    ///
    /// This is a convenience constructor that opens a `File`, wraps it in a
    /// `BufReader` and then constructs a `WavReader` from it.
    pub fn open<P: AsRef<path::Path>>(filename: P) -> Result<WavReader<io::BufReader<fs::File>>> {
        let buf_reader = io::BufReader::new(try!(fs::File::open(filename)));
        WavReader::new(buf_reader)
    }
}
/// Decodes one sample from the data chunk, shared by both sample iterators.
///
/// Returns `None` once the data chunk is exhausted. Panics if the reader is
/// not positioned inside the data chunk.
fn iter_next<R, S>(reader: &mut ChunksReader<R>) -> Option<Result<S>>
    where R: io::Read,
          S: Sample
{
    // DataReadingState is Copy, so this snapshot stays valid while the
    // reader is borrowed mutably below.
    let state = reader.data_state.expect("reader not in data chunk");
    if state.remaining <= 0 {
        return None;
    }
    let sample = Sample::read(reader,
                              state.spec_ex.spec.sample_format,
                              state.spec_ex.bytes_per_sample,
                              state.spec_ex.spec.bits_per_sample);
    Some(sample.map_err(Error::from))
}
/// Computes the exact number of samples the iterator has left to yield.
///
/// Panics if the reader is not positioned inside the data chunk.
fn iter_size_hint<R: io::Read>(reader: &ChunksReader<R>) -> (usize, Option<usize>) {
    let state = reader.data_state.expect("reader not in data chunk");
    let samples_left = (state.remaining / i64::from(state.spec_ex.bytes_per_sample)) as usize;
    (samples_left, Some(samples_left))
}
impl<'wr, R, S> Iterator for WavSamples<'wr, R, S>
    where R: io::Read,
          S: Sample
{
    type Item = Result<S>;

    fn next(&mut self) -> Option<Result<S>> {
        // Reborrow the inner `&mut ChunksReader` for the helper.
        iter_next(&mut *self.reader)
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        iter_size_hint(&*self.reader)
    }
}
// The size hint delegates to `iter_size_hint`, which reports the exact
// number of samples remaining, so `ExactSizeIterator` holds.
impl<'wr, R, S> ExactSizeIterator for WavSamples<'wr, R, S>
    where R: io::Read,
          S: Sample
{
}
impl<R, S> Iterator for WavIntoSamples<R, S>
    where R: io::Read,
          S: Sample
{
    type Item = Result<S>;

    fn next(&mut self) -> Option<Result<S>> {
        // Same streaming behavior as `WavSamples`, but through the owned reader.
        iter_next(&mut self.reader)
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        iter_size_hint(&self.reader)
    }
}
// The size hint delegates to `iter_size_hint`, which reports the exact
// number of samples remaining, so `ExactSizeIterator` holds.
impl<R, S> ExactSizeIterator for WavIntoSamples<R, S>
    where R: io::Read,
          S: Sample
{
}
#[test]
fn duration_and_len_agree() {
    // `len` counts individual sample values, while `duration` counts
    // per-channel frames; they must differ exactly by the channel count.
    let files = &["testsamples/pcmwaveformat-16bit-44100Hz-mono.wav",
                  "testsamples/waveformatex-16bit-44100Hz-stereo.wav",
                  "testsamples/waveformatextensible-32bit-48kHz-stereo.wav"];
    for fname in files {
        let reader = WavReader::open(fname).unwrap();
        let channels = reader.spec().channels as u32;
        assert_eq!(channels * reader.duration(), reader.len());
    }
}
/// Tests reading a wave file with the PCMWAVEFORMAT struct.
#[test]
fn read_wav_pcm_wave_format_pcm() {
    let mut reader = WavReader::open("testsamples/pcmwaveformat-16bit-44100Hz-mono.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.sample_rate, 44100);
    assert_eq!(spec.bits_per_sample, 16);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    let samples = reader.samples::<i16>().map(|r| r.unwrap()).collect::<Vec<_>>();
    // The test file has been prepared with these exact four samples.
    assert_eq!(&samples[..], &[2, -3, 5, -7]);
}
#[test]
fn read_wav_skips_unknown_chunks() {
    // The test samples are the same as without the -extra suffix, but ffmpeg
    // has kindly added some useless chunks in between the fmt and data chunk.
    let files = ["testsamples/pcmwaveformat-16bit-44100Hz-mono-extra.wav",
                 "testsamples/waveformatex-16bit-44100Hz-mono-extra.wav"];
    for file in &files {
        let mut reader = WavReader::open(file).unwrap();
        let spec = reader.spec();
        assert_eq!(spec.channels, 1);
        assert_eq!(spec.sample_rate, 44100);
        assert_eq!(spec.bits_per_sample, 16);
        assert_eq!(spec.sample_format, SampleFormat::Int);
        // Decoding must still find the first sample despite the extra chunks.
        let first = reader.samples::<i16>().next().unwrap().unwrap();
        assert_eq!(first, 2);
    }
}
#[test]
fn len_and_size_hint_are_correct() {
    let mut reader = WavReader::open("testsamples/pcmwaveformat-16bit-44100Hz-mono.wav")
        .unwrap();
    assert_eq!(reader.len(), 4);
    {
        let mut samples = reader.samples::<i16>();
        assert_eq!(samples.size_hint(), (4, Some(4)));
        samples.next();
        // The hint shrinks as samples are consumed.
        assert_eq!(samples.size_hint(), (3, Some(3)));
    }
    // Reading should not affect the initial length.
    assert_eq!(reader.len(), 4);
    // Creating a new iterator resumes where the previous iterator stopped.
    {
        let mut samples = reader.samples::<i16>();
        assert_eq!(samples.size_hint(), (3, Some(3)));
        samples.next();
        assert_eq!(samples.size_hint(), (2, Some(2)));
    }
}
#[test]
fn size_hint_is_exact() {
    let files = &["testsamples/pcmwaveformat-16bit-44100Hz-mono.wav",
                  "testsamples/waveformatex-16bit-44100Hz-stereo.wav",
                  "testsamples/waveformatextensible-32bit-48kHz-stereo.wav"];
    for fname in files {
        let mut reader = WavReader::open(fname).unwrap();
        let total = reader.len();
        let mut samples = reader.samples::<i32>();
        // After consuming i samples, exactly total - i must be reported.
        for consumed in 0..total {
            let left = (total - consumed) as usize;
            assert_eq!(samples.size_hint(), (left, Some(left)));
            assert!(samples.next().is_some());
        }
        assert!(samples.next().is_none());
    }
}
#[test]
fn samples_equals_into_samples() {
    // The owning and the borrowing iterator must decode identically.
    let reader_by_value = WavReader::open("testsamples/pcmwaveformat-8bit-44100Hz-mono.wav").unwrap();
    let mut reader_by_ref = WavReader::open("testsamples/pcmwaveformat-8bit-44100Hz-mono.wav").unwrap();
    let from_owned = reader_by_value.into_samples::<i16>()
        .map(|r| r.unwrap())
        .collect::<Vec<_>>();
    let from_borrowed = reader_by_ref.samples::<i16>()
        .map(|r| r.unwrap())
        .collect::<Vec<_>>();
    assert_eq!(from_owned, from_borrowed);
}
/// Tests reading a wave file with the WAVEFORMATEX struct.
#[test]
fn read_wav_wave_format_ex_pcm() {
    let mut reader = WavReader::open("testsamples/waveformatex-16bit-44100Hz-mono.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.sample_rate, 44100);
    assert_eq!(spec.bits_per_sample, 16);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    let samples = reader.samples::<i16>().map(|r| r.unwrap()).collect::<Vec<_>>();
    // The test file has been prepared with these exact four samples.
    assert_eq!(&samples[..], &[2, -3, 5, -7]);
}
#[test]
fn read_wav_wave_format_ex_ieee_float() {
    let mut reader = WavReader::open("testsamples/waveformatex-ieeefloat-44100Hz-mono.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.sample_rate, 44100);
    assert_eq!(spec.bits_per_sample, 32);
    assert_eq!(spec.sample_format, SampleFormat::Float);
    let samples = reader.samples::<f32>().map(|r| r.unwrap()).collect::<Vec<_>>();
    // The test file has been prepared with these exact four samples.
    assert_eq!(&samples[..], &[2.0, 3.0, -16411.0, 1019.0]);
}
#[test]
fn read_wav_stereo() {
    let mut reader = WavReader::open("testsamples/waveformatex-16bit-44100Hz-stereo.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 2);
    assert_eq!(spec.sample_rate, 44100);
    assert_eq!(spec.bits_per_sample, 16);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    let samples = reader.samples::<i16>().map(|r| r.unwrap()).collect::<Vec<_>>();
    // The test file has been prepared with these exact eight samples
    // (interleaved left/right).
    assert_eq!(&samples[..], &[2, -3, 5, -7, 11, -13, 17, -19]);
}
#[test]
fn read_wav_pcm_wave_format_8bit() {
    let mut reader = WavReader::open("testsamples/pcmwaveformat-8bit-44100Hz-mono.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.bits_per_sample, 8);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    let samples = reader.samples::<i16>().map(|r| r.unwrap()).collect::<Vec<_>>();
    // The test file has been prepared with these exact four samples.
    assert_eq!(&samples[..], &[19, -53, 89, -127]);
}
/// Regression test for a real-world wav file encountered in Quake.
#[test]
fn read_wav_wave_format_ex_8bit() {
    let mut reader = WavReader::open("testsamples/waveformatex-8bit-11025Hz-mono.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.bits_per_sample, 8);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    let samples = reader.samples::<i32>().map(|r| r.unwrap()).collect::<Vec<_>>();
    // The audio data has been zeroed out, but for 8-bit files, a zero means a
    // sample value of 128.
    assert_eq!(&samples[..], &[-128, -128, -128, -128]);
}
/// This test sample tests both reading the WAVEFORMATEXTENSIBLE header, and 24-bit samples.
#[test]
fn read_wav_wave_format_extensible_pcm_24bit() {
    let mut reader = WavReader::open("testsamples/waveformatextensible-24bit-192kHz-mono.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.sample_rate, 192_000);
    assert_eq!(spec.bits_per_sample, 24);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    let samples = reader.samples::<i32>().map(|r| r.unwrap()).collect::<Vec<_>>();
    // The test file has been prepared with these exact four samples.
    assert_eq!(&samples[..], &[-17, 4_194_319, -6_291_437, 8_355_817]);
}
#[test]
fn read_wav_32bit() {
    let mut reader = WavReader::open("testsamples/waveformatextensible-32bit-48kHz-stereo.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.bits_per_sample, 32);
    assert_eq!(spec.sample_format, SampleFormat::Int);
    let samples = reader.samples::<i32>().map(|r| r.unwrap()).collect::<Vec<_>>();
    // The test file has been prepared with these exact four samples.
    assert_eq!(&samples[..], &[19, -229_373, 33_587_161, -2_147_483_497]);
}
#[test]
fn read_wav_wave_format_extensible_ieee_float() {
    let mut reader =
        WavReader::open("testsamples/waveformatextensible-ieeefloat-44100Hz-mono.wav").unwrap();
    let spec = reader.spec();
    assert_eq!(spec.channels, 1);
    assert_eq!(spec.sample_rate, 44100);
    assert_eq!(spec.bits_per_sample, 32);
    assert_eq!(spec.sample_format, SampleFormat::Float);
    let samples = reader.samples::<f32>().map(|r| r.unwrap()).collect::<Vec<_>>();
    // The test file has been prepared with these exact four samples.
    assert_eq!(&samples[..], &[2.0, 3.0, -16411.0, 1019.0]);
}
#[test]
fn read_wav_nonstandard_01() {
    // The test sample is adapted from a file encountered in the wild (its data
    // chunk was replaced with two zero samples, some metadata was dropped, and
    // the file length in the header was fixed). The file is not valid per the
    // standard, yet many players accept it. (It still contains some metadata;
    // a hex editor reveals which program created it.) It carries a regular PCM
    // format tag, but the fmt chunk has the size expected of a
    // WAVEFORMATEXTENSIBLE chunk, and its 24 bits per sample is invalid for
    // WAVEFORMATEX. We can read it nonetheless.
    let mut reader = WavReader::open("testsamples/nonstandard-01.wav").unwrap();

    assert_eq!(reader.spec().bits_per_sample, 24);
    assert_eq!(reader.spec().sample_format, SampleFormat::Int);

    // Both (zeroed) samples must decode cleanly.
    let mut samples: Vec<i32> = Vec::new();
    for s in reader.samples() {
        samples.push(s.unwrap());
    }
    assert_eq!(&samples[..], &[0, 0]);
}
#[test]
fn wide_read_should_signal_error() {
    // Even though the first value of the 24-bit file is known to fit in an
    // `i8`, a general 24-bit sample does not, so narrow reads must fail.
    // 16 bits is still too narrow; 32 bits is wide enough.
    let mut r24 = WavReader::open("testsamples/waveformatextensible-24bit-192kHz-mono.wav")
        .unwrap();
    assert!(r24.samples::<i8>().next().unwrap().is_err());
    assert!(r24.samples::<i16>().next().unwrap().is_err());
    assert!(r24.samples::<i32>().next().unwrap().is_ok());

    // In general, 32-bit samples fit only in an `i32`.
    let mut r32 = WavReader::open("testsamples/waveformatextensible-32bit-48kHz-stereo.wav")
        .unwrap();
    assert!(r32.samples::<i8>().next().unwrap().is_err());
    assert!(r32.samples::<i16>().next().unwrap().is_err());
    assert!(r32.samples::<i32>().next().unwrap().is_ok());
}
#[test]
fn sample_format_mismatch_should_signal_error() {
    // A float file must only be readable as `f32`; every integer read fails.
    let mut reader_f32 = WavReader::open("testsamples/waveformatex-ieeefloat-44100Hz-mono.wav")
        .unwrap();
    assert!(reader_f32.samples::<i8>().next().unwrap().is_err());
    assert!(reader_f32.samples::<i16>().next().unwrap().is_err());
    assert!(reader_f32.samples::<i32>().next().unwrap().is_err());
    assert!(reader_f32.samples::<f32>().next().unwrap().is_ok());

    // An 8-bit integer file widens to any integer type, but not to float.
    let mut reader_i8 = WavReader::open("testsamples/pcmwaveformat-8bit-44100Hz-mono.wav").unwrap();
    assert!(reader_i8.samples::<i8>().next().unwrap().is_ok());
    assert!(reader_i8.samples::<i16>().next().unwrap().is_ok());
    assert!(reader_i8.samples::<i32>().next().unwrap().is_ok());
    assert!(reader_i8.samples::<f32>().next().unwrap().is_err());
}
#[test]
fn fuzz_crashes_should_be_fixed() {
    use std::fs;
    use std::ffi::OsStr;

    // This is a regression test: all crashes and other issues found through
    // fuzzing should not cause a crash.
    let dir = fs::read_dir("testsamples/fuzz").ok()
        .expect("failed to enumerate fuzz test corpus");

    for path in dir {
        let path = path.ok().expect("failed to obtain path info").path();
        let is_file = fs::metadata(&path).unwrap().file_type().is_file();
        // Only regular files with a `.wav` extension are corpus inputs.
        if is_file && path.extension() == Some(OsStr::new("wav")) {
            println!(" testing {} ...", path.to_str()
                .expect("unsupported filename"));
            // A fuzzed file may legitimately be rejected at open time; that
            // is acceptable — only a panic would fail this test.
            let mut reader = match WavReader::open(path) {
                Ok(r) => r,
                Err(..) => continue,
            };
            // Read samples until the first decode error. Errors are expected
            // for corrupted input and simply end the read; panics are not.
            match reader.spec().sample_format {
                SampleFormat::Int => {
                    for sample in reader.samples::<i32>() {
                        match sample {
                            Ok(..) => { }
                            Err(..) => break,
                        }
                    }
                }
                SampleFormat::Float => {
                    for sample in reader.samples::<f32>() {
                        match sample {
                            Ok(..) => { }
                            Err(..) => break,
                        }
                    }
                }
            }
        }
    }
}
#[test]
fn seek_is_consistent() {
    // One file per header flavor, so seeking is exercised for each format.
    let files = &["testsamples/pcmwaveformat-16bit-44100Hz-mono.wav",
                  "testsamples/waveformatex-16bit-44100Hz-stereo.wav",
                  "testsamples/waveformatextensible-32bit-48kHz-stereo.wav"];

    for fname in files {
        let mut reader = WavReader::open(fname).unwrap();

        // Seeking back to the start should "reset" the reader.
        let count = reader.samples::<i32>().count();
        reader.seek(0).unwrap();
        assert_eq!(count, reader.samples::<i32>().count());

        // Seek to the last sample.
        let last_time = reader.duration() - 1;
        let channels = reader.spec().channels;
        reader.seek(last_time).unwrap();
        // Exactly one sample per channel should remain after the seek.
        {
            let mut samples = reader.samples::<i32>();
            for _ in 0..channels {
                assert!(samples.next().is_some());
            }
            assert!(samples.next().is_none());
        }

        // Seeking beyond the audio data produces no samples.
        let num_samples = reader.len();
        reader.seek(num_samples).unwrap();
        assert!(reader.samples::<i32>().next().is_none());
        // Far-past-the-end seek; dividing by the channel count presumably
        // keeps the internal sample index from overflowing — TODO confirm
        // against `seek`'s implementation.
        reader.seek(::std::u32::MAX / channels as u32).unwrap();
        assert!(reader.samples::<i32>().next().is_none());
    }
}
|
//! Frame a stream of bytes based on a length prefix
//!
//! Many protocols delimit their frames by prefacing frame data with a
//! frame head that specifies the length of the frame. The
//! `length_delimited` module provides utilities for handling the length
//! based framing. This allows the consumer to work with entire frames
//! without having to worry about buffering or other framing logic.
//!
//! # Getting started
//!
//! If implementing a protocol from scratch, using length delimited framing
//! is an easy way to get started. [`LengthDelimitedCodec::new()`] will
//! return a length delimited codec using default configuration values.
//! This can then be used to construct a framer to adapt a full-duplex
//! byte stream into a stream of frames.
//!
//! ```
//! # extern crate tokio;
//! use tokio::io::{AsyncRead, AsyncWrite};
//! use tokio::codec::*;
//!
//! fn bind_transport<T: AsyncRead + AsyncWrite>(io: T)
//! -> Framed<T, LengthDelimitedCodec>
//! {
//! Framed::new(io, LengthDelimitedCodec::new())
//! }
//! # pub fn main() {}
//! ```
//!
//! The returned transport implements `Sink + Stream` for `BytesMut`. It
//! encodes the frame with a big-endian `u32` header denoting the frame
//! payload length:
//!
//! ```text
//! +----------+--------------------------------+
//! | len: u32 | frame payload |
//! +----------+--------------------------------+
//! ```
//!
//! Specifically, given the following:
//!
//! ```
//! # extern crate tokio;
//! # extern crate bytes;
//! # extern crate futures;
//! #
//! use tokio::io::{AsyncRead, AsyncWrite};
//! use tokio::codec::*;
//! use bytes::Bytes;
//! use futures::{Sink, Future};
//!
//! fn write_frame<T: AsyncRead + AsyncWrite>(io: T) {
//! let mut transport = Framed::new(io, LengthDelimitedCodec::new());
//! let frame = Bytes::from("hello world");
//!
//! transport.send(frame).wait().unwrap();
//! }
//! #
//! # pub fn main() {}
//! ```
//!
//! The encoded frame will look like this:
//!
//! ```text
//! +---- len: u32 ----+---- data ----+
//! | \x00\x00\x00\x0b | hello world |
//! +------------------+--------------+
//! ```
//!
//! # Decoding
//!
//! [`FramedRead`] adapts an [`AsyncRead`] into a `Stream` of [`BytesMut`],
//! such that each yielded [`BytesMut`] value contains the contents of an
//! entire frame. There are many configuration parameters enabling
//! [`FramedRead`] to handle a wide range of protocols. Here are some
//! examples that will cover the various options at a high level.
//!
//! ## Example 1
//!
//! The following will parse a `u16` length field at offset 0, including the
//! frame head in the yielded `BytesMut`.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(0) // default value
//! .length_field_length(2)
//! .length_adjustment(0) // default value
//! .num_skip(0) // Do not strip frame header
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+
//! | \x00\x0B | Hello world | --> | \x00\x0B | Hello world |
//! +----------+---------------+ +----------+---------------+
//! ```
//!
//! The value of the length field is 11 (`\x0B`) which represents the length
//! of the payload, `hello world`. By default, [`FramedRead`] assumes that
//! the length field represents the number of bytes that **follows** the
//! length field. Thus, the entire frame has a length of 13: 2 bytes for the
//! frame head + 11 bytes for the payload.
//!
//! ## Example 2
//!
//! The following will parse a `u16` length field at offset 0, omitting the
//! frame head in the yielded `BytesMut`.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(0) // default value
//! .length_field_length(2)
//! .length_adjustment(0) // default value
//! // `num_skip` is not needed, the default is to skip
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +--- Payload ---+
//! | \x00\x0B | Hello world | --> | Hello world |
//! +----------+---------------+ +---------------+
//! ```
//!
//! This is similar to the first example, the only difference is that the
//! frame head is **not** included in the yielded `BytesMut` value.
//!
//! ## Example 3
//!
//! The following will parse a `u16` length field at offset 0, including the
//! frame head in the yielded `BytesMut`. In this case, the length field
//! **includes** the frame head length.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(0) // default value
//! .length_field_length(2)
//! .length_adjustment(-2) // size of head
//! .num_skip(0)
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+
//! | \x00\x0D | Hello world | --> | \x00\x0D | Hello world |
//! +----------+---------------+ +----------+---------------+
//! ```
//!
//! In most cases, the length field represents the length of the payload
//! only, as shown in the previous examples. However, in some protocols the
//! length field represents the length of the whole frame, including the
//! head. In such cases, we specify a negative `length_adjustment` to adjust
//! the value provided in the frame head to represent the payload length.
//!
//! ## Example 4
//!
//! The following will parse a 3 byte length field at offset 0 in a 5 byte
//! frame head, including the frame head in the yielded `BytesMut`.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(0) // default value
//! .length_field_length(3)
//! .length_adjustment(2) // remaining head
//! .num_skip(0)
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +---- len -----+- head -+--- Payload ---+
//! | \x00\x00\x0B | \xCAFE | Hello world |
//! +--------------+--------+---------------+
//!
//! DECODED
//! +---- len -----+- head -+--- Payload ---+
//! | \x00\x00\x0B | \xCAFE | Hello world |
//! +--------------+--------+---------------+
//! ```
//!
//! This is a more advanced example that shows a case where there is extra
//! frame head data between the length field and the payload. In such cases,
//! it is usually desirable to include the frame head as part of the yielded
//! `BytesMut`. This lets consumers of the length delimited framer process
//! the frame head as needed.
//!
//! The positive `length_adjustment` value lets `FramedRead` factor in the
//! additional head into the frame length calculation.
//!
//! ## Example 5
//!
//! The following will parse a `u16` length field at offset 1 of a 4 byte
//! frame head. The first byte and the length field will be omitted from the
//! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be
//! included.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(1) // length of hdr1
//! .length_field_length(2)
//! .length_adjustment(1) // length of hdr2
//! .num_skip(3) // length of hdr1 + LEN
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+
//! | \xCA | \x00\x0B | \xFE | Hello world |
//! +--------+----------+--------+---------------+
//!
//! DECODED
//! +- hdr2 -+--- Payload ---+
//! | \xFE | Hello world |
//! +--------+---------------+
//! ```
//!
//! The length field is situated in the middle of the frame head. In this
//! case, the first byte in the frame head could be a version or some other
//! identifier that is not needed for processing. On the other hand, the
//! second half of the head is needed.
//!
//! `length_field_offset` indicates how many bytes to skip before starting
//! to read the length field. `length_adjustment` is the number of bytes to
//! skip starting at the end of the length field. In this case, it is the
//! second half of the head.
//!
//! ## Example 6
//!
//! The following will parse a `u16` length field at offset 1 of a 4 byte
//! frame head. The first byte and the length field will be omitted from the
//! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be
//! included. In this case, the length field **includes** the frame head
//! length.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(1) // length of hdr1
//! .length_field_length(2)
//! .length_adjustment(-3) // length of hdr1 + LEN, negative
//! .num_skip(3)
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+
//! | \xCA | \x00\x0F | \xFE | Hello world |
//! +--------+----------+--------+---------------+
//!
//! DECODED
//! +- hdr2 -+--- Payload ---+
//! | \xFE | Hello world |
//! +--------+---------------+
//! ```
//!
//! Similar to the example above, the difference is that the length field
//! represents the length of the entire frame instead of just the payload.
//! The length of `hdr1` and `len` must be counted in `length_adjustment`.
//! Note that the length of `hdr2` does **not** need to be explicitly set
//! anywhere because it already is factored into the total frame length that
//! is read from the byte stream.
//!
//! # Encoding
//!
//! [`FramedWrite`] adapts an [`AsyncWrite`] into a `Sink` of [`BytesMut`],
//! such that each submitted [`BytesMut`] is prefaced by a length field.
//! There are fewer configuration options than [`FramedRead`]. Given
//! protocols that have more complex frame heads, an encoder should probably
//! be written by hand using [`Encoder`].
//!
//! Here is a simple example, given a `FramedWrite` with the following
//! configuration:
//!
//! ```
//! # extern crate tokio;
//! # extern crate bytes;
//! # use tokio::io::AsyncWrite;
//! # use tokio::codec::length_delimited;
//! # use bytes::BytesMut;
//! # fn write_frame<T: AsyncWrite>(io: T) {
//! # let _ =
//! length_delimited::Builder::new()
//! .length_field_length(2)
//! .new_write(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! A payload of `hello world` will be encoded as:
//!
//! ```text
//! +- len: u16 -+---- data ----+
//! | \x00\x0b | hello world |
//! +------------+--------------+
//! ```
//!
//! [`LengthDelimitedCodec::new()`]: struct.LengthDelimitedCodec.html#method.new
//! [`FramedRead`]: struct.FramedRead.html
//! [`FramedWrite`]: struct.FramedWrite.html
//! [`AsyncRead`]: ../../trait.AsyncRead.html
//! [`AsyncWrite`]: ../../trait.AsyncWrite.html
//! [`Encoder`]: ../trait.Encoder.html
//! [`BytesMut`]: https://docs.rs/bytes/0.4/bytes/struct.BytesMut.html
use {
codec::{
Decoder, Encoder, FramedRead, FramedWrite, Framed
},
io::{
AsyncRead, AsyncWrite
},
};
use bytes::{Buf, BufMut, Bytes, BytesMut, IntoBuf};
use std::{cmp, fmt};
use std::error::Error as StdError;
use std::io::{self, Cursor};
/// Configure length delimited `LengthDelimitedCodec`s.
///
/// `Builder` enables constructing configured length delimited codecs. Note
/// that not all configuration settings apply to both encoding and decoding. See
/// the documentation for specific methods for more detail.
#[derive(Debug, Clone, Copy)]
pub struct Builder {
    // Maximum frame length; frames longer than this are rejected with a
    // `FrameTooBig` error on both the encode and decode paths.
    max_frame_len: usize,

    // Number of bytes representing the field length
    length_field_len: usize,

    // Number of bytes in the header before the length field
    length_field_offset: usize,

    // Adjust the length specified in the header field by this amount
    // (negative when the header value includes bytes that precede the
    // payload, positive when extra head bytes follow the length field).
    length_adjustment: isize,

    // Total number of bytes to skip before reading the payload, if not set,
    // `length_field_len + length_field_offset`
    num_skip: Option<usize>,

    // Length field byte order (little or big endian)
    length_field_is_big_endian: bool,
}
/// An error when the number of bytes read is more than max frame length.
pub struct FrameTooBig {
    // Private zero-sized field: prevents construction outside this module
    // while leaving room to add fields later without breaking callers.
    _priv: (),
}
/// A codec for frames delimited by a frame head specifying their lengths.
///
/// This allows the consumer to work with entire frames without having to worry
/// about buffering or other framing logic.
///
/// See [module level] documentation for more detail.
///
/// [module level]: index.html
#[derive(Debug)]
pub struct LengthDelimitedCodec {
    // Configuration values shared by the encode and decode paths.
    builder: Builder,

    // Read state: tracks whether we are waiting for a head or for payload
    // bytes, so a frame can be resumed across partial reads.
    state: DecodeState,
}
// Two-step decode state machine: first the length head, then the payload.
#[derive(Debug, Clone, Copy)]
enum DecodeState {
    // Waiting for enough bytes to parse the frame head.
    Head,
    // Head parsed; waiting for this many payload bytes.
    Data(usize),
}
// ===== impl LengthDelimitedCodec ======

impl LengthDelimitedCodec {
    /// Creates a new `LengthDelimitedCodec` with the default configuration values.
    pub fn new() -> Self {
        Self {
            builder: Builder::new(),
            state: DecodeState::Head,
        }
    }

    /// Returns the current max frame setting
    ///
    /// This is the largest size this codec will accept from the wire. Larger
    /// frames will be rejected.
    pub fn max_frame_length(&self) -> usize {
        self.builder.max_frame_len
    }

    /// Updates the max frame setting.
    ///
    /// The change takes effect the next time a frame is decoded. In other
    /// words, if a frame is currently in process of being decoded with a frame
    /// size greater than `val` but less than the max frame length in effect
    /// before calling this function, then the frame will be allowed.
    pub fn set_max_frame_length(&mut self, val: usize) {
        self.builder.max_frame_length(val);
    }

    /// Attempts to parse the frame head at the front of `src`.
    ///
    /// Returns `Ok(None)` when the buffer does not yet hold a complete head.
    /// On success, the configured number of head bytes is consumed from
    /// `src`, capacity for the payload is reserved, and the adjusted payload
    /// length is returned.
    fn decode_head(&mut self, src: &mut BytesMut) -> io::Result<Option<usize>> {
        let head_len = self.builder.num_head_bytes();
        let field_len = self.builder.length_field_len;

        if src.len() < head_len {
            // Not enough data buffered to read the full head yet.
            return Ok(None);
        }

        let n = {
            let mut src = Cursor::new(&mut *src);

            // Skip the bytes that precede the length field.
            src.advance(self.builder.length_field_offset);

            // Read the length field with the configured endianness.
            let n = if self.builder.length_field_is_big_endian {
                src.get_uint_be(field_len)
            } else {
                src.get_uint_le(field_len)
            };

            // Per the `max_frame_length` docs, the limit is checked against
            // the raw value *before* any adjustment is applied.
            if n > self.builder.max_frame_len as u64 {
                return Err(io::Error::new(io::ErrorKind::InvalidData, FrameTooBig {
                    _priv: (),
                }));
            }

            // The check above ensures there is no overflow
            let n = n as usize;

            // Apply `length_adjustment` with bounds checking; this is the
            // inverse of the adjustment performed in `encode`.
            let n = if self.builder.length_adjustment < 0 {
                n.checked_sub(-self.builder.length_adjustment as usize)
            } else {
                n.checked_add(self.builder.length_adjustment as usize)
            };

            // Surface an out-of-range adjustment as an error, in the same
            // style as the encode path.
            n.ok_or_else(|| io::Error::new(
                io::ErrorKind::InvalidInput,
                "provided length would overflow after adjustment",
            ))?
        };

        let num_skip = self.builder.get_num_skip();

        if num_skip > 0 {
            let _ = src.split_to(num_skip);
        }

        // Ensure that the buffer has enough space to read the incoming
        // payload
        src.reserve(n);

        Ok(Some(n))
    }

    /// Splits a complete `n`-byte payload off the front of `src`, or returns
    /// `Ok(None)` if the payload has not fully arrived yet.
    fn decode_data(&self, n: usize, src: &mut BytesMut) -> io::Result<Option<BytesMut>> {
        // At this point, the buffer has already had the required capacity
        // reserved. All there is to do is read.
        if src.len() < n {
            return Ok(None);
        }

        Ok(Some(src.split_to(n)))
    }
}
impl Decoder for LengthDelimitedCodec {
    type Item = BytesMut;
    type Error = io::Error;

    /// Decodes a single length-delimited frame from `src`.
    ///
    /// Drives the two-step `DecodeState` machine and returns `Ok(None)`
    /// until a full frame has been buffered.
    fn decode(&mut self, src: &mut BytesMut) -> io::Result<Option<BytesMut>> {
        // Determine the expected payload length: parse a fresh head, or
        // resume a frame whose head was parsed on an earlier call.
        let n = match self.state {
            DecodeState::Head => {
                match self.decode_head(src)? {
                    Some(n) => {
                        // Remember the length so a partially received
                        // payload can be resumed on the next call.
                        self.state = DecodeState::Data(n);
                        n
                    }
                    None => return Ok(None),
                }
            }
            DecodeState::Data(n) => n,
        };

        match self.decode_data(n, src)? {
            Some(data) => {
                // Frame complete; go back to looking for a head.
                self.state = DecodeState::Head;

                // Make sure the buffer has enough space to read the next head
                src.reserve(self.builder.num_head_bytes());

                Ok(Some(data))
            }
            None => Ok(None),
        }
    }
}
impl Encoder for LengthDelimitedCodec {
    type Item = Bytes;
    type Error = io::Error;

    /// Writes `data` to `dst`, prefixed with its (adjusted) length field.
    fn encode(&mut self, data: Bytes, dst: &mut BytesMut) -> Result<(), io::Error> {
        let payload_len = (&data).into_buf().remaining();

        // The payload is checked against the configured maximum before the
        // length adjustment is applied.
        if payload_len > self.builder.max_frame_len {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, FrameTooBig {
                _priv: (),
            }));
        }

        // Undo `length_adjustment` (the inverse of what decoding applies),
        // with bounds checking.
        let adjusted = if self.builder.length_adjustment < 0 {
            payload_len.checked_add(-self.builder.length_adjustment as usize)
        } else {
            payload_len.checked_sub(self.builder.length_adjustment as usize)
        };
        let adjusted = adjusted.ok_or_else(|| io::Error::new(
            io::ErrorKind::InvalidInput,
            "provided length would overflow after adjustment",
        ))?;

        // Reserve capacity in the destination buffer to fit the frame and
        // length field (plus adjustment).
        dst.reserve(self.builder.length_field_len + adjusted);

        // Emit the length field with the configured endianness...
        if self.builder.length_field_is_big_endian {
            dst.put_uint_be(adjusted as u64, self.builder.length_field_len);
        } else {
            dst.put_uint_le(adjusted as u64, self.builder.length_field_len);
        }

        // ...followed by the frame payload itself.
        dst.extend_from_slice(&data[..]);

        Ok(())
    }
}
// ===== impl Builder =====

impl Builder {
    /// Creates a new length delimited codec builder with default configuration
    /// values.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .length_field_offset(0)
    ///     .length_field_length(2)
    ///     .length_adjustment(0)
    ///     .num_skip(0)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new() -> Builder {
        Builder {
            // Default max frame length of 8MB
            max_frame_len: 8 * 1_024 * 1_024,

            // Default byte length of 4
            length_field_len: 4,

            // Default to the header field being at the start of the header.
            length_field_offset: 0,

            // No adjustment: the header value is the payload length as-is.
            length_adjustment: 0,

            // Total number of bytes to skip before reading the payload, if not set,
            // `length_field_len + length_field_offset`
            num_skip: None,

            // Default to reading the length field in network (big) endian.
            length_field_is_big_endian: true,
        }
    }

    /// Read the length field as a big endian integer
    ///
    /// This is the default setting.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .big_endian()
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn big_endian(&mut self) -> &mut Self {
        self.length_field_is_big_endian = true;
        self
    }

    /// Read the length field as a little endian integer
    ///
    /// The default setting is big endian.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .little_endian()
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn little_endian(&mut self) -> &mut Self {
        self.length_field_is_big_endian = false;
        self
    }

    /// Read the length field as a native endian integer
    ///
    /// The default setting is big endian.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .native_endian()
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn native_endian(&mut self) -> &mut Self {
        // Resolved at compile time via the target's endianness.
        if cfg!(target_endian = "big") {
            self.big_endian()
        } else {
            self.little_endian()
        }
    }

    /// Sets the max frame length
    ///
    /// This configuration option applies to both encoding and decoding. The
    /// default value is 8MB.
    ///
    /// When decoding, the length field read from the byte stream is checked
    /// against this setting **before** any adjustments are applied. When
    /// encoding, the length of the submitted payload is checked against this
    /// setting.
    ///
    /// When frames exceed the max length, an `io::Error` with the custom value
    /// of the `FrameTooBig` type will be returned.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .max_frame_length(8 * 1024)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn max_frame_length(&mut self, val: usize) -> &mut Self {
        self.max_frame_len = val;
        self
    }

    /// Sets the number of bytes used to represent the length field
    ///
    /// The default value is `4`. The max value is `8`.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Panics
    ///
    /// Panics if `val` is zero or greater than 8.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .length_field_length(4)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn length_field_length(&mut self, val: usize) -> &mut Self {
        assert!(val > 0 && val <= 8, "invalid length field length");
        self.length_field_len = val;
        self
    }

    /// Sets the number of bytes in the header before the length field
    ///
    /// This configuration option only applies to decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .length_field_offset(1)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn length_field_offset(&mut self, val: usize) -> &mut Self {
        self.length_field_offset = val;
        self
    }

    /// Delta between the payload length specified in the header and the real
    /// payload length
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .length_adjustment(-2)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn length_adjustment(&mut self, val: isize) -> &mut Self {
        self.length_adjustment = val;
        self
    }

    /// Sets the number of bytes to skip before reading the payload
    ///
    /// Default value is `length_field_len + length_field_offset`
    ///
    /// This configuration option only applies to decoding
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .num_skip(4)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn num_skip(&mut self, val: usize) -> &mut Self {
        self.num_skip = Some(val);
        self
    }

    /// Create a configured length delimited `LengthDelimitedCodec`
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    /// # pub fn main() {
    /// Builder::new()
    ///     .length_field_offset(0)
    ///     .length_field_length(2)
    ///     .length_adjustment(0)
    ///     .num_skip(0)
    ///     .new_codec();
    /// # }
    /// ```
    pub fn new_codec(&self) -> LengthDelimitedCodec {
        LengthDelimitedCodec {
            // `Builder` is `Copy`, so the codec owns a snapshot of the
            // configuration at creation time.
            builder: *self,
            state: DecodeState::Head,
        }
    }

    /// Create a configured length delimited `FramedRead`
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .length_field_offset(0)
    ///     .length_field_length(2)
    ///     .length_adjustment(0)
    ///     .num_skip(0)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new_read<T>(&self, upstream: T) -> FramedRead<T, LengthDelimitedCodec>
        where T: AsyncRead,
    {
        FramedRead::new(upstream, self.new_codec())
    }

    /// Create a configured length delimited `FramedWrite`
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # extern crate bytes;
    /// # use tokio::io::AsyncWrite;
    /// # use tokio::codec::length_delimited;
    /// # use bytes::BytesMut;
    /// # fn write_frame<T: AsyncWrite>(io: T) {
    /// length_delimited::Builder::new()
    ///     .length_field_length(2)
    ///     .new_write(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new_write<T>(&self, inner: T) -> FramedWrite<T, LengthDelimitedCodec>
        where T: AsyncWrite,
    {
        FramedWrite::new(inner, self.new_codec())
    }

    /// Create a configured length delimited `Framed`
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # extern crate bytes;
    /// # use tokio::io::{AsyncRead, AsyncWrite};
    /// # use tokio::codec::length_delimited;
    /// # use bytes::BytesMut;
    /// # fn write_frame<T: AsyncRead + AsyncWrite>(io: T) {
    /// # let _ =
    /// length_delimited::Builder::new()
    ///     .length_field_length(2)
    ///     .new_framed(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new_framed<T>(&self, inner: T) -> Framed<T, LengthDelimitedCodec>
        where T: AsyncRead + AsyncWrite,
    {
        Framed::new(inner, self.new_codec())
    }

    // Number of bytes the decoder must buffer before the frame head can be
    // parsed: everything up to and including the length field, or the
    // explicit `num_skip` if it was set to something larger.
    fn num_head_bytes(&self) -> usize {
        let num = self.length_field_offset + self.length_field_len;
        cmp::max(num, self.num_skip.unwrap_or(0))
    }

    // Number of bytes to discard before the payload: the explicit `num_skip`
    // if set, otherwise everything up to and including the length field.
    fn get_num_skip(&self) -> usize {
        self.num_skip.unwrap_or(self.length_field_offset + self.length_field_len)
    }
}
// ===== impl FrameTooBig =====

impl fmt::Debug for FrameTooBig {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The struct has no public fields; render just the type name.
        f.debug_struct("FrameTooBig")
            .finish()
    }
}
impl fmt::Display for FrameTooBig {
    /// Renders the same message exposed via `StdError::description`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl StdError for FrameTooBig {
    // Static message; `Display` forwards to this, so the two always agree.
    fn description(&self) -> &str {
        "frame size too big"
    }
}
docs: deal with Result instead of using unwrap (#860)
//! Frame a stream of bytes based on a length prefix
//!
//! Many protocols delimit their frames by prefacing frame data with a
//! frame head that specifies the length of the frame. The
//! `length_delimited` module provides utilities for handling the length
//! based framing. This allows the consumer to work with entire frames
//! without having to worry about buffering or other framing logic.
//!
//! # Getting started
//!
//! If implementing a protocol from scratch, using length delimited framing
//! is an easy way to get started. [`LengthDelimitedCodec::new()`] will
//! return a length delimited codec using default configuration values.
//! This can then be used to construct a framer to adapt a full-duplex
//! byte stream into a stream of frames.
//!
//! ```
//! # extern crate tokio;
//! use tokio::io::{AsyncRead, AsyncWrite};
//! use tokio::codec::*;
//!
//! fn bind_transport<T: AsyncRead + AsyncWrite>(io: T)
//! -> Framed<T, LengthDelimitedCodec>
//! {
//! Framed::new(io, LengthDelimitedCodec::new())
//! }
//! # pub fn main() {}
//! ```
//!
//! The returned transport implements `Sink + Stream` for `BytesMut`. It
//! encodes the frame with a big-endian `u32` header denoting the frame
//! payload length:
//!
//! ```text
//! +----------+--------------------------------+
//! | len: u32 | frame payload |
//! +----------+--------------------------------+
//! ```
//!
//! Specifically, given the following:
//!
//! ```
//! # extern crate tokio;
//! # extern crate bytes;
//! # extern crate futures;
//! #
//! use tokio::io::{AsyncRead, AsyncWrite};
//! use tokio::codec::*;
//! use bytes::Bytes;
//! use futures::{Sink, Future};
//!
//! fn write_frame<T: AsyncRead + AsyncWrite>(io: T) -> Result<(), Box<std::error::Error>> {
//! let mut transport = Framed::new(io, LengthDelimitedCodec::new());
//! let frame = Bytes::from("hello world");
//!
//! transport.send(frame).wait()?;
//! Ok(())
//! }
//! #
//! # pub fn main() {}
//! ```
//!
//! The encoded frame will look like this:
//!
//! ```text
//! +---- len: u32 ----+---- data ----+
//! | \x00\x00\x00\x0b | hello world |
//! +------------------+--------------+
//! ```
//!
//! # Decoding
//!
//! [`FramedRead`] adapts an [`AsyncRead`] into a `Stream` of [`BytesMut`],
//! such that each yielded [`BytesMut`] value contains the contents of an
//! entire frame. There are many configuration parameters enabling
//! [`FramedRead`] to handle a wide range of protocols. Here are some
//! examples that will cover the various options at a high level.
//!
//! ## Example 1
//!
//! The following will parse a `u16` length field at offset 0, including the
//! frame head in the yielded `BytesMut`.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(0) // default value
//! .length_field_length(2)
//! .length_adjustment(0) // default value
//! .num_skip(0) // Do not strip frame header
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+
//! | \x00\x0B | Hello world | --> | \x00\x0B | Hello world |
//! +----------+---------------+ +----------+---------------+
//! ```
//!
//! The value of the length field is 11 (`\x0B`) which represents the length
//! of the payload, `hello world`. By default, [`FramedRead`] assumes that
//! the length field represents the number of bytes that **follows** the
//! length field. Thus, the entire frame has a length of 13: 2 bytes for the
//! frame head + 11 bytes for the payload.
//!
//! ## Example 2
//!
//! The following will parse a `u16` length field at offset 0, omitting the
//! frame head in the yielded `BytesMut`.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(0) // default value
//! .length_field_length(2)
//! .length_adjustment(0) // default value
//! // `num_skip` is not needed; the default is to skip the frame head
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +--- Payload ---+
//! | \x00\x0B | Hello world | --> | Hello world |
//! +----------+---------------+ +---------------+
//! ```
//!
//! This is similar to the first example, the only difference is that the
//! frame head is **not** included in the yielded `BytesMut` value.
//!
//! ## Example 3
//!
//! The following will parse a `u16` length field at offset 0, including the
//! frame head in the yielded `BytesMut`. In this case, the length field
//! **includes** the frame head length.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(0) // default value
//! .length_field_length(2)
//! .length_adjustment(-2) // size of head
//! .num_skip(0)
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+
//! | \x00\x0D | Hello world | --> | \x00\x0D | Hello world |
//! +----------+---------------+ +----------+---------------+
//! ```
//!
//! In most cases, the length field represents the length of the payload
//! only, as shown in the previous examples. However, in some protocols the
//! length field represents the length of the whole frame, including the
//! head. In such cases, we specify a negative `length_adjustment` to adjust
//! the value provided in the frame head to represent the payload length.
//!
//! ## Example 4
//!
//! The following will parse a 3 byte length field at offset 0 in a 5 byte
//! frame head, including the frame head in the yielded `BytesMut`.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(0) // default value
//! .length_field_length(3)
//! .length_adjustment(2) // remaining head
//! .num_skip(0)
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +---- len -----+- head -+--- Payload ---+
//! | \x00\x00\x0B | \xCAFE | Hello world |
//! +--------------+--------+---------------+
//!
//! DECODED
//! +---- len -----+- head -+--- Payload ---+
//! | \x00\x00\x0B | \xCAFE | Hello world |
//! +--------------+--------+---------------+
//! ```
//!
//! A more advanced example that shows a case where there is extra frame
//! head data between the length field and the payload. In such cases, it is
//! usually desirable to include the frame head as part of the yielded
//! `BytesMut`. This lets consumers of the length delimited framer to
//! process the frame head as needed.
//!
//! The positive `length_adjustment` value lets `FramedRead` factor in the
//! additional head into the frame length calculation.
//!
//! ## Example 5
//!
//! The following will parse a `u16` length field at offset 1 of a 4 byte
//! frame head. The first byte and the length field will be omitted from the
//! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be
//! included.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(1) // length of hdr1
//! .length_field_length(2)
//! .length_adjustment(1) // length of hdr2
//! .num_skip(3) // length of hdr1 + LEN
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+
//! | \xCA | \x00\x0B | \xFE | Hello world |
//! +--------+----------+--------+---------------+
//!
//! DECODED
//! +- hdr2 -+--- Payload ---+
//! | \xFE | Hello world |
//! +--------+---------------+
//! ```
//!
//! The length field is situated in the middle of the frame head. In this
//! case, the first byte in the frame head could be a version or some other
//! identifier that is not needed for processing. On the other hand, the
//! second half of the head is needed.
//!
//! `length_field_offset` indicates how many bytes to skip before starting
//! to read the length field. `length_adjustment` is the number of bytes to
//! skip starting at the end of the length field. In this case, it is the
//! second half of the head.
//!
//! ## Example 6
//!
//! The following will parse a `u16` length field at offset 1 of a 4 byte
//! frame head. The first byte and the length field will be omitted from the
//! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be
//! included. In this case, the length field **includes** the frame head
//! length.
//!
//! ```
//! # extern crate tokio;
//! # use tokio::io::AsyncRead;
//! # use tokio::codec::length_delimited;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! length_delimited::Builder::new()
//! .length_field_offset(1) // length of hdr1
//! .length_field_length(2)
//! .length_adjustment(-3) // length of hdr1 + LEN, negative
//! .num_skip(3)
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+
//! | \xCA | \x00\x0F | \xFE | Hello world |
//! +--------+----------+--------+---------------+
//!
//! DECODED
//! +- hdr2 -+--- Payload ---+
//! | \xFE | Hello world |
//! +--------+---------------+
//! ```
//!
//! Similar to the example above, the difference is that the length field
//! represents the length of the entire frame instead of just the payload.
//! The length of `hdr1` and `len` must be counted in `length_adjustment`.
//! Note that the length of `hdr2` does **not** need to be explicitly set
//! anywhere because it already is factored into the total frame length that
//! is read from the byte stream.
//!
//! # Encoding
//!
//! [`FramedWrite`] adapts an [`AsyncWrite`] into a `Sink` of [`BytesMut`],
//! such that each submitted [`BytesMut`] is prefaced by a length field.
//! There are fewer configuration options than [`FramedRead`]. Given
//! protocols that have more complex frame heads, an encoder should probably
//! be written by hand using [`Encoder`].
//!
//! Here is a simple example, given a `FramedWrite` with the following
//! configuration:
//!
//! ```
//! # extern crate tokio;
//! # extern crate bytes;
//! # use tokio::io::AsyncWrite;
//! # use tokio::codec::length_delimited;
//! # use bytes::BytesMut;
//! # fn write_frame<T: AsyncWrite>(io: T) {
//! # let _ =
//! length_delimited::Builder::new()
//! .length_field_length(2)
//! .new_write(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! A payload of `hello world` will be encoded as:
//!
//! ```text
//! +- len: u16 -+---- data ----+
//! | \x00\x0b | hello world |
//! +------------+--------------+
//! ```
//!
//! [`LengthDelimitedCodec::new()`]: struct.LengthDelimitedCodec.html#method.new
//! [`FramedRead`]: struct.FramedRead.html
//! [`FramedWrite`]: struct.FramedWrite.html
//! [`AsyncRead`]: ../../trait.AsyncRead.html
//! [`AsyncWrite`]: ../../trait.AsyncWrite.html
//! [`Encoder`]: ../trait.Encoder.html
//! [`BytesMut`]: https://docs.rs/bytes/0.4/bytes/struct.BytesMut.html
use {
codec::{
Decoder, Encoder, FramedRead, FramedWrite, Framed
},
io::{
AsyncRead, AsyncWrite
},
};
use bytes::{Buf, BufMut, Bytes, BytesMut, IntoBuf};
use std::{cmp, fmt};
use std::error::Error as StdError;
use std::io::{self, Cursor};
/// Configure length delimited `LengthDelimitedCodec`s.
///
/// `Builder` enables constructing configured length delimited codecs. Note
/// that not all configuration settings apply to both encoding and decoding. See
/// the documentation for specific methods for more detail.
#[derive(Debug, Clone, Copy)]
pub struct Builder {
    // Maximum frame length (checked against the raw header value when
    // decoding, and against the payload size when encoding)
    max_frame_len: usize,
    // Number of bytes representing the field length
    length_field_len: usize,
    // Number of bytes in the header before the length field
    length_field_offset: usize,
    // Adjust the length specified in the header field by this amount
    length_adjustment: isize,
    // Total number of bytes to skip before reading the payload, if not set,
    // `length_field_len + length_field_offset`
    num_skip: Option<usize>,
    // Length field byte order (little or big endian)
    length_field_is_big_endian: bool,
}
/// An error when the number of bytes read is more than max frame length.
pub struct FrameTooBig {
    // Private marker field: prevents construction outside this module and
    // leaves room to add fields later without a breaking change.
    _priv: (),
}
/// A codec for frames delimited by a frame head specifying their lengths.
///
/// This allows the consumer to work with entire frames without having to worry
/// about buffering or other framing logic.
///
/// See [module level] documentation for more detail.
///
/// [module level]: index.html
#[derive(Debug)]
pub struct LengthDelimitedCodec {
    // Configuration values
    builder: Builder,
    // Read state: whether the next bytes expected are a frame head or payload
    state: DecodeState,
}
// Tracks decoder progress across `decode` calls.
#[derive(Debug, Clone, Copy)]
enum DecodeState {
    // The frame head has not been fully parsed yet.
    Head,
    // The head was parsed; a payload of the contained length is pending.
    Data(usize),
}
// ===== impl LengthDelimitedCodec ======
impl LengthDelimitedCodec {
    /// Creates a new `LengthDelimitedCodec` with the default configuration values.
    pub fn new() -> Self {
        Self {
            builder: Builder::new(),
            state: DecodeState::Head,
        }
    }
    /// Returns the current max frame setting
    ///
    /// This is the largest size this codec will accept from the wire. Larger
    /// frames will be rejected.
    pub fn max_frame_length(&self) -> usize {
        self.builder.max_frame_len
    }
    /// Updates the max frame setting.
    ///
    /// The change takes effect the next time a frame is decoded. In other
    /// words, if a frame is currently in process of being decoded with a frame
    /// size greater than `val` but less than the max frame length in effect
    /// before calling this function, then the frame will be allowed.
    pub fn set_max_frame_length(&mut self, val: usize) {
        self.builder.max_frame_length(val);
    }
    // Attempts to parse the frame head from `src`.
    //
    // Returns `Ok(None)` while fewer than `num_head_bytes` are buffered;
    // otherwise strips the configured prefix from `src`, reserves room for
    // the payload, and returns the adjusted payload length.
    fn decode_head(&mut self, src: &mut BytesMut) -> io::Result<Option<usize>> {
        let head_len = self.builder.num_head_bytes();
        let field_len = self.builder.length_field_len;
        if src.len() < head_len {
            // Not enough data
            return Ok(None);
        }
        let n = {
            // Read through a cursor so `src` itself is not consumed yet.
            let mut src = Cursor::new(&mut *src);
            // Skip the required bytes
            src.advance(self.builder.length_field_offset);
            // match endianness
            let n = if self.builder.length_field_is_big_endian {
                src.get_uint_be(field_len)
            } else {
                src.get_uint_le(field_len)
            };
            // Max-length check is applied to the raw header value, before
            // `length_adjustment` is taken into account.
            if n > self.builder.max_frame_len as u64 {
                return Err(io::Error::new(io::ErrorKind::InvalidData, FrameTooBig {
                    _priv: (),
                }));
            }
            // The check above ensures there is no overflow
            let n = n as usize;
            // Adjust `n` with bounds checking
            let n = if self.builder.length_adjustment < 0 {
                n.checked_sub(-self.builder.length_adjustment as usize)
            } else {
                n.checked_add(self.builder.length_adjustment as usize)
            };
            // Error handling
            match n {
                Some(n) => n,
                None => return Err(io::Error::new(io::ErrorKind::InvalidInput, "provided length would overflow after adjustment")),
            }
        };
        let num_skip = self.builder.get_num_skip();
        if num_skip > 0 {
            let _ = src.split_to(num_skip);
        }
        // Ensure that the buffer has enough space to read the incoming
        // payload
        src.reserve(n);
        return Ok(Some(n));
    }
    // Yields `n` bytes of payload once buffered; `Ok(None)` until then.
    fn decode_data(&self, n: usize, src: &mut BytesMut) -> io::Result<Option<BytesMut>> {
        // At this point, the buffer has already had the required capacity
        // reserved. All there is to do is read.
        if src.len() < n {
            return Ok(None);
        }
        Ok(Some(src.split_to(n)))
    }
}
impl Decoder for LengthDelimitedCodec {
    type Item = BytesMut;
    type Error = io::Error;

    /// Decodes at most one length-delimited frame from `src`.
    ///
    /// Returns `Ok(Some(frame))` once a complete frame is buffered,
    /// `Ok(None)` when more bytes are required, and an error when the frame
    /// head is invalid (e.g. exceeds the configured max frame length).
    fn decode(&mut self, src: &mut BytesMut) -> io::Result<Option<BytesMut>> {
        // Resume where the previous call left off: either parse a new head
        // or keep waiting for `n` payload bytes.
        //
        // `?` replaces the older `try!` macro for consistency with the
        // `Encoder` implementation in this file, which already uses `?`.
        let n = match self.state {
            DecodeState::Head => {
                match self.decode_head(src)? {
                    Some(n) => {
                        self.state = DecodeState::Data(n);
                        n
                    }
                    None => return Ok(None),
                }
            }
            DecodeState::Data(n) => n,
        };
        match self.decode_data(n, src)? {
            Some(data) => {
                // Update the decode state
                self.state = DecodeState::Head;
                // Make sure the buffer has enough space to read the next head
                src.reserve(self.builder.num_head_bytes());
                Ok(Some(data))
            }
            None => Ok(None),
        }
    }
}
impl Encoder for LengthDelimitedCodec {
    type Item = Bytes;
    type Error = io::Error;
    // Writes `data` into `dst`, prefixed by its length field.
    fn encode(&mut self, data: Bytes, dst: &mut BytesMut) -> Result<(), io::Error> {
        let n = (&data).into_buf().remaining();
        // Max-length check applies to the raw payload size, before the
        // decode-time adjustment is reversed below.
        if n > self.builder.max_frame_len {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, FrameTooBig {
                _priv: (),
            }));
        }
        // Adjust `n` with bounds checking. Encoding applies the inverse of
        // the decode adjustment: a negative `length_adjustment` makes the
        // written length value larger than the payload, and vice versa.
        let n = if self.builder.length_adjustment < 0 {
            n.checked_add(-self.builder.length_adjustment as usize)
        } else {
            n.checked_sub(self.builder.length_adjustment as usize)
        };
        let n = n.ok_or_else(|| io::Error::new(
            io::ErrorKind::InvalidInput,
            "provided length would overflow after adjustment",
        ))?;
        // Reserve capacity in the destination buffer to fit the frame and
        // length field (plus adjustment).
        dst.reserve(self.builder.length_field_len + n);
        // Honor the configured byte order when writing the length field.
        if self.builder.length_field_is_big_endian {
            dst.put_uint_be(n as u64, self.builder.length_field_len);
        } else {
            dst.put_uint_le(n as u64, self.builder.length_field_len);
        }
        // Write the frame to the buffer
        dst.extend_from_slice(&data[..]);
        Ok(())
    }
}
// ===== impl Builder =====
impl Builder {
    /// Creates a new length delimited codec builder with default configuration
    /// values.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .length_field_offset(0)
    ///     .length_field_length(2)
    ///     .length_adjustment(0)
    ///     .num_skip(0)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new() -> Builder {
        Builder {
            // Default max frame length of 8MB
            max_frame_len: 8 * 1_024 * 1_024,
            // Default byte length of 4
            length_field_len: 4,
            // Default to the header field being at the start of the header.
            length_field_offset: 0,
            length_adjustment: 0,
            // Total number of bytes to skip before reading the payload, if not set,
            // `length_field_len + length_field_offset`
            num_skip: None,
            // Default to reading the length field in network (big) endian.
            length_field_is_big_endian: true,
        }
    }
    /// Read the length field as a big endian integer
    ///
    /// This is the default setting.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .big_endian()
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn big_endian(&mut self) -> &mut Self {
        self.length_field_is_big_endian = true;
        self
    }
    /// Read the length field as a little endian integer
    ///
    /// The default setting is big endian.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .little_endian()
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn little_endian(&mut self) -> &mut Self {
        self.length_field_is_big_endian = false;
        self
    }
    /// Read the length field as a native endian integer
    ///
    /// The default setting is big endian.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .native_endian()
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn native_endian(&mut self) -> &mut Self {
        // Resolved at compile time via the target's endianness.
        if cfg!(target_endian = "big") {
            self.big_endian()
        } else {
            self.little_endian()
        }
    }
    /// Sets the max frame length
    ///
    /// This configuration option applies to both encoding and decoding. The
    /// default value is 8MB.
    ///
    /// When decoding, the length field read from the byte stream is checked
    /// against this setting **before** any adjustments are applied. When
    /// encoding, the length of the submitted payload is checked against this
    /// setting.
    ///
    /// When frames exceed the max length, an `io::Error` with the custom value
    /// of the `FrameTooBig` type will be returned.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .max_frame_length(8 * 1024)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn max_frame_length(&mut self, val: usize) -> &mut Self {
        self.max_frame_len = val;
        self
    }
    /// Sets the number of bytes used to represent the length field
    ///
    /// The default value is `4`. The max value is `8`.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Panics
    ///
    /// Panics if `val` is zero or greater than 8.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .length_field_length(4)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn length_field_length(&mut self, val: usize) -> &mut Self {
        assert!(val > 0 && val <= 8, "invalid length field length");
        self.length_field_len = val;
        self
    }
    /// Sets the number of bytes in the header before the length field
    ///
    /// This configuration option only applies to decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .length_field_offset(1)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn length_field_offset(&mut self, val: usize) -> &mut Self {
        self.length_field_offset = val;
        self
    }
    /// Delta between the payload length specified in the header and the real
    /// payload length
    ///
    /// This configuration option applies to both encoding and decoding: it is
    /// added to the decoded header value, and its inverse is applied when
    /// writing the length field.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .length_adjustment(-2)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn length_adjustment(&mut self, val: isize) -> &mut Self {
        self.length_adjustment = val;
        self
    }
    /// Sets the number of bytes to skip before reading the payload
    ///
    /// Default value is `length_field_len + length_field_offset`
    ///
    /// This configuration option only applies to decoding
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .num_skip(4)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn num_skip(&mut self, val: usize) -> &mut Self {
        self.num_skip = Some(val);
        self
    }
    /// Create a configured length delimited `LengthDelimitedCodec`
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    /// # pub fn main() {
    /// Builder::new()
    ///     .length_field_offset(0)
    ///     .length_field_length(2)
    ///     .length_adjustment(0)
    ///     .num_skip(0)
    ///     .new_codec();
    /// # }
    /// ```
    pub fn new_codec(&self) -> LengthDelimitedCodec {
        LengthDelimitedCodec {
            builder: *self,
            state: DecodeState::Head,
        }
    }
    /// Create a configured length delimited `FramedRead`
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # use tokio::io::AsyncRead;
    /// use tokio::codec::length_delimited::Builder;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// Builder::new()
    ///     .length_field_offset(0)
    ///     .length_field_length(2)
    ///     .length_adjustment(0)
    ///     .num_skip(0)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new_read<T>(&self, upstream: T) -> FramedRead<T, LengthDelimitedCodec>
        where T: AsyncRead,
    {
        FramedRead::new(upstream, self.new_codec())
    }
    /// Create a configured length delimited `FramedWrite`
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # extern crate bytes;
    /// # use tokio::io::AsyncWrite;
    /// # use tokio::codec::length_delimited;
    /// # use bytes::BytesMut;
    /// # fn write_frame<T: AsyncWrite>(io: T) {
    /// length_delimited::Builder::new()
    ///     .length_field_length(2)
    ///     .new_write(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new_write<T>(&self, inner: T) -> FramedWrite<T, LengthDelimitedCodec>
        where T: AsyncWrite,
    {
        FramedWrite::new(inner, self.new_codec())
    }
    /// Create a configured length delimited `Framed`
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio;
    /// # extern crate bytes;
    /// # use tokio::io::{AsyncRead, AsyncWrite};
    /// # use tokio::codec::length_delimited;
    /// # use bytes::BytesMut;
    /// # fn write_frame<T: AsyncRead + AsyncWrite>(io: T) {
    /// # let _ =
    /// length_delimited::Builder::new()
    ///     .length_field_length(2)
    ///     .new_framed(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new_framed<T>(&self, inner: T) -> Framed<T, LengthDelimitedCodec>
        where T: AsyncRead + AsyncWrite,
    {
        Framed::new(inner, self.new_codec())
    }
    /// Number of bytes that must be buffered before the frame head can be
    /// parsed: everything up to and including the length field, or
    /// `num_skip` when that reaches further.
    fn num_head_bytes(&self) -> usize {
        let num = self.length_field_offset + self.length_field_len;
        cmp::max(num, self.num_skip.unwrap_or(0))
    }
    /// Bytes stripped from the front of each decoded frame; defaults to
    /// offset + length-field width when `num_skip` is unset.
    fn get_num_skip(&self) -> usize {
        self.num_skip.unwrap_or(self.length_field_offset + self.length_field_len)
    }
}
// ===== impl FrameTooBig =====
impl fmt::Debug for FrameTooBig {
    /// Renders the error as an empty `FrameTooBig` struct.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("FrameTooBig").finish()
    }
}
impl fmt::Display for FrameTooBig {
    /// Writes the human-readable error message.
    ///
    /// The message is written directly rather than via `Error::description`,
    /// which is deprecated in later Rust releases; the text is kept identical
    /// to `description()` so callers observe no behavioral change.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("frame size too big")
    }
}
impl StdError for FrameTooBig {
    /// Static description, also used verbatim by the `Display` impl above.
    fn description(&self) -> &str {
        "frame size too big"
    }
}
|
// Copyright 2015 The noise-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rand::{Rand, Rng, SeedableRng, XorShiftRng};
use std::num::SignedInt;
use math;
// Number of entries per permutation table. `TABLE_SIZE - 1` is also used as
// a bit mask on incoming coordinates, so it must stay a power of two.
const TABLE_SIZE: usize = 256;
#[allow(missing_copy_implementations)]
pub struct Seed {
    // One independently shuffled permutation of 0..=255 per axis
    // (see the `Rand` impl below).
    x_values: [u8; TABLE_SIZE],
    y_values: [u8; TABLE_SIZE],
    z_values: [u8; TABLE_SIZE],
    w_values: [u8; TABLE_SIZE],
}
impl Rand for Seed {
    /// Builds a `Seed` by shuffling four independent permutation tables
    /// (one per axis) with the supplied RNG.
    fn rand<R: Rng>(rng: &mut R) -> Seed {
        // Start every table from the identity permutation 0..=255...
        let mut x_values: Vec<u8> = ::std::iter::range_inclusive(0, (TABLE_SIZE - 1) as u8).collect();
        let mut y_values: Vec<u8> = x_values.clone();
        let mut z_values: Vec<u8> = x_values.clone();
        let mut w_values: Vec<u8> = x_values.clone();
        // ...then shuffle each one independently.
        rng.shuffle(&mut *x_values);
        rng.shuffle(&mut *y_values);
        rng.shuffle(&mut *z_values);
        rng.shuffle(&mut *w_values);
        // It's unfortunate that this double-initializes the array, but Rust doesn't currently provide a
        // clean way to do this in one pass. Hopefully won't matter, as Seed creation will usually be a
        // one-time event.
        let mut seed = Seed {
            x_values: [0; TABLE_SIZE],
            y_values: [0; TABLE_SIZE],
            z_values: [0; TABLE_SIZE],
            w_values: [0; TABLE_SIZE],
        };
        // Copy the shuffled Vecs into the fixed-size arrays. `cycle` cannot
        // run dry: each Vec holds exactly TABLE_SIZE entries.
        let x_iter = x_values.iter().cycle();
        for (x, y) in seed.x_values.iter_mut().zip(x_iter) { *x = *y }
        let y_iter = y_values.iter().cycle();
        for (x, y) in seed.y_values.iter_mut().zip(y_iter) { *x = *y }
        let z_iter = z_values.iter().cycle();
        for (x, y) in seed.z_values.iter_mut().zip(z_iter) { *x = *y }
        let w_iter = w_values.iter().cycle();
        for (x, y) in seed.w_values.iter_mut().zip(w_iter) { *x = *y }
        seed
    }
}
impl Seed {
    /// Deterministically derives a `Seed` from `seed` via a seeded
    /// xorshift RNG.
    pub fn new(seed: u32) -> Seed {
        // NOTE(review): first word is a constant 1, presumably so that
        // `seed == 0` still yields a non-zero RNG state — confirm intent.
        let mut rng: XorShiftRng = SeedableRng::from_seed([1, seed, seed, seed]);
        rng.gen()
    }
    /// Hashes one coordinate through the x table; the `TABLE_SIZE - 1`
    /// mask keeps the index in bounds.
    #[inline(always)]
    pub fn get1<T: SignedInt>(&self, x: T) -> usize {
        self.x_values[math::cast::<T, usize>(x & math::cast(TABLE_SIZE - 1))] as usize
    }
    /// Hashes a 2D point: per-axis table lookups combined with XOR.
    #[inline(always)]
    pub fn get2<T: SignedInt>(&self, pos: math::Point2<T>) -> usize {
        self.get1(pos[0]) ^ self.y_values[math::cast::<T, usize>(pos[1] & math::cast(TABLE_SIZE - 1))] as usize
    }
    /// Hashes a 3D point by extending `get2` with the z table.
    #[inline(always)]
    pub fn get3<T: SignedInt>(&self, pos: math::Point3<T>) -> usize {
        self.get2([pos[0], pos[1]]) ^ self.z_values[math::cast::<T, usize>(pos[2] & math::cast(TABLE_SIZE - 1))] as usize
    }
    /// Hashes a 4D point by extending `get3` with the w table.
    #[inline(always)]
    pub fn get4<T: SignedInt>(&self, pos: math::Point4<T>) -> usize {
        self.get3([pos[0], pos[1], pos[2]]) ^ self.w_values[math::cast::<T, usize>(pos[3] & math::cast(TABLE_SIZE - 1))] as usize
    }
}
#[cfg(test)]
mod tests {
    use std::rand::random;
    use perlin::perlin3;
    use super::Seed;
    // Smoke test: sampling with a randomly generated seed must not panic.
    #[test]
    fn test_random_seed() {
        let _ = perlin3::<f32>(&random(), &[1.0, 2.0, 3.0]);
    }
    // Negative coordinates must be handled by the masking in the `get*` methods.
    #[test]
    fn test_negative_params() {
        let _ = perlin3::<f32>(&Seed::new(0), &[-1.0, 2.0, 3.0]);
    }
}
Change Seed back to one permutation table
The changes in commit ce8e06ff made Seed use multiple permutation tables
which resulted in a significant speed improvement. However, when using
multiple tables in this style you are required to use a much larger
gradient/vector table in order to get correct results. The minimum number
of entries required is 64 while the ideal is 256. Doing this changes the
time for a single perlin2 sample from 13ns to 4000ns which is not
acceptable.
As we can not properly make use of the multiple permutation tables we
instead switch back to using only one. However, we are now using proper
Pearson hashing instead of the variant we had before. This allows us to
avoid having a double length table and seems to still offer a small benefit
in speed over the old style, even if it isn't as large as multiple tables
seemed to be.
// Copyright 2015 The noise-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rand::{Rand, Rng, SeedableRng, XorShiftRng};
use std::num::SignedInt;
use math;
// Number of entries in the permutation table. The `0xff` masks in the
// `get*` methods assume this stays 256.
const TABLE_SIZE: usize = 256;
#[allow(missing_copy_implementations)]
pub struct Seed {
    // A single shuffled permutation of 0..=255, reused for every axis
    // (see the `Rand` impl below).
    values: [u8; TABLE_SIZE],
}
impl Rand for Seed {
    /// Builds a `Seed` by shuffling a single permutation table with the
    /// supplied RNG.
    fn rand<R: Rng>(rng: &mut R) -> Seed {
        // Start from the identity permutation 0..=255, then shuffle it.
        let mut seq: Vec<u8> = ::std::iter::range_inclusive(0, (TABLE_SIZE - 1) as u8).collect();
        rng.shuffle(&mut *seq);
        // It's unfortunate that this double-initializes the array, but Rust doesn't currently provide a
        // clean way to do this in one pass. Hopefully won't matter, as Seed creation will usually be a
        // one-time event.
        let mut seed = Seed { values: [0; TABLE_SIZE] };
        // Copy the shuffled Vec into the fixed-size array element by element.
        let seq_it = seq.iter();
        for (x, y) in seed.values.iter_mut().zip(seq_it) { *x = *y }
        seed
    }
}
impl Seed {
    /// Deterministically derives a `Seed` from `seed` via a seeded
    /// xorshift RNG.
    pub fn new(seed: u32) -> Seed {
        // NOTE(review): first word is a constant 1, presumably so that
        // `seed == 0` still yields a non-zero RNG state — confirm intent.
        let mut rng: XorShiftRng = SeedableRng::from_seed([1, seed, seed, seed]);
        rng.gen()
    }
    /// Hashes one coordinate through the table; the `0xff` mask keeps the
    /// index in bounds.
    #[inline(always)]
    pub fn get1<T: SignedInt>(&self, x: T) -> usize {
        let x: usize = math::cast(x & math::cast(0xff));
        self.values[x] as usize
    }
    /// Hashes a 2D point: XOR the previous hash with the next masked
    /// coordinate, then look up the table again (Pearson-style chaining).
    #[inline(always)]
    pub fn get2<T: SignedInt>(&self, pos: math::Point2<T>) -> usize {
        let y: usize = math::cast(pos[1] & math::cast(0xff));
        self.values[self.get1(pos[0]) ^ y] as usize
    }
    /// Hashes a 3D point by chaining `get2` with the z coordinate.
    #[inline(always)]
    pub fn get3<T: SignedInt>(&self, pos: math::Point3<T>) -> usize {
        let z: usize = math::cast(pos[2] & math::cast(0xff));
        self.values[self.get2([pos[0], pos[1]]) ^ z] as usize
    }
    /// Hashes a 4D point by chaining `get3` with the w coordinate.
    #[inline(always)]
    pub fn get4<T: SignedInt>(&self, pos: math::Point4<T>) -> usize {
        let w: usize = math::cast(pos[3] & math::cast(0xff));
        self.values[self.get3([pos[0], pos[1], pos[2]]) ^ w] as usize
    }
}
#[cfg(test)]
mod tests {
    use std::rand::random;
    use perlin::perlin3;
    use super::Seed;
    // Smoke test: sampling with a randomly generated seed must not panic.
    #[test]
    fn test_random_seed() {
        let _ = perlin3::<f32>(&random(), &[1.0, 2.0, 3.0]);
    }
    // Negative coordinates must be handled by the masking in the `get*` methods.
    #[test]
    fn test_negative_params() {
        let _ = perlin3::<f32>(&Seed::new(0), &[-1.0, 2.0, 3.0]);
    }
}
|
use rustc_serialize::json;
use std::{fs, io};
use std::fs::File;
use std::path::PathBuf;
use datatype::{Config, DeviceReport, DownloadComplete, Error, Package,
UpdateReport, UpdateRequest, UpdateRequestId, Url};
use http::{Client, Response};
/// Encapsulate the client configuration and HTTP client used for
/// software-over-the-air updates.
pub struct Sota<'c, 'h> {
    // Client configuration: device uuid, Core server URL,
    // packages directory, package manager, ...
    config: &'c Config,
    // HTTP client used for all communication with the Core server.
    client: &'h Client,
}
impl<'c, 'h> Sota<'c, 'h> {
    /// Creates a new instance for Sota communication.
    pub fn new(config: &'c Config, client: &'h Client) -> Sota<'c, 'h> {
        Sota { config: config, client: client }
    }

    /// Takes a path and returns a new endpoint of the format
    /// `<Core server>/api/v1/device_updates/<device-id>$path`.
    fn endpoint(&self, path: &str) -> Url {
        let endpoint = format!("/api/v1/device_updates/{}{}", self.config.device.uuid, path);
        self.config.core.server.join(&endpoint).expect("couldn't build endpoint url")
    }

    /// Returns the path to a package on the device.
    ///
    /// Fails with `Error::Parse` when the joined path is not valid UTF-8.
    fn package_path(&self, id: UpdateRequestId) -> Result<String, Error> {
        let mut path = PathBuf::new();
        path.push(&self.config.device.packages_dir);
        path.push(id);
        Ok(try!(path.to_str().ok_or(Error::Parse(format!("Path is not valid UTF-8: {:?}", path)))).to_string())
    }

    /// Query the Core server for any pending or in-flight package updates.
    pub fn get_update_requests(&mut self) -> Result<Vec<UpdateRequest>, Error> {
        let resp_rx = self.client.get(self.endpoint("/queued"), None);
        let resp = try!(resp_rx.recv().ok_or(Error::Client("couldn't get new updates".to_string())));
        let data = match resp {
            Response::Success(data) => data,
            Response::Failed(data) => return Err(Error::from(data)),
            Response::Error(err) => return Err(err)
        };
        let text = try!(String::from_utf8(data.body));
        Ok(try!(json::decode::<Vec<UpdateRequest>>(&text)))
    }

    /// Download a specific update from the Core server and write it to
    /// the device's packages directory.
    pub fn download_update(&mut self, id: UpdateRequestId) -> Result<DownloadComplete, Error> {
        let resp_rx = self.client.get(self.endpoint(&format!("/{}/download", id)), None);
        let resp = try!(resp_rx.recv().ok_or(Error::Client("couldn't download update".to_string())));
        let data = match resp {
            Response::Success(data) => data,
            Response::Failed(data) => return Err(Error::from(data)),
            Response::Error(err) => return Err(err)
        };
        let path = try!(self.package_path(id.clone()));
        let mut file = try!(File::create(&path));
        // FIX: the result of `io::copy` was previously discarded
        // (`let _ = ...`), which could report success for a truncated
        // or failed write. Propagate the I/O error instead.
        try!(io::copy(&mut &*data.body, &mut file));
        Ok(DownloadComplete {
            update_id: id,
            // `path` is already an owned String; move it rather than
            // cloning via `to_string()`.
            update_image: path,
            signature: "".to_string()
        })
    }

    /// Install an update using the package manager.
    ///
    /// On success the downloaded package file is removed (best-effort);
    /// either way an `UpdateReport` describing the outcome is returned.
    pub fn install_update(&mut self, id: UpdateRequestId) -> Result<UpdateReport, UpdateReport> {
        let pacman = &self.config.device.package_manager;
        let path = self.package_path(id.clone()).expect("install_update expects a valid path");
        pacman.install_package(&path).and_then(|(code, output)| {
            // Best-effort cleanup: a failed removal is only logged.
            let _ = fs::remove_file(&path).unwrap_or_else(|err| error!("couldn't remove installed package: {}", err));
            Ok(UpdateReport::single(id.clone(), code, output))
        }).or_else(|(code, output)| {
            Err(UpdateReport::single(id.clone(), code, output))
        })
    }

    /// Send a list of the currently installed packages to the Core server.
    pub fn send_installed_packages(&mut self, packages: &Vec<Package>) -> Result<(), Error> {
        let body = try!(json::encode(packages));
        let resp_rx = self.client.put(self.endpoint("/installed"), Some(body.into_bytes()));
        let resp = try!(resp_rx.recv().ok_or(Error::Client("couldn't send installed packages".to_string())));
        match resp {
            Response::Success(_) => Ok(()),
            Response::Failed(data) => Err(Error::from(data)),
            Response::Error(err) => Err(err)
        }
    }

    /// Send the outcome of a package update to the Core server.
    pub fn send_update_report(&mut self, update_report: &UpdateReport) -> Result<(), Error> {
        let report = DeviceReport::new(&self.config.device.uuid, update_report);
        let body = try!(json::encode(&report));
        let url = self.endpoint(&format!("/{}", report.device));
        let resp_rx = self.client.post(url, Some(body.into_bytes()));
        let resp = try!(resp_rx.recv().ok_or(Error::Client("couldn't send update report".to_string())));
        match resp {
            Response::Success(_) => Ok(()),
            Response::Failed(data) => Err(Error::from(data)),
            Response::Error(err) => Err(err)
        }
    }

    /// Send system information from the device to the Core server.
    pub fn send_system_info(&mut self, body: &str) -> Result<(), Error> {
        let resp_rx = self.client.put(self.endpoint("/system_info"), Some(body.as_bytes().to_vec()));
        let resp = try!(resp_rx.recv().ok_or(Error::Client("couldn't send system info".to_string())));
        match resp {
            Response::Success(_) => Ok(()),
            Response::Failed(data) => Err(Error::from(data)),
            Response::Error(err) => Err(err)
        }
    }
}
#[cfg(test)]
mod tests {
    use rustc_serialize::json;
    use super::*;
    use datatype::{Config, Package, UpdateRequest, UpdateRequestStatus};
    use http::TestClient;

    /// Round-trips one pending `UpdateRequest` through a canned
    /// `TestClient` response and checks that `get_update_requests`
    /// decodes it.
    #[test]
    fn test_get_update_requests() {
        // camelCase field names come from the `UpdateRequest`
        // definition in `datatype`.
        let pending_update = UpdateRequest {
            requestId: "someid".to_string(),
            status: UpdateRequestStatus::Pending,
            packageId: Package {
                name: "fake-pkg".to_string(),
                version: "0.1.1".to_string()
            },
            installPos: 0,
            createdAt: "2010-01-01".to_string()
        };
        // The fake server replies with a JSON array holding one request.
        let json = format!("[{}]", json::encode(&pending_update).unwrap());
        let mut sota = Sota {
            config: &Config::default(),
            client: &mut TestClient::from(vec![json.to_string()]),
        };
        let updates: Vec<UpdateRequest> = sota.get_update_requests().unwrap();
        let ids: Vec<String> = updates.iter().map(|p| p.requestId.clone()).collect();
        assert_eq!(ids, vec!["someid".to_string()])
    }
}
Use new 'mydevice' endpoint on sota-core
use rustc_serialize::json;
use std::{fs, io};
use std::fs::File;
use std::path::PathBuf;
use datatype::{Config, DownloadComplete, Error, Package,
UpdateReport, UpdateRequest, UpdateRequestId, Url};
use http::{Client, Response};
/// Encapsulate the client configuration and HTTP client used for
/// software-over-the-air updates.
pub struct Sota<'c, 'h> {
    // Client configuration: device uuid, Core server URL,
    // packages directory, package manager, ...
    config: &'c Config,
    // HTTP client used for all communication with the Core server.
    client: &'h Client,
}
impl<'c, 'h> Sota<'c, 'h> {
    /// Creates a new instance for Sota communication.
    pub fn new(config: &'c Config, client: &'h Client) -> Sota<'c, 'h> {
        Sota { config: config, client: client }
    }

    /// Takes a path and returns a new endpoint of the format
    /// `<Core server>/api/v1/mydevice/<device-id>$path`.
    fn endpoint(&self, path: &str) -> Url {
        let endpoint = format!("/api/v1/mydevice/{}{}", self.config.device.uuid, path);
        self.config.core.server.join(&endpoint).expect("couldn't build endpoint url")
    }

    /// Returns the path to a package on the device.
    ///
    /// Fails with `Error::Parse` when the joined path is not valid UTF-8.
    fn package_path(&self, id: UpdateRequestId) -> Result<String, Error> {
        let mut path = PathBuf::new();
        path.push(&self.config.device.packages_dir);
        path.push(id);
        Ok(try!(path.to_str().ok_or(Error::Parse(format!("Path is not valid UTF-8: {:?}", path)))).to_string())
    }

    /// Query the Core server for any pending or in-flight package updates.
    pub fn get_update_requests(&mut self) -> Result<Vec<UpdateRequest>, Error> {
        let resp_rx = self.client.get(self.endpoint("/updates"), None);
        let resp = try!(resp_rx.recv().ok_or(Error::Client("couldn't get new updates".to_string())));
        let data = match resp {
            Response::Success(data) => data,
            Response::Failed(data) => return Err(Error::from(data)),
            Response::Error(err) => return Err(err)
        };
        let text = try!(String::from_utf8(data.body));
        Ok(try!(json::decode::<Vec<UpdateRequest>>(&text)))
    }

    /// Download a specific update from the Core server and write it to
    /// the device's packages directory.
    pub fn download_update(&mut self, id: UpdateRequestId) -> Result<DownloadComplete, Error> {
        let resp_rx = self.client.get(self.endpoint(&format!("/updates/{}/download", id)), None);
        let resp = try!(resp_rx.recv().ok_or(Error::Client("couldn't download update".to_string())));
        let data = match resp {
            Response::Success(data) => data,
            Response::Failed(data) => return Err(Error::from(data)),
            Response::Error(err) => return Err(err)
        };
        let path = try!(self.package_path(id.clone()));
        let mut file = try!(File::create(&path));
        // FIX: the result of `io::copy` was previously discarded
        // (`let _ = ...`), which could report success for a truncated
        // or failed write. Propagate the I/O error instead.
        try!(io::copy(&mut &*data.body, &mut file));
        Ok(DownloadComplete {
            update_id: id,
            // `path` is already an owned String; move it rather than
            // cloning via `to_string()`.
            update_image: path,
            signature: "".to_string()
        })
    }

    /// Install an update using the package manager.
    ///
    /// On success the downloaded package file is removed (best-effort);
    /// either way an `UpdateReport` describing the outcome is returned.
    pub fn install_update(&mut self, id: UpdateRequestId) -> Result<UpdateReport, UpdateReport> {
        let pacman = &self.config.device.package_manager;
        let path = self.package_path(id.clone()).expect("install_update expects a valid path");
        pacman.install_package(&path).and_then(|(code, output)| {
            // Best-effort cleanup: a failed removal is only logged.
            let _ = fs::remove_file(&path).unwrap_or_else(|err| error!("couldn't remove installed package: {}", err));
            Ok(UpdateReport::single(id.clone(), code, output))
        }).or_else(|(code, output)| {
            Err(UpdateReport::single(id.clone(), code, output))
        })
    }

    /// Send a list of the currently installed packages to the Core server.
    pub fn send_installed_packages(&mut self, packages: &Vec<Package>) -> Result<(), Error> {
        let body = try!(json::encode(packages));
        let resp_rx = self.client.put(self.endpoint("/installed"), Some(body.into_bytes()));
        let resp = try!(resp_rx.recv().ok_or(Error::Client("couldn't send installed packages".to_string())));
        match resp {
            Response::Success(_) => Ok(()),
            Response::Failed(data) => Err(Error::from(data)),
            Response::Error(err) => Err(err)
        }
    }

    /// Send the outcome of a package update to the Core server.
    pub fn send_update_report(&mut self, update_report: &UpdateReport) -> Result<(), Error> {
        let body = try!(json::encode(&update_report.operation_results));
        let url = self.endpoint(&format!("/updates/{}", update_report.update_id));
        let resp_rx = self.client.post(url, Some(body.into_bytes()));
        let resp = try!(resp_rx.recv().ok_or(Error::Client("couldn't send update report".to_string())));
        match resp {
            Response::Success(_) => Ok(()),
            Response::Failed(data) => Err(Error::from(data)),
            Response::Error(err) => Err(err)
        }
    }

    /// Send system information from the device to the Core server.
    pub fn send_system_info(&mut self, body: &str) -> Result<(), Error> {
        let resp_rx = self.client.put(self.endpoint("/system_info"), Some(body.as_bytes().to_vec()));
        let resp = try!(resp_rx.recv().ok_or(Error::Client("couldn't send system info".to_string())));
        match resp {
            Response::Success(_) => Ok(()),
            Response::Failed(data) => Err(Error::from(data)),
            Response::Error(err) => Err(err)
        }
    }
}
#[cfg(test)]
mod tests {
    use rustc_serialize::json;
    use super::*;
    use datatype::{Config, Package, UpdateRequest, UpdateRequestStatus};
    use http::TestClient;

    /// Round-trips one pending `UpdateRequest` through a canned
    /// `TestClient` response and checks that `get_update_requests`
    /// decodes it.
    #[test]
    fn test_get_update_requests() {
        // camelCase field names come from the `UpdateRequest`
        // definition in `datatype`.
        let pending_update = UpdateRequest {
            requestId: "someid".to_string(),
            status: UpdateRequestStatus::Pending,
            packageId: Package {
                name: "fake-pkg".to_string(),
                version: "0.1.1".to_string()
            },
            installPos: 0,
            createdAt: "2010-01-01".to_string()
        };
        // The fake server replies with a JSON array holding one request.
        let json = format!("[{}]", json::encode(&pending_update).unwrap());
        let mut sota = Sota {
            config: &Config::default(),
            client: &mut TestClient::from(vec![json.to_string()]),
        };
        let updates: Vec<UpdateRequest> = sota.get_update_requests().unwrap();
        let ids: Vec<String> = updates.iter().map(|p| p.requestId.clone()).collect();
        assert_eq!(ids, vec!["someid".to_string()])
    }
}
|
//! Implements the internal chess board and the move generation logic.
use std::mem::uninitialized;
use std::cell::Cell;
use basetypes::*;
use moves::*;
use notation::parse_fen;
use position::bitsets::*;
use position::IllegalPosition;
use position::tables::{BoardGeometry, ZobristArrays};
/// Holds the current position and can determine which moves are
/// legal.
///
/// In a nutshell, `Board` can generate all possible moves in the
/// current position, play a selected move, and take it back. It can
/// also play a "null move" which can be used to selectively prune the
/// search tree. `Board` does not try to be clever. In particular, it
/// is completely unaware of repeating positions, rule-50, chess
/// strategy or tactics.
#[derive(Clone)]
pub struct Board {
    /// Shared `'static` geometry/attack lookup tables
    /// (obtained via `BoardGeometry::get()`).
    geometry: &'static BoardGeometry,
    /// Shared `'static` Zobrist hashing tables
    /// (obtained via `ZobristArrays::get()`).
    zobrist: &'static ZobristArrays,
    /// The placement of the pieces on the board.
    pieces: PiecesPlacement,
    /// The side to move.
    to_move: Color,
    /// The castling rights for both players.
    castling: CastlingRights,
    /// The file on which an en-passant pawn capture is
    /// possible. Values between 8 and 15 indicate that en-passant
    /// capture is not possible.
    en_passant_file: usize,
    /// This will always be equal to `self.pieces.color[WHITE] |
    /// self.pieces.color[BLACK]`
    _occupied: Bitboard,
    /// The square on which the king of the side to move is
    /// placed. Lazily calculated, >= 64 if not calculated yet.
    /// (`Cell` gives interior mutability so `&self` getters can
    /// fill the cache.)
    _king_square: Cell<Square>,
    /// Lazily calculated bitboard of all checkers --
    /// `BB_UNIVERSAL_SET` if not calculated yet.
    _checkers: Cell<Bitboard>,
    /// Lazily calculated bitboard of all pinned pieces and pawns --
    /// `BB_UNIVERSAL_SET` if not calculated yet.
    _pinned: Cell<Bitboard>,
}
impl Board {
/// Creates a new board instance.
///
/// This function makes expensive verification to make sure that
/// the resulting new board is legal.
///
/// Returns `Err(IllegalPosition)` when the en-passant square lies
/// on the wrong rank for the side to move, or when the assembled
/// position fails the `is_legal` check.
pub fn create(pieces_placement: &PiecesPlacement,
              to_move: Color,
              castling: CastlingRights,
              en_passant_square: Option<Square>)
              -> Result<Board, IllegalPosition> {
    // A valid en-passant target square can only be on rank 6 when
    // White is to move, or rank 3 when Black is to move.
    let en_passant_rank = match to_move {
        WHITE => RANK_6,
        BLACK => RANK_3,
        _ => return Err(IllegalPosition),
    };
    let en_passant_file = match en_passant_square {
        None => NO_ENPASSANT_FILE,
        Some(x) if x <= 63 && rank(x) == en_passant_rank => file(x),
        _ => return Err(IllegalPosition),
    };
    let b = Board {
        geometry: BoardGeometry::get(),
        zobrist: ZobristArrays::get(),
        pieces: *pieces_placement,
        to_move: to_move,
        castling: castling,
        en_passant_file: en_passant_file,
        _occupied: pieces_placement.color[WHITE] | pieces_placement.color[BLACK],
        // Lazily-computed caches start in their "not computed yet"
        // sentinel states (see the field docs on `Board`).
        _king_square: Cell::new(64),
        _checkers: Cell::new(BB_UNIVERSAL_SET),
        _pinned: Cell::new(BB_UNIVERSAL_SET),
    };
    if b.is_legal() {
        Ok(b)
    } else {
        Err(IllegalPosition)
    }
}
/// Creates a new board instance from a FEN string.
///
/// A FEN (Forsyth–Edwards Notation) string defines a particular
/// position using only the ASCII character set. This function
/// makes expensive verification to make sure that the resulting
/// new board is legal.
///
/// Any parse error is collapsed into `Err(IllegalPosition)`; the
/// halfmove/fullmove FEN fields are ignored here.
pub fn from_fen(fen: &str) -> Result<Board, IllegalPosition> {
    let (ref placement, to_move, castling, en_passant_square, _, _) =
        try!(parse_fen(fen).map_err(|_| IllegalPosition));
    Board::create(placement, to_move, castling, en_passant_square)
}
/// Returns a reference to a properly initialized `BoardGeometry`
/// object.
#[inline(always)]
pub fn geometry(&self) -> &BoardGeometry {
    self.geometry
}

/// Returns a reference to a properly initialized `ZobristArrays`
/// object.
#[inline(always)]
pub fn zobrist(&self) -> &ZobristArrays {
    self.zobrist
}

/// Returns a description of the placement of the pieces on the
/// board.
#[inline(always)]
pub fn pieces(&self) -> &PiecesPlacement {
    &self.pieces
}

/// Returns the side to move.
#[inline(always)]
pub fn to_move(&self) -> Color {
    self.to_move
}

/// Returns the castling rights.
#[inline(always)]
pub fn castling(&self) -> CastlingRights {
    self.castling
}

/// Returns the file on which an en-passant pawn capture is
/// possible.
///
/// `None` is returned when the stored file holds the "no
/// en-passant" sentinel (any value >= 8).
#[inline(always)]
pub fn en_passant_file(&self) -> Option<File> {
    if self.en_passant_file < 8 {
        Some(self.en_passant_file)
    } else {
        None
    }
}

/// Returns a bitboard of all occupied squares.
#[inline(always)]
pub fn occupied(&self) -> Bitboard {
    self._occupied
}
/// Returns the bitboard of all checkers that are attacking the
/// king.
///
/// The bitboard of all checkers is calculated the first time it
/// is needed and is saved to the `_checkers` field, in case it is
/// needed again. If there is a saved value already, the call to
/// `checkers` is practically free.
#[inline]
pub fn checkers(&self) -> Bitboard {
    if self._checkers.get() == BB_UNIVERSAL_SET {
        // `1 ^ self.to_move` is the opponent's color: we look for
        // opponent pieces attacking our king's square.
        self._checkers.set(self.attacks_to(1 ^ self.to_move, self.king_square()));
    }
    self._checkers.get()
}
/// Returns the bitboard of all pinned pieces and pawns of the
/// color of the side to move.
///
/// The bitboard of all pinned pieces and pawns is calculated the
/// first time it is needed and is saved to the `_pinned` field,
/// in case it is needed again. If there is a saved value already,
/// the call to `pinned` is practically free.
#[inline]
pub fn pinned(&self) -> Bitboard {
    if self._pinned.get() == BB_UNIVERSAL_SET {
        self._pinned.set(self.find_pinned());
    }
    self._pinned.get()
}
/// Returns a bitboard of all pieces and pawns of color `us` that
/// attack `square`.
///
/// # Panics
///
/// Panics when `square > 63` (see the note inside).
pub fn attacks_to(&self, us: Color, square: Square) -> Bitboard {
    let occupied_by_us = self.pieces.color[us];
    if square > 63 {
        // We call "piece_attacks_from()" here many times, which for
        // performance reasons do not do array boundary checks. Since
        // "Board::attacks_to()" is a public function, we have to
        // guarantee memory safety for all its users.
        panic!("invalid square");
    }
    let square_bb = 1 << square;
    unsafe {
        let shifts: &[isize; 4] = PAWN_MOVE_SHIFTS.get_unchecked(us);
        // Sliding and stepping attackers: place each piece type on
        // `square` and intersect its attack set with our pieces of
        // that type (attack symmetry).
        (self.geometry.piece_attacks_from(ROOK, square, self.occupied()) & occupied_by_us &
         (self.pieces.piece_type[ROOK] | self.pieces.piece_type[QUEEN])) |
        (self.geometry.piece_attacks_from(BISHOP, square, self.occupied()) & occupied_by_us &
         (self.pieces.piece_type[BISHOP] | self.pieces.piece_type[QUEEN])) |
        (self.geometry.piece_attacks_from(KNIGHT, square, self.occupied()) & occupied_by_us &
         self.pieces.piece_type[KNIGHT]) |
        (self.geometry.piece_attacks_from(KING, square, self.occupied()) & occupied_by_us &
         self.pieces.piece_type[KING]) |
        // Pawn attackers: shift the target square *backwards* along
        // each capture direction to find the candidate pawn origin;
        // the file/rank masks exclude wrap-around and squares where
        // a pawn cannot stand.
        (gen_shift(square_bb, -shifts[PAWN_EAST_CAPTURE]) & occupied_by_us &
         self.pieces.piece_type[PAWN] & !(BB_FILE_H | BB_RANK_1 | BB_RANK_8)) |
        (gen_shift(square_bb, -shifts[PAWN_WEST_CAPTURE]) & occupied_by_us &
         self.pieces.piece_type[PAWN] & !(BB_FILE_A | BB_RANK_1 | BB_RANK_8))
    }
}
/// Generates pseudo-legal moves.
///
/// A pseudo-legal move is a move that is otherwise legal, except
/// it might leave the king in check. Every legal move is a
/// pseudo-legal move, but not every pseudo-legal move is legal.
/// The generated moves will be pushed to `move_stack`. When `all`
/// is `true`, all pseudo-legal moves will be generated. When
/// `all` is `false`, only captures, pawn promotions to queen, and
/// check evasions will be generated.
pub fn generate_moves(&self, all: bool, move_stack: &mut MoveStack) {
    // All generated moves with pieces other than the king will be
    // legal. It is possible that some of the king's moves are
    // illegal because the destination square is under check, or
    // when castling, king's passing square is attacked. This is
    // so because verifying that these squares are not under
    // attack is quite expensive, and therefore we hope that the
    // alpha-beta pruning will eliminate the need for this
    // verification at all.
    assert!(self.is_legal());
    let king_square = self.king_square();
    let checkers = self.checkers();
    let occupied_by_us = unsafe { *self.pieces.color.get_unchecked(self.to_move) };
    let occupied_by_them = self.occupied() ^ occupied_by_us;
    // When in check we must generate every evasion, even if the
    // caller asked only for captures/promotions.
    let generate_all_moves = all || checkers != 0;
    assert!(king_square <= 63);
    // When in check, for every move except king's moves, the only
    // legal destination squares are those lying on the line
    // between the checker and the king. Also, no piece can move
    // to a square that is occupied by a friendly piece.
    let legal_dests = !occupied_by_us &
                      match ls1b(checkers) {
        0 =>
            // Not in check -- every move destination may be
            // considered "covering".
            BB_UNIVERSAL_SET,
        x if x == checkers =>
            // Single check -- calculate the check covering
            // destination subset (the squares between the king
            // and the checker). Notice that we must OR with "x"
            // itself, because knights give check not lying on a
            // line with the king.
            x |
            unsafe {
                *self.geometry
                     .squares_between_including
                     .get_unchecked(king_square)
                     .get_unchecked(bitscan_1bit(x))
            },
        _ =>
            // Double check -- no covering moves.
            BB_EMPTY_SET,
    };
    if legal_dests != 0 {
        // This block is not executed when the king is in double
        // check.
        let pinned = self.pinned();
        let pin_lines = unsafe { self.geometry.squares_at_line.get_unchecked(king_square) };
        let en_passant_bb = self.en_passant_bb();

        // Find queen, rook, bishop, and knight moves.
        {
            // Reduce the set of legal destinations when searching
            // only for captures, pawn promotions to queen, and
            // check evasions.
            let legal_dests = if generate_all_moves {
                legal_dests
            } else {
                assert_eq!(legal_dests, !occupied_by_us);
                occupied_by_them
            };
            for piece in QUEEN..PAWN {
                let mut bb = unsafe { *self.pieces.piece_type.get_unchecked(piece) } &
                             occupied_by_us;
                while bb != 0 {
                    let orig_square = bitscan_forward_and_reset(&mut bb);
                    let piece_legal_dests = if 1 << orig_square & pinned == 0 {
                        legal_dests
                    } else {
                        // If the piece is pinned, reduce the set
                        // of legal destination to the squares on
                        // the line of the pin.
                        unsafe { legal_dests & *pin_lines.get_unchecked(orig_square) }
                    };
                    self.push_piece_moves_to_stack(piece,
                                                   orig_square,
                                                   piece_legal_dests,
                                                   move_stack);
                }
            }
        }

        // Find pawn moves.
        {
            // Reduce the set of legal destinations when searching
            // only for captures, pawn promotions to queen, and
            // check evasions.
            let legal_dests = if generate_all_moves {
                legal_dests
            } else {
                assert_eq!(legal_dests, !occupied_by_us);
                legal_dests & (occupied_by_them | en_passant_bb | BB_PAWN_PROMOTION_RANKS)
            };
            // When in check, the en-passant capture can be a
            // legal evasion move, but only when the checking
            // piece is the passing pawn itself.
            let pawn_legal_dests = if checkers & self.pieces.piece_type[PAWN] == 0 {
                legal_dests
            } else {
                legal_dests | en_passant_bb
            };
            // Find all free pawn moves at once.
            let all_pawns = self.pieces.piece_type[PAWN] & occupied_by_us;
            let mut pinned_pawns = all_pawns & pinned;
            let free_pawns = all_pawns ^ pinned_pawns;
            if free_pawns != 0 {
                self.push_pawn_moves_to_stack(free_pawns,
                                              en_passant_bb,
                                              pawn_legal_dests,
                                              !generate_all_moves,
                                              move_stack);
            }
            // Find pinned pawn moves pawn by pawn, reducing the
            // set of legal destination for each pawn to the
            // squares on the line of the pin.
            while pinned_pawns != 0 {
                let pawn_square = bitscan_forward_and_reset(&mut pinned_pawns);
                let pin_line = unsafe { *pin_lines.get_unchecked(pawn_square) };
                self.push_pawn_moves_to_stack(1 << pawn_square,
                                              en_passant_bb,
                                              pin_line & pawn_legal_dests,
                                              !generate_all_moves,
                                              move_stack);
            }
        }
    }

    // Find king moves (pseudo-legal, possibly moving into check
    // or passing through an attacked square when castling). This
    // is executed even when the king is in double check.
    {
        // Reduce the set of destinations when searching only for
        // captures, pawn promotions to queen, and check evasions.
        let king_dests = if generate_all_moves {
            self.push_castling_moves_to_stack(move_stack);
            !occupied_by_us
        } else {
            occupied_by_them
        };
        self.push_piece_moves_to_stack(KING, king_square, king_dests, move_stack);
    }
}
/// Returns a null move.
///
/// "Null move" is a pseudo-move that changes nothing on the board
/// except the side to move. It is sometimes useful to include a
/// speculative null move in the search tree so as to achieve more
/// aggressive pruning.
#[inline]
pub fn null_move(&self) -> Move {
    let king_square = self.king_square();
    assert!(king_square <= 63);
    // Encoded as a "king moves to its own square" normal move
    // (orig == dest), which is how `do_move` recognizes it.
    Move::new(self.to_move,
              MOVE_NORMAL,
              KING,
              king_square,
              king_square,
              NO_PIECE,
              self.en_passant_file,
              self.castling,
              0)
}
/// Checks if `move_digest` represents a pseudo-legal move.
///
/// If a move `m` exists that would be generated by
/// `generate_moves` if called for the current position on the
/// board, and for that move `m.digest() == move_digest`, this
/// method will return `Some(m)`. Otherwise it will return
/// `None`. This is useful when playing moves from the
/// transposition table, without calling `generate_moves`.
pub fn try_move_digest(&self, move_digest: MoveDigest) -> Option<Move> {
    // We could easily call `generate_moves` here and verify if
    // some of the generated moves has the right digest, but this
    // would be much slower. The whole purpose of this method is
    // to be able to check if a move is pseudo-legal *without*
    // generating all moves.
    if move_digest == 0 {
        return None;
    }

    // Unpack the digest fields.
    let move_type = get_move_type(move_digest);
    let orig_square = get_orig_square(move_digest);
    let dest_square = get_dest_square(move_digest);
    let promoted_piece_code = get_aux_data(move_digest);
    let king_square = self.king_square();
    let checkers = self.checkers();
    assert!(self.to_move <= 1);
    assert!(move_type <= 3);
    assert!(orig_square <= 63);
    assert!(dest_square <= 63);

    // Castling gets its own, fully separate validation path.
    if move_type == MOVE_CASTLING {
        let side = if dest_square < orig_square {
            QUEENSIDE
        } else {
            KINGSIDE
        };
        // Castling is disallowed when in check, when the path is
        // obstructed, or when the squares don't match the standard
        // king origin/destination for this side and color.
        if checkers != 0 || self.castling_obstacles(side) != 0 || orig_square != king_square ||
           dest_square != [[C1, C8], [G1, G8]][side][self.to_move] ||
           promoted_piece_code != 0 {
            return None;
        }
        return Some(Move::new(self.to_move,
                              MOVE_CASTLING,
                              KING,
                              orig_square,
                              dest_square,
                              NO_PIECE,
                              self.en_passant_file,
                              self.castling,
                              0));
    }

    // Figure out what is the moved piece.
    let occupied_by_us = unsafe { *self.pieces.color.get_unchecked(self.to_move) };
    let orig_square_bb = occupied_by_us & (1 << orig_square);
    let dest_square_bb = 1 << dest_square;
    let piece;
    'pieces: loop {
        for i in (KING..NO_PIECE).rev() {
            if orig_square_bb & unsafe { *self.pieces.piece_type.get_unchecked(i) } != 0 {
                piece = i;
                break 'pieces;
            }
        }
        // No friendly piece on the origin square -- not our move.
        return None;
    }
    assert!(piece <= PAWN);

    // We will shrink the pseudo-legal destinations set as we go.
    let mut pseudo_legal_dests = !occupied_by_us;
    if piece != KING {
        // Non-king moves must cover a check (if any) and must
        // respect pins -- same reasoning as in `generate_moves`.
        pseudo_legal_dests &= match ls1b(checkers) {
            0 => BB_UNIVERSAL_SET,
            x if x == checkers => {
                // We are in check.
                x |
                unsafe {
                    *self.geometry
                         .squares_between_including
                         .get_unchecked(king_square)
                         .get_unchecked(bitscan_1bit(x))
                }
            }
            // We are in double check.
            _ => return None,
        };
        if orig_square_bb & self.pinned() != 0 {
            // The piece is pinned.
            pseudo_legal_dests &= unsafe {
                *self.geometry
                     .squares_at_line
                     .get_unchecked(king_square)
                     .get_unchecked(orig_square)
            }
        }
    };

    // This is a good initial guess.
    let mut captured_piece = self.get_piece_type_at(dest_square_bb);
    if piece == PAWN {
        let en_passant_bb = self.en_passant_bb();
        if checkers & self.pieces.piece_type[PAWN] != 0 {
            // If we are in check, and the checking piece is the
            // passing pawn, the en-passant capture is a legal
            // check evasion.
            pseudo_legal_dests |= en_passant_bb;
        }
        let mut dest_sets: [Bitboard; 4] = unsafe { uninitialized() };
        self.calc_pawn_dest_sets(orig_square_bb, en_passant_bb, &mut dest_sets);
        pseudo_legal_dests &= dest_sets[PAWN_PUSH] | dest_sets[PAWN_DOUBLE_PUSH] |
                              dest_sets[PAWN_WEST_CAPTURE] |
                              dest_sets[PAWN_EAST_CAPTURE];
        if pseudo_legal_dests & dest_square_bb == 0 {
            return None;
        }
        // The destination determines which pawn move type the
        // digest is required to declare.
        match dest_square_bb {
            x if x == en_passant_bb => {
                if move_type != MOVE_ENPASSANT ||
                   !self.en_passant_special_check_ok(orig_square, dest_square) ||
                   promoted_piece_code != 0 {
                    return None;
                }
                captured_piece = PAWN;
            }
            x if x & BB_PAWN_PROMOTION_RANKS != 0 => {
                if move_type != MOVE_PROMOTION {
                    return None;
                }
            }
            _ => {
                if move_type != MOVE_NORMAL || promoted_piece_code != 0 {
                    return None;
                }
            }
        }
    } else {
        // Non-pawn, non-castling: a plain piece move to one of its
        // attack squares.
        pseudo_legal_dests &= unsafe {
            self.geometry.piece_attacks_from(piece, orig_square, self.occupied())
        };
        if move_type != MOVE_NORMAL || pseudo_legal_dests & dest_square_bb == 0 ||
           promoted_piece_code != 0 {
            return None;
        }
    }

    Some(Move::new(self.to_move,
                   move_type,
                   piece,
                   orig_square,
                   dest_square,
                   captured_piece,
                   self.en_passant_file,
                   self.castling,
                   promoted_piece_code))
}
/// Plays a move on the board.
///
/// It verifies if the move is legal. If the move is legal, the
/// board is updated and an `u64` value is returned, which should
/// be XOR-ed with the old board's hash value to obtain the new
/// board's hash value. If the move is illegal, `None` is returned
/// without updating the board. The move passed to this method
/// **must** have been generated by `generate_moves`,
/// `try_move_digest`, or `null_move` methods for the current
/// position on the board.
///
/// Moves generated by the `null_move` method are exceptions. For
/// them `do_move(m)` will return `None` if and only if the king
/// is in check.
pub fn do_move(&mut self, m: Move) -> Option<u64> {
    let us = self.to_move;
    let them = 1 ^ us;
    let move_type = m.move_type();
    let orig_square = m.orig_square();
    let dest_square = m.dest_square();
    let piece = m.piece();
    let captured_piece = m.captured_piece();
    // Incremental Zobrist delta, accumulated as we mutate the board.
    let mut hash = 0;
    assert!(us <= 1);
    assert!(piece < NO_PIECE);
    assert!(move_type <= 3);
    assert!(orig_square <= 63);
    assert!(dest_square <= 63);
    assert!({
                // Check if `m` was generated by `null_move`.
                m.is_null()
            } ||
            {
                // Check if `m` was generated by `try_move_digest` or
                // `generate_moves`.
                let mut m1 = m;
                let mut m2 = self.try_move_digest(m.digest()).unwrap();
                m1.set_score(0);
                m2.set_score(0);
                m1 == m2
            });
    if piece >= NO_PIECE {
        // Since "Board::do_move()" is a public function, we have
        // to guarantee memory safety for all its users.
        panic!("invalid piece");
    }

    // SAFETY: `us`, `piece`, `orig_square`, and `dest_square` were
    // all range-checked above, so the unchecked indexing below
    // stays in bounds.
    unsafe {
        // Verify if the move will leave the king in check.
        if piece == KING {
            if orig_square != dest_square {
                if self.king_would_be_in_check(dest_square) {
                    return None; // the king is in check -- illegal move
                }
            } else {
                // orig == dest encodes a null move (see `null_move`).
                if self.checkers() != 0 {
                    return None; // invalid "null move"
                }
            }
        }

        // Move the rook if the move is castling.
        if move_type == MOVE_CASTLING {
            // The square the king passes over is the midpoint of
            // origin and destination.
            if self.king_would_be_in_check((orig_square + dest_square) >> 1) {
                return None; // king's passing square is attacked -- illegal move
            }
            let side = if dest_square > orig_square {
                KINGSIDE
            } else {
                QUEENSIDE
            };
            let mask = CASTLING_ROOK_MASK[us][side];
            self.pieces.piece_type[ROOK] ^= mask;
            self.pieces.color[us] ^= mask;
            hash ^= self.zobrist._castling_rook_movement[us][side];
        }

        let not_orig_bb = !(1 << orig_square);
        let dest_bb = 1 << dest_square;

        // empty the origin square
        *self.pieces.piece_type.get_unchecked_mut(piece) &= not_orig_bb;
        *self.pieces.color.get_unchecked_mut(us) &= not_orig_bb;
        hash ^= *self.zobrist
                     .pieces
                     .get_unchecked(us)
                     .get_unchecked(piece)
                     .get_unchecked(orig_square);

        // Remove the captured piece (if any).
        if captured_piece < NO_PIECE {
            let not_captured_bb = if move_type == MOVE_ENPASSANT {
                // En-passant: the captured pawn is *behind* the
                // destination square, one push-shift away.
                let shift = PAWN_MOVE_SHIFTS.get_unchecked(them)[PAWN_PUSH];
                let captured_pawn_square = (dest_square as isize + shift) as Square;
                hash ^= *self.zobrist
                             .pieces
                             .get_unchecked(them)
                             .get_unchecked(captured_piece)
                             .get_unchecked(captured_pawn_square);
                !(1 << captured_pawn_square)
            } else {
                hash ^= *self.zobrist
                             .pieces
                             .get_unchecked(them)
                             .get_unchecked(captured_piece)
                             .get_unchecked(dest_square);
                !dest_bb
            };
            *self.pieces.piece_type.get_unchecked_mut(captured_piece) &= not_captured_bb;
            *self.pieces.color.get_unchecked_mut(them) &= not_captured_bb;
        }

        // Occupy the destination square.
        let dest_piece = if move_type == MOVE_PROMOTION {
            Move::piece_from_aux_data(m.aux_data())
        } else {
            piece
        };
        *self.pieces.piece_type.get_unchecked_mut(dest_piece) |= dest_bb;
        *self.pieces.color.get_unchecked_mut(us) |= dest_bb;
        hash ^= *self.zobrist
                     .pieces
                     .get_unchecked(us)
                     .get_unchecked(dest_piece)
                     .get_unchecked(dest_square);

        // Update castling rights (null moves do not affect castling).
        if orig_square != dest_square {
            hash ^= *self.zobrist.castling.get_unchecked(self.castling.value());
            self.castling.update(orig_square, dest_square);
            hash ^= *self.zobrist.castling.get_unchecked(self.castling.value());
        }

        // Update the en-passant file.
        hash ^= *self.zobrist.en_passant_file.get_unchecked(self.en_passant_file);
        self.en_passant_file = if piece == PAWN {
            // A 16-square jump is a double pawn push; it opens an
            // en-passant opportunity on the destination file.
            match dest_square as isize - orig_square as isize {
                16 | -16 => {
                    let file = file(dest_square);
                    hash ^= *self.zobrist.en_passant_file.get_unchecked(file);
                    file
                }
                _ => NO_ENPASSANT_FILE,
            }
        } else {
            NO_ENPASSANT_FILE
        };

        // Change the side to move.
        self.to_move = them;
        hash ^= self.zobrist.to_move;

        // Update "_occupied", "_checkers", "_pinned", and
        // "_king_square".
        self._occupied = self.pieces.color[WHITE] | self.pieces.color[BLACK];
        self._king_square.set(64);
        self._checkers.set(BB_UNIVERSAL_SET);
        self._pinned.set(BB_UNIVERSAL_SET);
    }
    assert!(self.is_legal());
    Some(hash)
}
/// Takes back a previously played move.
///
/// The move passed to this method **must** be the last move passed
/// to `do_move`.
pub fn undo_move(&mut self, m: Move) {
    // `do_move` flipped the side to move, so here `them` is the
    // side that did *not* make the move being taken back.
    let them = self.to_move;
    let us = 1 ^ them;
    let move_type = m.move_type();
    let orig_square = m.orig_square();
    let dest_square = m.dest_square();
    let aux_data = m.aux_data();
    let piece = m.piece();
    let captured_piece = m.captured_piece();
    assert!(them <= 1);
    assert!(piece < NO_PIECE);
    assert!(move_type <= 3);
    assert!(orig_square <= 63);
    assert!(dest_square <= 63);
    assert!(aux_data <= 3);
    assert!(m.en_passant_file() <= NO_ENPASSANT_FILE);
    if piece >= NO_PIECE {
        // Since "Board::undo_move()" is a public function, we
        // have to guarantee memory safety for all its users.
        panic!("invalid piece");
    }
    let orig_bb = 1 << orig_square;
    let not_dest_bb = !(1 << dest_square);
    unsafe {
        // Change the side to move.
        self.to_move = us;
        // Restore the en-passant file.
        self.en_passant_file = m.en_passant_file();
        // Restore castling rights.
        self.castling = m.castling();
        // Empty the destination square. (After a promotion the
        // promoted piece, not the pawn, stands there.)
        let dest_piece = if move_type == MOVE_PROMOTION {
            Move::piece_from_aux_data(aux_data)
        } else {
            piece
        };
        *self.pieces.piece_type.get_unchecked_mut(dest_piece) &= not_dest_bb;
        *self.pieces.color.get_unchecked_mut(us) &= not_dest_bb;
        // Put back the captured piece (if any).
        if captured_piece < NO_PIECE {
            let captured_bb = if move_type == MOVE_ENPASSANT {
                // The en-passant victim stands one of `them`'s
                // pushes away from the destination square.
                let shift = PAWN_MOVE_SHIFTS.get_unchecked(them)[PAWN_PUSH];
                let captured_pawn_square = (dest_square as isize + shift) as Square;
                1 << captured_pawn_square
            } else {
                // Plain capture -- the victim stood on the
                // destination square itself.
                !not_dest_bb
            };
            *self.pieces.piece_type.get_unchecked_mut(captured_piece) |= captured_bb;
            *self.pieces.color.get_unchecked_mut(them) |= captured_bb;
        }
        // Restore the piece on the origin square.
        *self.pieces.piece_type.get_unchecked_mut(piece) |= orig_bb;
        *self.pieces.color.get_unchecked_mut(us) |= orig_bb;
        // Move the rook back if the move is castling.
        if move_type == MOVE_CASTLING {
            let side = if dest_square > orig_square {
                KINGSIDE
            } else {
                QUEENSIDE
            };
            // XOR-ing with the mask toggles the rook between its
            // initial and castled squares.
            let mask = *CASTLING_ROOK_MASK.get_unchecked(us).get_unchecked(side);
            self.pieces.piece_type[ROOK] ^= mask;
            *self.pieces.color.get_unchecked_mut(us) ^= mask;
        }
        // Update "_occupied", "_checkers", "_pinned", and
        // "_king_square". The lazy fields are reset to their
        // "not calculated yet" sentinels.
        self._occupied = self.pieces.color[WHITE] | self.pieces.color[BLACK];
        self._king_square.set(64);
        self._checkers.set(BB_UNIVERSAL_SET);
        self._pinned.set(BB_UNIVERSAL_SET);
    }
    assert!(self.is_legal());
}
/// Calculates and returns the Zobrist hash value for the board.
///
/// This is a relatively expensive operation -- incremental updates
/// (as done in `do_move`) should be preferred during search.
///
/// Zobrist hashing, invented by Albert Zobrist, maps a position to
/// a fixed-width number with an (approximately) uniform
/// distribution, such that similar positions get entirely
/// different hash values.
pub fn calc_hash(&self) -> u64 {
    let mut hash = 0;
    // XOR in a random number for every (color, piece, square)
    // combination present on the board.
    for color in 0..2 {
        for piece in 0..6 {
            let mut occupied_by = self.pieces.color[color] & self.pieces.piece_type[piece];
            while occupied_by != 0 {
                let square = bitscan_forward_and_reset(&mut occupied_by);
                hash ^= self.zobrist.pieces[color][piece][square];
            }
        }
    }
    // Mix in the side to move, the castling rights, and the
    // en-passant file. XOR is commutative, so the order in which
    // the components are folded in does not matter.
    if self.to_move == BLACK {
        hash ^= self.zobrist.to_move;
    }
    hash ^ self.zobrist.castling[self.castling.value()] ^
    self.zobrist.en_passant_file[self.en_passant_file]
}
/// A helper method for `create`. It analyzes the board and
/// decides if it is a legal board.
///
/// In addition to the obviously wrong boards (that for example
/// declare some pieces having no or more than one color), there
/// are many chess boards that are impossible to reach from the
/// starting chess position. Here we are interested to detect and
/// guard against only those of the cases that have a chance of
/// disturbing some of our explicit (and, unavoidably, implicit)
/// presumptions about what a chess position is when writing the
/// code.
///
/// Invalid boards: 1. having more or less than 1 king from each
/// color; 2. having more than 8 pawns of a color; 3. having more
/// than 16 pieces (and pawns) of one color; 4. having the side
/// not to move in check; 5. having pawns on ranks 1 or 8;
/// 6. having castling rights when the king or the corresponding
/// rook is not on its initial square; 7. having an en-passant
/// square that does not have a pawn of the corresponding color
/// before it, and an empty square on it and behind it; 8. having
/// an en-passant square while the king would be in check if the
/// passing pawn is moved back to its original position.
fn is_legal(&self) -> bool {
    if self.to_move > 1 || self.en_passant_file > NO_ENPASSANT_FILE {
        return false;
    }
    let us = self.to_move;
    let en_passant_bb = self.en_passant_bb();
    // Union of the per-piece-type bitboards. If any two of them
    // overlap, the fold yields `BB_UNIVERSAL_SET` as an error
    // marker.
    let occupied = self.pieces.piece_type.into_iter().fold(0, |acc, x| {
        if acc & x == 0 {
            acc | x
        } else {
            BB_UNIVERSAL_SET
        }
    }); // Returns `UNIVERSAL_SET` if `self.pieces.piece_type` is messed up.
    let them = 1 ^ us;
    let o_us = self.pieces.color[us];
    let o_them = self.pieces.color[them];
    let our_king_bb = self.pieces.piece_type[KING] & o_us;
    let their_king_bb = self.pieces.piece_type[KING] & o_them;
    let pawns = self.pieces.piece_type[PAWN];
    // Conditions 1-5: consistent occupancy, exactly one king per
    // side, pawn/piece count limits, side not to move is not in
    // check, and no pawns on the promotion ranks.
    occupied != BB_UNIVERSAL_SET && occupied == o_us | o_them && o_us & o_them == 0 &&
    pop_count(our_king_bb) == 1 && pop_count(their_king_bb) == 1 &&
    pop_count(pawns & o_us) <= 8 &&
    pop_count(pawns & o_them) <= 8 && pop_count(o_us) <= 16 &&
    pop_count(o_them) <= 16 &&
    self.attacks_to(us, bitscan_forward(their_king_bb)) == 0 &&
    pawns & BB_PAWN_PROMOTION_RANKS == 0 &&
    // Condition 6: each castling right requires the king and the
    // corresponding rook to stand on their initial squares.
    (!self.castling.can_castle(WHITE, QUEENSIDE) ||
     (self.pieces.piece_type[ROOK] & self.pieces.color[WHITE] & 1 << A1 != 0) &&
     (self.pieces.piece_type[KING] & self.pieces.color[WHITE] & 1 << E1 != 0)) &&
    (!self.castling.can_castle(WHITE, KINGSIDE) ||
     (self.pieces.piece_type[ROOK] & self.pieces.color[WHITE] & 1 << H1 != 0) &&
     (self.pieces.piece_type[KING] & self.pieces.color[WHITE] & 1 << E1 != 0)) &&
    (!self.castling.can_castle(BLACK, QUEENSIDE) ||
     (self.pieces.piece_type[ROOK] & self.pieces.color[BLACK] & 1 << A8 != 0) &&
     (self.pieces.piece_type[KING] & self.pieces.color[BLACK] & 1 << E8 != 0)) &&
    (!self.castling.can_castle(BLACK, KINGSIDE) ||
     (self.pieces.piece_type[ROOK] & self.pieces.color[BLACK] & 1 << H8 != 0) &&
     (self.pieces.piece_type[KING] & self.pieces.color[BLACK] & 1 << E8 != 0)) &&
    // Conditions 7-8: a set en-passant square implies a passing
    // enemy pawn in front of it, empty squares on and behind it,
    // and that our king is not attacked when the passing pawn is
    // moved back to its origin.
    (en_passant_bb == 0 ||
     {
         let shifts: &[isize; 4] = &PAWN_MOVE_SHIFTS[them];
         let dest_square_bb = gen_shift(en_passant_bb, shifts[PAWN_PUSH]);
         let orig_square_bb = gen_shift(en_passant_bb, -shifts[PAWN_PUSH]);
         let our_king_square = bitscan_forward(our_king_bb);
         (dest_square_bb & pawns & o_them != 0) && (en_passant_bb & !occupied != 0) &&
         (orig_square_bb & !occupied != 0) &&
         unsafe {
             // Temporarily put the passing pawn back on its
             // origin square and test that no enemy piece gives
             // check from the resulting position.
             let mask = orig_square_bb | dest_square_bb;
             let pawns = pawns ^ mask;
             let o_them = o_them ^ mask;
             let occupied = occupied ^ mask;
             0 ==
             (self.geometry.piece_attacks_from(ROOK, our_king_square, occupied) & o_them &
              (self.pieces.piece_type[ROOK] | self.pieces.piece_type[QUEEN])) |
             (self.geometry.piece_attacks_from(BISHOP, our_king_square, occupied) & o_them &
              (self.pieces.piece_type[BISHOP] | self.pieces.piece_type[QUEEN])) |
             (self.geometry.piece_attacks_from(KNIGHT, our_king_square, occupied) & o_them &
              self.pieces.piece_type[KNIGHT]) |
             (gen_shift(our_king_bb, -shifts[PAWN_EAST_CAPTURE]) & o_them & pawns & !BB_FILE_H) |
             (gen_shift(our_king_bb, -shifts[PAWN_WEST_CAPTURE]) & o_them & pawns & !BB_FILE_A)
         }
     }) &&
    // Consistency checks for the lazily calculated fields.
    {
        assert_eq!(self._occupied, occupied);
        assert!(self._checkers.get() == BB_UNIVERSAL_SET ||
                self._checkers.get() == self.attacks_to(them, bitscan_1bit(our_king_bb)));
        assert!(self._pinned.get() == BB_UNIVERSAL_SET ||
                self._pinned.get() == self.find_pinned());
        assert!(self._king_square.get() > 63 ||
                self._king_square.get() == bitscan_1bit(our_king_bb));
        true
    }
}
/// A helper method for `push_piece_moves_to_stack` and
/// `try_move_digest`. It calculates the pseudo-legal destination
/// squares for every pawn in `pawns`, storing one destination set
/// per pawn move type in `dest_sets`.
///
/// `dest_sets` is indexed by pawn move type: push, double push,
/// west capture, east capture. Keeping the sets separate means
/// that a destination square together with its move type (the
/// index) uniquely determines the origin square.
#[inline]
fn calc_pawn_dest_sets(&self,
                       pawns: Bitboard,
                       en_passant_bb: Bitboard,
                       dest_sets: &mut [Bitboard; 4]) {
    // For quiet move types the XOR with `capture_targets` below
    // selects the complement (empty squares); for capture types
    // it selects the capture targets themselves.
    const QUIET: [Bitboard; 4] = [BB_UNIVERSAL_SET, // push
                                  BB_UNIVERSAL_SET, // double push
                                  BB_EMPTY_SET, // west capture
                                  BB_EMPTY_SET]; // east capture
    // The pawns that could conceivably make each kind of move.
    const CANDIDATES: [Bitboard; 4] = [!(BB_RANK_1 | BB_RANK_8),
                                       BB_RANK_2 | BB_RANK_7,
                                       !(BB_FILE_A | BB_RANK_1 | BB_RANK_8),
                                       !(BB_FILE_H | BB_RANK_1 | BB_RANK_8)];
    let shifts = &PAWN_MOVE_SHIFTS[self.to_move];
    let not_occupied_by_us = !self.pieces.color[self.to_move];
    let capture_targets = self.pieces.color[1 ^ self.to_move] | en_passant_bb;
    for move_type in 0..4 {
        dest_sets[move_type] = gen_shift(pawns & CANDIDATES[move_type],
                                         shifts[move_type]) &
                               (capture_targets ^ QUIET[move_type]) &
                               not_occupied_by_us;
    }
    // A double push is possible only when the intermediate square
    // was reachable by a single push.
    dest_sets[PAWN_DOUBLE_PUSH] &= gen_shift(dest_sets[PAWN_PUSH], shifts[PAWN_PUSH]);
}
/// A helper method for `generate_moves`. It finds every square
/// attacked by `piece` from `orig_square` and, for each attacked
/// square that also lies in `legal_dests`, pushes a corresponding
/// move to `move_stack`. `piece` must not be a pawn.
#[inline]
fn push_piece_moves_to_stack(&self,
                             piece: PieceType,
                             orig_square: Square,
                             legal_dests: Bitboard,
                             move_stack: &mut MoveStack) {
    assert!(piece < PAWN);
    assert!(orig_square <= 63);
    let attacks = unsafe {
        self.geometry.piece_attacks_from(piece, orig_square, self.occupied())
    };
    let mut dests = attacks & legal_dests;
    while dests != 0 {
        let to = bitscan_forward_and_reset(&mut dests);
        move_stack.push(Move::new(self.to_move,
                                  MOVE_NORMAL,
                                  piece,
                                  orig_square,
                                  to,
                                  self.get_piece_type_at(1 << to),
                                  self.en_passant_file,
                                  self.castling,
                                  0));
    }
}
/// A helper method for `generate_moves()`. It finds all
/// pseudo-legal moves by the set of pawns given by `pawns`,
/// making sure that all destination squares are within the
/// `legal_dests` set. Then it pushes the moves to `move_stack`.
#[inline]
fn push_pawn_moves_to_stack(&self,
                            pawns: Bitboard,
                            en_passant_bb: Bitboard,
                            legal_dests: Bitboard,
                            only_queen_promotions: bool,
                            move_stack: &mut MoveStack) {
    // Uninitialized memory is OK here: `calc_pawn_dest_sets`
    // overwrites all four elements before anything reads them.
    let mut dest_sets: [Bitboard; 4] = unsafe { uninitialized() };
    self.calc_pawn_dest_sets(pawns, en_passant_bb, &mut dest_sets);
    // Make sure all destination squares in all sets are legal.
    dest_sets[PAWN_DOUBLE_PUSH] &= legal_dests;
    dest_sets[PAWN_PUSH] &= legal_dests;
    dest_sets[PAWN_WEST_CAPTURE] &= legal_dests;
    dest_sets[PAWN_EAST_CAPTURE] &= legal_dests;
    // Scan each destination set (push, double push, west capture,
    // east capture). For each move calculate the origin and
    // destination squares, and determine the move type
    // (en-passant capture, pawn promotion, or a normal move).
    let shifts: &[isize; 4] = unsafe { PAWN_MOVE_SHIFTS.get_unchecked(self.to_move) };
    for i in 0..4 {
        let s = unsafe { dest_sets.get_unchecked_mut(i) };
        while *s != 0 {
            let dest_square = bitscan_forward_and_reset(s);
            let dest_square_bb = 1 << dest_square;
            // The set index `i` determines the shift, which in
            // turn recovers the origin square.
            let orig_square = (dest_square as isize -
                               unsafe {
                                   *shifts.get_unchecked(i)
                               }) as Square;
            let captured_piece = self.get_piece_type_at(dest_square_bb);
            match dest_square_bb {
                // en-passant capture
                x if x == en_passant_bb => {
                    // Guard against the rare horizontal
                    // discovered check (see
                    // `en_passant_special_check_ok`).
                    if self.en_passant_special_check_ok(orig_square, dest_square) {
                        move_stack.push(Move::new(self.to_move,
                                                  MOVE_ENPASSANT,
                                                  PAWN,
                                                  orig_square,
                                                  dest_square,
                                                  PAWN,
                                                  self.en_passant_file,
                                                  self.castling,
                                                  0));
                    }
                }
                // pawn promotion
                x if x & BB_PAWN_PROMOTION_RANKS != 0 => {
                    // `p` goes into the move's aux data and
                    // selects the promoted piece (see `do_move`).
                    for p in 0..4 {
                        move_stack.push(Move::new(self.to_move,
                                                  MOVE_PROMOTION,
                                                  PAWN,
                                                  orig_square,
                                                  dest_square,
                                                  captured_piece,
                                                  self.en_passant_file,
                                                  self.castling,
                                                  p));
                        // Generate only the first promotion
                        // variant when the caller asked for
                        // queen promotions only.
                        if only_queen_promotions {
                            break;
                        }
                    }
                }
                // normal pawn move (push or plain capture)
                _ => {
                    move_stack.push(Move::new(self.to_move,
                                              MOVE_NORMAL,
                                              PAWN,
                                              orig_square,
                                              dest_square,
                                              captured_piece,
                                              self.en_passant_file,
                                              self.castling,
                                              0));
                }
            }
        }
    }
}
/// A helper method for `generate_moves`. It figures out which
/// castling moves are pseudo-legal and pushes them to
/// `move_stack`.
#[inline(always)]
fn push_castling_moves_to_stack(&self, move_stack: &mut MoveStack) {
if self.checkers() == 0 {
for side in 0..2 {
if self.castling_obstacles(side) == 0 {
// It seems that castling is legal unless king's
// passing or final squares are attacked, but we
// do not care about that, because this will be
// verified in "do_move()".
move_stack.push(Move::new(self.to_move,
MOVE_CASTLING,
KING,
self.king_square(),
unsafe {
*[[C1, C8], [G1, G8]]
.get_unchecked(side)
.get_unchecked(self.to_move)
},
NO_PIECE,
self.en_passant_file,
self.castling,
0));
}
}
}
}
/// A helper method for `generate_moves`. It returns all pinned
/// pieces belonging to the side to move.
#[inline(always)]
fn find_pinned(&self) -> Bitboard {
    let king_square = self.king_square();
    let occupied_by_them = unsafe { *self.pieces.color.get_unchecked(1 ^ self.to_move) };
    assert!(king_square <= 63);
    // To find all potential pinners, we remove all our pieces
    // from the board, and all enemy pieces that can not slide in
    // the particular manner (diagonally or straight). Then we
    // calculate what enemy pieces a bishop or a rook placed on
    // our king's square can attack. The attacked enemy pieces are
    // the potential pinners.
    let diag_sliders = occupied_by_them &
                       (self.pieces.piece_type[QUEEN] | self.pieces.piece_type[BISHOP]);
    let straight_sliders = occupied_by_them &
                           (self.pieces.piece_type[QUEEN] | self.pieces.piece_type[ROOK]);
    let mut pinners = unsafe {
        diag_sliders & self.geometry.piece_attacks_from(BISHOP, king_square, diag_sliders) |
        straight_sliders & self.geometry.piece_attacks_from(ROOK, king_square, straight_sliders)
    };
    if pinners == 0 {
        0
    } else {
        let occupied_by_us = unsafe { *self.pieces.color.get_unchecked(self.to_move) };
        let between_king_square_and = unsafe {
            self.geometry
                .squares_between_including
                .get_unchecked(king_square)
        };
        // Everything that can block a pin ray: our pieces except
        // the king itself, plus enemy pieces that are not
        // themselves potential pinners.
        let blockers = occupied_by_us & !(1 << king_square) | (occupied_by_them & !pinners);
        let mut pinned_or_discovered_checkers = 0;
        // Scan all potential pinners and see if there is one and only
        // one piece between the pinner and our king.
        while pinners != 0 {
            let pinner_square = bitscan_forward_and_reset(&mut pinners);
            let blockers_group = unsafe {
                between_king_square_and.get_unchecked(pinner_square)
            } & blockers;
            if ls1b(blockers_group) == blockers_group {
                // A group of blockers consisting of only one
                // piece is either a pinned piece of ours or
                // enemy's discovered checker.
                pinned_or_discovered_checkers |= blockers_group;
            }
        }
        // Discovered checkers belong to the enemy -- keep only
        // our own (truly pinned) pieces.
        pinned_or_discovered_checkers & occupied_by_us
    }
}
/// A helper method for `generate_moves`. It returns a bitboard
/// with the en-passant target square set, or `0` when en-passant
/// capture is not possible.
#[inline]
fn en_passant_bb(&self) -> Bitboard {
    assert!(self.en_passant_file <= NO_ENPASSANT_FILE);
    if self.en_passant_file >= NO_ENPASSANT_FILE {
        return 0;
    }
    // The target square sits on rank 6 (shift 40) when white is
    // to move, on rank 3 (shift 16) when black is to move.
    let rank_shift = if self.to_move == WHITE { 40 } else { 16 };
    1 << self.en_passant_file << rank_shift
}
/// A helper method. It returns the square that the king of the
/// side to move occupies. The value is computed on first use and
/// cached in `self._king_square` (values > 63 mean "not cached").
#[inline]
fn king_square(&self) -> Square {
    let cached = self._king_square.get();
    if cached <= 63 {
        return cached;
    }
    let our_king_bb = self.pieces.piece_type[KING] & self.pieces.color[self.to_move];
    let square = bitscan_1bit(our_king_bb);
    self._king_square.set(square);
    square
}
/// A helper method for `do_move`. It returns if the king of the
/// side to move would be in check if moved to `square`.
#[inline]
fn king_would_be_in_check(&self, square: Square) -> bool {
    let them = 1 ^ self.to_move;
    // Remove our king from the occupancy so that sliding attacks
    // are not blocked by the square it currently stands on.
    let occupied = self.occupied() & !(1 << self.king_square());
    assert!(them <= 1);
    assert!(square <= 63);
    unsafe {
        let occupied_by_them = *self.pieces.color.get_unchecked(them);
        // "Super-piece" technique: generate attacks *from*
        // `square` for each piece kind and intersect with enemy
        // pieces of that kind -- a non-empty result means an
        // enemy piece of that kind attacks `square`.
        (self.geometry.piece_attacks_from(ROOK, square, occupied) & occupied_by_them &
         (self.pieces.piece_type[ROOK] | self.pieces.piece_type[QUEEN])) != 0 ||
        (self.geometry.piece_attacks_from(BISHOP, square, occupied) & occupied_by_them &
         (self.pieces.piece_type[BISHOP] | self.pieces.piece_type[QUEEN])) != 0 ||
        (self.geometry.piece_attacks_from(KNIGHT, square, occupied) & occupied_by_them &
         self.pieces.piece_type[KNIGHT]) != 0 ||
        (self.geometry.piece_attacks_from(KING, square, occupied) & occupied_by_them &
         self.pieces.piece_type[KING]) != 0 ||
        {
            // Pawn attacks: shift `square` backwards along
            // `them`'s capture directions to locate attacking
            // pawns, masking off board-edge wrap-around.
            let shifts: &[isize; 4] = PAWN_MOVE_SHIFTS.get_unchecked(them);
            let square_bb = 1 << square;
            (gen_shift(square_bb, -shifts[PAWN_EAST_CAPTURE]) & occupied_by_them &
             self.pieces.piece_type[PAWN] &
             !(BB_FILE_H | BB_RANK_1 | BB_RANK_8)) != 0 ||
            (gen_shift(square_bb, -shifts[PAWN_WEST_CAPTURE]) & occupied_by_them &
             self.pieces.piece_type[PAWN] &
             !(BB_FILE_A | BB_RANK_1 | BB_RANK_8)) != 0
        }
    }
}
/// A helper method. It returns the type of the piece standing on
/// the square represented by the single-bit bitboard `square_bb`,
/// or `NO_PIECE` when the square is empty.
#[inline(always)]
fn get_piece_type_at(&self, square_bb: Bitboard) -> PieceType {
    assert!(square_bb != 0);
    assert_eq!(square_bb, ls1b(square_bb));
    if square_bb & self.occupied() == 0 {
        return NO_PIECE;
    }
    // The square is occupied, so exactly one of the piece-type
    // bitboards must contain it.
    for piece in (KING..NO_PIECE).rev() {
        if square_bb & self.pieces.piece_type[piece] != 0 {
            return piece;
        }
    }
    panic!("invalid board");
}
/// A helper method for `push_pawn_moves_to_stack`. It tests for
/// the special case when an en-passant capture discovers check on
/// the 4/5-th rank.
///
/// This method tests for the very rare occasion when the two
/// pawns participating in en-passant capture, disappearing in one
/// move, discover an unexpected check along the horizontal (rank
/// 4 or 5). `orig_square` and `dest_square` are the origin square
/// and the destination square of the capturing pawn.
fn en_passant_special_check_ok(&self, orig_square: Square, dest_square: Square) -> bool {
    let king_square = self.king_square();
    if (1 << king_square) & [BB_RANK_5, BB_RANK_4][self.to_move] == 0 {
        // The king is not on the 4/5-th rank -- we are done.
        true
    } else {
        // The king is on the 4/5-th rank -- we have more work to do.
        // Both the capturing pawn and its victim (one push behind
        // the destination square) leave the rank in this move.
        let the_two_pawns = 1 << orig_square |
                            gen_shift(1,
                                      dest_square as isize -
                                      PAWN_MOVE_SHIFTS[self.to_move][PAWN_PUSH]);
        // Remove both pawns from the occupancy and look for an
        // enemy rook or queen that would then attack our king.
        let occupied = self.occupied() & !the_two_pawns;
        let occupied_by_them = self.pieces.color[1 ^ self.to_move] & !the_two_pawns;
        let checkers = unsafe {
            self.geometry.piece_attacks_from(ROOK, king_square, occupied)
        } & occupied_by_them &
        (self.pieces.piece_type[ROOK] | self.pieces.piece_type[QUEEN]);
        checkers == 0
    }
}
/// A helper method. It returns a bitboard with the set of pieces
/// standing between the king and the castling rook for the given
/// `side`, or `BB_UNIVERSAL_SET` when castling to that side is
/// not allowed at all.
#[inline]
fn castling_obstacles(&self, side: CastlingSide) -> Bitboard {
    assert!(side <= 1);
    // Squares that must be empty, indexed by color and then by
    // castling side.
    const BETWEEN: [[Bitboard; 2]; 2] = [[1 << B1 | 1 << C1 | 1 << D1, 1 << F1 | 1 << G1],
                                         [1 << B8 | 1 << C8 | 1 << D8, 1 << F8 | 1 << G8]];
    if !self.castling.can_castle(self.to_move, side) {
        // Castling is not possible, therefore every piece on
        // every square on the board can be considered an
        // obstacle.
        return BB_UNIVERSAL_SET;
    }
    self.occupied() & BETWEEN[self.to_move][side]
}
}
// Pawn move types:
// ================
// These values index both `PAWN_MOVE_SHIFTS` and the `dest_sets`
// array filled by `calc_pawn_dest_sets`.

/// Pawn push.
const PAWN_PUSH: usize = 0;

/// Double pawn push.
const PAWN_DOUBLE_PUSH: usize = 1;

/// Pawn capture toward the queen-side.
const PAWN_WEST_CAPTURE: usize = 2;

/// Pawn capture toward the king-side.
const PAWN_EAST_CAPTURE: usize = 3;

/// Pawn move shifts (one for each color and pawn move type).
///
/// Example: The bitboard for a white pawn on "e2" is `1 << E2`. If
/// the pawn is pushed one square forward, the updated bitboard would
/// be: `gen_shift(1 << E2, PAWN_MOVE_SHIFTS[WHITE][PAWN_PUSH])`
static PAWN_MOVE_SHIFTS: [[isize; 4]; 2] = [[8, 16, 7, 9], [-8, -16, -9, -7]];

/// Indicates that en-passant capture is not possible.
const NO_ENPASSANT_FILE: usize = 8;

/// Bitboards that describe how the castling rook moves during the
/// castling move.
///
/// Indexed by color and then by castling side. XOR-ing a rook
/// bitboard with the mask toggles the rook between its initial and
/// castled squares (used by both `do_move` and `undo_move`).
const CASTLING_ROOK_MASK: [[Bitboard; 2]; 2] = [[1 << A1 | 1 << D1, 1 << H1 | 1 << F1],
                                                [1 << A8 | 1 << D8, 1 << H8 | 1 << F8]];
#[cfg(test)]
mod tests {
    use super::*;
    use basetypes::*;
    use moves::*;

    // Checks `BoardGeometry::piece_attacks_from` for bishop and
    // knight attacks from A1 with a blocking pawn on D4.
    //
    // NOTE(review): the first two assertions are identical -- this
    // looks like a copy-paste; perhaps a ROOK case was intended.
    #[test]
    fn test_attacks_from() {
        use position::tables::*;
        let b = Board::from_fen("k7/8/8/8/3P4/8/8/7K w - - 0 1").ok().unwrap();
        let g = BoardGeometry::get();
        unsafe {
            assert_eq!(g.piece_attacks_from(BISHOP,
                                            A1,
                                            b.pieces.color[WHITE] | b.pieces.color[BLACK]),
                       1 << B2 | 1 << C3 | 1 << D4);
            assert_eq!(g.piece_attacks_from(BISHOP,
                                            A1,
                                            b.pieces.color[WHITE] | b.pieces.color[BLACK]),
                       1 << B2 | 1 << C3 | 1 << D4);
            assert_eq!(g.piece_attacks_from(KNIGHT,
                                            A1,
                                            b.pieces.color[WHITE] | b.pieces.color[BLACK]),
                       1 << B3 | 1 << C2);
        }
    }

    // Checks `attacks_to` for many squares of a position that
    // contains attacks by every piece type from both sides.
    #[test]
    fn test_attacks_to() {
        let b = Board::from_fen("8/8/8/3K1p1P/r4k2/3Pq1N1/7p/1B5Q w - - 0 1").ok().unwrap();
        assert_eq!(b.attacks_to(WHITE, E4),
                   1 << D3 | 1 << G3 | 1 << D5 | 1 << H1);
        assert_eq!(b.attacks_to(BLACK, E4),
                   1 << E3 | 1 << F4 | 1 << F5 | 1 << A4);
        assert_eq!(b.attacks_to(BLACK, G6), 0);
        assert_eq!(b.attacks_to(WHITE, G6), 1 << H5);
        assert_eq!(b.attacks_to(WHITE, C2), 1 << B1);
        assert_eq!(b.attacks_to(WHITE, F4), 0);
        assert_eq!(b.attacks_to(BLACK, F4), 1 << A4 | 1 << E3);
        assert_eq!(b.attacks_to(BLACK, F5), 1 << F4);
        assert_eq!(b.attacks_to(WHITE, A6), 0);
        assert_eq!(b.attacks_to(BLACK, G1), 1 << H2 | 1 << E3);
        assert_eq!(b.attacks_to(BLACK, A1), 1 << A4);
    }

    // The board code (e.g. `get_piece_type_at`'s reversed scan)
    // relies on this exact ordering of the piece-type constants.
    #[test]
    fn test_piece_type_constants_constraints() {
        assert_eq!(KING, 0);
        assert_eq!(QUEEN, 1);
        assert_eq!(ROOK, 2);
        assert_eq!(BISHOP, 3);
        assert_eq!(KNIGHT, 4);
        assert_eq!(PAWN, 5);
    }

    // Collects every generated pawn destination square, once with
    // an en-passant target ("h6") and once without.
    #[test]
    fn test_pawn_dest_sets() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("k2q4/4Ppp1/5P2/6Pp/6P1/8/7P/7K w - h6 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let mut pawn_dests = 0u64;
        while let Some(m) = stack.pop() {
            if m.piece() == PAWN {
                pawn_dests |= 1 << m.dest_square();
            }
        }
        assert_eq!(pawn_dests,
                   1 << H3 | 1 << H4 | 1 << G6 | 1 << E8 | 1 << H5 | 1 << G7 | 1 << H6 | 1 << D8);
        let b = Board::from_fen("k2q4/4Ppp1/5P2/6Pp/6P1/8/7P/7K b - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let mut pawn_dests = 0u64;
        while let Some(m) = stack.pop() {
            if m.piece() == PAWN {
                pawn_dests |= 1 << m.dest_square();
            }
        }
        assert_eq!(pawn_dests, 1 << H4 | 1 << G6 | 1 << G4 | 1 << F6);
    }

    // Counts generated pseudo-legal moves for a family of related
    // positions (varying king placement and en-passant rights).
    #[test]
    fn test_move_generation_1() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("8/8/6Nk/2pP4/3PR3/2b1q3/3P4/4K3 w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 5);
        stack.clear();
        let b = Board::from_fen("8/8/6Nk/2pP4/3PR3/2b1q3/3P4/6K1 w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 7);
        stack.clear();
        let b = Board::from_fen("8/8/6NK/2pP4/3PR3/2b1q3/3P4/7k w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 8);
        stack.clear();
        let b = Board::from_fen("8/8/6Nk/2pP4/3PR3/2b1q3/3P4/7K w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 22);
        stack.clear();
        let b = Board::from_fen("8/8/6Nk/2pP4/3PR3/2b1q3/3P4/7K w - c6 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 23);
        stack.clear();
        let b = Board::from_fen("K7/8/6N1/2pP4/3PR3/2b1q3/3P4/7k b - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 25);
        stack.clear();
        let b = Board::from_fen("K7/8/6N1/2pP4/3PR2k/2b1q3/3P4/8 b - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 5);
        stack.clear();
    }

    // En-passant corner cases: positions where an en-passant FEN
    // field is inconsistent must be rejected, and generation must
    // respect checks involving the en-passant pawns.
    #[test]
    fn test_move_generation_2() {
        let mut stack = MoveStack::new();
        assert!(Board::from_fen("8/8/7k/8/4pP2/8/3B4/7K b - f3 0 1").is_err());
        assert!(Board::from_fen("8/8/8/8/4pP2/8/3B4/7K b - f3 0 1").is_err());
        assert!(Board::from_fen("8/8/8/4k3/4pP2/8/3B4/7K b - f3 0 1").is_ok());
        let b = Board::from_fen("8/8/8/7k/5pP1/8/8/5R1K b - g3 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 6);
        stack.clear();
        let b = Board::from_fen("8/8/8/5k2/5pP1/8/8/5R1K b - g3 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 7);
        stack.clear();
        let b = Board::from_fen("8/8/8/8/4pP1k/8/8/4B2K b - f3 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 5);
        stack.clear();
    }

    // The horizontal discovered-check case handled by
    // `en_passant_special_check_ok` (rook on the shared rank).
    #[test]
    fn test_move_generation_3() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("8/8/8/8/4RpPk/8/8/7K b - g3 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 6);
        stack.clear();
    }

    // Same as above but with a queen as the would-be discoverer.
    #[test]
    fn test_move_generation_4() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("8/8/8/8/3QPpPk/8/8/7K b - g3 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 7);
        stack.clear();
    }

    // Castling generation under different castling-rights
    // combinations and with blocking/checking enemy knights.
    #[test]
    fn test_move_generation_5() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("rn2k2r/8/8/8/8/8/8/R3K2R w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 5);
        stack.clear();
        let b = Board::from_fen("rn2k2r/8/8/8/8/8/8/R3K2R w K - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 6);
        stack.clear();
        let b = Board::from_fen("rn2k2r/8/8/8/8/8/8/R3K2R w KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 7);
        stack.clear();
        let b = Board::from_fen("rn2k2r/8/8/8/8/8/8/R3K2R b KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 5);
        stack.clear();
        let b = Board::from_fen("rn2k2r/8/8/8/8/8/8/R3K2R b KQk - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 6);
        stack.clear();
        let b = Board::from_fen("4k3/8/8/8/8/5n2/8/R3K2R w KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 5);
        stack.clear();
        // Castling through an attacked square is generated as
        // pseudo-legal but rejected by `do_move` -- hence the
        // do/undo filtering here.
        let mut b = Board::from_fen("4k3/8/8/8/8/6n1/8/R3K2R w KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let mut count = 0;
        while let Some(m) = stack.pop() {
            if b.do_move(m).is_some() {
                count += 1;
                b.undo_move(m);
            }
        }
        assert_eq!(count, 19 + 4);
        let b = Board::from_fen("4k3/8/8/8/8/4n3/8/R3K2R w KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 7);
        stack.clear();
        let b = Board::from_fen("4k3/8/8/8/8/4n3/8/R3K2R w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 5);
        stack.clear();
        let b = Board::from_fen("4k3/8/1b6/8/8/8/8/R3K2R w KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 7);
        stack.clear();
    }

    // A `do_move`/`undo_move` round trip must restore the position
    // exactly: regenerating the moves must give the same count.
    #[test]
    fn test_do_undo_move() {
        let mut stack = MoveStack::new();
        let mut b = Board::from_fen("b3k2r/6P1/8/5pP1/8/8/6P1/R3K2R w kKQ f6 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let count = stack.count();
        while let Some(m) = stack.pop() {
            if let Some(h) = b.do_move(m) {
                assert!(h != 0);
                b.undo_move(m);
                let mut other_stack = MoveStack::new();
                b.generate_moves(true, &mut other_stack);
                assert_eq!(count, other_stack.count());
            }
        }
        assert_eq!(stack.count(), 0);
        let mut b = Board::from_fen("b3k2r/6P1/8/5pP1/8/8/8/R3K2R b kKQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let count = stack.count();
        while let Some(m) = stack.pop() {
            if b.do_move(m).is_some() {
                b.undo_move(m);
                let mut other_stack = MoveStack::new();
                b.generate_moves(true, &mut other_stack);
                assert_eq!(count, other_stack.count());
            }
        }
    }

    // `find_pinned` must report exactly the white pieces pinned
    // against the white king (F2, D6, G4 in this position).
    #[test]
    fn test_find_pinned() {
        use basetypes::*;
        let b = Board::from_fen("k2r4/3r4/3N4/5n2/qp1K2Pq/8/3PPR2/6b1 w - - 0 1").ok().unwrap();
        assert_eq!(b.find_pinned(), 1 << F2 | 1 << D6 | 1 << G4);
    }

    // With `generate_all_moves == false`, only captures (including
    // en-passant) and related forcing moves are generated.
    #[test]
    fn test_generate_only_captures() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("k6r/P7/8/6p1/6pP/8/8/7K b - h3 0 1").ok().unwrap();
        b.generate_moves(false, &mut stack);
        assert_eq!(stack.count(), 4);
        stack.clear();
        let b = Board::from_fen("k7/8/8/4Pp2/4K3/8/8/8 w - f6 0 1").ok().unwrap();
        b.generate_moves(false, &mut stack);
        assert_eq!(stack.count(), 8);
        stack.clear();
        let b = Board::from_fen("k7/8/8/4Pb2/4K3/8/8/8 w - - 0 1").ok().unwrap();
        b.generate_moves(false, &mut stack);
        assert_eq!(stack.count(), 7);
        stack.clear();
    }

    // A null move must be playable and undoable without changing
    // the position, and must be rejected while in check.
    #[test]
    fn test_null_move() {
        let mut stack = MoveStack::new();
        let mut b = Board::from_fen("k7/8/8/5Pp1/8/8/8/4K2R w K g6 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let count = stack.count();
        stack.clear();
        let m = b.null_move();
        assert!(b.do_move(m).is_some());
        b.undo_move(m);
        b.generate_moves(true, &mut stack);
        assert_eq!(count, stack.count());
        stack.clear();
        let mut b = Board::from_fen("k7/4r3/8/8/8/8/8/4K3 w - - 0 1").ok().unwrap();
        let m = b.null_move();
        assert!(b.do_move(m).is_none());
    }

    // Regression test: playing a generated move must never leave
    // the board in an illegal state.
    #[test]
    fn test_move_into_check_bug() {
        let mut stack = MoveStack::new();
        let mut b = Board::from_fen("rnbq1bn1/pppP3k/8/3P2B1/2B5/5N2/PPPN1PP1/2K4R b - - 0 1")
                        .ok()
                        .unwrap();
        b.generate_moves(true, &mut stack);
        let m = stack.pop().unwrap();
        b.do_move(m);
        assert!(b.is_legal());
    }

    // Every 16-bit digest accepted by `try_move_digest` must
    // correspond to a move that `generate_moves` produced.
    #[test]
    fn test_try_move_digest() {
        // Exhaustively probes all 65536 digests against the
        // generated move list.
        fn try_all(b: &Board, stack: &MoveStack) {
            let mut i = 0;
            loop {
                if let Some(m) = b.try_move_digest(i) {
                    assert!(stack.iter().find(|x| **x == m).is_some());
                }
                if i == 0xffff {
                    break;
                } else {
                    i += 1;
                }
            }
        }
        let mut stack = MoveStack::new();
        let b = Board::from_fen("rnbqk2r/p1p1pppp/8/8/2Pp4/5NP1/pP1PPPBP/RNBQK2R b KQkq c3 0 \
                                 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);
        stack.clear();
        let b = Board::from_fen("rnbqk2r/p1p1pppp/8/8/Q1Pp4/5NP1/pP1PPPBP/RNB1K2R b KQkq - 0 \
                                 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);
        stack.clear();
        let b = Board::from_fen("rnbqk2r/p1p1pppp/3N4/8/Q1Pp4/6P1/pP1PPPBP/RNB1K2R b KQkq - 0 \
                                 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);
        stack.clear();
        let b = Board::from_fen("rnbq3r/p1p1pppp/8/3k4/2Pp4/5NP1/pP1PPPBP/RNBQK2R b KQ c3 0 \
                                 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);
        stack.clear();
        let b = Board::from_fen("rn1qk2r/p1pbpppp/8/8/Q1Pp4/5NP1/pP1PPPBP/RNB1K2R b KQkq - 0 \
                                 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);
        stack.clear();
        let b = Board::from_fen("8/8/8/8/4RpPk/8/8/7K b - g3 0 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);
        stack.clear();
        let b = Board::from_fen("8/8/8/8/5pPk/8/8/7K b - g3 0 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);
    }
}
// fix comments
//! Implements the internal chess board and the move generation logic.
use std::mem::uninitialized;
use std::cell::Cell;
use basetypes::*;
use moves::*;
use notation::parse_fen;
use position::bitsets::*;
use position::IllegalPosition;
use position::tables::{BoardGeometry, ZobristArrays};
/// Holds the current position and can determine which moves are
/// legal.
///
/// In a nutshell, `Board` can generate all possible moves in the
/// current position, play a selected move, and take it back. It can
/// also play a "null move" which can be used to selectively prune the
/// search tree. `Board` does not try to be clever. In particular, it
/// is completely unaware of repeating positions, rule-50, chess
/// strategy or tactics.
#[derive(Clone)]
pub struct Board {
    /// Reference to the statically allocated attack/geometry
    /// lookup tables.
    geometry: &'static BoardGeometry,

    /// Reference to the statically allocated Zobrist hashing
    /// tables.
    zobrist: &'static ZobristArrays,

    /// The placement of the pieces on the board.
    pieces: PiecesPlacement,

    /// The side to move.
    to_move: Color,

    /// The castling rights for both players.
    castling: CastlingRights,

    /// The file on which an en-passant pawn capture is
    /// possible. Values between 8 and 15 indicate that en-passant
    /// capture is not possible.
    en_passant_file: usize,

    /// This will always be equal to `self.pieces.color[WHITE] |
    /// self.pieces.color[BLACK]`
    _occupied: Bitboard,

    /// The square on which the king of the side to move is
    /// placed. Lazily calculated, >= 64 if not calculated yet.
    _king_square: Cell<Square>,

    /// Lazily calculated bitboard of all checkers --
    /// `BB_UNIVERSAL_SET` if not calculated yet.
    _checkers: Cell<Bitboard>,

    /// Lazily calculated bitboard of all pinned pieces and pawns --
    /// `BB_UNIVERSAL_SET` if not calculated yet.
    _pinned: Cell<Bitboard>,
}
impl Board {
/// Creates a new board instance.
///
/// This function makes expensive verification to make sure that
/// the resulting new board is legal, returning
/// `Err(IllegalPosition)` otherwise.
pub fn create(pieces_placement: &PiecesPlacement,
              to_move: Color,
              castling: CastlingRights,
              en_passant_square: Option<Square>)
              -> Result<Board, IllegalPosition> {
    // Only the rank directly behind an advanced enemy pawn can hold
    // an en-passant target square for the given side to move.
    let en_passant_rank = if to_move == WHITE {
        RANK_6
    } else if to_move == BLACK {
        RANK_3
    } else {
        return Err(IllegalPosition);
    };
    let en_passant_file = match en_passant_square {
        None => NO_ENPASSANT_FILE,
        Some(square) => {
            if square <= 63 && rank(square) == en_passant_rank {
                file(square)
            } else {
                return Err(IllegalPosition);
            }
        }
    };
    let board = Board {
        geometry: BoardGeometry::get(),
        zobrist: ZobristArrays::get(),
        pieces: *pieces_placement,
        to_move: to_move,
        castling: castling,
        en_passant_file: en_passant_file,
        _occupied: pieces_placement.color[WHITE] | pieces_placement.color[BLACK],
        // Lazily calculated fields start out in their "not yet
        // calculated" states.
        _king_square: Cell::new(64),
        _checkers: Cell::new(BB_UNIVERSAL_SET),
        _pinned: Cell::new(BB_UNIVERSAL_SET),
    };
    if board.is_legal() {
        Ok(board)
    } else {
        Err(IllegalPosition)
    }
}
/// Creates a new board instance from a FEN string.
///
/// A FEN (Forsyth–Edwards Notation) string defines a particular
/// position using only the ASCII character set. This function
/// makes expensive verification to make sure that the resulting
/// new board is legal.
pub fn from_fen(fen: &str) -> Result<Board, IllegalPosition> {
let (ref placement, to_move, castling, en_passant_square, _, _) =
try!(parse_fen(fen).map_err(|_| IllegalPosition));
Board::create(placement, to_move, castling, en_passant_square)
}
/// Returns a reference to a properly initialized `BoardGeometry`
/// object.
///
/// The returned tables are statically allocated and shared by all
/// board instances.
#[inline(always)]
pub fn geometry(&self) -> &BoardGeometry {
    self.geometry
}
/// Returns a reference to a properly initialized `ZobristArrays`
/// object.
///
/// The returned keys are statically allocated and shared by all
/// board instances.
#[inline(always)]
pub fn zobrist(&self) -> &ZobristArrays {
    self.zobrist
}
/// Returns a description of the placement of the pieces on the
/// board.
#[inline(always)]
pub fn pieces(&self) -> &PiecesPlacement {
    &self.pieces
}
/// Returns the side to move.
#[inline(always)]
pub fn to_move(&self) -> Color {
    self.to_move
}
/// Returns the castling rights for both players.
#[inline(always)]
pub fn castling(&self) -> CastlingRights {
    self.castling
}
/// Returns the file on which an en-passant pawn capture is
/// possible, or `None` when no en-passant capture is possible.
#[inline(always)]
pub fn en_passant_file(&self) -> Option<File> {
    // Values >= 8 in the `en_passant_file` field encode "no
    // en-passant capture possible".
    match self.en_passant_file {
        file if file < 8 => Some(file),
        _ => None,
    }
}
/// Returns a bitboard of all occupied squares
/// (`self.pieces.color[WHITE] | self.pieces.color[BLACK]`,
/// maintained incrementally).
#[inline(always)]
pub fn occupied(&self) -> Bitboard {
    self._occupied
}
/// Returns the bitboard of all checkers that are attacking the
/// king.
///
/// The result is computed on first use and memoized in the
/// `_checkers` field (where `BB_UNIVERSAL_SET` means "not yet
/// calculated"), so repeated calls are practically free.
#[inline]
pub fn checkers(&self) -> Bitboard {
    let cached = self._checkers.get();
    if cached != BB_UNIVERSAL_SET {
        return cached;
    }
    let attackers = self.attacks_to(1 ^ self.to_move, self.king_square());
    self._checkers.set(attackers);
    attackers
}
/// Returns the bitboard of all pinned pieces and pawns of the
/// color of the side to move.
///
/// The result is computed on first use and memoized in the
/// `_pinned` field (where `BB_UNIVERSAL_SET` means "not yet
/// calculated"), so repeated calls are practically free.
#[inline]
pub fn pinned(&self) -> Bitboard {
    let cached = self._pinned.get();
    if cached != BB_UNIVERSAL_SET {
        return cached;
    }
    let pinned = self.find_pinned();
    self._pinned.set(pinned);
    pinned
}
/// Returns a bitboard of all pieces and pawns of color `us` that
/// attack `square`.
///
/// For each piece type the code pretends a piece of that type
/// stands on `square` and intersects its attack set with our
/// pieces of the same type; for pawns it shifts the target square
/// backwards along the two capture directions.
///
/// # Panics
///
/// Panics if `square > 63`.
pub fn attacks_to(&self, us: Color, square: Square) -> Bitboard {
    let occupied_by_us = self.pieces.color[us];
    if square > 63 {
        // We call "piece_attacks_from()" here many times, which for
        // performance reasons do not do array boundary checks. Since
        // "Board::attacks_to()" is a public function, we have to
        // guarantee memory safety for all its users.
        panic!("invalid square");
    }
    let square_bb = 1 << square;

    // NOTE(review): `us` is used with `get_unchecked` below, so this
    // presumably relies on `Color` always being 0 or 1 -- confirm
    // against `Color`'s definition.
    unsafe {
        let shifts: &[isize; 4] = PAWN_MOVE_SHIFTS.get_unchecked(us);
        (self.geometry.piece_attacks_from(ROOK, square, self.occupied()) & occupied_by_us &
         (self.pieces.piece_type[ROOK] | self.pieces.piece_type[QUEEN])) |
        (self.geometry.piece_attacks_from(BISHOP, square, self.occupied()) & occupied_by_us &
         (self.pieces.piece_type[BISHOP] | self.pieces.piece_type[QUEEN])) |
        (self.geometry.piece_attacks_from(KNIGHT, square, self.occupied()) & occupied_by_us &
         self.pieces.piece_type[KNIGHT]) |
        (self.geometry.piece_attacks_from(KING, square, self.occupied()) & occupied_by_us &
         self.pieces.piece_type[KING]) |
        // A pawn of ours attacks `square` if shifting `square_bb`
        // backwards along a capture direction lands on that pawn.
        (gen_shift(square_bb, -shifts[PAWN_EAST_CAPTURE]) & occupied_by_us &
         self.pieces.piece_type[PAWN] & !(BB_FILE_H | BB_RANK_1 | BB_RANK_8)) |
        (gen_shift(square_bb, -shifts[PAWN_WEST_CAPTURE]) & occupied_by_us &
         self.pieces.piece_type[PAWN] & !(BB_FILE_A | BB_RANK_1 | BB_RANK_8))
    }
}
/// Generates pseudo-legal moves.
///
/// A pseudo-legal move is a move that is otherwise legal, except
/// it might leave the king in check. Every legal move is a
/// pseudo-legal move, but not every pseudo-legal move is legal.
/// The generated moves will be pushed to `move_stack`. When `all`
/// is `true`, all pseudo-legal moves will be generated. When
/// `all` is `false`, only captures, pawn promotions to queen, and
/// check evasions will be generated.
pub fn generate_moves(&self, all: bool, move_stack: &mut MoveStack) {
    // All generated moves with pieces other than the king will be
    // legal. It is possible that some of the king's moves are
    // illegal because the destination square is under check, or
    // when castling, king's passing square is attacked. This is
    // so because verifying that these squares are not under
    // attack is quite expensive, and therefore we hope that the
    // alpha-beta pruning will eliminate the need for this
    // verification at all.
    assert!(self.is_legal());
    let king_square = self.king_square();
    let checkers = self.checkers();
    // `to_move` is 0 or 1 for a legal board (asserted above), so the
    // unchecked index is in bounds.
    let occupied_by_us = unsafe { *self.pieces.color.get_unchecked(self.to_move) };
    let occupied_by_them = self.occupied() ^ occupied_by_us;
    // When in check, every evasion must be generated even in
    // captures-only mode.
    let generate_all_moves = all || checkers != 0;
    assert!(king_square <= 63);

    // When in check, for every move except king's moves, the only
    // legal destination squares are those lying on the line
    // between the checker and the king. Also, no piece can move
    // to a square that is occupied by a friendly piece.
    let legal_dests = !occupied_by_us &
                      match ls1b(checkers) {
        0 =>
            // Not in check -- every move destination may be
            // considered "covering".
            BB_UNIVERSAL_SET,
        x if x == checkers =>
            // Single check -- calculate the check covering
            // destination subset (the squares between the king
            // and the checker). Notice that we must OR with "x"
            // itself, because knights give check not lying on a
            // line with the king.
            x |
            unsafe {
                *self.geometry
                     .squares_between_including
                     .get_unchecked(king_square)
                     .get_unchecked(bitscan_1bit(x))
            },
        _ =>
            // Double check -- no covering moves.
            BB_EMPTY_SET,
    };

    if legal_dests != 0 {
        // This block is not executed when the king is in double
        // check.
        let pinned = self.pinned();
        let pin_lines = unsafe { self.geometry.squares_at_line.get_unchecked(king_square) };
        let en_passant_bb = self.en_passant_bb();

        // Find queen, rook, bishop, and knight moves.
        {
            // Reduce the set of legal destinations when searching
            // only for captures, pawn promotions to queen, and
            // check evasions.
            let legal_dests = if generate_all_moves {
                legal_dests
            } else {
                assert_eq!(legal_dests, !occupied_by_us);
                occupied_by_them
            };
            for piece in QUEEN..PAWN {
                let mut bb = unsafe { *self.pieces.piece_type.get_unchecked(piece) } &
                             occupied_by_us;
                while bb != 0 {
                    let orig_square = bitscan_forward_and_reset(&mut bb);
                    let piece_legal_dests = if 1 << orig_square & pinned == 0 {
                        legal_dests
                    } else {
                        // If the piece is pinned, reduce the set
                        // of legal destination to the squares on
                        // the line of the pin.
                        unsafe { legal_dests & *pin_lines.get_unchecked(orig_square) }
                    };
                    self.push_piece_moves_to_stack(piece,
                                                   orig_square,
                                                   piece_legal_dests,
                                                   move_stack);
                }
            }
        }

        // Find pawn moves.
        {
            // Reduce the set of legal destinations when searching
            // only for captures, pawn promotions to queen, and
            // check evasions.
            let legal_dests = if generate_all_moves {
                legal_dests
            } else {
                assert_eq!(legal_dests, !occupied_by_us);
                legal_dests & (occupied_by_them | en_passant_bb | BB_PAWN_PROMOTION_RANKS)
            };

            // When in check, the en-passant capture can be a
            // legal evasion move, but only when the checking
            // piece is the passing pawn itself.
            let pawn_legal_dests = if checkers & self.pieces.piece_type[PAWN] == 0 {
                legal_dests
            } else {
                legal_dests | en_passant_bb
            };

            // Find all free pawn moves at once.
            let all_pawns = self.pieces.piece_type[PAWN] & occupied_by_us;
            let mut pinned_pawns = all_pawns & pinned;
            let free_pawns = all_pawns ^ pinned_pawns;
            if free_pawns != 0 {
                self.push_pawn_moves_to_stack(free_pawns,
                                              en_passant_bb,
                                              pawn_legal_dests,
                                              !generate_all_moves,
                                              move_stack);
            }

            // Find pinned pawn moves pawn by pawn, reducing the
            // set of legal destination for each pawn to the
            // squares on the line of the pin.
            while pinned_pawns != 0 {
                let pawn_square = bitscan_forward_and_reset(&mut pinned_pawns);
                let pin_line = unsafe { *pin_lines.get_unchecked(pawn_square) };
                self.push_pawn_moves_to_stack(1 << pawn_square,
                                              en_passant_bb,
                                              pin_line & pawn_legal_dests,
                                              !generate_all_moves,
                                              move_stack);
            }
        }
    }

    // Find king moves (pseudo-legal, possibly moving into check
    // or passing through an attacked square when castling). This
    // is executed even when the king is in double check.
    {
        // Reduce the set of destinations when searching only for
        // captures, pawn promotions to queen, and check evasions.
        let king_dests = if generate_all_moves {
            self.push_castling_moves_to_stack(move_stack);
            !occupied_by_us
        } else {
            occupied_by_them
        };
        self.push_piece_moves_to_stack(KING, king_square, king_dests, move_stack);
    }
}
/// Returns a null move.
///
/// "Null move" is a pseudo-move that changes nothing on the board
/// except the side to move. It is sometimes useful to include a
/// speculative null move in the search tree so as to achieve more
/// aggressive pruning.
///
/// The null move is encoded as the king "moving" to its own
/// square (origin equals destination).
#[inline]
pub fn null_move(&self) -> Move {
    let king_square = self.king_square();
    assert!(king_square <= 63);
    Move::new(self.to_move,
              MOVE_NORMAL,
              KING,
              king_square,
              king_square,
              NO_PIECE,
              self.en_passant_file,
              self.castling,
              0)
}
/// Checks if `move_digest` represents a pseudo-legal move.
///
/// If a move `m` exists that would be generated by
/// `generate_moves` if called for the current position on the
/// board, and for that move `m.digest() == move_digest`, this
/// method will return `Some(m)`. Otherwise it will return
/// `None`. This is useful when playing moves from the
/// transposition table, without calling `generate_moves`.
pub fn try_move_digest(&self, move_digest: MoveDigest) -> Option<Move> {
    // We could easily call `generate_moves` here and verify if
    // some of the generated moves has the right digest, but this
    // would be much slower. The whole purpose of this method is
    // to be able to check if a move is pseudo-legal *without*
    // generating all moves.
    if move_digest == 0 {
        return None;
    }

    // Decode the fields packed into the digest.
    let move_type = get_move_type(move_digest);
    let orig_square = get_orig_square(move_digest);
    let dest_square = get_dest_square(move_digest);
    let promoted_piece_code = get_aux_data(move_digest);
    let king_square = self.king_square();
    let checkers = self.checkers();
    assert!(self.to_move <= 1);
    assert!(move_type <= 3);
    assert!(orig_square <= 63);
    assert!(dest_square <= 63);

    // Castling moves are validated and returned right here.
    if move_type == MOVE_CASTLING {
        let side = if dest_square < orig_square {
            QUEENSIDE
        } else {
            KINGSIDE
        };
        if checkers != 0 || self.castling_obstacles(side) != 0 || orig_square != king_square ||
           dest_square != [[C1, C8], [G1, G8]][side][self.to_move] ||
           promoted_piece_code != 0 {
            return None;
        }
        return Some(Move::new(self.to_move,
                              MOVE_CASTLING,
                              KING,
                              orig_square,
                              dest_square,
                              NO_PIECE,
                              self.en_passant_file,
                              self.castling,
                              0));
    }

    // Figure out what is the moved piece.
    let occupied_by_us = unsafe { *self.pieces.color.get_unchecked(self.to_move) };
    let orig_square_bb = occupied_by_us & (1 << orig_square);
    let dest_square_bb = 1 << dest_square;
    let piece;
    'pieces: loop {
        for i in (KING..NO_PIECE).rev() {
            if orig_square_bb & unsafe { *self.pieces.piece_type.get_unchecked(i) } != 0 {
                piece = i;
                break 'pieces;
            }
        }
        // None of our pieces stands on the origin square.
        return None;
    }
    assert!(piece <= PAWN);

    // We will shrink the pseudo-legal destinations set as we go.
    let mut pseudo_legal_dests = !occupied_by_us;
    if piece != KING {
        pseudo_legal_dests &= match ls1b(checkers) {
            0 => BB_UNIVERSAL_SET,
            x if x == checkers => {
                // We are in check.
                x |
                unsafe {
                    *self.geometry
                         .squares_between_including
                         .get_unchecked(king_square)
                         .get_unchecked(bitscan_1bit(x))
                }
            }
            // We are in double check.
            _ => return None,
        };
        if orig_square_bb & self.pinned() != 0 {
            // The piece is pinned.
            pseudo_legal_dests &= unsafe {
                *self.geometry
                     .squares_at_line
                     .get_unchecked(king_square)
                     .get_unchecked(orig_square)
            }
        }
    };

    // This is a good initial guess.
    let mut captured_piece = self.get_piece_type_at(dest_square_bb);

    if piece == PAWN {
        let en_passant_bb = self.en_passant_bb();
        if checkers & self.pieces.piece_type[PAWN] != 0 {
            // If we are in check, and the checking piece is the
            // passing pawn, the en-passant capture is a legal
            // check evasion.
            pseudo_legal_dests |= en_passant_bb;
        }
        // Zero-initialize instead of `mem::uninitialized()` --
        // conjuring an uninitialized integer array is undefined
        // behavior, and `calc_pawn_dest_sets` overwrites all four
        // entries anyway, so the zero-fill is practically free.
        let mut dest_sets: [Bitboard; 4] = [0; 4];
        self.calc_pawn_dest_sets(orig_square_bb, en_passant_bb, &mut dest_sets);
        pseudo_legal_dests &= dest_sets[PAWN_PUSH] | dest_sets[PAWN_DOUBLE_PUSH] |
                              dest_sets[PAWN_WEST_CAPTURE] |
                              dest_sets[PAWN_EAST_CAPTURE];
        if pseudo_legal_dests & dest_square_bb == 0 {
            return None;
        }
        // The destination square determines the required move type.
        match dest_square_bb {
            x if x == en_passant_bb => {
                if move_type != MOVE_ENPASSANT ||
                   !self.en_passant_special_check_ok(orig_square, dest_square) ||
                   promoted_piece_code != 0 {
                    return None;
                }
                captured_piece = PAWN;
            }
            x if x & BB_PAWN_PROMOTION_RANKS != 0 => {
                if move_type != MOVE_PROMOTION {
                    return None;
                }
            }
            _ => {
                if move_type != MOVE_NORMAL || promoted_piece_code != 0 {
                    return None;
                }
            }
        }
    } else {
        pseudo_legal_dests &= unsafe {
            self.geometry.piece_attacks_from(piece, orig_square, self.occupied())
        };
        if move_type != MOVE_NORMAL || pseudo_legal_dests & dest_square_bb == 0 ||
           promoted_piece_code != 0 {
            return None;
        }
    }

    Some(Move::new(self.to_move,
                   move_type,
                   piece,
                   orig_square,
                   dest_square,
                   captured_piece,
                   self.en_passant_file,
                   self.castling,
                   promoted_piece_code))
}
/// Plays a move on the board.
///
/// It verifies if the move is legal. If the move is legal, the
/// board is updated and an `u64` value is returned, which should
/// be XOR-ed with the old board's hash value to obtain the new
/// board's hash value. If the move is illegal, `None` is returned
/// without updating the board. The move passed to this method
/// **must** have been generated by `generate_moves`,
/// `try_move_digest`, or `null_move` methods for the current
/// position on the board.
///
/// Moves generated by the `null_move` method are exceptions. For
/// them `do_move(m)` will return `None` if and only if the king
/// is in check.
pub fn do_move(&mut self, m: Move) -> Option<u64> {
    let us = self.to_move;
    let them = 1 ^ us;
    let move_type = m.move_type();
    let orig_square = m.orig_square();
    let dest_square = m.dest_square();
    let piece = m.piece();
    let captured_piece = m.captured_piece();
    // Accumulates the XOR-delta for the Zobrist hash.
    let mut hash = 0;

    assert!(us <= 1);
    assert!(piece < NO_PIECE);
    assert!(move_type <= 3);
    assert!(orig_square <= 63);
    assert!(dest_square <= 63);
    assert!({
                // Check if `m` was generated by `null_move`.
                m.is_null()
            } ||
            {
                // Check if `m` was generated by `try_move_digest` or
                // `generate_moves`.
                let mut m1 = m;
                let mut m2 = self.try_move_digest(m.digest()).unwrap();
                m1.set_score(0);
                m2.set_score(0);
                m1 == m2
            });
    if piece >= NO_PIECE {
        // Since "Board::do_move()" is a public function, we have
        // to guarantee memory safety for all its users.
        panic!("invalid piece");
    }

    unsafe {
        // Verify if the move will leave the king in check.
        if piece == KING {
            if orig_square != dest_square {
                if self.king_would_be_in_check(dest_square) {
                    return None; // the king is in check -- illegal move
                }
            } else {
                if self.checkers() != 0 {
                    return None; // invalid "null move"
                }
            }
        }

        // Move the rook if the move is castling.
        if move_type == MOVE_CASTLING {
            if self.king_would_be_in_check((orig_square + dest_square) >> 1) {
                return None; // king's passing square is attacked -- illegal move
            }
            let side = if dest_square > orig_square {
                KINGSIDE
            } else {
                QUEENSIDE
            };
            let mask = CASTLING_ROOK_MASK[us][side];
            self.pieces.piece_type[ROOK] ^= mask;
            self.pieces.color[us] ^= mask;
            hash ^= self.zobrist._castling_rook_movement[us][side];
        }

        let not_orig_bb = !(1 << orig_square);
        let dest_bb = 1 << dest_square;

        // empty the origin square
        *self.pieces.piece_type.get_unchecked_mut(piece) &= not_orig_bb;
        *self.pieces.color.get_unchecked_mut(us) &= not_orig_bb;
        hash ^= *self.zobrist
                     .pieces
                     .get_unchecked(us)
                     .get_unchecked(piece)
                     .get_unchecked(orig_square);

        // Remove the captured piece (if any).
        if captured_piece < NO_PIECE {
            let not_captured_bb = if move_type == MOVE_ENPASSANT {
                // En-passant: the captured pawn is one push (from
                // `them`'s point of view) behind the destination
                // square.
                let shift = PAWN_MOVE_SHIFTS.get_unchecked(them)[PAWN_PUSH];
                let captured_pawn_square = (dest_square as isize + shift) as Square;
                hash ^= *self.zobrist
                             .pieces
                             .get_unchecked(them)
                             .get_unchecked(captured_piece)
                             .get_unchecked(captured_pawn_square);
                !(1 << captured_pawn_square)
            } else {
                hash ^= *self.zobrist
                             .pieces
                             .get_unchecked(them)
                             .get_unchecked(captured_piece)
                             .get_unchecked(dest_square);
                !dest_bb
            };
            *self.pieces.piece_type.get_unchecked_mut(captured_piece) &= not_captured_bb;
            *self.pieces.color.get_unchecked_mut(them) &= not_captured_bb;
        }

        // Occupy the destination square.
        let dest_piece = if move_type == MOVE_PROMOTION {
            Move::piece_from_aux_data(m.aux_data())
        } else {
            piece
        };
        *self.pieces.piece_type.get_unchecked_mut(dest_piece) |= dest_bb;
        *self.pieces.color.get_unchecked_mut(us) |= dest_bb;
        hash ^= *self.zobrist
                     .pieces
                     .get_unchecked(us)
                     .get_unchecked(dest_piece)
                     .get_unchecked(dest_square);

        // Update castling rights (null moves do not affect castling).
        if orig_square != dest_square {
            hash ^= *self.zobrist.castling.get_unchecked(self.castling.value());
            self.castling.update(orig_square, dest_square);
            hash ^= *self.zobrist.castling.get_unchecked(self.castling.value());
        }

        // Update the en-passant file.
        hash ^= *self.zobrist.en_passant_file.get_unchecked(self.en_passant_file);
        self.en_passant_file = if piece == PAWN {
            match dest_square as isize - orig_square as isize {
                // A double pawn push opens an en-passant
                // opportunity on the pushed pawn's file.
                16 | -16 => {
                    let file = file(dest_square);
                    hash ^= *self.zobrist.en_passant_file.get_unchecked(file);
                    file
                }
                _ => NO_ENPASSANT_FILE,
            }
        } else {
            NO_ENPASSANT_FILE
        };

        // Change the side to move.
        self.to_move = them;
        hash ^= self.zobrist.to_move;

        // Update the auxiliary fields.
        self._occupied = self.pieces.color[WHITE] | self.pieces.color[BLACK];
        self._king_square.set(64);
        self._checkers.set(BB_UNIVERSAL_SET);
        self._pinned.set(BB_UNIVERSAL_SET);
    }

    assert!(self.is_legal());
    Some(hash)
}
/// Takes back a previously played move.
///
/// The move passed to this method **must** be the last move passed
/// to `do_move`.
pub fn undo_move(&mut self, m: Move) {
    // In this method we basically do the same things that we do
    // in `do_move`, but in reverse.
    let them = self.to_move;
    let us = 1 ^ them;
    let move_type = m.move_type();
    let orig_square = m.orig_square();
    let dest_square = m.dest_square();
    let aux_data = m.aux_data();
    let piece = m.piece();
    let captured_piece = m.captured_piece();
    assert!(them <= 1);
    assert!(piece < NO_PIECE);
    assert!(move_type <= 3);
    assert!(orig_square <= 63);
    assert!(dest_square <= 63);
    assert!(aux_data <= 3);
    assert!(m.en_passant_file() <= NO_ENPASSANT_FILE);
    if piece >= NO_PIECE {
        // Since "Board::undo_move()" is a public function, we
        // have to guarantee memory safety for all its users.
        panic!("invalid piece");
    }
    let orig_bb = 1 << orig_square;
    let not_dest_bb = !(1 << dest_square);

    unsafe {
        // Change the side to move.
        self.to_move = us;

        // Restore the en-passant file (the move carries the
        // pre-move value).
        self.en_passant_file = m.en_passant_file();

        // Restore castling rights.
        self.castling = m.castling();

        // Empty the destination square.
        let dest_piece = if move_type == MOVE_PROMOTION {
            Move::piece_from_aux_data(aux_data)
        } else {
            piece
        };
        *self.pieces.piece_type.get_unchecked_mut(dest_piece) &= not_dest_bb;
        *self.pieces.color.get_unchecked_mut(us) &= not_dest_bb;

        // Put back the captured piece (if any).
        if captured_piece < NO_PIECE {
            let captured_bb = if move_type == MOVE_ENPASSANT {
                // En-passant: the captured pawn sits one push
                // (from `them`'s point of view) behind the
                // destination square.
                let shift = PAWN_MOVE_SHIFTS.get_unchecked(them)[PAWN_PUSH];
                let captured_pawn_square = (dest_square as isize + shift) as Square;
                1 << captured_pawn_square
            } else {
                !not_dest_bb
            };
            *self.pieces.piece_type.get_unchecked_mut(captured_piece) |= captured_bb;
            *self.pieces.color.get_unchecked_mut(them) |= captured_bb;
        }

        // Restore the piece on the origin square.
        *self.pieces.piece_type.get_unchecked_mut(piece) |= orig_bb;
        *self.pieces.color.get_unchecked_mut(us) |= orig_bb;

        // Move the rook back if the move is castling.
        if move_type == MOVE_CASTLING {
            let side = if dest_square > orig_square {
                KINGSIDE
            } else {
                QUEENSIDE
            };
            let mask = *CASTLING_ROOK_MASK.get_unchecked(us).get_unchecked(side);
            self.pieces.piece_type[ROOK] ^= mask;
            *self.pieces.color.get_unchecked_mut(us) ^= mask;
        }

        // Update the auxiliary fields.
        self._occupied = self.pieces.color[WHITE] | self.pieces.color[BLACK];
        self._king_square.set(64);
        self._checkers.set(BB_UNIVERSAL_SET);
        self._pinned.set(BB_UNIVERSAL_SET);
    }
    assert!(self.is_legal());
}
/// Calculates and returns the Zobrist hash value for the board.
///
/// This is a relatively expensive, from-scratch calculation
/// (`do_move` maintains the same value incrementally).
///
/// Zobrist hashing is a technique to transform a board position
/// into a number of a fixed length, with an equal distribution
/// over all possible numbers, invented by Albert Zobrist. The key
/// property of this method is that two similar positions generate
/// entirely different hash numbers.
pub fn calc_hash(&self) -> u64 {
    let mut hash_value = 0;
    // XOR in a key for every (color, piece, square) occupancy.
    for color in 0..2 {
        for piece in 0..6 {
            let mut occupancy = self.pieces.piece_type[piece] & self.pieces.color[color];
            while occupancy != 0 {
                let square = bitscan_forward_and_reset(&mut occupancy);
                hash_value ^= self.zobrist.pieces[color][piece][square];
            }
        }
    }
    // Mix in the castling rights, the en-passant file, and the
    // side to move.
    hash_value ^= self.zobrist.castling[self.castling.value()];
    hash_value ^= self.zobrist.en_passant_file[self.en_passant_file];
    if self.to_move == BLACK {
        hash_value ^= self.zobrist.to_move;
    }
    hash_value
}
/// A helper method for `create`. It analyzes the board and
/// decides if it is a legal board.
///
/// In addition to the obviously wrong boards (that for example
/// declare some pieces having no or more than one color), there
/// are many chess boards that are impossible to create from the
/// starting chess position. Here we are interested to detect and
/// guard against only those of the cases that have a chance of
/// disturbing some of our explicit and unavoidably, implicit
/// presumptions about what a chess position is when writing the
/// code.
///
/// Invalid boards: 1. having more or less than 1 king from each
/// color; 2. having more than 8 pawns of a color; 3. having more
/// than 16 pieces (and pawns) of one color; 4. having the side
/// not to move in check; 5. having pawns on ranks 1 or 8;
/// 6. having castling rights when the king or the corresponding
/// rook is not on its initial square; 7. having an en-passant
/// square that is not having a pawn of corresponding color
/// before, and an empty square on it and behind it; 8. having an
/// en-passant square while the king would be in check if the
/// passing pawn is moved back to its original position.
fn is_legal(&self) -> bool {
    if self.to_move > 1 || self.en_passant_file > NO_ENPASSANT_FILE {
        return false;
    }
    let us = self.to_move;
    let en_passant_bb = self.en_passant_bb();
    let occupied = self.pieces.piece_type.into_iter().fold(0, |acc, x| {
        if acc & x == 0 {
            acc | x
        } else {
            BB_UNIVERSAL_SET
        }
    }); // Returns `UNIVERSAL_SET` if `self.pieces.piece_type` is messed up.
    let them = 1 ^ us;
    let o_us = self.pieces.color[us];
    let o_them = self.pieces.color[them];
    let our_king_bb = self.pieces.piece_type[KING] & o_us;
    let their_king_bb = self.pieces.piece_type[KING] & o_them;
    let pawns = self.pieces.piece_type[PAWN];

    // One big conjunction of all legality conditions (numbered in
    // the doc comment above):
    // piece-type sets disjoint, colors consistent and disjoint...
    occupied != BB_UNIVERSAL_SET && occupied == o_us | o_them && o_us & o_them == 0 &&
    // ...conditions 1-3 (piece counts)...
    pop_count(our_king_bb) == 1 && pop_count(their_king_bb) == 1 &&
    pop_count(pawns & o_us) <= 8 &&
    pop_count(pawns & o_them) <= 8 && pop_count(o_us) <= 16 &&
    pop_count(o_them) <= 16 &&
    // ...condition 4 (side not to move is not in check)...
    self.attacks_to(us, bitscan_forward(their_king_bb)) == 0 &&
    // ...condition 5 (no pawns on ranks 1 and 8)...
    pawns & BB_PAWN_PROMOTION_RANKS == 0 &&
    // ...condition 6 (castling rights match king/rook placement)...
    (!self.castling.can_castle(WHITE, QUEENSIDE) ||
     (self.pieces.piece_type[ROOK] & self.pieces.color[WHITE] & 1 << A1 != 0) &&
     (self.pieces.piece_type[KING] & self.pieces.color[WHITE] & 1 << E1 != 0)) &&
    (!self.castling.can_castle(WHITE, KINGSIDE) ||
     (self.pieces.piece_type[ROOK] & self.pieces.color[WHITE] & 1 << H1 != 0) &&
     (self.pieces.piece_type[KING] & self.pieces.color[WHITE] & 1 << E1 != 0)) &&
    (!self.castling.can_castle(BLACK, QUEENSIDE) ||
     (self.pieces.piece_type[ROOK] & self.pieces.color[BLACK] & 1 << A8 != 0) &&
     (self.pieces.piece_type[KING] & self.pieces.color[BLACK] & 1 << E8 != 0)) &&
    (!self.castling.can_castle(BLACK, KINGSIDE) ||
     (self.pieces.piece_type[ROOK] & self.pieces.color[BLACK] & 1 << H8 != 0) &&
     (self.pieces.piece_type[KING] & self.pieces.color[BLACK] & 1 << E8 != 0)) &&
    // ...conditions 7 and 8 (en-passant square consistency)...
    (en_passant_bb == 0 ||
     {
        let shifts: &[isize; 4] = &PAWN_MOVE_SHIFTS[them];
        let dest_square_bb = gen_shift(en_passant_bb, shifts[PAWN_PUSH]);
        let orig_square_bb = gen_shift(en_passant_bb, -shifts[PAWN_PUSH]);
        let our_king_square = bitscan_forward(our_king_bb);
        (dest_square_bb & pawns & o_them != 0) && (en_passant_bb & !occupied != 0) &&
        (orig_square_bb & !occupied != 0) &&
        unsafe {
            // Condition 8: move the passing pawn back to its
            // original square and verify our king is not attacked.
            let mask = orig_square_bb | dest_square_bb;
            let pawns = pawns ^ mask;
            let o_them = o_them ^ mask;
            let occupied = occupied ^ mask;
            0 ==
            (self.geometry.piece_attacks_from(ROOK, our_king_square, occupied) & o_them &
             (self.pieces.piece_type[ROOK] | self.pieces.piece_type[QUEEN])) |
            (self.geometry.piece_attacks_from(BISHOP, our_king_square, occupied) & o_them &
             (self.pieces.piece_type[BISHOP] | self.pieces.piece_type[QUEEN])) |
            (self.geometry.piece_attacks_from(KNIGHT, our_king_square, occupied) & o_them &
             self.pieces.piece_type[KNIGHT]) |
            (gen_shift(our_king_bb, -shifts[PAWN_EAST_CAPTURE]) & o_them & pawns & !BB_FILE_H) |
            (gen_shift(our_king_bb, -shifts[PAWN_WEST_CAPTURE]) & o_them & pawns & !BB_FILE_A)
        }
    }) &&
    {
        // Sanity-check the lazily maintained auxiliary fields
        // (debug builds only -- these are `assert!`s).
        assert_eq!(self._occupied, occupied);
        assert!(self._checkers.get() == BB_UNIVERSAL_SET ||
                self._checkers.get() == self.attacks_to(them, bitscan_1bit(our_king_bb)));
        assert!(self._pinned.get() == BB_UNIVERSAL_SET ||
                self._pinned.get() == self.find_pinned());
        assert!(self._king_square.get() > 63 ||
                self._king_square.get() == bitscan_1bit(our_king_bb));
        true
    }
}
/// A helper method for `push_pawn_moves_to_stack` and
/// `try_move_digest`. It calculates the pseudo-legal destination
/// squares for each pawn in `pawns` and stores them in the
/// `dest_sets` array.
///
/// `dest_sets` is indexed by the type of the pawn move: push,
/// double push, west capture, and east capture. The benefit of
/// this separation is that knowing the destination square and the
/// pawn move type (the index in the `dest_sets` array) is enough
/// to recover the origin square.
#[inline]
fn calc_pawn_dest_sets(&self,
                       pawns: Bitboard,
                       en_passant_bb: Bitboard,
                       dest_sets: &mut [Bitboard; 4]) {
    // Quiet moves (pushes) require an *empty* destination; captures
    // require an enemy piece (or the en-passant square). XOR-ing
    // `capture_targets` with the matching `QUIET` entry flips the
    // set accordingly (`x ^ BB_UNIVERSAL_SET == !x`).
    const QUIET: [Bitboard; 4] = [BB_UNIVERSAL_SET, // push
                                  BB_UNIVERSAL_SET, // double push
                                  BB_EMPTY_SET, // west capture
                                  BB_EMPTY_SET]; // east capture
    // Pawns that are candidates for each move type (e.g. only pawns
    // on ranks 2/7 may double push; edge-file pawns cannot capture
    // off the board).
    const CANDIDATES: [Bitboard; 4] = [!(BB_RANK_1 | BB_RANK_8),
                                       BB_RANK_2 | BB_RANK_7,
                                       !(BB_FILE_A | BB_RANK_1 | BB_RANK_8),
                                       !(BB_FILE_H | BB_RANK_1 | BB_RANK_8)];
    unsafe {
        let shifts: &[isize; 4] = PAWN_MOVE_SHIFTS.get_unchecked(self.to_move);
        let not_occupied_by_us = !*self.pieces.color.get_unchecked(self.to_move);
        let capture_targets = *self.pieces.color.get_unchecked(1 ^ self.to_move) |
                              en_passant_bb;
        for i in 0..4 {
            *dest_sets.get_unchecked_mut(i) = gen_shift(pawns & *CANDIDATES.get_unchecked(i),
                                                        *shifts.get_unchecked(i)) &
                                              (capture_targets ^ *QUIET.get_unchecked(i)) &
                                              not_occupied_by_us;
        }

        // Double pushes are trickier: the intermediate square must
        // be empty too, so a double push is possible only where a
        // single push was.
        dest_sets[PAWN_DOUBLE_PUSH] &= gen_shift(dest_sets[PAWN_PUSH], shifts[PAWN_PUSH]);
    }
}
/// A helper method for `generate_moves`. It finds all squares
/// attacked by `piece` from square `orig_square`, and for each
/// square that is within the `legal_dests` set pushes a new move
/// to `move_stack`. `piece` must not be a pawn.
#[inline]
fn push_piece_moves_to_stack(&self,
                             piece: PieceType,
                             orig_square: Square,
                             legal_dests: Bitboard,
                             move_stack: &mut MoveStack) {
    assert!(piece < PAWN);
    assert!(orig_square <= 63);
    // The squares attacked by `piece` from `orig_square`,
    // restricted to the allowed destinations.
    let attacks = unsafe {
        self.geometry.piece_attacks_from(piece, orig_square, self.occupied())
    };
    let mut remaining_dests = attacks & legal_dests;
    while remaining_dests != 0 {
        let dest_square = bitscan_forward_and_reset(&mut remaining_dests);
        let captured_piece = self.get_piece_type_at(1 << dest_square);
        move_stack.push(Move::new(self.to_move,
                                  MOVE_NORMAL,
                                  piece,
                                  orig_square,
                                  dest_square,
                                  captured_piece,
                                  self.en_passant_file,
                                  self.castling,
                                  0));
    }
}
/// A helper method for `generate_moves()`. It finds all
/// pseudo-legal moves by the set of pawns given by `pawns`,
/// making sure that all destination squares are within the
/// `legal_dests` set. Then it pushes the moves to `move_stack`.
/// When `only_queen_promotions` is `true`, only the first
/// (queen) promotion is generated for each promoting pawn.
#[inline]
fn push_pawn_moves_to_stack(&self,
                            pawns: Bitboard,
                            en_passant_bb: Bitboard,
                            legal_dests: Bitboard,
                            only_queen_promotions: bool,
                            move_stack: &mut MoveStack) {
    // Zero-initialize instead of `mem::uninitialized()` --
    // conjuring an uninitialized integer array is undefined
    // behavior, and `calc_pawn_dest_sets` overwrites all four
    // entries anyway, so the zero-fill is practically free.
    let mut dest_sets: [Bitboard; 4] = [0; 4];
    self.calc_pawn_dest_sets(pawns, en_passant_bb, &mut dest_sets);

    // Make sure all destination squares in all sets are legal.
    dest_sets[PAWN_DOUBLE_PUSH] &= legal_dests;
    dest_sets[PAWN_PUSH] &= legal_dests;
    dest_sets[PAWN_WEST_CAPTURE] &= legal_dests;
    dest_sets[PAWN_EAST_CAPTURE] &= legal_dests;

    // Scan each destination set (push, double push, west capture,
    // east capture). For each move calculate the origin and
    // destination squares, and determine the move type
    // (en-passant capture, pawn promotion, or a normal move).
    let shifts: &[isize; 4] = unsafe { PAWN_MOVE_SHIFTS.get_unchecked(self.to_move) };
    for i in 0..4 {
        let s = unsafe { dest_sets.get_unchecked_mut(i) };
        while *s != 0 {
            let dest_square = bitscan_forward_and_reset(s);
            let dest_square_bb = 1 << dest_square;
            // Knowing the move type (the index `i`), the origin
            // square is recovered by shifting back.
            let orig_square = (dest_square as isize -
                               unsafe {
                *shifts.get_unchecked(i)
            }) as Square;
            let captured_piece = self.get_piece_type_at(dest_square_bb);
            match dest_square_bb {
                // en-passant capture
                x if x == en_passant_bb => {
                    if self.en_passant_special_check_ok(orig_square, dest_square) {
                        move_stack.push(Move::new(self.to_move,
                                                  MOVE_ENPASSANT,
                                                  PAWN,
                                                  orig_square,
                                                  dest_square,
                                                  PAWN,
                                                  self.en_passant_file,
                                                  self.castling,
                                                  0));
                    }
                }
                // pawn promotion
                x if x & BB_PAWN_PROMOTION_RANKS != 0 => {
                    // The promotion with aux data 0 is pushed
                    // first -- presumably the queen promotion,
                    // given the `only_queen_promotions` early
                    // break (see `Move::piece_from_aux_data`).
                    for p in 0..4 {
                        move_stack.push(Move::new(self.to_move,
                                                  MOVE_PROMOTION,
                                                  PAWN,
                                                  orig_square,
                                                  dest_square,
                                                  captured_piece,
                                                  self.en_passant_file,
                                                  self.castling,
                                                  p));
                        if only_queen_promotions {
                            break;
                        }
                    }
                }
                // normal pawn move (push or plain capture)
                _ => {
                    move_stack.push(Move::new(self.to_move,
                                              MOVE_NORMAL,
                                              PAWN,
                                              orig_square,
                                              dest_square,
                                              captured_piece,
                                              self.en_passant_file,
                                              self.castling,
                                              0));
                }
            }
        }
    }
}
/// A helper method for `generate_moves`. It figures out which
/// castling moves are pseudo-legal and pushes them to
/// `move_stack`.
#[inline(always)]
fn push_castling_moves_to_stack(&self, move_stack: &mut MoveStack) {
    // No castling is possible while in check.
    if self.checkers() != 0 {
        return;
    }
    for side in 0..2 {
        if self.castling_obstacles(side) != 0 {
            continue;
        }
        // It seems that castling is legal unless king's passing or
        // final squares are attacked, but we do not care about
        // that, because this will be verified in "do_move()".
        let dest_square = unsafe {
            *[[C1, C8], [G1, G8]]
                 .get_unchecked(side)
                 .get_unchecked(self.to_move)
        };
        move_stack.push(Move::new(self.to_move,
                                  MOVE_CASTLING,
                                  KING,
                                  self.king_square(),
                                  dest_square,
                                  NO_PIECE,
                                  self.en_passant_file,
                                  self.castling,
                                  0));
    }
}
/// A helper method for `generate_moves`. It returns all pinned
/// pieces belonging to the side to move.
#[inline(always)]
fn find_pinned(&self) -> Bitboard {
    let king_square = self.king_square();
    let occupied_by_them = unsafe { *self.pieces.color.get_unchecked(1 ^ self.to_move) };
    // Guards the `get_unchecked(king_square)` below.
    assert!(king_square <= 63);
    // To find all potential pinners, we remove all our pieces
    // from the board, and all enemy pieces that can not slide in
    // the particular manner (diagonally or straight). Then we
    // calculate what enemy pieces a bishop or a rook placed on
    // our king's square can attack. The attacked enemy pieces are
    // the potential pinners.
    let diag_sliders = occupied_by_them &
                       (self.pieces.piece_type[QUEEN] | self.pieces.piece_type[BISHOP]);
    let straight_sliders = occupied_by_them &
                           (self.pieces.piece_type[QUEEN] | self.pieces.piece_type[ROOK]);
    let mut pinners = unsafe {
        diag_sliders & self.geometry.piece_attacks_from(BISHOP, king_square, diag_sliders) |
        straight_sliders & self.geometry.piece_attacks_from(ROOK, king_square, straight_sliders)
    };
    if pinners == 0 {
        0
    } else {
        let occupied_by_us = unsafe { *self.pieces.color.get_unchecked(self.to_move) };
        // Per-square table: squares between the king and any other square.
        let between_king_square_and = unsafe {
            self.geometry
                .squares_between_including
                .get_unchecked(king_square)
        };
        // Everything that could block a pin ray: our pieces (minus the
        // king itself) plus enemy non-pinner pieces.
        let blockers = occupied_by_us & !(1 << king_square) | (occupied_by_them & !pinners);
        let mut pinned_or_discovered_checkers = 0;
        // Scan all potential pinners and see if there is one and only
        // one piece between the pinner and our king.
        while pinners != 0 {
            let pinner_square = bitscan_forward_and_reset(&mut pinners);
            let blockers_group = unsafe {
                between_king_square_and.get_unchecked(pinner_square)
            } & blockers;
            if ls1b(blockers_group) == blockers_group {
                // A group of blockers consisting of only one
                // piece is either a pinned piece of ours or
                // enemy's discovered checker.
                pinned_or_discovered_checkers |= blockers_group;
            }
        }
        // Keep only our own pieces: those are the pinned ones.
        pinned_or_discovered_checkers & occupied_by_us
    }
}
/// A helper method for `generate_moves`. It returns a bitboard
/// with the en-passant target square set, or `0` when no
/// en-passant capture is currently possible.
#[inline]
fn en_passant_bb(&self) -> Bitboard {
    assert!(self.en_passant_file <= NO_ENPASSANT_FILE);
    if self.en_passant_file >= NO_ENPASSANT_FILE {
        // `NO_ENPASSANT_FILE` encodes "no en-passant square".
        return 0;
    }
    // White captures onto rank 6 (bit offset 40), black onto
    // rank 3 (bit offset 16).
    let rank_offset = if self.to_move == WHITE { 40 } else { 16 };
    1 << self.en_passant_file << rank_offset
}
/// A helper method. It returns the square that the king of the
/// side to move occupies. The value is lazily calculated and
/// saved for future use.
#[inline]
fn king_square(&self) -> Square {
    // A cached value greater than 63 (no valid square) marks the
    // cache as stale; recompute and store it via the interior-
    // mutable `Cell`.
    if self._king_square.get() > 63 {
        self._king_square
            .set(bitscan_1bit(self.pieces.piece_type[KING] &
                              unsafe { *self.pieces.color.get_unchecked(self.to_move) }));
    }
    self._king_square.get()
}
/// A helper method for `do_move`. It returns if the king of the
/// side to move would be in check if moved to `square`.
#[inline]
fn king_would_be_in_check(&self, square: Square) -> bool {
    let them = 1 ^ self.to_move;
    // Remove our king from the occupancy so that sliding attacks
    // "pass through" its current square (the king may be stepping
    // along the ray of the attacker).
    let occupied = self.occupied() & !(1 << self.king_square());
    // Guards the unchecked indexing below.
    assert!(them <= 1);
    assert!(square <= 63);
    unsafe {
        let occupied_by_them = *self.pieces.color.get_unchecked(them);
        // Rook/queen attacks along ranks and files.
        (self.geometry.piece_attacks_from(ROOK, square, occupied) & occupied_by_them &
         (self.pieces.piece_type[ROOK] | self.pieces.piece_type[QUEEN])) != 0 ||
        // Bishop/queen attacks along diagonals.
        (self.geometry.piece_attacks_from(BISHOP, square, occupied) & occupied_by_them &
         (self.pieces.piece_type[BISHOP] | self.pieces.piece_type[QUEEN])) != 0 ||
        // Knight attacks.
        (self.geometry.piece_attacks_from(KNIGHT, square, occupied) & occupied_by_them &
         self.pieces.piece_type[KNIGHT]) != 0 ||
        // Adjacent enemy king.
        (self.geometry.piece_attacks_from(KING, square, occupied) & occupied_by_them &
         self.pieces.piece_type[KING]) != 0 ||
        {
            // Pawn attacks: shift `square` *backwards* by the enemy's
            // capture shifts and look for an enemy pawn there. The
            // file masks stop A/H-file wrap-around; pawns never stand
            // on ranks 1/8, hence those masks too.
            let shifts: &[isize; 4] = PAWN_MOVE_SHIFTS.get_unchecked(them);
            let square_bb = 1 << square;
            (gen_shift(square_bb, -shifts[PAWN_EAST_CAPTURE]) & occupied_by_them &
             self.pieces.piece_type[PAWN] &
             !(BB_FILE_H | BB_RANK_1 | BB_RANK_8)) != 0 ||
            (gen_shift(square_bb, -shifts[PAWN_WEST_CAPTURE]) & occupied_by_them &
             self.pieces.piece_type[PAWN] &
             !(BB_FILE_A | BB_RANK_1 | BB_RANK_8)) != 0
        }
    }
}
/// A helper method. It returns the type of the piece occupying the
/// square represented by the single-bit bitboard `square_bb`, or
/// `NO_PIECE` when the square is empty.
#[inline(always)]
fn get_piece_type_at(&self, square_bb: Bitboard) -> PieceType {
    assert!(square_bb != 0);
    // Exactly one bit must be set.
    assert_eq!(square_bb, ls1b(square_bb));
    if square_bb & self.occupied() == 0 {
        return NO_PIECE;
    }
    let bb = square_bb & self.occupied();
    // Scan the per-type occupancy boards from PAWN down to KING and
    // report the first one containing the square.
    (KING..NO_PIECE)
        .rev()
        // SAFETY: the range only yields valid piece-type indexes.
        .find(|&piece| bb & unsafe { *self.pieces.piece_type.get_unchecked(piece) } != 0)
        .unwrap_or_else(|| panic!("invalid board"))
}
/// A helper method for `push_pawn_moves_to_stack`. It tests for
/// the special case when an en-passant capture discovers check on
/// 4/5-th rank.
///
/// This method tests for the very rare occasion when the two
/// pawns participating in en-passant capture, disappearing in one
/// move, discover an unexpected check along the horizontal (rank
/// 4 of 5). `orig_square` and `dist_square` are the origin square
/// and the destination square of the capturing pawn.
fn en_passant_special_check_ok(&self, orig_square: Square, dest_square: Square) -> bool {
    let king_square = self.king_square();
    if (1 << king_square) & [BB_RANK_5, BB_RANK_4][self.to_move] == 0 {
        // The king is not on the 4/5-th rank -- we are done.
        true
    } else {
        // The king is on the 4/5-th rank -- we have more work to do.
        // Both the capturing pawn and the captured pawn vanish from
        // that rank; the captured pawn sits one push *behind* the
        // destination square.
        let the_two_pawns = 1 << orig_square |
                            gen_shift(1,
                                      dest_square as isize -
                                      PAWN_MOVE_SHIFTS[self.to_move][PAWN_PUSH]);
        // Re-test rook/queen attacks against the king with both pawns
        // removed from the occupancy.
        let occupied = self.occupied() & !the_two_pawns;
        let occupied_by_them = self.pieces.color[1 ^ self.to_move] & !the_two_pawns;
        let checkers = unsafe {
            self.geometry.piece_attacks_from(ROOK, king_square, occupied)
        } & occupied_by_them &
        (self.pieces.piece_type[ROOK] | self.pieces.piece_type[QUEEN]);
        // The capture is OK only if no horizontal check was discovered.
        checkers == 0
    }
}
/// A helper method. It returns a bitboard with the set of pieces
/// standing between the king and the castling rook for `side`, or
/// the full board when castling on that side is not permitted.
#[inline]
fn castling_obstacles(&self, side: CastlingSide) -> Bitboard {
    assert!(side <= 1);
    // Squares that must be empty, per color and castling side.
    const BETWEEN: [[Bitboard; 2]; 2] = [[1 << B1 | 1 << C1 | 1 << D1, 1 << F1 | 1 << G1],
                                         [1 << B8 | 1 << C8 | 1 << D8, 1 << F8 | 1 << G8]];
    if !self.castling.can_castle(self.to_move, side) {
        // Castling is not possible, therefore every piece on
        // every square on the board can be considered an
        // obstacle.
        return BB_UNIVERSAL_SET;
    }
    // SAFETY: `to_move` and `side` are both 0/1 indexes (asserted above
    // for `side`), in bounds for the 2x2 table.
    self.occupied() & unsafe { *BETWEEN.get_unchecked(self.to_move).get_unchecked(side) }
}
}
// Pawn move types:
// ================
/// Pawn push.
const PAWN_PUSH: usize = 0;
/// Double pawn push.
const PAWN_DOUBLE_PUSH: usize = 1;
/// Pawn capture toward the queen-side.
const PAWN_WEST_CAPTURE: usize = 2;
/// Pawn capture toward the king-side.
const PAWN_EAST_CAPTURE: usize = 3;
/// Pawn move shifts (one for each color and pawn move type).
///
/// Example: The bitboard for a white pawn on "e2" is `1 << E2`. If
/// the pawn is pushed one square forward, the updated bitboard would
/// be: `gen_shift(1 << E2, PAWN_MOVE_SHIFTS[WHITE][PAWN_PUSH])`
///
/// Note that the west/east capture shifts swap magnitude between the
/// colors (7/9 for white vs. -9/-7 for black) because "west" is the
/// same board direction regardless of which side moves.
static PAWN_MOVE_SHIFTS: [[isize; 4]; 2] = [[8, 16, 7, 9], [-8, -16, -9, -7]];
/// Indicates that en-passant capture is not possible.
const NO_ENPASSANT_FILE: usize = 8;
/// Bitboards that describe how the castling rook moves during the
/// castling move (origin and destination squares of the rook, per
/// color and castling side).
const CASTLING_ROOK_MASK: [[Bitboard; 2]; 2] = [[1 << A1 | 1 << D1, 1 << H1 | 1 << F1],
                                                [1 << A8 | 1 << D8, 1 << H8 | 1 << F8]];
#[cfg(test)]
mod tests {
    use super::*;
    use basetypes::*;
    use moves::*;

    // Checks `piece_attacks_from` for sliding and knight attacks on a
    // sparsely populated board.
    #[test]
    fn test_attacks_from() {
        use position::tables::*;
        let b = Board::from_fen("k7/8/8/8/3P4/8/8/7K w - - 0 1").ok().unwrap();
        let g = BoardGeometry::get();
        unsafe {
            assert_eq!(g.piece_attacks_from(BISHOP,
                                            A1,
                                            b.pieces.color[WHITE] | b.pieces.color[BLACK]),
                       1 << B2 | 1 << C3 | 1 << D4);
            assert_eq!(g.piece_attacks_from(BISHOP,
                                            A1,
                                            b.pieces.color[WHITE] | b.pieces.color[BLACK]),
                       1 << B2 | 1 << C3 | 1 << D4);
            assert_eq!(g.piece_attacks_from(KNIGHT,
                                            A1,
                                            b.pieces.color[WHITE] | b.pieces.color[BLACK]),
                       1 << B3 | 1 << C2);
        }
    }

    // Checks `attacks_to` (which side attacks a given square) against
    // hand-computed attacker bitboards.
    #[test]
    fn test_attacks_to() {
        let b = Board::from_fen("8/8/8/3K1p1P/r4k2/3Pq1N1/7p/1B5Q w - - 0 1").ok().unwrap();
        assert_eq!(b.attacks_to(WHITE, E4),
                   1 << D3 | 1 << G3 | 1 << D5 | 1 << H1);
        assert_eq!(b.attacks_to(BLACK, E4),
                   1 << E3 | 1 << F4 | 1 << F5 | 1 << A4);
        assert_eq!(b.attacks_to(BLACK, G6), 0);
        assert_eq!(b.attacks_to(WHITE, G6), 1 << H5);
        assert_eq!(b.attacks_to(WHITE, C2), 1 << B1);
        assert_eq!(b.attacks_to(WHITE, F4), 0);
        assert_eq!(b.attacks_to(BLACK, F4), 1 << A4 | 1 << E3);
        assert_eq!(b.attacks_to(BLACK, F5), 1 << F4);
        assert_eq!(b.attacks_to(WHITE, A6), 0);
        assert_eq!(b.attacks_to(BLACK, G1), 1 << H2 | 1 << E3);
        assert_eq!(b.attacks_to(BLACK, A1), 1 << A4);
    }

    // Pins down the numeric values of the piece-type constants, which
    // other code (e.g. `get_piece_type_at`'s range scan) relies on.
    #[test]
    fn test_piece_type_constants_constraints() {
        assert_eq!(KING, 0);
        assert_eq!(QUEEN, 1);
        assert_eq!(ROOK, 2);
        assert_eq!(BISHOP, 3);
        assert_eq!(KNIGHT, 4);
        assert_eq!(PAWN, 5);
    }

    // Collects the destination squares of all generated pawn moves for
    // both colors (including en-passant and promotions) and compares
    // them with expected destination sets.
    #[test]
    fn test_pawn_dest_sets() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("k2q4/4Ppp1/5P2/6Pp/6P1/8/7P/7K w - h6 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let mut pawn_dests = 0u64;
        while let Some(m) = stack.pop() {
            if m.piece() == PAWN {
                pawn_dests |= 1 << m.dest_square();
            }
        }
        assert_eq!(pawn_dests,
                   1 << H3 | 1 << H4 | 1 << G6 | 1 << E8 | 1 << H5 | 1 << G7 | 1 << H6 | 1 << D8);
        let b = Board::from_fen("k2q4/4Ppp1/5P2/6Pp/6P1/8/7P/7K b - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let mut pawn_dests = 0u64;
        while let Some(m) = stack.pop() {
            if m.piece() == PAWN {
                pawn_dests |= 1 << m.dest_square();
            }
        }
        assert_eq!(pawn_dests, 1 << H4 | 1 << G6 | 1 << G4 | 1 << F6);
    }

    // Compares generated move counts in a series of positions with and
    // without checks / en-passant rights.
    #[test]
    fn test_move_generation_1() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("8/8/6Nk/2pP4/3PR3/2b1q3/3P4/4K3 w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 5);
        stack.clear();
        let b = Board::from_fen("8/8/6Nk/2pP4/3PR3/2b1q3/3P4/6K1 w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 7);
        stack.clear();
        let b = Board::from_fen("8/8/6NK/2pP4/3PR3/2b1q3/3P4/7k w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 8);
        stack.clear();
        let b = Board::from_fen("8/8/6Nk/2pP4/3PR3/2b1q3/3P4/7K w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 22);
        stack.clear();
        let b = Board::from_fen("8/8/6Nk/2pP4/3PR3/2b1q3/3P4/7K w - c6 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 23);
        stack.clear();
        let b = Board::from_fen("K7/8/6N1/2pP4/3PR3/2b1q3/3P4/7k b - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 25);
        stack.clear();
        let b = Board::from_fen("K7/8/6N1/2pP4/3PR2k/2b1q3/3P4/8 b - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 5);
        stack.clear();
    }

    // Focuses on en-passant legality: positions where the capture is
    // (or is not) allowed, including FEN validation failures.
    #[test]
    fn test_move_generation_2() {
        let mut stack = MoveStack::new();
        assert!(Board::from_fen("8/8/7k/8/4pP2/8/3B4/7K b - f3 0 1").is_err());
        assert!(Board::from_fen("8/8/8/8/4pP2/8/3B4/7K b - f3 0 1").is_err());
        assert!(Board::from_fen("8/8/8/4k3/4pP2/8/3B4/7K b - f3 0 1").is_ok());
        let b = Board::from_fen("8/8/8/7k/5pP1/8/8/5R1K b - g3 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 6);
        stack.clear();
        let b = Board::from_fen("8/8/8/5k2/5pP1/8/8/5R1K b - g3 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 7);
        stack.clear();
        let b = Board::from_fen("8/8/8/8/4pP1k/8/8/4B2K b - f3 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 5);
        stack.clear();
    }

    // En-passant with king on the capture rank (horizontal discovered
    // check case handled by `en_passant_special_check_ok`).
    #[test]
    fn test_move_generation_3() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("8/8/8/8/4RpPk/8/8/7K b - g3 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 6);
        stack.clear();
    }

    // Same rank scenario as above but with a queen as the would-be
    // discovered attacker.
    #[test]
    fn test_move_generation_4() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("8/8/8/8/3QPpPk/8/8/7K b - g3 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 7);
        stack.clear();
    }

    // Castling-move generation under varying castling rights, blocked
    // squares, and attacked passing squares.
    #[test]
    fn test_move_generation_5() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("rn2k2r/8/8/8/8/8/8/R3K2R w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 5);
        stack.clear();
        let b = Board::from_fen("rn2k2r/8/8/8/8/8/8/R3K2R w K - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 6);
        stack.clear();
        let b = Board::from_fen("rn2k2r/8/8/8/8/8/8/R3K2R w KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 7);
        stack.clear();
        let b = Board::from_fen("rn2k2r/8/8/8/8/8/8/R3K2R b KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 5);
        stack.clear();
        let b = Board::from_fen("rn2k2r/8/8/8/8/8/8/R3K2R b KQk - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 6);
        stack.clear();
        let b = Board::from_fen("4k3/8/8/8/8/5n2/8/R3K2R w KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 5);
        stack.clear();
        // Pseudo-legal castling through an attacked square must be
        // rejected by `do_move`, so count only moves that `do_move`
        // accepts here.
        let mut b = Board::from_fen("4k3/8/8/8/8/6n1/8/R3K2R w KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let mut count = 0;
        while let Some(m) = stack.pop() {
            if b.do_move(m).is_some() {
                count += 1;
                b.undo_move(m);
            }
        }
        assert_eq!(count, 19 + 4);
        let b = Board::from_fen("4k3/8/8/8/8/4n3/8/R3K2R w KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 7);
        stack.clear();
        let b = Board::from_fen("4k3/8/8/8/8/4n3/8/R3K2R w - - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 5);
        stack.clear();
        let b = Board::from_fen("4k3/8/1b6/8/8/8/8/R3K2R w KQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        assert_eq!(stack.count(), 19 + 7);
        stack.clear();
    }

    // Verifies that `undo_move` exactly restores the position: after
    // do/undo the regenerated move list must match the original one.
    #[test]
    fn test_do_undo_move() {
        let mut stack = MoveStack::new();
        let mut b = Board::from_fen("b3k2r/6P1/8/5pP1/8/8/6P1/R3K2R w kKQ f6 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let count = stack.count();
        while let Some(m) = stack.pop() {
            if let Some(h) = b.do_move(m) {
                // A successful move must produce a non-zero hash.
                assert!(h != 0);
                b.undo_move(m);
                let mut other_stack = MoveStack::new();
                b.generate_moves(true, &mut other_stack);
                assert_eq!(count, other_stack.count());
            }
        }
        assert_eq!(stack.count(), 0);
        let mut b = Board::from_fen("b3k2r/6P1/8/5pP1/8/8/8/R3K2R b kKQ - 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let count = stack.count();
        while let Some(m) = stack.pop() {
            if b.do_move(m).is_some() {
                b.undo_move(m);
                let mut other_stack = MoveStack::new();
                b.generate_moves(true, &mut other_stack);
                assert_eq!(count, other_stack.count());
            }
        }
    }

    // Checks pinned-piece detection against a position with three pins.
    #[test]
    fn test_find_pinned() {
        use basetypes::*;
        let b = Board::from_fen("k2r4/3r4/3N4/5n2/qp1K2Pq/8/3PPR2/6b1 w - - 0 1").ok().unwrap();
        assert_eq!(b.find_pinned(), 1 << F2 | 1 << D6 | 1 << G4);
    }

    // `generate_moves(false, ...)` should emit only captures (plus
    // queen promotions), never quiet moves.
    #[test]
    fn test_generate_only_captures() {
        let mut stack = MoveStack::new();
        let b = Board::from_fen("k6r/P7/8/6p1/6pP/8/8/7K b - h3 0 1").ok().unwrap();
        b.generate_moves(false, &mut stack);
        assert_eq!(stack.count(), 4);
        stack.clear();
        let b = Board::from_fen("k7/8/8/4Pp2/4K3/8/8/8 w - f6 0 1").ok().unwrap();
        b.generate_moves(false, &mut stack);
        assert_eq!(stack.count(), 8);
        stack.clear();
        let b = Board::from_fen("k7/8/8/4Pb2/4K3/8/8/8 w - - 0 1").ok().unwrap();
        b.generate_moves(false, &mut stack);
        assert_eq!(stack.count(), 7);
        stack.clear();
    }

    // A null move must be reversible and must be rejected when the side
    // to move is in check.
    #[test]
    fn test_null_move() {
        let mut stack = MoveStack::new();
        let mut b = Board::from_fen("k7/8/8/5Pp1/8/8/8/4K2R w K g6 0 1").ok().unwrap();
        b.generate_moves(true, &mut stack);
        let count = stack.count();
        stack.clear();
        let m = b.null_move();
        assert!(b.do_move(m).is_some());
        b.undo_move(m);
        b.generate_moves(true, &mut stack);
        assert_eq!(count, stack.count());
        stack.clear();
        let mut b = Board::from_fen("k7/4r3/8/8/8/8/8/4K3 w - - 0 1").ok().unwrap();
        let m = b.null_move();
        assert!(b.do_move(m).is_none());
    }

    // Regression test: doing a generated move must never leave the
    // board in an illegal state.
    #[test]
    fn test_move_into_check_bug() {
        let mut stack = MoveStack::new();
        let mut b = Board::from_fen("rnbq1bn1/pppP3k/8/3P2B1/2B5/5N2/PPPN1PP1/2K4R b - - 0 1")
                        .ok()
                        .unwrap();
        b.generate_moves(true, &mut stack);
        let m = stack.pop().unwrap();
        b.do_move(m);
        assert!(b.is_legal());
    }

    // Exhaustively probes all 16-bit move digests: every digest that
    // `try_move_digest` accepts must correspond to a generated move.
    #[test]
    fn test_try_move_digest() {
        fn try_all(b: &Board, stack: &MoveStack) {
            let mut i = 0;
            loop {
                if let Some(m) = b.try_move_digest(i) {
                    assert!(stack.iter().find(|x| **x == m).is_some());
                }
                if i == 0xffff {
                    break;
                } else {
                    i += 1;
                }
            }
        }

        let mut stack = MoveStack::new();
        let b = Board::from_fen("rnbqk2r/p1p1pppp/8/8/2Pp4/5NP1/pP1PPPBP/RNBQK2R b KQkq c3 0 \
                                 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);

        stack.clear();
        let b = Board::from_fen("rnbqk2r/p1p1pppp/8/8/Q1Pp4/5NP1/pP1PPPBP/RNB1K2R b KQkq - 0 \
                                 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);

        stack.clear();
        let b = Board::from_fen("rnbqk2r/p1p1pppp/3N4/8/Q1Pp4/6P1/pP1PPPBP/RNB1K2R b KQkq - 0 \
                                 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);

        stack.clear();
        let b = Board::from_fen("rnbq3r/p1p1pppp/8/3k4/2Pp4/5NP1/pP1PPPBP/RNBQK2R b KQ c3 0 \
                                 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);

        stack.clear();
        let b = Board::from_fen("rn1qk2r/p1pbpppp/8/8/Q1Pp4/5NP1/pP1PPPBP/RNB1K2R b KQkq - 0 \
                                 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);

        stack.clear();
        let b = Board::from_fen("8/8/8/8/4RpPk/8/8/7K b - g3 0 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);

        stack.clear();
        let b = Board::from_fen("8/8/8/8/5pPk/8/8/7K b - g3 0 1")
                    .ok()
                    .unwrap();
        b.generate_moves(true, &mut stack);
        try_all(&b, &stack);
    }
}
|
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
use std::rc::Rc;
use std::cmp::min;
use adapton::bitstring::*;
use adapton::engine::*;
use macros::*;
/// Probablistically Balanced Trie
/// Rough implementation of probabilistic tries from OOPSLA 2015 paper.
///
/// See also: [Tries in OCaml](http://github.com/plum-umd/adapton.ocaml)
#[derive(Debug,PartialEq,Eq,Clone)]
pub enum Trie<X> {
    /// Empty subtrie at the path encoded by the bitstring.
    Nil(BS),
    /// A stored element, at the path encoded by the bitstring.
    Leaf(BS, X),
    /// Interior node: its bitstring plus left (0) and right (1) subtries.
    Bin(BS, Box<Trie<X>>, Box<Trie<X>>),
    /// Root node carrying trie-wide metadata.
    Root(Meta, Box<Trie<X>>),
    /// Adapton name annotation wrapping a subtrie.
    Name(Name, Box<Trie<X>>),
    /// Adapton articulation (incremental-computation cell) holding a subtrie.
    Art(Art<Trie<X>>),
}
/// Seed intended for element-placement hashing.
/// NOTE(review): not referenced anywhere in this part of the file —
/// confirm whether callers elsewhere use it.
pub const PLACEMENT_SEED: u64 = 42;
/// Metadata held by the root node.
#[derive(Debug,PartialEq,Eq,Hash,Clone)]
pub struct Meta {
    // Minimum depth to which the trie is always expanded before a
    // leaf may be placed (see `Trie::mfn`).
    pub min_depth: i64,
}
/// Seeded hashing of trie metadata.
/// (Uses pre-2018 anonymous trait parameters: the `u64` is the seed.)
pub trait MetaT {
    fn hash_seeded(&self, u64);
}
impl MetaT for Meta {
    /// Hashes `seed`, a domain-separation tag, and `min_depth` into a
    /// fresh hasher.
    ///
    /// NOTE(review): the hasher's result is never read or returned, so
    /// as written this call has no observable effect (see its use in
    /// `Hash for Trie`). It likely should return `hasher.finish()` —
    /// confirm the intended contract before relying on it.
    fn hash_seeded(&self, seed: u64) {
        let mut hasher = DefaultHasher::new();
        seed.hash(&mut hasher);
        "Adapton.Trie.Meta".hash(&mut hasher);
        self.min_depth.hash(&mut hasher);
    }
}
// impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> PartialEq for Trie<X> {
// fn eq(&self, other: &Trie<X>) -> bool {
// match (self, other) {
// (&Trie::Nil(ref bs_self), &Trie::Nil(ref bs_other)) => bs_self == bs_other,
// (&Trie::Leaf(ref bs_self, ref e_self), &Trie::Leaf(ref bs_other, ref e_other)) => {
// let bs_equal = bs_self == bs_other;
// let b = bs_equal && e_self == e_other;
// // println!("{:?}\n{}\n{:?}", self, b, other);
// b
// }
// (&Trie::Bin(ref bs, ref left, ref right),
// &Trie::Bin(ref bs_other, ref left_other, ref right_other)) => {
// let b = bs == bs_other && left == left_other && right == right_other;
// // println!("{:?}\n{}\n{:?}", self, b, other);
// b
// }
// (&Trie::Root(ref md, ref t), &Trie::Root(ref md_other, ref t_other)) => {
// let b = md == md_other && t == t_other;
// // println!("{:?}\n{}\n{:?}", t, b, t_other);
// b
// }
// (&Trie::Name(ref nm, ref t), &Trie::Name(ref nm_other, ref t_other)) => {
// let b = nm == nm_other && t == t_other;
// // println!("{:?}\n{}\n{:?}", t, b, t_other);
// b
// }
// (&Trie::Art(ref a), &Trie::Art(ref a_other)) => {
// let b = a == a_other;
// // println!("{:?}\n{}\n{:?}", a, b, a_other);
// b
// }
// (t, t_other) => {
// // println!("{:?}\n!=\n{:?}", t, t_other);
// false
// }
// }
// }
// }
// impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> Eq for Trie<X> {}
/// Constructors for tries (pre-2018 anonymous trait parameters).
pub trait TrieIntro<X>: Debug + Hash + PartialEq + Eq + Clone + 'static {
    /// Empty subtrie at a bitstring path.
    fn nil(BS) -> Self;
    /// Leaf holding one element.
    fn leaf(BS, X) -> Self;
    /// Interior node from two subtries.
    fn bin(BS, Self, Self) -> Self;
    /// Root node with metadata.
    fn root(Meta, Self) -> Self;
    // requisite "adaptonic" constructors: `name` and `art`:
    fn name(Name, Self) -> Self;
    fn art(Art<Self>) -> Self;
    /// Empty trie (root + nil) with the given metadata.
    fn empty(Meta) -> Self;
    /// One-element trie.
    fn singleton(Meta, Name, X) -> Self;
    /// Functionally insert an element into an existing trie.
    fn extend(Name, Self, X) -> Self;
}
/// Eliminators (pattern-matching interface) for tries.
pub trait TrieElim<X>: Debug + Hash + PartialEq + Eq + Clone + 'static {
    /// Looks up an element, steering left/right by the low bits of `i64`.
    fn find(&Self, &X, i64) -> Option<X>;
    fn is_empty(&Self) -> bool;
    /// Splits a leaf into a `Bin` with the leaf pushed one level down.
    fn split_atomic(Self) -> Self;
    /// Case analysis consuming the trie; one closure per constructor.
    fn elim<Res, NilC, LeafC, BinC, RootC, NameC>(Self, NilC, LeafC, BinC, RootC, NameC) -> Res
        where NilC: FnOnce(BS) -> Res,
              LeafC: FnOnce(BS, X) -> Res,
              BinC: FnOnce(BS, Self, Self) -> Res,
              RootC: FnOnce(Meta, Self) -> Res,
              NameC: FnOnce(Name, Self) -> Res;
    /// Like `elim`, but threads an extra accumulator argument through.
    fn elim_arg<Arg, Res, NilC, LeafC, BinC, RootC, NameC>(Self,
                                                           Arg,
                                                           NilC,
                                                           LeafC,
                                                           BinC,
                                                           RootC,
                                                           NameC)
                                                           -> Res
        where NilC: FnOnce(BS, Arg) -> Res,
              LeafC: FnOnce(BS, X, Arg) -> Res,
              BinC: FnOnce(BS, Self, Self, Arg) -> Res,
              RootC: FnOnce(Meta, Self, Arg) -> Res,
              NameC: FnOnce(Name, Self, Arg) -> Res;
    /// Like `elim`, but by reference (does not consume the trie).
    fn elim_ref<Res, NilC, LeafC, BinC, RootC, NameC>(&Self,
                                                      NilC,
                                                      LeafC,
                                                      BinC,
                                                      RootC,
                                                      NameC)
                                                      -> Res
        where NilC: FnOnce(&BS) -> Res,
              LeafC: FnOnce(&BS, &X) -> Res,
              BinC: FnOnce(&BS, &Self, &Self) -> Res,
              RootC: FnOnce(&Meta, &Self) -> Res,
              NameC: FnOnce(&Name, &Self) -> Res;
}
impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> Trie<X> {
    /// Recursive insertion worker: places `elt` in `trie`, steering
    /// left/right by the successive low bits of `hash`, while `bs`
    /// tracks the bitstring path built so far.
    fn mfn(nm: Name, meta: Meta, trie: Self, bs: BS, elt: X, hash: u64) -> Self {
        match trie {
            // Above `min_depth`: keep expanding Nil into Bin nodes so
            // the trie always reaches the minimum depth.
            Trie::Nil(_) if BS::length(bs) < meta.min_depth => {
                let h_ = hash >> 1;
                let bs0 = BS::prepend(0, bs);
                let bs1 = BS::prepend(1, bs);
                let mt0 = Self::nil(bs0);
                let mt1 = Self::nil(bs1);
                // The low hash bit picks the branch; the remaining
                // bits steer the recursive call.
                if hash % 2 == 0 {
                    Self::bin(bs, Self::mfn(nm, meta, mt0, bs0, elt, h_), mt1)
                } else {
                    Self::bin(bs, mt0, Self::mfn(nm, meta, mt1, bs1, elt, h_))
                }
            }
            // Deep enough: an empty slot simply becomes a leaf.
            Trie::Nil(_) => Trie::Leaf(bs, elt),
            Trie::Leaf(_, e) => {
                let depth = BS::length(bs);
                // At maximum depth (or for a duplicate element) the
                // existing element wins; otherwise split the leaf and
                // retry the insertion one level down.
                if depth >= BS::MAX_LEN || e == elt {
                    Self::leaf(bs, e)
                } else if depth < BS::MAX_LEN {
                    Self::mfn(nm,
                              meta,
                              Self::split_atomic(Self::leaf(bs, e)),
                              bs,
                              elt,
                              hash)
                } else {
                    // NOTE(review): unreachable — the two guards above
                    // cover all depths. Kept for the original's
                    // defensive structure.
                    panic!("Bad value found in nadd:\nLeaf(bs, e)\n{:?}",
                           Self::leaf(bs, e));
                }
            }
            // Recurse into the branch selected by the low hash bit.
            Trie::Bin(bs, left, right) => {
                let h_ = hash >> 1;
                if hash % 2 == 0 {
                    let l = Self::mfn(nm, meta, *left, BS::prepend(0, bs), elt, h_);
                    Self::bin(bs, l, *right)
                } else {
                    let r = Self::mfn(nm, meta, *right, BS::prepend(1, bs), elt, h_);
                    Self::bin(bs, *left, r)
                }
            }
            // Force through Name/Art wrappers and continue.
            Trie::Name(_, box Trie::Art(a)) => Self::mfn(nm, meta, force(&a), bs, elt, hash),
            t => panic!("Bad value found in nadd:\n{:?}\n", t),
        }
    }

    /// Insertion entry point used by `extend`: unwraps the Name/Art
    /// shell around the root, runs `mfn` on the payload with the
    /// element's hash, and rebuilds a named, articulated root.
    fn root_mfn(_: Name, nm: Name, trie: Self, elt: X) -> Self {
        match trie {
            Trie::Name(_, box Trie::Art(a)) => {
                match force(&a) {
                    Trie::Root(meta, t) => {
                        let (nm, nm_) = name_fork(nm);
                        let mut hasher = DefaultHasher::new();
                        elt.hash(&mut hasher);
                        // Start from the empty bitstring at the root.
                        let a = Self::mfn(nm_,
                                          meta.clone(),
                                          *t,
                                          BS {
                                              length: 0,
                                              value: 0,
                                          },
                                          elt,
                                          hasher.finish());
                        Self::root(meta, Self::name(nm, Self::art(put(a))))
                    }
                    // Nested Name/Art shells: peel one and retry.
                    t @ Trie::Name(_, box Trie::Art(_)) => Self::root_mfn(nm.clone(), nm, t, elt),
                    t => panic!("Non-root node entry to `Trie.extend': {:?}", t),
                }
            }
            _ => panic!("None-name node at entry to `Trie.extend'"),
        }
    }
}
impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> TrieIntro<X> for Trie<X> {
    fn nil(bs: BS) -> Self {
        Trie::Nil(bs)
    }
    fn leaf(bs: BS, x: X) -> Self {
        Trie::Leaf(bs, x)
    }
    fn bin(bs: BS, l: Self, r: Self) -> Self {
        Trie::Bin(bs, Box::new(l), Box::new(r))
    }
    fn root(meta: Meta, trie: Self) -> Self {
        Trie::Root(meta, Box::new(trie))
    }
    fn name(nm: Name, trie: Self) -> Self {
        Trie::Name(nm, Box::new(trie))
    }
    fn art(art: Art<Self>) -> Self {
        Trie::Art(art)
    }

    /// Builds an empty trie: a named, articulated `Root` wrapping a
    /// named, articulated `Nil`. An out-of-range `min_depth` is only
    /// warned about, then clamped to `BS::MAX_LEN`.
    fn empty(meta: Meta) -> Self {
        if meta.min_depth > BS::MAX_LEN {
            println!("Cannot make Adapton.Trie with min_depth > {} (given {})",
                     BS::MAX_LEN,
                     meta.min_depth);
        }
        let min = min(meta.min_depth, BS::MAX_LEN);
        let meta = Meta { min_depth: min };
        let nm = name_of_str("empty");
        let (nm1, nm2) = name_fork(nm);
        // Empty bitstring path for the nil node.
        let mtbs = BS {
            length: 0,
            value: 0,
        };
        let nil_art = thunk!(nm2.clone() =>> Self::nil, bs:mtbs);
        let root_art = thunk!(nm1.clone() =>> Self::root, meta:meta,
                              trie:Self::name(nm2, Self::art(nil_art)));
        Self::name(nm1.clone(), Self::art(root_art))
    }

    fn singleton(meta: Meta, nm: Name, elt: X) -> Self {
        Self::extend(nm, TrieIntro::empty(meta), elt)
    }

    /// Inserts `elt`, re-wrapping the new root in a name and an
    /// eagerly-computed articulation (`put`).
    fn extend(nm: Name, trie: Self, elt: X) -> Self {
        let (nm, nm_) = name_fork(nm);
        // let a = Self::root_mfn(nm.clone(), nm_, trie, elt);
        let root_mfn_art = put(Self::root_mfn(nm.clone(), nm_, trie, elt));
        Self::name(nm, Self::art(root_mfn_art))
    }
}
impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> Hash for Trie<X> {
    /// Structural hash; children are folded into `state` before the
    /// node's own payload.
    fn hash<H: Hasher>(&self, state: &mut H) {
        match *self {
            Trie::Nil(bs) => bs.hash(state),
            Trie::Leaf(bs, ref x) => {
                x.hash(state);
                bs.hash(state)
            }
            Trie::Bin(bs, ref left, ref right) => {
                right.hash(state);
                left.hash(state);
                bs.hash(state)
            }
            Trie::Root(ref md, ref t) => {
                t.hash(state);
                // NOTE(review): `hash_seeded` builds a private hasher
                // and discards it, so the metadata never actually
                // affects `state` — two roots differing only in `Meta`
                // hash identically. Confirm whether that is intended.
                md.hash_seeded(state.finish());
            }
            Trie::Name(ref nm, ref t) => {
                t.hash(state);
                nm.hash(state)
            }
            Trie::Art(ref art_t) => art_t.hash(state),
        }
    }
}
impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> TrieElim<X> for Trie<X> {
    /// Descends the trie guided by the low bits of `i` (the element's
    /// hash), returning a clone of the stored element on an exact match.
    fn find(trie: &Self, elt: &X, i: i64) -> Option<X> {
        Self::elim_ref(trie,
                       |_| None,
                       |_, x| if *elt == *x { Some(x.clone()) } else { None },
                       |_, left, right| if i % 2 == 0 {
                           Self::find(left, elt, i >> 1)
                       } else {
                           Self::find(right, elt, i >> 1)
                       },
                       |_, t| Self::find(t, elt, i),
                       |_, t| Self::find(t, elt, i))
    }

    /// NOTE(review): reports emptiness of the *head* only — a `Bin`
    /// of two Nils is reported non-empty.
    fn is_empty(trie: &Self) -> bool {
        Self::elim_ref(trie,
                       |_| true,
                       |_, _| false,
                       |_, _, _| false,
                       |_, t| Self::is_empty(t),
                       |_, t| Self::is_empty(t))
    }

    /// Replaces a `Leaf` with a `Bin` whose occupied side is chosen by
    /// whether the extended bitstring is a suffix of the element's
    /// hash. `Nil` and `Bin` pass through unchanged; `Root`/`Name`/`Art`
    /// are invalid inputs here.
    fn split_atomic(trie: Self) -> Self {
        fn suffix(bs: BS, k: i64) -> bool {
            bs.value & k == bs.value
        }
        match trie {
            t @ Trie::Nil(_) |
            t @ Trie::Bin(_, _, _) => t,
            Trie::Leaf(bs, e) => {
                let bs0 = BS::prepend(0, bs);
                let bs1 = BS::prepend(1, bs);
                let mut hasher = DefaultHasher::new();
                e.hash(&mut hasher);
                if suffix(bs1, hasher.finish() as i64) {
                    Self::bin(bs, Self::nil(bs0), Self::leaf(bs1, e))
                } else {
                    Self::bin(bs, Self::leaf(bs0, e), Self::nil(bs1))
                }
            }
            _ => panic!("Bad split_atomic(t)"),
        }
    }

    /// Consuming case analysis; `Art` nodes are forced transparently.
    fn elim<Res, NilC, LeafC, BinC, RootC, NameC>(trie: Self,
                                                  nil: NilC,
                                                  leaf: LeafC,
                                                  bin: BinC,
                                                  root: RootC,
                                                  name: NameC)
                                                  -> Res
        where NilC: FnOnce(BS) -> Res,
              LeafC: FnOnce(BS, X) -> Res,
              BinC: FnOnce(BS, Self, Self) -> Res,
              RootC: FnOnce(Meta, Self) -> Res,
              NameC: FnOnce(Name, Self) -> Res
    {
        match trie {
            Trie::Nil(bs) => nil(bs),
            Trie::Leaf(bs, x) => leaf(bs, x),
            Trie::Bin(bs, l, r) => bin(bs, *l, *r),
            Trie::Name(nm, t) => name(nm, *t),
            Trie::Root(meta, t) => root(meta, *t),
            Trie::Art(art) => {
                let trie = force(&art);
                Self::elim(trie, nil, leaf, bin, root, name)
            }
        }
    }

    /// `elim` with an accumulator threaded through to each closure.
    fn elim_arg<Arg, Res, NilC, LeafC, BinC, RootC, NameC>(trie: Self,
                                                           arg: Arg,
                                                           nil: NilC,
                                                           leaf: LeafC,
                                                           bin: BinC,
                                                           root: RootC,
                                                           name: NameC)
                                                           -> Res
        where NilC: FnOnce(BS, Arg) -> Res,
              LeafC: FnOnce(BS, X, Arg) -> Res,
              BinC: FnOnce(BS, Self, Self, Arg) -> Res,
              RootC: FnOnce(Meta, Self, Arg) -> Res,
              NameC: FnOnce(Name, Self, Arg) -> Res
    {
        match trie {
            Trie::Nil(bs) => nil(bs, arg),
            Trie::Leaf(bs, x) => leaf(bs, x, arg),
            Trie::Bin(bs, l, r) => bin(bs, *l, *r, arg),
            Trie::Name(nm, t) => name(nm, *t, arg),
            Trie::Root(meta, t) => root(meta, *t, arg),
            Trie::Art(art) => {
                let trie = force(&art);
                Self::elim_arg(trie, arg, nil, leaf, bin, root, name)
            }
        }
    }

    /// Borrowing case analysis; `Art` nodes are forced, and the forced
    /// value is passed by reference.
    fn elim_ref<Res, NilC, LeafC, BinC, RootC, NameC>(trie: &Self,
                                                      nil: NilC,
                                                      leaf: LeafC,
                                                      bin: BinC,
                                                      root: RootC,
                                                      name: NameC)
                                                      -> Res
        where NilC: FnOnce(&BS) -> Res,
              LeafC: FnOnce(&BS, &X) -> Res,
              BinC: FnOnce(&BS, &Self, &Self) -> Res,
              RootC: FnOnce(&Meta, &Self) -> Res,
              NameC: FnOnce(&Name, &Self) -> Res
    {
        match *trie {
            Trie::Nil(ref bs) => nil(bs),
            Trie::Leaf(ref bs, ref x) => leaf(bs, x),
            Trie::Bin(ref bs, ref l, ref r) => bin(bs, &*l, &*r),
            Trie::Name(ref nm, ref t) => name(nm, &*t),
            Trie::Root(ref meta, ref t) => root(meta, &*t),
            Trie::Art(ref art) => {
                let trie = force(art);
                Self::elim_ref(&trie, nil, leaf, bin, root, name)
            }
        }
    }
}
/// Set constructors layered on top of a trie implementation.
pub trait SetIntro<X>: Debug + Hash + PartialEq + Eq + Clone + 'static {
    fn empty() -> Self;
    fn add(Self, e: X) -> Self;
    // fn remove(Self, e: &X) -> Self;
    // fn union(Self, Self) -> Self;
    // fn inter(Self, Self) -> Self;
    // fn diff(Self, Self) -> Self;
}
/// Set observers: membership test and fold over all elements.
pub trait SetElim<X>: Debug + Hash + PartialEq + Eq + Clone + 'static {
    fn mem(&Self, &X) -> bool;
    fn fold<Res, F>(Self, Res, Rc<F>) -> Res where F: Fn(X, Res) -> Res;
}
impl<X, Set: TrieIntro<X> + TrieElim<X>> SetIntro<X> for Set {
    /// Empty set backed by a trie with a fixed minimum depth of 1.
    fn empty() -> Self {
        let meta = Meta { min_depth: 1 };
        Self::empty(meta)
    }
    /// Inserts via `extend` under a unit (unnamed) Adapton name.
    fn add(set: Self, elt: X) -> Self {
        Self::extend(name_unit(), set, elt)
    }
}
impl<X: Hash, Set: TrieIntro<X> + TrieElim<X>> SetElim<X> for Set {
    /// Membership: hash the element and look it up along that path.
    fn mem(set: &Self, elt: &X) -> bool {
        let mut hasher = DefaultHasher::new();
        elt.hash(&mut hasher);
        match Set::find(set, elt, hasher.finish() as i64) {
            Some(_) => true,
            None => false,
        }
    }
    /// Right-to-left structural fold over every stored element.
    fn fold<Res, F>(set: Self, res: Res, f: Rc<F>) -> Res
        where F: Fn(X, Res) -> Res
    {
        Self::elim_arg(set,
                       res,
                       |_, arg| arg,
                       |_, x, arg| f(x, arg),
                       |_, left, right, arg| {
                           Self::fold(right, Self::fold(left, arg, f.clone()), f.clone())
                       },
                       |_, t, arg| Self::fold(t, arg, f.clone()),
                       |_, t, arg| Self::fold(t, arg, f.clone()))
    }
}
pub type Set<X> = Trie<X>;
Generate a trie from a list
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
use std::rc::Rc;
use std::cmp::min;
use adapton::collections::{ListIntro, ListElim, list_fold};
use adapton::bitstring::*;
use adapton::engine::*;
use macros::*;
/// Probablistically Balanced Trie
/// Rough implementation of probabilistic tries from OOPSLA 2015 paper.
///
/// See also: [Tries in OCaml](http://github.com/plum-umd/adapton.ocaml)
#[derive(Debug,PartialEq,Eq,Clone)]
pub enum Trie<X> {
    /// Empty subtrie at the path encoded by the bitstring.
    Nil(BS),
    /// A stored element, at the path encoded by the bitstring.
    Leaf(BS, X),
    /// Interior node: its bitstring plus left (0) and right (1) subtries.
    Bin(BS, Box<Trie<X>>, Box<Trie<X>>),
    /// Root node carrying trie-wide metadata.
    Root(Meta, Box<Trie<X>>),
    /// Adapton name annotation wrapping a subtrie.
    Name(Name, Box<Trie<X>>),
    /// Adapton articulation (incremental-computation cell) holding a subtrie.
    Art(Art<Trie<X>>),
}
/// Seed intended for element-placement hashing.
/// NOTE(review): not referenced anywhere in this part of the file —
/// confirm whether callers elsewhere use it.
pub const PLACEMENT_SEED: u64 = 42;
/// Metadata held by the root node.
#[derive(Debug,PartialEq,Eq,Hash,Clone)]
pub struct Meta {
    // Minimum depth to which the trie is always expanded before a
    // leaf may be placed (see `Trie::mfn`).
    pub min_depth: i64,
}
/// Seeded hashing of trie metadata.
/// (Uses pre-2018 anonymous trait parameters: the `u64` is the seed.)
pub trait MetaT {
    fn hash_seeded(&self, u64);
}
impl MetaT for Meta {
    /// Hashes `seed`, a domain-separation tag, and `min_depth` into a
    /// fresh hasher.
    ///
    /// NOTE(review): the hasher's result is never read or returned, so
    /// as written this call has no observable effect. It likely should
    /// return `hasher.finish()` — confirm the intended contract before
    /// relying on it.
    fn hash_seeded(&self, seed: u64) {
        let mut hasher = DefaultHasher::new();
        seed.hash(&mut hasher);
        "Adapton.Trie.Meta".hash(&mut hasher);
        self.min_depth.hash(&mut hasher);
    }
}
// impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> PartialEq for Trie<X> {
// fn eq(&self, other: &Trie<X>) -> bool {
// match (self, other) {
// (&Trie::Nil(ref bs_self), &Trie::Nil(ref bs_other)) => bs_self == bs_other,
// (&Trie::Leaf(ref bs_self, ref e_self), &Trie::Leaf(ref bs_other, ref e_other)) => {
// let bs_equal = bs_self == bs_other;
// let b = bs_equal && e_self == e_other;
// // println!("{:?}\n{}\n{:?}", self, b, other);
// b
// }
// (&Trie::Bin(ref bs, ref left, ref right),
// &Trie::Bin(ref bs_other, ref left_other, ref right_other)) => {
// let b = bs == bs_other && left == left_other && right == right_other;
// // println!("{:?}\n{}\n{:?}", self, b, other);
// b
// }
// (&Trie::Root(ref md, ref t), &Trie::Root(ref md_other, ref t_other)) => {
// let b = md == md_other && t == t_other;
// // println!("{:?}\n{}\n{:?}", t, b, t_other);
// b
// }
// (&Trie::Name(ref nm, ref t), &Trie::Name(ref nm_other, ref t_other)) => {
// let b = nm == nm_other && t == t_other;
// // println!("{:?}\n{}\n{:?}", t, b, t_other);
// b
// }
// (&Trie::Art(ref a), &Trie::Art(ref a_other)) => {
// let b = a == a_other;
// // println!("{:?}\n{}\n{:?}", a, b, a_other);
// b
// }
// (t, t_other) => {
// // println!("{:?}\n!=\n{:?}", t, t_other);
// false
// }
// }
// }
// }
// impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> Eq for Trie<X> {}
/// Constructors for tries (pre-2018 anonymous trait parameters).
pub trait TrieIntro<X>: Debug + Hash + PartialEq + Eq + Clone + 'static {
    /// Empty subtrie at a bitstring path.
    fn nil(BS) -> Self;
    /// Leaf holding one element.
    fn leaf(BS, X) -> Self;
    /// Interior node from two subtries.
    fn bin(BS, Self, Self) -> Self;
    /// Root node with metadata.
    fn root(Meta, Self) -> Self;
    // requisite "adaptonic" constructors: `name` and `art`:
    fn name(Name, Self) -> Self;
    fn art(Art<Self>) -> Self;
    /// Empty trie (root + nil) with the given metadata.
    fn empty(Meta) -> Self;
    /// One-element trie.
    fn singleton(Meta, Name, X) -> Self;
    /// Functionally insert an element into an existing trie.
    fn extend(Name, Self, X) -> Self;
}
pub trait TrieElim<X>: Debug + Hash + PartialEq + Eq + Clone + 'static {
fn find(&Self, &X, i64) -> Option<X>;
fn is_empty(&Self) -> bool;
fn split_atomic(Self) -> Self;
fn elim<Res, NilC, LeafC, BinC, RootC, NameC>(Self, NilC, LeafC, BinC, RootC, NameC) -> Res
where NilC: FnOnce(BS) -> Res,
LeafC: FnOnce(BS, X) -> Res,
BinC: FnOnce(BS, Self, Self) -> Res,
RootC: FnOnce(Meta, Self) -> Res,
NameC: FnOnce(Name, Self) -> Res;
fn elim_arg<Arg, Res, NilC, LeafC, BinC, RootC, NameC>(Self,
Arg,
NilC,
LeafC,
BinC,
RootC,
NameC)
-> Res
where NilC: FnOnce(BS, Arg) -> Res,
LeafC: FnOnce(BS, X, Arg) -> Res,
BinC: FnOnce(BS, Self, Self, Arg) -> Res,
RootC: FnOnce(Meta, Self, Arg) -> Res,
NameC: FnOnce(Name, Self, Arg) -> Res;
fn elim_ref<Res, NilC, LeafC, BinC, RootC, NameC>(&Self,
NilC,
LeafC,
BinC,
RootC,
NameC)
-> Res
where NilC: FnOnce(&BS) -> Res,
LeafC: FnOnce(&BS, &X) -> Res,
BinC: FnOnce(&BS, &Self, &Self) -> Res,
RootC: FnOnce(&Meta, &Self) -> Res,
NameC: FnOnce(&Name, &Self) -> Res;
}
impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> Trie<X> {
    /// Insertion worker: walks the trie following the bits of `hash`,
    /// splitting nodes until `elt` can be placed at or below `min_depth`.
    fn mfn(nm: Name, meta: Meta, trie: Self, bs: BS, elt: X, hash: u64) -> Self {
        match trie {
            // Still above the mandatory depth: materialize a Bin split and
            // recurse down the side selected by the low hash bit.
            Trie::Nil(_) if BS::length(bs) < meta.min_depth => {
                let h_ = hash >> 1;
                let bs0 = BS::prepend(0, bs);
                let bs1 = BS::prepend(1, bs);
                let mt0 = Self::nil(bs0);
                let mt1 = Self::nil(bs1);
                if hash % 2 == 0 {
                    Self::bin(bs, Self::mfn(nm, meta, mt0, bs0, elt, h_), mt1)
                } else {
                    Self::bin(bs, mt0, Self::mfn(nm, meta, mt1, bs1, elt, h_))
                }
            }
            // Deep enough: an empty slot becomes the leaf for `elt`.
            Trie::Nil(_) => Trie::Leaf(bs, elt),
            Trie::Leaf(_, e) => {
                let depth = BS::length(bs);
                // Keep the existing element when at maximum depth or when it
                // already equals `elt`; otherwise split the leaf and retry.
                if depth >= BS::MAX_LEN || e == elt {
                    Self::leaf(bs, e)
                } else if depth < BS::MAX_LEN {
                    Self::mfn(nm,
                              meta,
                              Self::split_atomic(Self::leaf(bs, e)),
                              bs,
                              elt,
                              hash)
                } else {
                    // NOTE(review): unreachable — the two branches above
                    // already cover depth >= MAX_LEN and depth < MAX_LEN.
                    panic!("Bad value found in nadd:\nLeaf(bs, e)\n{:?}",
                           Self::leaf(bs, e));
                }
            }
            // Interior node: recurse down the side picked by the low hash bit.
            Trie::Bin(bs, left, right) => {
                let h_ = hash >> 1;
                if hash % 2 == 0 {
                    let l = Self::mfn(nm, meta, *left, BS::prepend(0, bs), elt, h_);
                    Self::bin(bs, l, *right)
                } else {
                    let r = Self::mfn(nm, meta, *right, BS::prepend(1, bs), elt, h_);
                    Self::bin(bs, *left, r)
                }
            }
            // Transparently force articulation points hidden behind names.
            Trie::Name(_, box Trie::Art(a)) => Self::mfn(nm, meta, force(&a), bs, elt, hash),
            t => panic!("Bad value found in nadd:\n{:?}\n", t),
        }
    }

    /// Root-level insertion: forces the named articulation, hashes `elt`
    /// to determine its placement path, and rebuilds a named/memoized root.
    fn root_mfn(_: Name, nm: Name, trie: Self, elt: X) -> Self {
        match trie {
            Trie::Name(_, box Trie::Art(a)) => {
                match force(&a) {
                    Trie::Root(meta, t) => {
                        let (nm, nm_) = name_fork(nm);
                        let mut hasher = DefaultHasher::new();
                        elt.hash(&mut hasher);
                        let a = Self::mfn(nm_,
                                          meta.clone(),
                                          *t,
                                          BS {
                                              length: 0,
                                              value: 0,
                                          },
                                          elt,
                                          hasher.finish());
                        Self::root(meta, Self::name(nm, Self::art(put(a))))
                    }
                    // Chase nested Name/Art indirections until the Root.
                    t @ Trie::Name(_, box Trie::Art(_)) => Self::root_mfn(nm.clone(), nm, t, elt),
                    t => panic!("Non-root node entry to `Trie.extend': {:?}", t),
                }
            }
            _ => panic!("None-name node at entry to `Trie.extend'"),
        }
    }
}
impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> TrieIntro<X> for Trie<X> {
    fn nil(bs: BS) -> Self {
        Trie::Nil(bs)
    }
    fn leaf(bs: BS, x: X) -> Self {
        Trie::Leaf(bs, x)
    }
    fn bin(bs: BS, l: Self, r: Self) -> Self {
        Trie::Bin(bs, Box::new(l), Box::new(r))
    }
    fn root(meta: Meta, trie: Self) -> Self {
        Trie::Root(meta, Box::new(trie))
    }
    fn name(nm: Name, trie: Self) -> Self {
        Trie::Name(nm, Box::new(trie))
    }
    fn art(art: Art<Self>) -> Self {
        Trie::Art(art)
    }
    fn empty(meta: Meta) -> Self {
        // Warn, then clamp min_depth to the representable bit-string length.
        if meta.min_depth > BS::MAX_LEN {
            println!("Cannot make Adapton.Trie with min_depth > {} (given {})",
                     BS::MAX_LEN,
                     meta.min_depth);
        }
        let min = min(meta.min_depth, BS::MAX_LEN);
        let meta = Meta { min_depth: min };
        let nm = name_of_str("empty");
        let (nm1, nm2) = name_fork(nm);
        let mtbs = BS {
            length: 0,
            value: 0,
        };
        // Thunked construction so the empty trie is itself memoized.
        let nil_art = thunk!(nm2.clone() =>> Self::nil, bs:mtbs);
        let root_art = thunk!(nm1.clone() =>> Self::root, meta:meta,
                              trie:Self::name(nm2, Self::art(nil_art)));
        Self::name(nm1.clone(), Self::art(root_art))
    }
    fn singleton(meta: Meta, nm: Name, elt: X) -> Self {
        Self::extend(nm, TrieIntro::empty(meta), elt)
    }
    fn extend(nm: Name, trie: Self, elt: X) -> Self {
        // Fork the name: one half names the result, the other the memo node.
        let (nm, nm_) = name_fork(nm);
        // let a = Self::root_mfn(nm.clone(), nm_, trie, elt);
        let root_mfn_art = put(Self::root_mfn(nm.clone(), nm_, trie, elt));
        Self::name(nm, Self::art(root_mfn_art))
    }
}
impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> Hash for Trie<X> {
    // Structural hash; children are folded in before their node's own data.
    fn hash<H: Hasher>(&self, state: &mut H) {
        match *self {
            Trie::Nil(bs) => bs.hash(state),
            Trie::Leaf(bs, ref x) => {
                x.hash(state);
                bs.hash(state)
            }
            Trie::Bin(bs, ref left, ref right) => {
                right.hash(state);
                left.hash(state);
                bs.hash(state)
            }
            Trie::Root(ref md, ref t) => {
                t.hash(state);
                // NOTE(review): `hash_seeded` discards its result, so this
                // arm contributes only `t` to the final hash — confirm.
                md.hash_seeded(state.finish());
            }
            Trie::Name(ref nm, ref t) => {
                t.hash(state);
                nm.hash(state)
            }
            Trie::Art(ref art_t) => art_t.hash(state),
        }
    }
}
impl<X: Debug + Hash + PartialEq + Eq + Clone + 'static> TrieElim<X> for Trie<X> {
    /// Walks the trie, using the low bits of `i` to pick Bin branches, and
    /// returns the stored element equal to `elt`, if any.
    fn find(trie: &Self, elt: &X, i: i64) -> Option<X> {
        Self::elim_ref(trie,
                       |_| None,
                       |_, x| if *elt == *x { Some(x.clone()) } else { None },
                       |_, left, right| if i % 2 == 0 {
                           Self::find(left, elt, i >> 1)
                       } else {
                           Self::find(right, elt, i >> 1)
                       },
                       |_, t| Self::find(t, elt, i),
                       |_, t| Self::find(t, elt, i))
    }
    /// A trie is empty when no case reaches a Leaf; Bin counts as non-empty.
    fn is_empty(trie: &Self) -> bool {
        Self::elim_ref(trie,
                       |_| true,
                       |_, _| false,
                       |_, _, _| false,
                       |_, t| Self::is_empty(t),
                       |_, t| Self::is_empty(t))
    }
    /// Splits a Leaf into a Bin whose child (chosen by the element's hash)
    /// holds the element; Nil and Bin pass through unchanged.
    fn split_atomic(trie: Self) -> Self {
        // True when bs.value's bits are a subset of k's bits.
        fn suffix(bs: BS, k: i64) -> bool {
            bs.value & k == bs.value
        }
        match trie {
            t @ Trie::Nil(_) |
            t @ Trie::Bin(_, _, _) => t,
            Trie::Leaf(bs, e) => {
                let bs0 = BS::prepend(0, bs);
                let bs1 = BS::prepend(1, bs);
                let mut hasher = DefaultHasher::new();
                e.hash(&mut hasher);
                // Place the element on the side that matches its hash bits.
                if suffix(bs1, hasher.finish() as i64) {
                    Self::bin(bs, Self::nil(bs0), Self::leaf(bs1, e))
                } else {
                    Self::bin(bs, Self::leaf(bs0, e), Self::nil(bs1))
                }
            }
            _ => panic!("Bad split_atomic(t)"),
        }
    }
    // Owned case analysis; Art nodes are forced transparently.
    fn elim<Res, NilC, LeafC, BinC, RootC, NameC>(trie: Self,
                                                  nil: NilC,
                                                  leaf: LeafC,
                                                  bin: BinC,
                                                  root: RootC,
                                                  name: NameC)
                                                  -> Res
        where NilC: FnOnce(BS) -> Res,
              LeafC: FnOnce(BS, X) -> Res,
              BinC: FnOnce(BS, Self, Self) -> Res,
              RootC: FnOnce(Meta, Self) -> Res,
              NameC: FnOnce(Name, Self) -> Res
    {
        match trie {
            Trie::Nil(bs) => nil(bs),
            Trie::Leaf(bs, x) => leaf(bs, x),
            Trie::Bin(bs, l, r) => bin(bs, *l, *r),
            Trie::Name(nm, t) => name(nm, *t),
            Trie::Root(meta, t) => root(meta, *t),
            Trie::Art(art) => {
                let trie = force(&art);
                Self::elim(trie, nil, leaf, bin, root, name)
            }
        }
    }
    // As `elim`, threading an extra argument through to the chosen case.
    fn elim_arg<Arg, Res, NilC, LeafC, BinC, RootC, NameC>(trie: Self,
                                                           arg: Arg,
                                                           nil: NilC,
                                                           leaf: LeafC,
                                                           bin: BinC,
                                                           root: RootC,
                                                           name: NameC)
                                                           -> Res
        where NilC: FnOnce(BS, Arg) -> Res,
              LeafC: FnOnce(BS, X, Arg) -> Res,
              BinC: FnOnce(BS, Self, Self, Arg) -> Res,
              RootC: FnOnce(Meta, Self, Arg) -> Res,
              NameC: FnOnce(Name, Self, Arg) -> Res
    {
        match trie {
            Trie::Nil(bs) => nil(bs, arg),
            Trie::Leaf(bs, x) => leaf(bs, x, arg),
            Trie::Bin(bs, l, r) => bin(bs, *l, *r, arg),
            Trie::Name(nm, t) => name(nm, *t, arg),
            Trie::Root(meta, t) => root(meta, *t, arg),
            Trie::Art(art) => {
                let trie = force(&art);
                Self::elim_arg(trie, arg, nil, leaf, bin, root, name)
            }
        }
    }
    // Borrowed case analysis; forcing an Art yields an owned trie which is
    // then analyzed by reference.
    fn elim_ref<Res, NilC, LeafC, BinC, RootC, NameC>(trie: &Self,
                                                      nil: NilC,
                                                      leaf: LeafC,
                                                      bin: BinC,
                                                      root: RootC,
                                                      name: NameC)
                                                      -> Res
        where NilC: FnOnce(&BS) -> Res,
              LeafC: FnOnce(&BS, &X) -> Res,
              BinC: FnOnce(&BS, &Self, &Self) -> Res,
              RootC: FnOnce(&Meta, &Self) -> Res,
              NameC: FnOnce(&Name, &Self) -> Res
    {
        match *trie {
            Trie::Nil(ref bs) => nil(bs),
            Trie::Leaf(ref bs, ref x) => leaf(bs, x),
            Trie::Bin(ref bs, ref l, ref r) => bin(bs, &*l, &*r),
            Trie::Name(ref nm, ref t) => name(nm, &*t),
            Trie::Root(ref meta, ref t) => root(meta, &*t),
            Trie::Art(ref art) => {
                let trie = force(art);
                Self::elim_ref(&trie, nil, leaf, bin, root, name)
            }
        }
    }
}
/// Introduction forms for sets built on tries.
///
/// The `add` parameter was half-named (`Self` anonymous, `e` named); both
/// are now named for consistency with the rest of the module.
pub trait SetIntro<X>: Debug + Hash + PartialEq + Eq + Clone + 'static {
    /// The empty set.
    fn empty() -> Self;
    /// `set` with `e` added.
    fn add(set: Self, e: X) -> Self;
    // fn remove(Self, e: &X) -> Self;
    // fn union(Self, Self) -> Self;
    // fn inter(Self, Self) -> Self;
    // fn diff(Self, Self) -> Self;
}
/// Elimination forms for sets built on tries.
pub trait SetElim<X>: Debug + Hash + PartialEq + Eq + Clone + 'static {
    /// Membership test.
    fn mem(set: &Self, elt: &X) -> bool;
    /// Fold `f` over every element of `set`, starting from `res`.
    fn fold<Res, F>(set: Self, res: Res, f: Rc<F>) -> Res where F: Fn(X, Res) -> Res;
}
// Any trie with intro+elim forms is a set.
impl<X, Set: TrieIntro<X> + TrieElim<X>> SetIntro<X> for Set {
    fn empty() -> Self {
        // Sets use a trie with at least one Bin level below the root.
        let meta = Meta { min_depth: 1 };
        Self::empty(meta)
    }
    fn add(set: Self, elt: X) -> Self {
        Self::extend(name_unit(), set, elt)
    }
}
impl<X: Hash, Set: TrieIntro<X> + TrieElim<X>> SetElim<X> for Set {
    fn mem(set: &Self, elt: &X) -> bool {
        // Re-hash the element to recover the branch path used at insertion.
        let mut hasher = DefaultHasher::new();
        elt.hash(&mut hasher);
        match Set::find(set, elt, hasher.finish() as i64) {
            Some(_) => true,
            None => false,
        }
    }
    fn fold<Res, F>(set: Self, res: Res, f: Rc<F>) -> Res
        where F: Fn(X, Res) -> Res
    {
        Self::elim_arg(set,
                       res,
                       |_, arg| arg,
                       |_, x, arg| f(x, arg),
                       |_, left, right, arg| {
                           // Fold the left subtree first, then the right.
                           Self::fold(right, Self::fold(left, arg, f.clone()), f.clone())
                       },
                       |_, t, arg| Self::fold(t, arg, f.clone()),
                       |_, t, arg| Self::fold(t, arg, f.clone()))
    }
}
/// A set is a hash trie of its elements.
pub type Set<X> = Trie<X>;
/// Builds a trie from a list by folding `extend` over every element.
pub fn trie_of_list<X: Hash+Clone+Debug, T:TrieIntro<X>+'static, L:ListElim<X>+ListIntro<X>+'static>
    (list: L) -> T {
    list_fold(list, T::empty(Meta { min_depth: 1 }), Rc::new(|x, trie_acc| T::extend(name_unit(), trie_acc, x)))
}
|
//! Unix-specific types for signal handling.
//!
//! This module is only defined on Unix platforms and contains the primary
//! `Signal` type for receiving notifications of signals.
#![cfg(unix)]
pub extern crate libc;
extern crate mio;
extern crate tokio_uds;
use std::cell::RefCell;
use std::io::{self, Write, Read};
use std::mem;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Once, ONCE_INIT, Mutex};
use futures::stream::{Stream, Fuse};
use futures::{self, Future, IntoFuture, Complete, Oneshot, Poll, Async};
use self::libc::c_int;
use self::tokio_uds::UnixStream;
use tokio_core::io::IoFuture;
use tokio_core::reactor::{PollEvented, Handle};
use tokio_core::channel::{channel, Sender, Receiver};
// Guards one-time initialization of the global signal state.
static INIT: Once = ONCE_INIT;
// Set exactly once under INIT; intentionally leaked so the async-signal
// handler can read it at any time for the life of the process.
static mut GLOBAL_STATE: *mut GlobalState = 0 as *mut _;
/// An implementation of `Stream` for receiving a particular type of signal.
///
/// This structure implements the `Stream` trait and represents notifications
/// of the current process receiving a particular signal. The signal being
/// listened for is passed to `Signal::new`, and the same signal number is then
/// yielded as each element for the stream.
///
/// In general signal handling on Unix is a pretty tricky topic, and this
/// structure is no exception! There are some important limitations to keep in
/// mind when using `Signal` streams:
///
/// * While multiple event loops are supported, the *first* event loop to
/// register a signal handler is required to be active to ensure that signals
/// for other event loops are delivered. In other words, once an event loop
/// registers a signal, it's best to keep it around and running. This is
/// normally just a problem for tests, and the "workaround" is to spawn a
/// thread in the background at the beginning of the test suite which is
/// running an event loop (and listening for a signal).
///
/// * Signals handling in Unix already necessitates coalescing signals
/// together sometimes. This `Signal` stream is also no exception here in
/// that it will also coalesce signals. That is, even if the signal handler
/// for this process runs multiple times, the `Signal` stream may only return
/// one signal notification. Specifically, before `poll` is called, all
/// signal notifications are coalesced into one item returned from `poll`.
/// Once `poll` has been called, however, a further signal is guaranteed to
/// be yielded as an item.
///
/// * Signal handling in general is relatively inefficient. Although some
/// improvements are possible in this crate, it's recommended to not plan on
/// having millions of signal channels open.
///
/// * Currently the "driver task" to process incoming signals never exits.
///
/// If you've got any questions about this feel free to open an issue on the
/// repo, though, as I'd love to chat about this! In other words, I'd love to
/// alleviate some of these limitations if possible!
pub struct Signal {
    // Which signal number this stream yields.
    signum: c_int,
    // Readiness registration woken by the driver task on signal delivery.
    reg: PollEvented<MyRegistration>,
    // Dropped with this stream; lets the driver task prune dead listeners.
    _finished: Complete<()>,
}
// Process-global state shared with the async-signal handler.
struct GlobalState {
    // Write half of the self-pipe; the signal handler writes a byte here.
    write: UnixStream,
    // Channel for requesting new `Signal` streams from the driver task.
    tx: Mutex<Sender<Message>>,
    // Per-signal-number state for signal numbers 0..31.
    signals: [GlobalSignalState; 32],
}
struct GlobalSignalState {
    // Set by the signal handler, cleared by the driver when processed.
    ready: AtomicBool,
    // The handler installed before ours; we forward to it after running.
    prev: libc::sigaction,
}
// Requests sent to the driver task.
enum Message {
    // Register interest in a signal; completes with the new stream.
    NewSignal(c_int, Complete<io::Result<Signal>>),
}
// Long-lived task on the first event loop that owns signal bookkeeping.
struct DriverTask {
    handle: Handle,
    // Read half of the self-pipe written by the signal handler.
    read: UnixStream,
    rx: Fuse<Receiver<Message>>,
    // Per-signal registration state for signal numbers 0..31.
    signals: [SignalState; 32],
}
struct SignalState {
    // Whether our libc handler has been installed for this signal.
    registered: bool,
    // Listeners: (drop detector, readiness handle waking the stream).
    tasks: Vec<(RefCell<Oneshot<()>>, mio::SetReadiness)>,
}
// NOTE: SIGKILL (and SIGSTOP) cannot be caught or handled, so SIGKILL is
// deliberately not re-exported; registering a handler for it always fails.
// (Matches the later revision of this module, which removed the reexport.)
pub use self::libc::{SIGINT, SIGTERM, SIGUSR1, SIGUSR2};
pub use self::libc::{SIGHUP, SIGQUIT, SIGPIPE, SIGALRM, SIGTRAP};
impl Signal {
    /// Creates a new stream which will receive notifications when the current
    /// process receives the signal `signum`.
    ///
    /// This function will create a new stream which may be based on the
    /// event loop handle provided. This function returns a future which will
    /// then resolve to the signal stream, if successful.
    ///
    /// The `Signal` stream is an infinite stream which will receive
    /// notifications whenever a signal is received. More documentation can be
    /// found on `Signal` itself, but to reiterate:
    ///
    /// * Signals may be coalesced beyond what the kernel already does.
    /// * While multiple event loops are supported, the first event loop to
    ///   register a signal handler must be active to deliver signal
    ///   notifications
    /// * Once a signal handle is registered with the process the underlying
    ///   libc signal handler is never unregistered.
    ///
    /// A `Signal` stream can be created for a particular signal number
    /// multiple times. When a signal is received then all the associated
    /// channels will receive the signal notification.
    pub fn new(signum: c_int, handle: &Handle) -> IoFuture<Signal> {
        let mut init = None;
        INIT.call_once(|| {
            // First caller initializes the global driver on this loop.
            init = Some(global_init(handle));
        });
        let new_signal = futures::lazy(move || {
            let (tx, rx) = futures::oneshot();
            let msg = Message::NewSignal(signum, tx);
            // GLOBAL_STATE is set under INIT above, so it is non-null by
            // the time this lazy future runs.
            let res = unsafe {
                (*GLOBAL_STATE).tx.lock().unwrap().send(msg)
            };
            res.expect("failed to request a new signal stream, did the \
                        first event loop go away?");
            rx.then(|r| r.unwrap())
        });
        match init {
            // First caller: run global initialization before the request.
            Some(init) => init.into_future().and_then(|()| new_signal).boxed(),
            None => new_signal.boxed(),
        }
    }
}
impl Stream for Signal {
    type Item = c_int;
    type Error = io::Error;

    fn poll(&mut self) -> Poll<Option<c_int>, io::Error> {
        // Not ready: no signal has arrived since we last yielded one.
        if !self.reg.poll_read().is_ready() {
            return Ok(Async::NotReady)
        }
        // Re-arm read interest, then clear readiness so that coalesced
        // deliveries collapse into a single item until the next signal.
        self.reg.need_read();
        self.reg.get_ref()
                .inner.borrow()
                .as_ref().unwrap().1
                .set_readiness(mio::Ready::none())
                .expect("failed to set readiness");
        Ok(Async::Ready(Some(self.signum)))
    }
}
// One-time setup: creates the self-pipe and request channel, leaks the
// global state for the signal handler, and spawns the driver task on the
// first event loop.
fn global_init(handle: &Handle) -> io::Result<()> {
    let (tx, rx) = try!(channel(handle));
    let (read, write) = try!(UnixStream::pair(handle));
    unsafe {
        let state = Box::new(GlobalState {
            write: write,
            signals: {
                // [T; 32] without Copy/Default: build element by element.
                fn new() -> GlobalSignalState {
                    GlobalSignalState {
                        ready: AtomicBool::new(false),
                        prev: unsafe { mem::zeroed() },
                    }
                }
                [
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                ]
            },
            tx: Mutex::new(tx.clone()),
        });
        // Intentionally leaked: the async-signal handler may read this at
        // any time for the remainder of the process.
        GLOBAL_STATE = Box::into_raw(state);
        handle.spawn(DriverTask {
            handle: handle.clone(),
            rx: rx.fuse(),
            read: read,
            signals: {
                fn new() -> SignalState {
                    SignalState { registered: false, tasks: Vec::new() }
                }
                [
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                ]
            },
        });
        Ok(())
    }
}
impl Future for DriverTask {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        // Prune dropped listeners, service registration requests, then
        // dispatch any signals that arrived since the last poll.
        self.check_signal_drops();
        self.check_messages();
        self.check_signals();

        // TODO: when to finish this task?
        Ok(Async::NotReady)
    }
}
impl DriverTask {
    // Drop listeners whose `Signal` stream has gone away (their oneshot
    // completes with an error when the `Complete` half is dropped).
    fn check_signal_drops(&mut self) {
        for signal in self.signals.iter_mut() {
            signal.tasks.retain(|task| {
                !task.0.borrow_mut().poll().is_err()
            });
        }
    }

    // Service pending `NewSignal` requests: install the libc handler on
    // first use of a signal and hand back a registered `Signal` stream.
    fn check_messages(&mut self) {
        loop {
            // Acquire the next message
            let message = match self.rx.poll() {
                Ok(Async::Ready(Some(e))) => e,
                Ok(Async::Ready(None)) |
                Ok(Async::NotReady) => break,
                Err(e) => panic!("error on rx: {}", e),
            };
            let (sig, complete) = match message {
                Message::NewSignal(sig, complete) => (sig, complete),
            };

            // If the signal's too large, then we return an error, otherwise we
            // use this index to look at the signal slot.
            //
            // If the signal wasn't previously registered then we do so now.
            let signal = match self.signals.get_mut(sig as usize) {
                Some(signal) => signal,
                None => {
                    complete.complete(Err(io::Error::new(io::ErrorKind::Other,
                                                         "signum too large")));
                    continue
                }
            };
            if !signal.registered {
                unsafe {
                    let mut new: libc::sigaction = mem::zeroed();
                    new.sa_sigaction = handler as usize;
                    new.sa_flags = libc::SA_RESTART | libc::SA_SIGINFO;
                    let mut prev = mem::zeroed();
                    // Install our handler, remembering the previous one so
                    // `handler` can forward to it.
                    if libc::sigaction(sig, &new, &mut prev) != 0 {
                        complete.complete(Err(io::Error::last_os_error()));
                        continue
                    }
                    signal.registered = true;
                }
            }

            // Acquire the (registration, set_readiness) pair by... assuming
            // we're on the event loop (true because of the spawn above).
            let reg = MyRegistration { inner: RefCell::new(None) };
            let reg = match PollEvented::new(reg, &self.handle) {
                Ok(reg) => reg,
                Err(e) => {
                    complete.complete(Err(e));
                    continue
                }
            };

            // Create the `Signal` to pass back and then also keep a handle to
            // the `SetReadiness` for ourselves internally.
            let (tx, rx) = futures::oneshot();
            let ready = reg.get_ref().inner.borrow_mut().as_mut().unwrap().1.clone();
            complete.complete(Ok(Signal {
                signum: sig,
                reg: reg,
                _finished: tx,
            }));
            signal.tasks.push((RefCell::new(rx), ready));
        }
    }

    // Drain the self-pipe and wake every listener of each signal whose
    // `ready` flag was set by the handler since the last check.
    fn check_signals(&mut self) {
        // Drain all data from the pipe
        let mut buf = [0; 32];
        let mut any = false;
        loop {
            match self.read.read(&mut buf) {
                Ok(0) => {  // EOF == something happened
                    any = true;
                    break
                }
                Ok(..) => any = true,   // data read, but keep draining
                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => break,
                Err(e) => panic!("bad read: {}", e),
            }
        }

        // If nothing happened, no need to check the signals
        if !any {
            return
        }

        for (i, slot) in self.signals.iter().enumerate() {
            // No need to go farther if we haven't even registered a signal
            if !slot.registered {
                continue
            }

            // See if this signal actually happened since we last checked
            unsafe {
                if !(*GLOBAL_STATE).signals[i].ready.swap(false, Ordering::SeqCst) {
                    continue
                }
            }

            // Wake up all the tasks waiting on this signal
            for task in slot.tasks.iter() {
                task.1.set_readiness(mio::Ready::readable())
                      .expect("failed to set readiness");
            }
        }
    }
}
// The installed libc signal handler. Runs in async-signal context: it flips
// an atomic flag, writes one byte to the self-pipe to wake the driver, and
// then forwards to whatever handler was installed before ours.
extern fn handler(signum: c_int,
                  info: *mut libc::siginfo_t,
                  ptr: *mut libc::c_void) {
    type FnSigaction = extern fn(c_int, *mut libc::siginfo_t, *mut libc::c_void);
    type FnHandler = extern fn(c_int);

    unsafe {
        let state = match (*GLOBAL_STATE).signals.get(signum as usize) {
            Some(state) => state,
            None => return,
        };
        // Only the first delivery since the last drain writes to the pipe;
        // subsequent ones are coalesced via the `ready` flag.
        if !state.ready.swap(true, Ordering::SeqCst) {
            match (&(*GLOBAL_STATE).write).write(&[1]) {
                Ok(..) => {}
                Err(e) => {
                    // NOTE(review): panicking inside a signal handler is not
                    // async-signal-safe; consider aborting instead.
                    if e.kind() != io::ErrorKind::WouldBlock {
                        panic!("bad error on write fd: {}", e)
                    }
                }
            }
        }

        // Forward to the previously installed handler, if it was a real one.
        let fnptr = state.prev.sa_sigaction;
        if fnptr == 0 || fnptr == libc::SIG_DFL || fnptr == libc::SIG_IGN {
            return
        }
        if state.prev.sa_flags & libc::SA_SIGINFO == 0 {
            let action = mem::transmute::<usize, FnHandler>(fnptr);
            action(signum)
        } else {
            let action = mem::transmute::<usize, FnSigaction>(fnptr);
            action(signum, info, ptr)
        }
    }
}
// Manually driven mio registration; created empty and filled on `register`.
struct MyRegistration {
    inner: RefCell<Option<(mio::Registration, mio::SetReadiness)>>,
}
impl mio::Evented for MyRegistration {
    // First registration creates the (Registration, SetReadiness) pair.
    fn register(&self,
                poll: &mio::Poll,
                token: mio::Token,
                events: mio::Ready,
                opts: mio::PollOpt) -> io::Result<()> {
        let reg = mio::Registration::new(poll, token, events, opts);
        *self.inner.borrow_mut() = Some(reg);
        Ok(())
    }

    // Nothing to do: readiness is driven manually via SetReadiness.
    fn reregister(&self,
                  _poll: &mio::Poll,
                  _token: mio::Token,
                  _events: mio::Ready,
                  _opts: mio::PollOpt) -> io::Result<()> {
        Ok(())
    }

    fn deregister(&self, _poll: &mio::Poll) -> io::Result<()> {
        Ok(())
    }
}
signal: Remove SIGKILL reexport
//! Unix-specific types for signal handling.
//!
//! This module is only defined on Unix platforms and contains the primary
//! `Signal` type for receiving notifications of signals.
#![cfg(unix)]
pub extern crate libc;
extern crate mio;
extern crate tokio_uds;
use std::cell::RefCell;
use std::io::{self, Write, Read};
use std::mem;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Once, ONCE_INIT, Mutex};
use futures::stream::{Stream, Fuse};
use futures::{self, Future, IntoFuture, Complete, Oneshot, Poll, Async};
use self::libc::c_int;
use self::tokio_uds::UnixStream;
use tokio_core::io::IoFuture;
use tokio_core::reactor::{PollEvented, Handle};
use tokio_core::channel::{channel, Sender, Receiver};
// Guards one-time initialization of the global signal state.
static INIT: Once = ONCE_INIT;
// Set once under INIT; intentionally leaked for the life of the process.
static mut GLOBAL_STATE: *mut GlobalState = 0 as *mut _;
/// An implementation of `Stream` for receiving a particular type of signal.
///
/// This structure implements the `Stream` trait and represents notifications
/// of the current process receiving a particular signal. The signal being
/// listened for is passed to `Signal::new`, and the same signal number is then
/// yielded as each element for the stream.
///
/// In general signal handling on Unix is a pretty tricky topic, and this
/// structure is no exception! There are some important limitations to keep in
/// mind when using `Signal` streams:
///
/// * While multiple event loops are supported, the *first* event loop to
/// register a signal handler is required to be active to ensure that signals
/// for other event loops are delivered. In other words, once an event loop
/// registers a signal, it's best to keep it around and running. This is
/// normally just a problem for tests, and the "workaround" is to spawn a
/// thread in the background at the beginning of the test suite which is
/// running an event loop (and listening for a signal).
///
/// * Signals handling in Unix already necessitates coalescing signals
/// together sometimes. This `Signal` stream is also no exception here in
/// that it will also coalesce signals. That is, even if the signal handler
/// for this process runs multiple times, the `Signal` stream may only return
/// one signal notification. Specifically, before `poll` is called, all
/// signal notifications are coalesced into one item returned from `poll`.
/// Once `poll` has been called, however, a further signal is guaranteed to
/// be yielded as an item.
///
/// * Signal handling in general is relatively inefficient. Although some
/// improvements are possible in this crate, it's recommended to not plan on
/// having millions of signal channels open.
///
/// * Currently the "driver task" to process incoming signals never exits.
///
/// If you've got any questions about this feel free to open an issue on the
/// repo, though, as I'd love to chat about this! In other words, I'd love to
/// alleviate some of these limitations if possible!
pub struct Signal {
    // Which signal number this stream yields.
    signum: c_int,
    // Readiness registration woken by the driver task on signal delivery.
    reg: PollEvented<MyRegistration>,
    // Dropped with this stream; lets the driver prune dead listeners.
    _finished: Complete<()>,
}
// Process-global state shared with the async-signal handler.
struct GlobalState {
    // Write half of the self-pipe; the signal handler writes a byte here.
    write: UnixStream,
    // Channel for requesting new `Signal` streams from the driver task.
    tx: Mutex<Sender<Message>>,
    // Per-signal-number state for signal numbers 0..31.
    signals: [GlobalSignalState; 32],
}
struct GlobalSignalState {
    // Set by the signal handler, cleared by the driver when processed.
    ready: AtomicBool,
    // The handler installed before ours; forwarded to after we run.
    prev: libc::sigaction,
}
// Requests sent to the driver task.
enum Message {
    // Register interest in a signal; completes with the new stream.
    NewSignal(c_int, Complete<io::Result<Signal>>),
}
// Long-lived task on the first event loop that owns signal bookkeeping.
struct DriverTask {
    handle: Handle,
    // Read half of the self-pipe written by the signal handler.
    read: UnixStream,
    rx: Fuse<Receiver<Message>>,
    // Per-signal registration state for signal numbers 0..31.
    signals: [SignalState; 32],
}
struct SignalState {
    // Whether our libc handler has been installed for this signal.
    registered: bool,
    // Listeners: (drop detector, readiness handle waking the stream).
    tasks: Vec<(RefCell<Oneshot<()>>, mio::SetReadiness)>,
}
// Re-export the catchable signal numbers. SIGKILL/SIGSTOP are omitted
// because they cannot be caught or handled.
pub use self::libc::{SIGINT, SIGTERM, SIGUSR1, SIGUSR2};
pub use self::libc::{SIGHUP, SIGQUIT, SIGPIPE, SIGALRM, SIGTRAP};
impl Signal {
    /// Creates a new stream which will receive notifications when the current
    /// process receives the signal `signum`.
    ///
    /// This function will create a new stream which may be based on the
    /// event loop handle provided. This function returns a future which will
    /// then resolve to the signal stream, if successful.
    ///
    /// The `Signal` stream is an infinite stream which will receive
    /// notifications whenever a signal is received. More documentation can be
    /// found on `Signal` itself, but to reiterate:
    ///
    /// * Signals may be coalesced beyond what the kernel already does.
    /// * While multiple event loops are supported, the first event loop to
    ///   register a signal handler must be active to deliver signal
    ///   notifications
    /// * Once a signal handle is registered with the process the underlying
    ///   libc signal handler is never unregistered.
    ///
    /// A `Signal` stream can be created for a particular signal number
    /// multiple times. When a signal is received then all the associated
    /// channels will receive the signal notification.
    pub fn new(signum: c_int, handle: &Handle) -> IoFuture<Signal> {
        let mut init = None;
        INIT.call_once(|| {
            // First caller initializes the global driver on this loop.
            init = Some(global_init(handle));
        });
        let new_signal = futures::lazy(move || {
            let (tx, rx) = futures::oneshot();
            let msg = Message::NewSignal(signum, tx);
            // GLOBAL_STATE is set under INIT above, so it is non-null here.
            let res = unsafe {
                (*GLOBAL_STATE).tx.lock().unwrap().send(msg)
            };
            res.expect("failed to request a new signal stream, did the \
                        first event loop go away?");
            rx.then(|r| r.unwrap())
        });
        match init {
            // First caller: run global initialization before the request.
            Some(init) => init.into_future().and_then(|()| new_signal).boxed(),
            None => new_signal.boxed(),
        }
    }
}
impl Stream for Signal {
    type Item = c_int;
    type Error = io::Error;

    fn poll(&mut self) -> Poll<Option<c_int>, io::Error> {
        // Not ready: no signal has arrived since we last yielded one.
        if !self.reg.poll_read().is_ready() {
            return Ok(Async::NotReady)
        }
        // Re-arm read interest, then clear readiness so coalesced
        // deliveries collapse into a single item until the next signal.
        self.reg.need_read();
        self.reg.get_ref()
                .inner.borrow()
                .as_ref().unwrap().1
                .set_readiness(mio::Ready::none())
                .expect("failed to set readiness");
        Ok(Async::Ready(Some(self.signum)))
    }
}
// One-time setup: creates the self-pipe and request channel, leaks the
// global state for the signal handler, and spawns the driver task.
fn global_init(handle: &Handle) -> io::Result<()> {
    let (tx, rx) = try!(channel(handle));
    let (read, write) = try!(UnixStream::pair(handle));
    unsafe {
        let state = Box::new(GlobalState {
            write: write,
            signals: {
                // [T; 32] without Copy/Default: build element by element.
                fn new() -> GlobalSignalState {
                    GlobalSignalState {
                        ready: AtomicBool::new(false),
                        prev: unsafe { mem::zeroed() },
                    }
                }
                [
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                ]
            },
            tx: Mutex::new(tx.clone()),
        });
        // Intentionally leaked: the handler may read this at any time.
        GLOBAL_STATE = Box::into_raw(state);
        handle.spawn(DriverTask {
            handle: handle.clone(),
            rx: rx.fuse(),
            read: read,
            signals: {
                fn new() -> SignalState {
                    SignalState { registered: false, tasks: Vec::new() }
                }
                [
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                    new(), new(), new(), new(), new(), new(), new(), new(),
                ]
            },
        });
        Ok(())
    }
}
impl Future for DriverTask {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        // Prune dropped listeners, service registration requests, then
        // dispatch any signals that arrived since the last poll.
        self.check_signal_drops();
        self.check_messages();
        self.check_signals();

        // TODO: when to finish this task?
        Ok(Async::NotReady)
    }
}
impl DriverTask {
    // Drop listeners whose `Signal` stream has gone away (their oneshot
    // errors when the `Complete` half is dropped).
    fn check_signal_drops(&mut self) {
        for signal in self.signals.iter_mut() {
            signal.tasks.retain(|task| {
                !task.0.borrow_mut().poll().is_err()
            });
        }
    }

    // Service pending `NewSignal` requests, installing the libc handler on
    // first use of each signal.
    fn check_messages(&mut self) {
        loop {
            // Acquire the next message
            let message = match self.rx.poll() {
                Ok(Async::Ready(Some(e))) => e,
                Ok(Async::Ready(None)) |
                Ok(Async::NotReady) => break,
                Err(e) => panic!("error on rx: {}", e),
            };
            let (sig, complete) = match message {
                Message::NewSignal(sig, complete) => (sig, complete),
            };

            // If the signal's too large, then we return an error, otherwise we
            // use this index to look at the signal slot.
            //
            // If the signal wasn't previously registered then we do so now.
            let signal = match self.signals.get_mut(sig as usize) {
                Some(signal) => signal,
                None => {
                    complete.complete(Err(io::Error::new(io::ErrorKind::Other,
                                                         "signum too large")));
                    continue
                }
            };
            if !signal.registered {
                unsafe {
                    let mut new: libc::sigaction = mem::zeroed();
                    new.sa_sigaction = handler as usize;
                    new.sa_flags = libc::SA_RESTART | libc::SA_SIGINFO;
                    let mut prev = mem::zeroed();
                    // Remember the previous handler so ours can forward.
                    if libc::sigaction(sig, &new, &mut prev) != 0 {
                        complete.complete(Err(io::Error::last_os_error()));
                        continue
                    }
                    signal.registered = true;
                }
            }

            // Acquire the (registration, set_readiness) pair by... assuming
            // we're on the event loop (true because of the spawn above).
            let reg = MyRegistration { inner: RefCell::new(None) };
            let reg = match PollEvented::new(reg, &self.handle) {
                Ok(reg) => reg,
                Err(e) => {
                    complete.complete(Err(e));
                    continue
                }
            };

            // Create the `Signal` to pass back and then also keep a handle to
            // the `SetReadiness` for ourselves internally.
            let (tx, rx) = futures::oneshot();
            let ready = reg.get_ref().inner.borrow_mut().as_mut().unwrap().1.clone();
            complete.complete(Ok(Signal {
                signum: sig,
                reg: reg,
                _finished: tx,
            }));
            signal.tasks.push((RefCell::new(rx), ready));
        }
    }

    // Drain the self-pipe and wake every listener of each signal whose
    // `ready` flag was set by the handler since the last check.
    fn check_signals(&mut self) {
        // Drain all data from the pipe
        let mut buf = [0; 32];
        let mut any = false;
        loop {
            match self.read.read(&mut buf) {
                Ok(0) => {  // EOF == something happened
                    any = true;
                    break
                }
                Ok(..) => any = true,   // data read, but keep draining
                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => break,
                Err(e) => panic!("bad read: {}", e),
            }
        }

        // If nothing happened, no need to check the signals
        if !any {
            return
        }

        for (i, slot) in self.signals.iter().enumerate() {
            // No need to go farther if we haven't even registered a signal
            if !slot.registered {
                continue
            }

            // See if this signal actually happened since we last checked
            unsafe {
                if !(*GLOBAL_STATE).signals[i].ready.swap(false, Ordering::SeqCst) {
                    continue
                }
            }

            // Wake up all the tasks waiting on this signal
            for task in slot.tasks.iter() {
                task.1.set_readiness(mio::Ready::readable())
                      .expect("failed to set readiness");
            }
        }
    }
}
// The installed libc signal handler. Runs in async-signal context: flips an
// atomic flag, writes one byte to the self-pipe, then forwards to the
// previously installed handler.
extern fn handler(signum: c_int,
                  info: *mut libc::siginfo_t,
                  ptr: *mut libc::c_void) {
    type FnSigaction = extern fn(c_int, *mut libc::siginfo_t, *mut libc::c_void);
    type FnHandler = extern fn(c_int);

    unsafe {
        let state = match (*GLOBAL_STATE).signals.get(signum as usize) {
            Some(state) => state,
            None => return,
        };
        // Only the first delivery since the last drain writes to the pipe;
        // subsequent ones are coalesced via the `ready` flag.
        if !state.ready.swap(true, Ordering::SeqCst) {
            match (&(*GLOBAL_STATE).write).write(&[1]) {
                Ok(..) => {}
                Err(e) => {
                    // NOTE(review): panicking inside a signal handler is not
                    // async-signal-safe; consider aborting instead.
                    if e.kind() != io::ErrorKind::WouldBlock {
                        panic!("bad error on write fd: {}", e)
                    }
                }
            }
        }

        // Forward to the previously installed handler, if it was a real one.
        let fnptr = state.prev.sa_sigaction;
        if fnptr == 0 || fnptr == libc::SIG_DFL || fnptr == libc::SIG_IGN {
            return
        }
        if state.prev.sa_flags & libc::SA_SIGINFO == 0 {
            let action = mem::transmute::<usize, FnHandler>(fnptr);
            action(signum)
        } else {
            let action = mem::transmute::<usize, FnSigaction>(fnptr);
            action(signum, info, ptr)
        }
    }
}
// Manually driven mio registration; created empty and filled on `register`.
struct MyRegistration {
    inner: RefCell<Option<(mio::Registration, mio::SetReadiness)>>,
}
impl mio::Evented for MyRegistration {
    // First registration creates the (Registration, SetReadiness) pair.
    fn register(&self,
                poll: &mio::Poll,
                token: mio::Token,
                events: mio::Ready,
                opts: mio::PollOpt) -> io::Result<()> {
        let reg = mio::Registration::new(poll, token, events, opts);
        *self.inner.borrow_mut() = Some(reg);
        Ok(())
    }

    // Nothing to do: readiness is driven manually via SetReadiness.
    fn reregister(&self,
                  _poll: &mio::Poll,
                  _token: mio::Token,
                  _events: mio::Ready,
                  _opts: mio::PollOpt) -> io::Result<()> {
        Ok(())
    }

    fn deregister(&self, _poll: &mio::Poll) -> io::Result<()> {
        Ok(())
    }
}
|
use error::FerrumResult;
/// Recursively copies `source` into `dest`, visiting every entry under
/// `source` and copying only those for which `criteria` returns true.
///
/// Returns an error if `source` is not a directory or if any directory
/// creation / file copy fails.
pub fn copy_recursively<F>(source: &Path, dest: &Path, criteria: F) -> FerrumResult<()>
    where F : Fn(&Path) -> bool
{
    use std::old_io;
    use std::old_io::fs;
    use std::old_io::fs::PathExtensions;

    // Only directories can be walked.
    if !source.is_dir() {
        try!(Err(old_io::standard_error(old_io::InvalidInput)))
    }

    // `criteria` is a `Fn` closure called by shared reference and the walk
    // iterator is consumed by the loop, so neither binding needs `mut`.
    let contents = try!(fs::walk_dir(source));
    for entry in contents {
        debug!("ENTRY: {}", entry.display());
        if !criteria(&entry) { continue; }

        // Rebase the entry path from `source` onto `dest`.
        // TODO: remove this unwrap.
        let new_dest = &dest.join(entry.path_relative_from(source).unwrap());
        if entry.is_dir() {
            try!(fs::mkdir(new_dest, old_io::USER_RWX));
        } else {
            try!(fs::copy(&entry, new_dest));
        }
    }
    Ok(())
}
Remove unnecessary mut
use error::FerrumResult;
/// Recursively copy `source` into `dest`, copying only the entries for which
/// `criteria` returns `true`.
///
/// Returns an `InvalidInput` error when `source` is not a directory; I/O
/// errors from walking, mkdir, or copy are propagated.
// Fix: `contents` is moved into the for-loop and never mutated directly,
// so the `mut` binding was unnecessary.
pub fn copy_recursively<F>(source: &Path, dest: &Path, criteria: F) -> FerrumResult<()>
where F : Fn(&Path) -> bool
{
use std::old_io;
use std::old_io::fs;
use std::old_io::fs::PathExtensions;
if !source.is_dir() {
try!(Err(old_io::standard_error(old_io::InvalidInput)))
}
let contents = try!(fs::walk_dir(source));
for entry in contents {
debug!("ENTRY: {}", entry.display());
if !criteria(&entry) { continue; }
// TODO: remove this unwrap.
let new_dest = &dest.join(entry.path_relative_from(source).unwrap());
if entry.is_dir() {
// Recreate directories with user rwx permissions.
try!(fs::mkdir(new_dest, old_io::USER_RWX));
} else {
try!(fs::copy(&entry, new_dest));
}
}
Ok(())
}
|
// util
use crate::parse::*;
use std::cell::RefCell;
use std::rc::Rc;
thread_local! {
    // Process-wide label counter; starts at 1 so 0 can serve as "no label".
    static NLABEL: RefCell<usize> = RefCell::new(1);
}
/// Return the current label number and advance the counter.
pub fn bump_nlabel() -> usize {
    NLABEL.with(|cell| {
        let mut counter = cell.borrow_mut();
        let current = *counter;
        *counter += 1;
        current
    })
}
/// Return the first *byte* of `s` as a `char`, or `'\0'` for an empty string.
///
/// NOTE(review): for multi-byte UTF-8 input this yields the lead byte, not
/// the first scalar value — this matches the original behavior, which the
/// tokenizer presumably relies on for ASCII source; confirm before changing.
pub fn first_char(s: &str) -> char {
    // Idiomatic replacement for `s.len() == 0` + byte indexing.
    match s.as_bytes().first() {
        Some(&b) => char::from(b),
        None => '\0',
    }
}
/// Round `x` up to the next multiple of `align` (`align` must be a power of two).
pub fn roundup(x: i32, align: i32) -> i32 {
    let mask = align - 1;
    (x + mask) & !mask
}
/// Build a pointer type to `base`; pointers are 8 bytes / 8-aligned
/// (64-bit target).
pub fn ptr_to(base: Rc<RefCell<Type>>) -> Type {
let mut ty = alloc_type();
ty.ty = CType::PTR;
ty.size = 8;
ty.align = 8;
ty.ptr_to = Some(base);
return ty;
}
/// Build an array type of `len` elements of `base`; the array's size is
/// `len * sizeof(base)` and it inherits the element's alignment.
pub fn ary_of(base: Type, len: i32) -> Type {
let mut ty = alloc_type();
ty.ty = CType::ARY;
ty.size = base.size * len;
ty.align = base.align;
ty.ary_of = Some(Box::new(base));
ty.len = len;
return ty;
}
/// Build a primitive type whose alignment equals its size.
fn new_prim_ty(ty: CType, size: i32) -> Type {
let mut ret = alloc_type();
ret.ty = ty;
ret.size = size;
ret.align = size;
return ret;
}
/// `void` — zero-sized.
pub fn void_ty() -> Type {
return new_prim_ty(CType::VOID, 0);
}
/// `_Bool` — one byte.
pub fn bool_ty() -> Type {
return new_prim_ty(CType::BOOL, 1);
}
/// `char` — one byte.
pub fn char_ty() -> Type {
return new_prim_ty(CType::CHAR, 1);
}
/// `int` — four bytes.
pub fn int_ty() -> Type {
return new_prim_ty(CType::INT, 4);
}
/// Build a function type returning `base`.
/// NOTE(review): `ty.ty`, size and align keep whatever `alloc_type`
/// defaults to — confirm that is intended for function types.
pub fn func_ty(base: Type) -> Type {
let mut ty = alloc_type();
ty.returning = Some(Box::new(base));
return ty;
}
/// Structural type equality: pointers compare by pointee (recursively),
/// arrays by total size plus element type, structs and functions by full
/// `PartialEq` equality, and every other kind just by the `ty` tag.
pub fn same_type(x: Rc<RefCell<Type>>, y: Rc<RefCell<Type>>) -> bool {
// Different kinds can never be the same type.
if x.borrow().ty != y.borrow().ty {
return false;
}
let xx = x.borrow();
let yy = y.borrow();
match xx.ty {
CType::PTR => {
// Safe to unwrap: a PTR type always carries `ptr_to`
// (see `ptr_to` above).
let xptr = xx.ptr_to.clone().unwrap();
let yptr = yy.ptr_to.clone().unwrap();
same_type(xptr, yptr)
}
CType::ARY => {
// ARY types always carry `ary_of` (see `ary_of` above); element
// types are re-wrapped to recurse through the Rc-based signature.
let xary = xx.ary_of.clone().unwrap();
let yary = yy.ary_of.clone().unwrap();
xx.size == yy.size && same_type(Rc::new(RefCell::new(*xary)), Rc::new(RefCell::new(*yary)))
}
CType::STRUCT | CType::FUNC => *xx == *yy,
_ => true,
}
}
Simplify.
// util
use crate::parse::*;
use std::cell::RefCell;
use std::rc::Rc;
thread_local! {
    // Monotonic label counter shared across the compiler; 1-based.
    static NLABEL: RefCell<usize> = RefCell::new(1);
}
/// Hand out the next fresh label number.
pub fn bump_nlabel() -> usize {
    NLABEL.with(|slot| {
        let mut next = slot.borrow_mut();
        let issued = *next;
        *next += 1;
        issued
    })
}
/// Return the first *byte* of `s` as a `char`, or `'\0'` for an empty string.
///
/// NOTE(review): for multi-byte UTF-8 input this yields the lead byte, not
/// the first scalar value — matches the original byte-indexing behavior.
pub fn first_char(s: &str) -> char {
    // Idiomatic replacement for `s.len() == 0` + `(&s[0..1].as_bytes())[0]`.
    match s.as_bytes().first() {
        Some(&b) => char::from(b),
        None => '\0',
    }
}
/// Round `x` up to the next multiple of `align` (`align` must be a power of two).
pub fn roundup(x: i32, align: i32) -> i32 {
    let rem_mask = align - 1;
    (x + rem_mask) & !rem_mask
}
/// Build a pointer type to `base`; pointers are 8 bytes / 8-aligned
/// (64-bit target).
pub fn ptr_to(base: Rc<RefCell<Type>>) -> Type {
let mut ty = alloc_type();
ty.ty = CType::PTR;
ty.size = 8;
ty.align = 8;
ty.ptr_to = Some(base);
return ty;
}
/// Build an array type of `len` elements of `base`; the array's size is
/// `len * sizeof(base)` and it inherits the element's alignment.
pub fn ary_of(base: Type, len: i32) -> Type {
let mut ty = alloc_type();
ty.ty = CType::ARY;
ty.size = base.size * len;
ty.align = base.align;
ty.ary_of = Some(Box::new(base));
ty.len = len;
return ty;
}
/// Build a primitive type whose alignment equals its size.
fn new_ty(ty: CType, size: i32) -> Type {
let mut ret = alloc_type();
ret.ty = ty;
ret.size = size;
ret.align = size;
return ret;
}
/// `void` — zero-sized.
pub fn void_ty() -> Type {
return new_ty(CType::VOID, 0);
}
/// `_Bool` — one byte.
pub fn bool_ty() -> Type {
return new_ty(CType::BOOL, 1);
}
/// `char` — one byte.
pub fn char_ty() -> Type {
return new_ty(CType::CHAR, 1);
}
/// `int` — four bytes.
pub fn int_ty() -> Type {
return new_ty(CType::INT, 4);
}
/// Build a function type returning `base`.
/// NOTE(review): `ty.ty`, size and align keep whatever `alloc_type`
/// defaults to — confirm that is intended for function types.
pub fn func_ty(base: Type) -> Type {
let mut ty = alloc_type();
ty.returning = Some(Box::new(base));
return ty;
}
/// Structural type equality: pointers compare by pointee (recursively),
/// arrays by total size plus element type, structs and functions by full
/// `PartialEq` equality, and every other kind just by the `ty` tag.
pub fn same_type(x: Rc<RefCell<Type>>, y: Rc<RefCell<Type>>) -> bool {
// Different kinds can never be the same type.
if x.borrow().ty != y.borrow().ty {
return false;
}
let xx = x.borrow();
let yy = y.borrow();
match xx.ty {
CType::PTR => {
// Safe to unwrap: a PTR type always carries `ptr_to`.
let xptr = xx.ptr_to.clone().unwrap();
let yptr = yy.ptr_to.clone().unwrap();
same_type(xptr, yptr)
}
CType::ARY => {
// ARY types always carry `ary_of`; element types are re-wrapped
// to recurse through the Rc-based signature.
let xary = xx.ary_of.clone().unwrap();
let yary = yy.ary_of.clone().unwrap();
xx.size == yy.size && same_type(Rc::new(RefCell::new(*xary)), Rc::new(RefCell::new(*yary)))
}
CType::STRUCT | CType::FUNC => *xx == *yy,
_ => true,
}
}
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use std::{cmp::min, env};
use crate::{
cluster::Cluster,
experiments::{
CompatiblityTestParams, CpuFlamegraphParams, Experiment, ExperimentParam,
PerformanceBenchmarkParams, PerformanceBenchmarkThreeRegionSimulationParams,
RebootRandomValidatorsParams, ReconfigurationParams, RecoveryTimeParams,
StateSyncPerformanceParams, TwinValidatorsParams, ValidatorVersioningParams,
},
};
use anyhow::{format_err, Result};
/// An ordered collection of experiments to run against a cluster.
pub struct ExperimentSuite {
// Experiments are executed in insertion order.
pub experiments: Vec<Box<dyn Experiment>>,
}
impl ExperimentSuite {
/// Full pre-release suite: validator reboots, performance benchmarks,
/// state-sync measurement, twin validators, reconfiguration, and a CPU
/// flamegraph. The recovery-time experiment is opt-in via RECOVERY_EXP.
fn new_pre_release(cluster: &Cluster) -> Self {
let mut experiments: Vec<Box<dyn Experiment>> = vec![];
if env::var("RECOVERY_EXP").is_ok() {
experiments.push(Box::new(
RecoveryTimeParams {
num_accounts_to_mint: 100_000,
}
.build(cluster),
));
}
let count = min(3, cluster.validator_instances().len() / 3);
// Reboot different sets of `count` validators, 10 times
for _ in 0..10 {
let b = Box::new(RebootRandomValidatorsParams::new(count, 0).build(cluster));
experiments.push(b);
}
experiments.push(Box::new(
PerformanceBenchmarkParams::non_zero_gas_price(0, 1)
.enable_db_backup()
.build(cluster),
));
experiments.push(Box::new(
PerformanceBenchmarkParams::new_nodes_down(0)
.enable_db_backup()
.build(cluster),
));
experiments.push(Box::new(
PerformanceBenchmarkParams::new_nodes_down(10)
.enable_db_backup()
.build(cluster),
));
experiments.push(Box::new(
PerformanceBenchmarkThreeRegionSimulationParams {}.build(cluster),
));
experiments.push(Box::new(
PerformanceBenchmarkParams::new_fixed_tps(0, 10)
.enable_db_backup()
.build(cluster),
));
experiments.push(Box::new(StateSyncPerformanceParams::new(60).build(cluster)));
experiments.push(Box::new(TwinValidatorsParams { pair: 1 }.build(cluster)));
// This can't be run before any experiment that requires clean_data.
experiments.push(Box::new(
ReconfigurationParams {
count: 101,
emit_txn: false,
}
.build(cluster),
));
experiments.push(Box::new(
CpuFlamegraphParams { duration_secs: 60 }.build(cluster),
));
Self { experiments }
}
/// Twin-validator experiment plus a CPU flamegraph.
fn new_twin_suite(cluster: &Cluster) -> Self {
let experiments: Vec<Box<dyn Experiment>> = vec![
Box::new(TwinValidatorsParams { pair: 1 }.build(cluster)),
Box::new(CpuFlamegraphParams { duration_secs: 60 }.build(cluster)),
];
Self { experiments }
}
/// Performance-benchmark-only suite.
fn new_perf_suite(cluster: &Cluster) -> Self {
let experiments: Vec<Box<dyn Experiment>> = vec![
Box::new(PerformanceBenchmarkParams::new_nodes_down(0).build(cluster)),
Box::new(PerformanceBenchmarkParams::new_nodes_down(10).build(cluster)),
Box::new(PerformanceBenchmarkThreeRegionSimulationParams {}.build(cluster)),
Box::new(PerformanceBenchmarkParams::new_fixed_tps(0, 10).build(cluster)),
];
Self { experiments }
}
/// Minimal suite used to gate land-blocking CI.
fn new_land_blocking_suite(cluster: &Cluster) -> Self {
let experiments: Vec<Box<dyn Experiment>> = vec![Box::new(
PerformanceBenchmarkParams::new_nodes_down(0).build(cluster),
)];
Self { experiments }
}
/// Compatibility test (upgrading BATCH_SIZE validators, default half the
/// cluster) followed by the land-blocking suite. Requires UPDATE_TO_TAG.
fn new_land_blocking_compat_suite(cluster: &Cluster) -> Result<Self> {
let count: usize = match env::var("BATCH_SIZE") {
Ok(val) => val
.parse()
.map_err(|e| format_err!("Failed to parse BATCH_SIZE {}: {}", val, e))?,
Err(_) => cluster.validator_instances().len() / 2,
};
let updated_image_tag = env::var("UPDATE_TO_TAG")
.map_err(|_| format_err!("Expected environment variable UPDATE_TO_TAG"))?;
let mut experiments: Vec<Box<dyn Experiment>> = vec![Box::new(
CompatiblityTestParams {
count,
updated_image_tag,
}
.build(cluster),
)];
experiments.extend(Self::new_land_blocking_suite(cluster).experiments);
Ok(Self { experiments })
}
/// Validator-versioning suite. Same BATCH_SIZE/UPDATE_TO_TAG handling as
/// the compat suite.
fn new_versioning_suite(cluster: &Cluster) -> Result<Self> {
let count: usize = match env::var("BATCH_SIZE") {
Ok(val) => val
.parse()
.map_err(|e| format_err!("Failed to parse BATCH_SIZE {}: {}", val, e))?,
Err(_) => cluster.validator_instances().len() / 2,
};
let updated_image_tag = env::var("UPDATE_TO_TAG")
.map_err(|_| format_err!("Expected environment variable UPDATE_TO_TAG"))?;
let experiments: Vec<Box<dyn Experiment>> = vec![Box::new(
ValidatorVersioningParams {
count,
updated_image_tag,
}
.build(cluster),
)];
Ok(Self { experiments })
}
/// Performance benchmark mixed with invalid transactions.
fn new_invalid_tx_suite(cluster: &Cluster) -> Self {
let experiments: Vec<Box<dyn Experiment>> = vec![
Box::new(PerformanceBenchmarkParams::new_nodes_down(0).build(cluster)),
Box::new(PerformanceBenchmarkParams::mix_invalid_tx(0, 10).build(cluster)),
];
Self { experiments }
}
/// State-sync performance measurement only.
fn new_state_sync_suite(cluster: &Cluster) -> Self {
let experiments: Vec<Box<dyn Experiment>> =
vec![Box::new(StateSyncPerformanceParams::new(60).build(cluster))];
Self { experiments }
}
/// Look up a suite by its CLI name; errors on unknown names.
pub fn new_by_name(cluster: &Cluster, name: &str) -> Result<Self> {
match name {
"perf" => Ok(Self::new_perf_suite(cluster)),
"pre_release" => Ok(Self::new_pre_release(cluster)),
"twin" => Ok(Self::new_twin_suite(cluster)),
"land_blocking" => Ok(Self::new_land_blocking_suite(cluster)),
"land_blocking_compat" => Self::new_land_blocking_compat_suite(cluster),
"versioning" => Self::new_versioning_suite(cluster),
"invalid" => Ok(Self::new_invalid_tx_suite(cluster)),
"state_sync" => Ok(Self::new_state_sync_suite(cluster)),
other => Err(format_err!("Unknown suite: {}", other)),
}
}
}
[State Sync] Reduce the txn emitter time in the performance test.
Closes: #9853
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use std::{cmp::min, env};
use crate::{
cluster::Cluster,
experiments::{
CompatiblityTestParams, CpuFlamegraphParams, Experiment, ExperimentParam,
PerformanceBenchmarkParams, PerformanceBenchmarkThreeRegionSimulationParams,
RebootRandomValidatorsParams, ReconfigurationParams, RecoveryTimeParams,
StateSyncPerformanceParams, TwinValidatorsParams, ValidatorVersioningParams,
},
};
use anyhow::{format_err, Result};
/// An ordered collection of experiments to run against a cluster.
pub struct ExperimentSuite {
// Experiments are executed in insertion order.
pub experiments: Vec<Box<dyn Experiment>>,
}
impl ExperimentSuite {
/// Full pre-release suite: validator reboots, performance benchmarks,
/// state-sync measurement, twin validators, reconfiguration, and a CPU
/// flamegraph. The recovery-time experiment is opt-in via RECOVERY_EXP.
fn new_pre_release(cluster: &Cluster) -> Self {
let mut experiments: Vec<Box<dyn Experiment>> = vec![];
if env::var("RECOVERY_EXP").is_ok() {
experiments.push(Box::new(
RecoveryTimeParams {
num_accounts_to_mint: 100_000,
}
.build(cluster),
));
}
let count = min(3, cluster.validator_instances().len() / 3);
// Reboot different sets of `count` validators, 10 times
for _ in 0..10 {
let b = Box::new(RebootRandomValidatorsParams::new(count, 0).build(cluster));
experiments.push(b);
}
experiments.push(Box::new(
PerformanceBenchmarkParams::non_zero_gas_price(0, 1)
.enable_db_backup()
.build(cluster),
));
experiments.push(Box::new(
PerformanceBenchmarkParams::new_nodes_down(0)
.enable_db_backup()
.build(cluster),
));
experiments.push(Box::new(
PerformanceBenchmarkParams::new_nodes_down(10)
.enable_db_backup()
.build(cluster),
));
experiments.push(Box::new(
PerformanceBenchmarkThreeRegionSimulationParams {}.build(cluster),
));
experiments.push(Box::new(
PerformanceBenchmarkParams::new_fixed_tps(0, 10)
.enable_db_backup()
.build(cluster),
));
experiments.push(Box::new(StateSyncPerformanceParams::new(30).build(cluster)));
experiments.push(Box::new(TwinValidatorsParams { pair: 1 }.build(cluster)));
// This can't be run before any experiment that requires clean_data.
experiments.push(Box::new(
ReconfigurationParams {
count: 101,
emit_txn: false,
}
.build(cluster),
));
experiments.push(Box::new(
CpuFlamegraphParams { duration_secs: 60 }.build(cluster),
));
Self { experiments }
}
/// Twin-validator experiment plus a CPU flamegraph.
fn new_twin_suite(cluster: &Cluster) -> Self {
let experiments: Vec<Box<dyn Experiment>> = vec![
Box::new(TwinValidatorsParams { pair: 1 }.build(cluster)),
Box::new(CpuFlamegraphParams { duration_secs: 60 }.build(cluster)),
];
Self { experiments }
}
/// Performance-benchmark-only suite.
fn new_perf_suite(cluster: &Cluster) -> Self {
let experiments: Vec<Box<dyn Experiment>> = vec![
Box::new(PerformanceBenchmarkParams::new_nodes_down(0).build(cluster)),
Box::new(PerformanceBenchmarkParams::new_nodes_down(10).build(cluster)),
Box::new(PerformanceBenchmarkThreeRegionSimulationParams {}.build(cluster)),
Box::new(PerformanceBenchmarkParams::new_fixed_tps(0, 10).build(cluster)),
];
Self { experiments }
}
/// Minimal suite used to gate land-blocking CI.
fn new_land_blocking_suite(cluster: &Cluster) -> Self {
let experiments: Vec<Box<dyn Experiment>> = vec![Box::new(
PerformanceBenchmarkParams::new_nodes_down(0).build(cluster),
)];
Self { experiments }
}
/// Compatibility test (upgrading BATCH_SIZE validators, default half the
/// cluster) followed by the land-blocking suite. Requires UPDATE_TO_TAG.
fn new_land_blocking_compat_suite(cluster: &Cluster) -> Result<Self> {
let count: usize = match env::var("BATCH_SIZE") {
Ok(val) => val
.parse()
.map_err(|e| format_err!("Failed to parse BATCH_SIZE {}: {}", val, e))?,
Err(_) => cluster.validator_instances().len() / 2,
};
let updated_image_tag = env::var("UPDATE_TO_TAG")
.map_err(|_| format_err!("Expected environment variable UPDATE_TO_TAG"))?;
let mut experiments: Vec<Box<dyn Experiment>> = vec![Box::new(
CompatiblityTestParams {
count,
updated_image_tag,
}
.build(cluster),
)];
experiments.extend(Self::new_land_blocking_suite(cluster).experiments);
Ok(Self { experiments })
}
/// Validator-versioning suite. Same BATCH_SIZE/UPDATE_TO_TAG handling as
/// the compat suite.
fn new_versioning_suite(cluster: &Cluster) -> Result<Self> {
let count: usize = match env::var("BATCH_SIZE") {
Ok(val) => val
.parse()
.map_err(|e| format_err!("Failed to parse BATCH_SIZE {}: {}", val, e))?,
Err(_) => cluster.validator_instances().len() / 2,
};
let updated_image_tag = env::var("UPDATE_TO_TAG")
.map_err(|_| format_err!("Expected environment variable UPDATE_TO_TAG"))?;
let experiments: Vec<Box<dyn Experiment>> = vec![Box::new(
ValidatorVersioningParams {
count,
updated_image_tag,
}
.build(cluster),
)];
Ok(Self { experiments })
}
/// Performance benchmark mixed with invalid transactions.
fn new_invalid_tx_suite(cluster: &Cluster) -> Self {
let experiments: Vec<Box<dyn Experiment>> = vec![
Box::new(PerformanceBenchmarkParams::new_nodes_down(0).build(cluster)),
Box::new(PerformanceBenchmarkParams::mix_invalid_tx(0, 10).build(cluster)),
];
Self { experiments }
}
/// State-sync performance measurement only.
fn new_state_sync_suite(cluster: &Cluster) -> Self {
let experiments: Vec<Box<dyn Experiment>> =
vec![Box::new(StateSyncPerformanceParams::new(30).build(cluster))];
Self { experiments }
}
/// Look up a suite by its CLI name; errors on unknown names.
pub fn new_by_name(cluster: &Cluster, name: &str) -> Result<Self> {
match name {
"perf" => Ok(Self::new_perf_suite(cluster)),
"pre_release" => Ok(Self::new_pre_release(cluster)),
"twin" => Ok(Self::new_twin_suite(cluster)),
"land_blocking" => Ok(Self::new_land_blocking_suite(cluster)),
"land_blocking_compat" => Self::new_land_blocking_compat_suite(cluster),
"versioning" => Self::new_versioning_suite(cluster),
"invalid" => Ok(Self::new_invalid_tx_suite(cluster)),
"state_sync" => Ok(Self::new_state_sync_suite(cluster)),
other => Err(format_err!("Unknown suite: {}", other)),
}
}
}
|
use std::fs::remove_file;
use std::process::{Command, Output};
const RUSTC_COLOR_ARGS: &[&str] = &["--color", "always"];
/// Compile `filename` with `rustc --test` into `./temp`, keeping colored
/// diagnostics. Panics only when rustc itself could not be spawned.
pub fn compile_test_cmd(filename: &str) -> Output {
Command::new("rustc")
.args(&["--test", filename, "-o", "temp"])
.args(RUSTC_COLOR_ARGS)
.output()
.expect("failed to compile exercise")
}
/// Compile `filename` into `./temp`, keeping colored diagnostics.
pub fn compile_cmd(filename: &str) -> Output {
Command::new("rustc")
.args(&[filename, "-o", "temp"])
.args(RUSTC_COLOR_ARGS)
.output()
.expect("failed to compile exercise")
}
/// Run the binary produced by one of the compile commands above.
pub fn run_cmd() -> Output {
Command::new("./temp")
.output()
.expect("failed to run exercise")
}
/// Best-effort removal of the compiled exercise binary; it is fine if the
/// file was never produced.
pub fn clean() {
    match remove_file("temp") {
        Ok(()) => {}
        Err(_) => {}
    }
}
// `clean` must remove an existing temp binary and leave nothing behind.
#[test]
fn test_clean() {
std::fs::File::create("temp").unwrap();
clean();
assert!(!std::path::Path::new("temp").exists());
}
Add process id to temp file name
use std::fs::remove_file;
use std::process::{self, Command, Output};
const RUSTC_COLOR_ARGS: &[&str] = &["--color", "always"];
/// Name of the per-process scratch binary (e.g. `./temp_12345`), so that
/// concurrent runs don't clobber each other's compiled output.
fn temp_file() -> String {
    let mut name = String::from("./temp_");
    name.push_str(&process::id().to_string());
    name
}
/// Compile `filename` with `rustc --test` into the per-process temp binary,
/// keeping colored diagnostics.
pub fn compile_test_cmd(filename: &str) -> Output {
Command::new("rustc")
.args(&["--test", filename, "-o", &temp_file()])
.args(RUSTC_COLOR_ARGS)
.output()
.expect("failed to compile exercise")
}
/// Compile `filename` into the per-process temp binary, keeping colored
/// diagnostics.
pub fn compile_cmd(filename: &str) -> Output {
Command::new("rustc")
.args(&[filename, "-o", &temp_file()])
.args(RUSTC_COLOR_ARGS)
.output()
.expect("failed to compile exercise")
}
/// Run the per-process temp binary produced by a compile command.
pub fn run_cmd() -> Output {
Command::new(&temp_file())
.output()
.expect("failed to run exercise")
}
/// Best-effort removal of the per-process temp binary; a missing file is fine.
pub fn clean() {
let _ignored = remove_file(&temp_file());
}
// `clean` must remove an existing temp binary and leave nothing behind.
#[test]
fn test_clean() {
std::fs::File::create(&temp_file()).unwrap();
clean();
assert!(!std::path::Path::new(&temp_file()).exists());
}
|
// Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use crate::aptos_cli::setup_cli_test;
use aptos::{account::create::DEFAULT_FUNDED_COINS, test::CliTestFramework};
use aptos_config::{config::ApiConfig, utils::get_available_port};
use aptos_crypto::HashValue;
use aptos_rosetta::common::{BLOCKCHAIN, Y2K_SECS};
use aptos_rosetta::types::{
AccountBalanceResponse, Block, BlockIdentifier, NetworkIdentifier, NetworkRequest,
PartialBlockIdentifier,
};
use aptos_rosetta::{
client::RosettaClient,
common::native_coin,
types::{AccountBalanceRequest, BlockRequest},
ROSETTA_VERSION,
};
use aptos_types::account_address::AccountAddress;
use aptos_types::chain_id::ChainId;
use forge::{LocalSwarm, Node};
use std::{future::Future, time::Duration};
use tokio::task::JoinHandle;
/// Spin up a `num_nodes` local swarm plus a Rosetta API server bound to a
/// free localhost port, and pre-fund `num_accounts` CLI accounts via the
/// faucet. Returns the swarm, CLI harness, faucet task handle, and a
/// Rosetta client pointed at the new server.
pub async fn setup_test(
num_nodes: usize,
num_accounts: usize,
) -> (LocalSwarm, CliTestFramework, JoinHandle<()>, RosettaClient) {
let (swarm, cli, faucet) = setup_cli_test(num_nodes).await;
let validator = swarm.validators().next().unwrap();
// And the client
let rosetta_port = get_available_port();
let rosetta_socket_addr = format!("127.0.0.1:{}", rosetta_port);
let rosetta_url = format!("http://{}", rosetta_socket_addr.clone())
.parse()
.unwrap();
let rosetta_client = RosettaClient::new(rosetta_url);
let api_config = ApiConfig {
enabled: true,
address: rosetta_socket_addr.parse().unwrap(),
tls_cert_path: None,
tls_key_path: None,
content_length_limit: None,
};
// Start the server, proxying through the first validator's REST API.
let _rosetta = aptos_rosetta::bootstrap_async(
swarm.chain_id(),
api_config,
Some(aptos_rest_client::Client::new(
validator.rest_api_endpoint(),
)),
)
.await
.unwrap();
// Create accounts
for i in 0..num_accounts {
cli.create_account_with_faucet(i).await.unwrap();
}
(swarm, cli, faucet, rosetta_client)
}
/// Exercises /network/list, /network/options and /network/status against a
/// one-node swarm: exactly one network, matching chain id, sane version,
/// and a zero-hash genesis block identifier.
#[tokio::test]
async fn test_network() {
let (swarm, _, _, rosetta_client) = setup_test(1, 1).await;
let chain_id = swarm.chain_id();
// We only support one network, this network
let networks = rosetta_client.network_list().await.unwrap();
assert_eq!(1, networks.network_identifiers.len());
let network_id = networks.network_identifiers.first().unwrap();
assert_eq!(BLOCKCHAIN, network_id.blockchain);
assert_eq!(chain_id.to_string(), network_id.network);
let request = NetworkRequest {
network_identifier: NetworkIdentifier::from(chain_id),
};
let options = rosetta_client.network_options(&request).await.unwrap();
assert_eq!(ROSETTA_VERSION, options.version.rosetta_version);
// TODO: Check other options
let request = NetworkRequest {
network_identifier: NetworkIdentifier::from(chain_id),
};
let status = rosetta_client.network_status(&request).await.unwrap();
assert!(status.current_block_identifier.index > 0);
assert!(status.current_block_timestamp > Y2K_SECS);
// Genesis is reported as index 0 with the all-zero hash.
assert_eq!(
BlockIdentifier {
index: 0,
hash: HashValue::zero().to_hex()
},
status.genesis_block_identifier
);
assert_eq!(
Some(status.genesis_block_identifier),
status.oldest_block_identifier,
);
}
/// Checks that an account's funded balance becomes visible through
/// /account/balance within the first 100 ledger versions.
/// NOTE(review): setup_test(1, 1) already creates account 0; the extra
/// create_account_with_faucet(0) below looks redundant — confirm.
#[tokio::test]
async fn test_account_balance() {
let (swarm, cli, _faucet, rosetta_client) = setup_test(1, 1).await;
cli.create_account_with_faucet(0).await.unwrap();
let account = CliTestFramework::account_id(0);
let chain_id = swarm.chain_id();
// At time 0, there should be 0 balance
let response = get_balance(&rosetta_client, chain_id, account, 0)
.await
.unwrap();
assert_eq!(
response.block_identifier,
BlockIdentifier {
index: 0,
hash: HashValue::zero().to_hex(),
}
);
// At some time before version 100, the account should exist
let mut successful_version = None;
for i in 1..100 {
let response = get_balance(&rosetta_client, chain_id, account, i)
.await
.unwrap();
let amount = response.balances.first().unwrap();
if amount.value == DEFAULT_FUNDED_COINS.to_string() {
successful_version = Some(i);
break;
}
}
if successful_version.is_none() {
panic!("Failed to find account balance increase")
}
// TODO: Send money
// TODO: Fail request due to bad transaction
// TODO: Receive money
// TODO: Receive money by faucet
}
/// Query `account`'s native-coin balance at ledger `index` via the Rosetta
/// API, retrying while the server is still coming up.
async fn get_balance(
rosetta_client: &RosettaClient,
chain_id: ChainId,
account: AccountAddress,
index: u64,
) -> anyhow::Result<AccountBalanceResponse> {
let request = AccountBalanceRequest {
network_identifier: chain_id.into(),
account_identifier: account.into(),
// Pin the query to a specific ledger version; hash lookup unused here.
block_identifier: Some(PartialBlockIdentifier {
index: Some(index),
hash: None,
}),
currencies: Some(vec![native_coin()]),
};
try_until_ok(|| rosetta_client.account_balance(&request)).await
}
/// End-to-end checks of the /block endpoint: genesis by index and by hash,
/// idempotent responses, invariants of blocks 1 and 2, latest-block queries,
/// and that a newer block eventually appears.
#[tokio::test]
async fn test_block() {
let (swarm, _cli, _faucet, rosetta_client) = setup_test(1, 0).await;
let chain_id = swarm.chain_id();
let request_genesis = BlockRequest::by_index(chain_id, 0);
let by_version_response = try_until_ok(|| rosetta_client.block(&request_genesis))
.await
.unwrap();
let genesis_block = by_version_response.block.unwrap();
// Genesis txn should always have parent be same as block
assert_eq!(
genesis_block.block_identifier,
genesis_block.parent_block_identifier
);
assert_eq!(
HashValue::zero().to_hex(),
genesis_block.block_identifier.hash,
);
assert_eq!(0, genesis_block.block_identifier.index);
// Genesis timestamp is always Y2K
assert_eq!(Y2K_SECS, genesis_block.timestamp);
// There should only be the genesis transaction
assert_eq!(1, genesis_block.transactions.len());
let genesis_txn = genesis_block.transactions.first().unwrap();
// Version should match as 0
assert_eq!(
0,
genesis_txn.metadata.unwrap().version.0,
"Genesis version"
);
// There should be at least one transfer in genesis
assert!(!genesis_txn.operations.is_empty());
// Get genesis txn by hash
let request_genesis_by_hash =
BlockRequest::by_hash(chain_id, genesis_block.block_identifier.hash.clone());
let by_hash_response = rosetta_client
.block(&request_genesis_by_hash)
.await
.unwrap();
let genesis_block_by_hash = by_hash_response.block.unwrap();
// Both blocks should be the same
assert_eq!(genesis_block, genesis_block_by_hash);
// Responses should be idempotent
let response = rosetta_client.block(&request_genesis).await.unwrap();
assert_eq!(response.block.unwrap(), genesis_block_by_hash);
let response = rosetta_client
.block(&request_genesis_by_hash)
.await
.unwrap();
assert_eq!(response.block.unwrap(), genesis_block_by_hash);
// Block 1 is always a reconfig with exactly 1 txn
let block_1 = get_block(&rosetta_client, chain_id, 1).await;
assert_eq!(1, block_1.transactions.len());
// Block metadata won't have operations
assert!(block_1.transactions.first().unwrap().operations.is_empty());
assert!(block_1.timestamp > genesis_block.timestamp);
// Block 2 is always a standard block with 2 or more txns
let block_2 = get_block(&rosetta_client, chain_id, 2).await;
assert!(block_2.transactions.len() >= 2);
// Block metadata won't have operations
assert!(block_2.transactions.first().unwrap().operations.is_empty());
// StateCheckpoint won't have operations
assert!(block_2.transactions.last().unwrap().operations.is_empty());
assert!(block_2.timestamp >= block_1.timestamp);
// No input should give the latest version, not the genesis txn
let request_latest = BlockRequest::latest(chain_id);
let response = rosetta_client.block(&request_latest).await.unwrap();
let latest_block = response.block.unwrap();
// The latest block should always come after genesis
assert!(latest_block.block_identifier.index >= block_2.block_identifier.index);
assert!(latest_block.timestamp >= block_2.timestamp);
// The parent should always be exactly one version before
assert_eq!(
latest_block.parent_block_identifier.index,
latest_block.block_identifier.index - 1
);
// There should be at least 1 txn
assert!(latest_block.transactions.len() > 1);
// We should be able to query it again by hash or by version and it is the same
let request_latest_by_version =
BlockRequest::by_index(chain_id, latest_block.block_identifier.index);
let latest_block_by_version = rosetta_client
.block(&request_latest_by_version)
.await
.unwrap()
.block
.unwrap();
let request_latest_by_hash =
BlockRequest::by_hash(chain_id, latest_block.block_identifier.hash.clone());
let latest_block_by_hash = rosetta_client
.block(&request_latest_by_hash)
.await
.unwrap()
.block
.unwrap();
assert_eq!(latest_block, latest_block_by_version);
assert_eq!(latest_block_by_hash, latest_block_by_version);
// Wait until we get a new block processed
let network_request = NetworkRequest {
network_identifier: NetworkIdentifier::from(chain_id),
};
while rosetta_client
.network_status(&network_request)
.await
.unwrap()
.current_block_identifier
.index
== latest_block.block_identifier.index
{
tokio::time::sleep(Duration::from_micros(10)).await
}
// And querying latest again should get yet another transaction in the future
let newer_block = rosetta_client
.block(&request_latest)
.await
.unwrap()
.block
.unwrap();
assert!(newer_block.block_identifier.index >= latest_block.block_identifier.index);
assert!(newer_block.timestamp >= latest_block.timestamp);
}
/// Fetch block `index`, retrying while the Rosetta server finishes starting.
/// Panics if the block still can't be fetched after the retry budget.
async fn get_block(rosetta_client: &RosettaClient, chain_id: ChainId, index: u64) -> Block {
    // Fix: no need to clone the whole client — the retry closure can borrow
    // the reference for the duration of the call.
    let request = BlockRequest::by_index(chain_id, index);
    try_until_ok(|| rosetta_client.block(&request))
        .await
        .unwrap()
        .block
        .unwrap()
}
/// Retry `function` for up to ~2 seconds (10 attempts, 200ms apart),
/// returning the first `Ok`. This handles the fact that the Rosetta server
/// is still starting asynchronously.
async fn try_until_ok<F, Fut, T>(function: F) -> anyhow::Result<T>
where
    F: Fn() -> Fut,
    Fut: Future<Output = anyhow::Result<T>>,
{
    let mut result = Err(anyhow::Error::msg("Failed to get response"));
    // Fix: the original `1..10` made only 9 attempts (~1.8s), short of the
    // documented 2-second budget; `0..10` gives the full 10 attempts.
    for _ in 0..10 {
        result = function().await;
        if result.is_ok() {
            break;
        }
        tokio::time::sleep(Duration::from_millis(200)).await;
    }
    result
}
[aptos-rosetta] Refactor e2e tests
// Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use crate::aptos_cli::setup_cli_test;
use aptos::{account::create::DEFAULT_FUNDED_COINS, test::CliTestFramework};
use aptos_config::{config::ApiConfig, utils::get_available_port};
use aptos_crypto::HashValue;
use aptos_rosetta::common::{BLOCKCHAIN, Y2K_SECS};
use aptos_rosetta::types::{
AccountBalanceResponse, Block, BlockIdentifier, NetworkIdentifier, NetworkRequest,
PartialBlockIdentifier,
};
use aptos_rosetta::{
client::RosettaClient,
common::native_coin,
types::{AccountBalanceRequest, BlockRequest},
ROSETTA_VERSION,
};
use aptos_types::account_address::AccountAddress;
use aptos_types::chain_id::ChainId;
use forge::{LocalSwarm, Node};
use std::{future::Future, time::Duration};
use tokio::task::JoinHandle;
/// Spin up a `num_nodes` local swarm plus a Rosetta API server bound to a
/// free localhost port, and pre-fund `num_accounts` CLI accounts via the
/// faucet. Returns the swarm, CLI harness, faucet task handle, and a
/// Rosetta client pointed at the new server.
pub async fn setup_test(
num_nodes: usize,
num_accounts: usize,
) -> (LocalSwarm, CliTestFramework, JoinHandle<()>, RosettaClient) {
let (swarm, cli, faucet) = setup_cli_test(num_nodes).await;
let validator = swarm.validators().next().unwrap();
// And the client
let rosetta_port = get_available_port();
let rosetta_socket_addr = format!("127.0.0.1:{}", rosetta_port);
let rosetta_url = format!("http://{}", rosetta_socket_addr.clone())
.parse()
.unwrap();
let rosetta_client = RosettaClient::new(rosetta_url);
let api_config = ApiConfig {
enabled: true,
address: rosetta_socket_addr.parse().unwrap(),
tls_cert_path: None,
tls_key_path: None,
content_length_limit: None,
};
// Start the server, proxying through the first validator's REST API.
let _rosetta = aptos_rosetta::bootstrap_async(
swarm.chain_id(),
api_config,
Some(aptos_rest_client::Client::new(
validator.rest_api_endpoint(),
)),
)
.await
.unwrap();
// Create accounts
for i in 0..num_accounts {
cli.create_account_with_faucet(i).await.unwrap();
}
(swarm, cli, faucet, rosetta_client)
}
/// Exercises /network/list, /network/options and /network/status against a
/// one-node swarm: exactly one network, matching chain id, sane version,
/// and a zero-hash genesis block identifier.
#[tokio::test]
async fn test_network() {
let (swarm, _, _, rosetta_client) = setup_test(1, 1).await;
let chain_id = swarm.chain_id();
// We only support one network, this network
let networks = rosetta_client.network_list().await.unwrap();
assert_eq!(1, networks.network_identifiers.len());
let network_id = networks.network_identifiers.first().unwrap();
assert_eq!(BLOCKCHAIN, network_id.blockchain);
assert_eq!(chain_id.to_string(), network_id.network);
let request = NetworkRequest {
network_identifier: NetworkIdentifier::from(chain_id),
};
let options = rosetta_client.network_options(&request).await.unwrap();
assert_eq!(ROSETTA_VERSION, options.version.rosetta_version);
// TODO: Check other options
let request = NetworkRequest {
network_identifier: NetworkIdentifier::from(chain_id),
};
let status = rosetta_client.network_status(&request).await.unwrap();
assert!(status.current_block_identifier.index > 0);
assert!(status.current_block_timestamp > Y2K_SECS);
// Genesis is reported as index 0 with the all-zero hash.
assert_eq!(
BlockIdentifier {
index: 0,
hash: HashValue::zero().to_hex()
},
status.genesis_block_identifier
);
assert_eq!(
Some(status.genesis_block_identifier),
status.oldest_block_identifier,
);
}
/// Checks that an account's funded balance becomes visible through
/// /account/balance within the first 100 ledger versions.
/// NOTE(review): setup_test(1, 1) already creates account 0; the extra
/// create_account_with_faucet(0) below looks redundant — confirm.
#[tokio::test]
async fn test_account_balance() {
let (swarm, cli, _faucet, rosetta_client) = setup_test(1, 1).await;
cli.create_account_with_faucet(0).await.unwrap();
let account = CliTestFramework::account_id(0);
let chain_id = swarm.chain_id();
// At time 0, there should be 0 balance
let response = get_balance(&rosetta_client, chain_id, account, 0)
.await
.unwrap();
assert_eq!(
response.block_identifier,
BlockIdentifier {
index: 0,
hash: HashValue::zero().to_hex(),
}
);
// At some time before version 100, the account should exist
let mut successful_version = None;
for i in 1..100 {
let response = get_balance(&rosetta_client, chain_id, account, i)
.await
.unwrap();
let amount = response.balances.first().unwrap();
if amount.value == DEFAULT_FUNDED_COINS.to_string() {
successful_version = Some(i);
break;
}
}
if successful_version.is_none() {
panic!("Failed to find account balance increase")
}
// TODO: Send money
// TODO: Fail request due to bad transaction
// TODO: Receive money
// TODO: Receive money by faucet
}
/// Query `account`'s native-coin balance at ledger `index` via the Rosetta
/// API, retrying while the server is still coming up.
async fn get_balance(
rosetta_client: &RosettaClient,
chain_id: ChainId,
account: AccountAddress,
index: u64,
) -> anyhow::Result<AccountBalanceResponse> {
let request = AccountBalanceRequest {
network_identifier: chain_id.into(),
account_identifier: account.into(),
// Pin the query to a specific ledger version; hash lookup unused here.
block_identifier: Some(PartialBlockIdentifier {
index: Some(index),
hash: None,
}),
currencies: Some(vec![native_coin()]),
};
try_until_ok(|| rosetta_client.account_balance(&request)).await
}
/// End-to-end checks of the Rosetta `/block` endpoint: genesis invariants,
/// lookup by index vs. hash, idempotency, block 1 (reconfig) and block 2
/// (standard block) shape, latest-block semantics, and liveness (a newer
/// block eventually appears).
#[tokio::test]
async fn test_block() {
    let (swarm, _cli, _faucet, rosetta_client) = setup_test(1, 0).await;
    let chain_id = swarm.chain_id();
    // Genesis by version
    let request_genesis = BlockRequest::by_index(chain_id, 0);
    // First call retries while the service starts; later calls hit it directly.
    let genesis_block = try_until_ok(|| rosetta_client.block(&request_genesis))
        .await
        .unwrap().block.unwrap();
    assert_genesis_block(&genesis_block);
    // Get genesis txn by hash
    let request_genesis_by_hash =
        BlockRequest::by_hash(chain_id, genesis_block.block_identifier.hash.clone());
    let genesis_block_by_hash = rosetta_client
        .block(&request_genesis_by_hash)
        .await
        .unwrap().block.unwrap();
    // Both blocks should be the same
    assert_eq!(genesis_block, genesis_block_by_hash, "Genesis by hash or by index should be the same");
    // Responses should be idempotent
    let idempotent_block = rosetta_client.block(&request_genesis).await.unwrap().block.unwrap();
    assert_eq!(idempotent_block, genesis_block_by_hash, "Blocks should be idempotent");
    // Block 1 is always a reconfig with exactly 1 txn
    let block_1 = get_block(&rosetta_client, chain_id, 1).await;
    assert_eq!(1, block_1.transactions.len());
    // Block metadata won't have operations
    assert!(block_1.transactions.first().unwrap().operations.is_empty());
    assert!(block_1.timestamp > genesis_block.timestamp);
    // Block 2 is always a standard block with 2 or more txns
    let block_2 = get_block(&rosetta_client, chain_id, 2).await;
    assert!(block_2.transactions.len() >= 2);
    // Block metadata won't have operations
    assert!(block_2.transactions.first().unwrap().operations.is_empty());
    // StateCheckpoint won't have operations
    assert!(block_2.transactions.last().unwrap().operations.is_empty());
    assert!(block_2.timestamp >= block_1.timestamp);
    // No input should give the latest version, not the genesis txn
    let request_latest = BlockRequest::latest(chain_id);
    let response = rosetta_client.block(&request_latest).await.unwrap();
    let latest_block = response.block.unwrap();
    // The latest block should always come after genesis
    assert!(latest_block.block_identifier.index >= block_2.block_identifier.index);
    assert!(latest_block.timestamp >= block_2.timestamp);
    // The parent should always be exactly one version before
    assert_eq!(
        latest_block.parent_block_identifier.index,
        latest_block.block_identifier.index - 1
    );
    // There should be at least 1 txn
    assert!(latest_block.transactions.len() > 1);
    // We should be able to query it again by hash or by version and it is the same
    let request_latest_by_version =
        BlockRequest::by_index(chain_id, latest_block.block_identifier.index);
    let latest_block_by_version = rosetta_client
        .block(&request_latest_by_version)
        .await
        .unwrap()
        .block
        .unwrap();
    let request_latest_by_hash =
        BlockRequest::by_hash(chain_id, latest_block.block_identifier.hash.clone());
    let latest_block_by_hash = rosetta_client
        .block(&request_latest_by_hash)
        .await
        .unwrap()
        .block
        .unwrap();
    assert_eq!(latest_block, latest_block_by_version);
    assert_eq!(latest_block_by_hash, latest_block_by_version);
    // Wait until we get a new block processed
    let network_request = NetworkRequest {
        network_identifier: NetworkIdentifier::from(chain_id),
    };
    // Busy-wait (10µs naps) until the network status reports a newer block.
    while rosetta_client
        .network_status(&network_request)
        .await
        .unwrap()
        .current_block_identifier
        .index
        == latest_block.block_identifier.index
    {
        tokio::time::sleep(Duration::from_micros(10)).await
    }
    // And querying latest again should get yet another transaction in the future
    let newer_block = rosetta_client
        .block(&request_latest)
        .await
        .unwrap()
        .block
        .unwrap();
    assert!(newer_block.block_identifier.index >= latest_block.block_identifier.index);
    assert!(newer_block.timestamp >= latest_block.timestamp);
}
/// Asserts the invariants every Rosetta genesis block must satisfy: it is its
/// own parent, its hash is all zeros, its index is 0, its timestamp is Y2K,
/// and it contains exactly one transaction with a real hash and operations.
fn assert_genesis_block(block: &Block) {
    // BUG FIX: this previously referenced an undefined `genesis_block`
    // binding instead of the `block` parameter.
    assert_eq!(
        block.block_identifier, block.parent_block_identifier,
        "The genesis block is also it's own parent"
    );
    assert_eq!(
        HashValue::zero().to_hex(),
        block.block_identifier.hash,
        "The genesis block hash is always 0s"
    );
    assert_eq!(
        0, block.block_identifier.index,
        "The genesis block index is always 0"
    );
    assert_eq!(
        Y2K_SECS, block.timestamp,
        "The genesis timestamp should be Y2K seconds"
    );
    assert_eq!(
        1,
        block.transactions.len(),
        "The genesis block should be exactly 1 transaction"
    );
    let genesis_txn = block.transactions.first().unwrap();
    assert_eq!(
        0,
        genesis_txn.metadata.unwrap().version.0,
        "Genesis version should be 0"
    );
    assert_ne!(
        HashValue::zero().to_hex(),
        genesis_txn.transaction_identifier.hash,
        "Genesis should have a txn hash"
    );
    assert!(
        !genesis_txn.operations.is_empty(),
        "There should be at least one operation in genesis"
    );
}
/// Fetches the block at `index`, retrying while the service starts, and
/// unwraps the response (panics if the block is missing — test helper).
async fn get_block(rosetta_client: &RosettaClient, chain_id: ChainId, index: u64) -> Block {
    // Call through the shared reference directly (mirrors `get_balance`);
    // the previous `(*rosetta_client).clone()` was a redundant deep clone.
    let request = BlockRequest::by_index(chain_id, index);
    try_until_ok(|| rosetta_client.block(&request))
        .await
        .unwrap()
        .block
        .unwrap()
}
/// Retries `function` up to 9 times, sleeping 200ms after each failure
/// (~2 seconds total), returning the first `Ok` — or the last `Err`.
/// This handles the fact that the service is still starting asynchronously.
async fn try_until_ok<F, Fut, T>(function: F) -> anyhow::Result<T>
where
    F: Fn() -> Fut,
    Fut: Future<Output = anyhow::Result<T>>,
{
    let mut result = Err(anyhow::Error::msg("Failed to get response"));
    let mut remaining_attempts = 9;
    while remaining_attempts > 0 {
        result = function().await;
        if result.is_ok() {
            break;
        }
        tokio::time::sleep(Duration::from_millis(200)).await;
        remaining_attempts -= 1;
    }
    result
}
|
use std::fmt;
use parking_lot::RwLock;
use std::cell::RefCell;
use std::sync::Arc;
use std::sync::Weak;
use std::fs::File;
use std::io::prelude::*;
use std::io::Result;
//
use crate::core::editor::user_is_active;
use crate::core::mapped_file::MappedFile;
use crate::core::mapped_file::MappedFileEvent;
use crate::core::mapped_file::UpdateHierarchyOp;
use crate::core::mapped_file::NodeIndex;
use super::buffer::Buffer;
use super::buffer::OpenMode;
use super::bufferlog::BufferLog;
use super::bufferlog::BufferOperation;
use super::bufferlog::BufferOperationType;
//
/// Opaque document identifier (presumably an index into the editor's
/// document table — TODO confirm against the id allocator).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Id(pub usize);
/// Fluent builder used to configure and create a shared `Document`.
#[derive(Debug)]
pub struct DocumentBuilder {
    internal: bool,          // mark the document as internal (not user-facing)
    use_buffer_log: bool,    // enable the undo/redo operation log
    document_name: String,   // display name of the document
    file_name: String,       // backing file path; empty means in-memory
    mode: OpenMode,          // open mode (defaults to read-only)
}
/// Holds a weak back-reference to a document, apparently intended to forward
/// mapped-file events to it. NOTE(review): no impl is visible in this file;
/// the `_doc` field is currently unused — confirm whether this is dead code.
#[derive(Debug)]
struct DocumentMappedFileEventHandler<'a> {
    _doc: Weak<RwLock<Document<'a>>>,
}
/// Translates a low-level `MappedFileEvent` into the corresponding
/// `DocumentEvent` delivered to document subscribers (1:1 mapping).
fn mapped_file_event_to_document_event(evt: &MappedFileEvent) -> DocumentEvent {
    // `node_index` is a plain index, so matching by value is free.
    match *evt {
        MappedFileEvent::NodeChanged { node_index } => DocumentEvent::NodeChanged { node_index },
        MappedFileEvent::NodeAdded { node_index } => DocumentEvent::NodeAdded { node_index },
        MappedFileEvent::NodeRemoved { node_index } => DocumentEvent::NodeRemoved { node_index },
    }
}
/// Builder methods: each setter returns `&mut Self` for chaining.
impl DocumentBuilder {
    /// Creates a builder with defaults: non-internal, no buffer log,
    /// empty names, read-only mode.
    pub fn new() -> Self {
        Self {
            internal: false,
            use_buffer_log: false,
            document_name: String::new(),
            file_name: String::new(),
            mode: OpenMode::ReadOnly,
        }
    }
    /// Marks the document as internal (not user-facing).
    /// NOTE(review): this flag is currently not forwarded to `Document::new`
    /// by `finalize` — confirm whether that is intentional.
    pub fn internal(&mut self, flag: bool) -> &mut Self {
        self.internal = flag;
        self
    }
    /// Enables/disables the undo/redo buffer log.
    pub fn use_buffer_log(&mut self, flag: bool) -> &mut Self {
        self.use_buffer_log = flag;
        self
    }
    /// Sets the document's display name.
    pub fn document_name(&mut self, name: &str) -> &mut Self {
        self.document_name = name.to_string();
        self
    }
    /// Sets the backing file path (empty keeps the document in-memory).
    pub fn file_name(&mut self, name: &str) -> &mut Self {
        self.file_name = name.to_string();
        self
    }
    /// Sets the open mode.
    pub fn mode(&mut self, mode: OpenMode) -> &mut Self {
        self.mode = mode;
        self
    }
    /// Builds the `Document`; returns `None` if the buffer cannot be created.
    /// (Removed an unused lifetime parameter `<'a>` that was never referenced.)
    pub fn finalize(&self) -> Option<Arc<RwLock<Document<'static>>>> {
        Document::new(
            &self.document_name,
            &self.file_name,
            self.mode.clone(),
            self.use_buffer_log,
        )
    }
}
/// Snapshot of buffer bytes in `[start, end)` used to serve reads without
/// touching the underlying mapped file; only valid for the document
/// revision it was built at.
#[derive(Debug)]
pub struct DocumentReadCache {
    start: u64,      // first cached offset (inclusive)
    end: u64,        // one past the last cached offset
    data: Vec<u8>,   // cached bytes for [start, end)
    revision: usize, // document revision (nr_changes) the snapshot matches
}
impl DocumentReadCache {
    /// Creates an empty, invalid cache (start == end == 0).
    pub fn new() -> Self {
        DocumentReadCache {
            start: 0,
            end: 0,
            data: vec![],
            revision: 0,
        }
    }
    /// Returns true when both `min` and `max` fall inside the cached range.
    pub fn contains(&self, min: u64, max: u64) -> bool {
        // Equivalent to the original pair of range checks, written as one
        // boolean expression.
        self.start <= min && min <= self.end && self.start <= max && max <= self.end
    }
    /// Tries to serve a read from the cache: appends `nr_bytes` starting at
    /// `offset` to `data` and returns `Some(nr_bytes)` on a hit, or `None`
    /// on a miss (cache disabled, stale revision, empty, or out of range).
    pub fn read(
        &self,
        offset: u64,
        nr_bytes: usize,
        data: &mut Vec<u8>,
        doc_revision: usize,
    ) -> Option<usize> {
        if !crate::core::use_read_cache() {
            return None;
        }
        // no cache sync yet: the snapshot is only valid for its revision
        if self.revision != doc_revision {
            return None;
        }
        if self.start == self.end {
            return None;
        }
        if offset < self.start {
            return None;
        }
        if offset + nr_bytes as u64 > self.end {
            return None;
        }
        let idx = (offset - self.start) as usize;
        // Bulk copy instead of the original per-byte push loop.
        data.extend_from_slice(&self.data[idx..idx + nr_bytes]);
        Some(nr_bytes)
    }
}
/// Callback interface for observers of document events.
pub trait DocumentEventCb {
    // Invoked once per event with the emitting document.
    fn cb(&mut self, doc: &Document, event: &DocumentEvent);
}
/// Lifecycle and indexing events emitted by a `Document` to its subscribers.
/// Node events carry the index of the affected mapped-file tree node.
#[derive(Debug, Clone)]
pub enum DocumentEvent {
    DocumentAdded,
    DocumentOpened,
    DocumentClosed,
    DocumentRemoved,
    // Emitted once background indexing has covered the whole document.
    DocumentFullyIndexed,
    NodeAdded { node_index: usize },
    NodeChanged { node_index: usize },
    NodeRemoved { node_index: usize },
    NodeIndexed { node_index: usize },
}
/// Renders a `DocumentEvent` as a short human-readable label (for tracing).
fn document_event_to_string(evt: &DocumentEvent) -> String {
    match evt {
        DocumentEvent::DocumentAdded => String::from("Added"),
        DocumentEvent::DocumentOpened => String::from("Opened"),
        DocumentEvent::DocumentClosed => String::from("Closed"),
        DocumentEvent::DocumentRemoved => String::from("Removed"),
        DocumentEvent::DocumentFullyIndexed => String::from("FullyIndexed"),
        DocumentEvent::NodeAdded { node_index } => format!("NodeAdded idx: {}", node_index),
        DocumentEvent::NodeChanged { node_index } => format!("NodeChanged idx: {}", node_index),
        DocumentEvent::NodeRemoved { node_index } => format!("NodeRemoved idx: {}", node_index),
        DocumentEvent::NodeIndexed { node_index } => format!("NodeIndexed idx: {}", node_index),
    }
}
/// An open document: a buffer plus its read cache, undo/redo log, indexing
/// state, and event subscribers. Shared between threads as `Arc<RwLock<..>>`.
pub struct Document<'a> {
    pub id: Id,
    pub name: String,
    pub buffer: Buffer<'a>, // TODO(ceg): provide iterator apis ?
    cache: DocumentReadCache, // internal read cache, invalidated on mutation
    pub buffer_log: BufferLog, // undo/redo operation log
    pub use_buffer_log: bool, // when false, tag/undo/redo are disabled
    pub changed: bool, // true when the buffer differs from storage
    pub is_syncing: bool, // true while sync_to_storage is in progress
    pub abort_indexing: bool, // cooperative cancel flag for build_index
    pub indexed: bool, // true once the whole document has been indexed
    pub last_tag_time: std::time::Instant, // timestamp of the last Tag op
    pub subscribers: Vec<RefCell<Box<dyn DocumentEventCb>>>, // event observers
}
impl<'a> fmt::Debug for Document<'a> {
    /// Compact debug output: only the id and name (buffer content omitted).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("Document {}");
        builder.field("id", &self.id);
        builder.field("name", &self.name);
        builder.finish()
    }
}
// NB: doc MUST be wrapped in Arc<RwLock<XXX>>
// SAFETY(review): `Document` contains `RefCell` (subscribers), which is not
// Sync; these impls assume every cross-thread access goes through the outer
// Arc<RwLock<..>> — TODO confirm no bare &Document ever crosses threads.
unsafe impl<'a> Send for Document<'a> {}
unsafe impl<'a> Sync for Document<'a> {}
impl<'a> Document<'a> {
    /// Opens `file_name` (or an empty buffer when the name is empty) and
    /// wraps the resulting document in `Arc<RwLock<..>>` for sharing. If the
    /// file cannot be opened, falls back to an empty named buffer and flags
    /// the document as changed.
    pub fn new(
        document_name: &String,
        file_name: &String,
        mode: OpenMode,
        use_buffer_log: bool,
    ) -> Option<Arc<RwLock<Document<'static>>>> {
        dbg_println!("try open {} {} {:?}", document_name, file_name, mode);
        let buffer = if file_name.is_empty() {
            Buffer::empty(mode.clone())
        } else {
            Buffer::new(&file_name, mode.clone())
        };
        let mut changed = false;
        // fallback
        let buffer = if buffer.is_none() {
            changed = true;
            Buffer::empty_with_name(&document_name, mode.clone())
        } else {
            buffer
        };
        let doc = Document {
            id: Id(0),
            name: document_name.clone(),
            buffer: buffer.unwrap(),
            cache: DocumentReadCache::new(), // TODO(ceg): have a per view cache or move to View
            buffer_log: BufferLog::new(),
            use_buffer_log,
            abort_indexing: false,
            indexed: false,
            changed,
            is_syncing: false,
            last_tag_time: std::time::Instant::now(),
            subscribers: vec![],
        };
        Some(Arc::new(RwLock::new(doc)))
    }
    /// Refills the internal read cache with the bytes in [start, end).
    /// `start == end` just invalidates the cache; panics if start > end.
    pub fn set_cache(&mut self, start: u64, end: u64) {
        if start > end {
            panic!("start {} > end {}", start, end);
        }
        self.cache.start = start;
        self.cache.end = end;
        if start == end {
            return;
        }
        self.cache.data.clear();
        let size = (end - start) as usize;
        let sz = self.buffer.read(start, size, &mut self.cache.data);
        // clamp the cached range to the number of bytes actually read
        self.cache.end = start + sz as u64;
        self.cache.data.shrink_to_fit(); // ?
    }
    /// Builds and returns a standalone read cache over [start, end) without
    /// touching the document's internal cache.
    pub fn build_cache(&self, start: u64, end: u64) -> DocumentReadCache {
        let mut cache = DocumentReadCache::new(); // TODO ::with_capacity()
        assert!(start <= end);
        cache.start = start;
        cache.end = end;
        if start == end {
            return cache;
        }
        let size = (end - start) as usize;
        let sz = self.buffer.read(start, size, &mut cache.data);
        cache.end = start + sz as u64;
        cache.data.shrink_to_fit(); // ?
        cache
    }
    /// Returns the (start, end) range of the internal read cache.
    pub fn get_cache_range(&self) -> (u64, u64) {
        (self.cache.start, self.cache.end)
    }
    /// Returns the backing file name (empty for in-memory documents).
    pub fn file_name(&self) -> String {
        self.buffer.file_name.clone()
    }
    /// Returns the backing file's filesystem metadata.
    pub fn metadata(&self) -> Result<std::fs::Metadata> {
        self.buffer.metadata()
    }
    /// Returns the document size in bytes.
    pub fn size(&self) -> usize {
        self.buffer.size
    }
    /// Number of changes applied to the buffer; used as the document
    /// revision for cache validation.
    pub fn nr_changes(&self) -> usize {
        self.buffer.nr_changes() as usize
    }
    /// Returns true if [start, end] is fully covered by the internal cache.
    pub fn is_cached(&self, start: u64, end: u64) -> bool {
        self.cache.contains(start, end)
    }
    /// Replaces the internal cache with a fresh snapshot of [start, end).
    pub fn readahead(&mut self, start: u64, end: u64) {
        self.cache = self.build_cache(start, end)
    }
    /// Broadcasts `evt` to every registered subscriber.
    pub fn notify(&self, evt: &DocumentEvent) {
        dbg_println!(
            "notify {:?}, nb subscribers {}",
            document_event_to_string(&evt),
            self.subscribers.len()
        );
        for s in self.subscribers.iter() {
            s.borrow_mut().cb(self, evt);
        }
    }
    /// Builds one node's byte counters (write-locks the mapped file).
    pub fn build_node_byte_count(&self, node_index: usize) {
        // let node_info = doc.get_node_info(node_index);
        let mut file = self.buffer.data.write();
        build_node_byte_count(&mut file, Some(node_index));
    }
    /// Clears one node's byte counters and updates its ancestors.
    pub fn remove_node_byte_count(&self, node_index: usize) {
        // let node_info = doc.get_node_info(node_index);
        let mut file = self.buffer.data.write();
        remove_node_byte_count(&mut file, Some(node_index));
    }
    /// Marks one node un-indexed and subtracts its counters from ancestors.
    pub fn update_node_byte_count(&self, node_index: usize) {
        // let node_info = doc.get_node_info(node_index);
        let mut file = self.buffer.data.write();
        update_node_byte_count(&mut file, Some(node_index));
    }
    /// Debug helper: prints the root node's counter for byte 10 ('\n').
    pub fn show_root_node_bytes_stats(&self) {
        // let node_info = doc.get_node_info(node_index);
        let file = self.buffer.data.read();
        if let Some(idx) = file.root_index() {
            let node = &file.pool[idx];
            if !node.indexed {
                return;
            }
            for (i, count) in node.byte_count.iter().enumerate() {
                if i == 10 {
                    dbg_println!("ROOT NODE byte_count[{}] = {}", i, count);
                }
            }
        }
    }
    // TODO(ceg): return cb slot / unregister slot_mask
    /// Registers an event subscriber; returns the subscriber count after
    /// insertion (not a stable slot id — see TODO above).
    pub fn register_subscriber(&mut self, cb: Box<dyn DocumentEventCb>) -> usize {
        let len = 1 + self.subscribers.len();
        self.subscribers.push(RefCell::new(cb));
        len
    }
    // read ahead
    /// copy the content of the buffer up to 'nr_bytes' into the data Vec
    /// the read bytes are appended to the data Vec
    /// return XXX on error (TODO(ceg): use ioresult)
    pub fn read(&self, offset: u64, nr_bytes: usize, data: &mut Vec<u8>) -> usize {
        // return self.buffer.read(offset, nr_bytes, data);
        // Try the internal cache first; fall back to the buffer on a miss.
        let doc_rev = self.nr_changes();
        if let Some(size) = self.cache.read(offset, nr_bytes, data, doc_rev) {
            //dbg_println!("DATA IN CACHE offset {} size {}", offset, nr_bytes);
            // cache validation checks
            if false {
                let mut real = vec![];
                self.buffer.read(offset, nr_bytes, &mut real);
                assert!(real.len() == data.len());
                for i in 0..real.len() {
                    assert!(real[i] == data[i]);
                }
            }
            return size;
        }
        // dbg_println!("DATA NOT IN CACHE offset {} size {}", offset, nr_bytes);
        // TODO(ceg): --panic-on-read-cache-miss
        // panic!("");
        self.buffer.read(offset, nr_bytes, data)
    }
    /// Same as `read`, but served through a caller-provided cache.
    /// copy the content of the buffer up to 'nr_bytes' into the data Vec
    /// the read bytes are appended to the data Vec
    /// return XXX on error (TODO(ceg): use ioresult)
    pub fn read_cached(
        &self,
        offset: u64,
        nr_bytes: usize,
        data: &mut Vec<u8>,
        cache: &DocumentReadCache,
    ) -> usize {
        let doc_rev = self.nr_changes();
        if let Some(size) = cache.read(offset, nr_bytes, data, doc_rev) {
            //dbg_println!("DATA IN CACHE offset {} size {}", offset, nr_bytes);
            // cache validation checks
            if false {
                let mut real = vec![];
                self.buffer.read(offset, nr_bytes, &mut real);
                assert!(real.len() == data.len());
                for i in 0..real.len() {
                    assert!(real[i] == data[i]);
                }
            }
            return size;
        }
        dbg_println!("DATA NOT IN CACHE offset {} size {}", offset, nr_bytes);
        self.buffer.read(offset, nr_bytes, data) // reread cache
    }
    /// Current position in the undo/redo log.
    pub fn buffer_log_pos(&self) -> usize {
        self.buffer_log.pos
    }
    /// Number of operations recorded in the undo/redo log.
    pub fn buffer_log_count(&self) -> usize {
        self.buffer_log.data.len()
    }
    /// Clears the undo/redo log and resets its position.
    pub fn buffer_log_reset(&mut self) {
        self.buffer_log.data.clear();
        self.buffer_log.pos = 0;
        dbg_println!("bufferlog: cleared");
    }
    /// Records a Tag (a restore point carrying marks/selections offsets) in
    /// the buffer log. Returns false when the log is disabled.
    pub fn tag(
        &mut self,
        time: std::time::Instant,
        offset: u64,
        marks_offsets: Vec<u64>,
        selections_offsets: Vec<u64>,
    ) -> bool {
        if !self.use_buffer_log {
            // return log disabled ?
            return false;
        }
        if self.last_tag_time == time {
            // ignore contiguous event ? config
            // return;
        }
        //dbg_println!("// doc.tag(..) offsets = {:?}", marks_offset);
        self.buffer_log.add(
            offset,
            BufferOperationType::Tag {
                time,
                marks_offsets,
                selections_offsets,
            },
            None,
        );
        self.last_tag_time = time;
        true
    }
    /// Returns the (marks, selections) offsets of the log entry at the
    /// current position (or the last entry when at the end), if it is a Tag.
    pub fn get_tag_offsets(&mut self) -> Option<(Vec<u64>, Vec<u64>)> {
        let dlen = self.buffer_log.data.len();
        if dlen == 0 {
            return None;
        }
        let pos = if self.buffer_log.pos == dlen {
            self.buffer_log.pos - 1
        } else {
            self.buffer_log.pos
        };
        // get inverted operation
        let op = &self.buffer_log.data[pos];
        match op.op_type {
            BufferOperationType::Tag {
                ref marks_offsets,
                ref selections_offsets,
                ..
            } => {
                Some((marks_offsets.clone(), selections_offsets.clone())) // TODO(ceg): Arc<Vec<u64>>
            }
            _ => None,
        }
    }
    /// Keeps the per-node byte-count index in sync after buffer mutations:
    /// changed nodes are re-counted, added nodes counted, removed nodes
    /// subtracted.
    pub fn update_hierarchy_from_events(&self, events: &Vec<MappedFileEvent>) {
        for ev in events {
            match ev {
                MappedFileEvent::NodeChanged { node_index } => {
                    self.remove_node_byte_count(*node_index);
                    self.build_node_byte_count(*node_index);
                    let mut file = self.buffer.data.write();
                    // remove prev counts
                    update_byte_index_hierarchy(
                        &mut file,
                        Some(*node_index),
                        UpdateHierarchyOp::Sub,
                    );
                    // rebuild current counters
                    // add new count
                    update_byte_index_hierarchy(
                        &mut file,
                        Some(*node_index),
                        UpdateHierarchyOp::Add,
                    );
                }
                MappedFileEvent::NodeAdded { node_index } => {
                    self.build_node_byte_count(*node_index);
                }
                MappedFileEvent::NodeRemoved { node_index } => {
                    self.remove_node_byte_count(*node_index);
                }
            }
        }
    }
    /// insert the 'data' Vec content in the buffer up to 'nr_bytes'
    /// return the number of written bytes (TODO(ceg): use io::Result)
    pub fn insert(&mut self, offset: u64, nr_bytes: usize, data: &[u8]) -> usize {
        // TODO(ceg): update cache if possible
        self.set_cache(0, 0); // invalidate cache,
        // log insert op
        let mut ins_data = Vec::with_capacity(nr_bytes);
        ins_data.extend(&data[..nr_bytes]);
        if self.use_buffer_log {
            self.buffer_log.add(
                offset,
                BufferOperationType::Insert,
                Some(Arc::new(ins_data)),
            );
        }
        let (sz, events) = self.buffer.insert(offset, nr_bytes, &data[..nr_bytes]);
        if sz > 0 {
            self.changed = true;
        }
        // keep the byte index consistent, then fan events out to subscribers
        self.update_hierarchy_from_events(&events);
        for ev in &events {
            let ev = mapped_file_event_to_document_event(&ev);
            self.notify(&ev);
        }
        sz
    }
    /// Appends `data` at the end of the document; returns the bytes written.
    pub fn append(&mut self, data: &[u8]) -> usize {
        let sz = self.size() as u64;
        self.insert(sz, data.len(), &data)
    }
    /// remove up to 'nr_bytes' from the buffer starting at offset
    /// if removed_data is provided will call self.read(offset, nr_bytes, data)
    /// before remove the bytes
    /*
     TODO(ceg): we want
      - remove the data
      - collect each leaf node impacted
      - update byte index from these nodes
      - call event subscriber
      - cleanup impacted nodes
    */
    pub fn remove(
        &mut self,
        offset: u64,
        nr_bytes: usize,
        removed_data: Option<&mut Vec<u8>>,
    ) -> usize {
        // TODO(ceg): update cache if possible
        self.set_cache(0, 0); // invalidate cache,
        let mut rm_data = Vec::with_capacity(nr_bytes);
        let (nr_bytes_removed, events) = self.buffer.remove(offset, nr_bytes, Some(&mut rm_data));
        // hand the removed bytes back to the caller if requested
        if let Some(v) = removed_data {
            v.extend(rm_data.clone());
        }
        if self.use_buffer_log {
            self.buffer_log
                .add(offset, BufferOperationType::Remove, Some(Arc::new(rm_data)));
        }
        if nr_bytes_removed > 0 {
            self.changed = true;
        }
        self.update_hierarchy_from_events(&events);
        for ev in &events {
            let ev = mapped_file_event_to_document_event(&ev);
            self.notify(&ev);
        }
        nr_bytes_removed
    }
    /// Removes the whole document content; returns its previous size.
    pub fn delete_content(&mut self, removed_data: Option<&mut Vec<u8>>) -> usize {
        let sz = self.size();
        self.remove(0, sz, removed_data);
        sz
    }
    /// Forward search for `data` in [from_offset, to_offset); returns the
    /// match offset if found.
    pub fn find(&self, data: &[u8], from_offset: u64, to_offset: Option<u64>) -> Option<u64> {
        self.buffer.find(&data, from_offset, to_offset)
    }
    /// Backward search for `data`; returns the match offset if found.
    pub fn find_reverse(
        &self,
        data: &[u8],
        from_offset: u64,
        to_offset: Option<u64>,
    ) -> Option<u64> {
        self.buffer.find_reverse(&data, from_offset, to_offset)
    }
    // TODO(ceg): return an array of offsets ?
    /// Applies a sequence of log operations in order.
    pub fn apply_operations(&mut self, ops: &[BufferOperation]) {
        for op in ops {
            self.apply_log_operation(op);
        }
    }
    /// Replays one log operation against the buffer (used by undo/redo) and
    /// returns the offset where the caller should place the mark afterwards.
    fn apply_log_operation(&mut self, op: &BufferOperation) -> Option<u64> {
        // apply op
        dbg_println!("apply_log_operation");
        op.dump();
        let mark_offset = match op.op_type {
            BufferOperationType::Insert => {
                let sz = self.buffer.size();
                // TODO(ceg): check i/o errors
                let added = if let Some(data) = &op.data {
                    let (_, events) = self.buffer.insert(op.offset, data.len(), &data);
                    self.changed = true;
                    self.update_hierarchy_from_events(&events);
                    for ev in &events {
                        let ev = mapped_file_event_to_document_event(&ev);
                        self.notify(&ev);
                    }
                    data.len() as u64
                } else {
                    0
                };
                // sanity: the buffer must have grown by exactly `added` bytes
                assert_eq!(sz + added as usize, self.buffer.size());
                op.offset + added
            }
            BufferOperationType::Remove => {
                let sz = self.buffer.size();
                // TODO(ceg): check i/o errors
                let _removed = if let Some(data) = &op.data {
                    let (rm, events) = self.buffer.remove(op.offset, data.len(), None);
                    self.changed = true;
                    self.update_hierarchy_from_events(&events);
                    for ev in &events {
                        let ev = mapped_file_event_to_document_event(&ev);
                        self.notify(&ev);
                    }
                    assert_eq!(rm, data.len());
                    rm
                } else {
                    0
                };
                assert_eq!(sz - _removed, self.buffer.size());
                op.offset
            }
            BufferOperationType::Tag {
                marks_offsets: _, ..
            } => {
                /* nothing */
                op.offset
            }
        };
        Some(mark_offset)
    }
    /// Undoes one operation (replays the inverse of the previous log entry).
    /// Returns the resulting mark offset, or None when at the log start.
    pub fn undo(&mut self) -> Option<u64> {
        // read current log position
        let pos = self.buffer_log.pos;
        if pos == 0 {
            return None;
        }
        // apply inverted previous operation
        let op = self.buffer_log.data[pos - 1].invert();
        self.buffer_log.pos -= 1;
        self.apply_log_operation(&op)
    }
    /// Redoes one operation (replays the next log entry). Returns the
    /// resulting mark offset, or None when at the log end.
    pub fn redo(&mut self) -> Option<u64> {
        // read current log position
        let pos = self.buffer_log.pos;
        if pos == self.buffer_log.data.len() {
            return None;
        }
        // apply next operation
        let op = self.buffer_log.data[pos].clone();
        self.buffer_log.pos += 1;
        self.apply_log_operation(&op)
    }
    /// Returns true if the log entry at `index` is a Tag marker.
    pub fn is_buffer_log_op_tag(&self, index: usize) -> bool {
        if index >= self.buffer_log.data.len() {
            return false;
        }
        let op = &self.buffer_log.data[index];
        match op.op_type {
            BufferOperationType::Tag { .. } => true,
            _ => false,
        }
    }
    /// Undoes backwards until the previous Tag (or the log start), replaying
    /// inverted operations; returns the inverted operations that were applied.
    pub fn undo_until_tag(&mut self) -> Vec<BufferOperation> {
        // read current log position
        let mut ops = Vec::new();
        loop {
            if self.buffer_log.pos == 0 {
                break;
            }
            self.buffer_log.pos -= 1;
            let pos = self.buffer_log.pos;
            // get inverted operation
            let op = &self.buffer_log.data[pos];
            match op.op_type {
                BufferOperationType::Tag { .. } => {
                    // stop if no last tag
                    if pos != self.buffer_log.data.len() - 1 {
                        break;
                    }
                }
                _ => {}
            }
            // inverse replay
            let inverted_op = op.invert();
            self.apply_log_operation(&inverted_op);
            ops.push(inverted_op);
        }
        ops
    }
    /// Redoes forward until the next Tag (or the log end); returns the
    /// operations that were replayed.
    pub fn redo_until_tag(&mut self) -> Vec<BufferOperation> {
        let mut ops = Vec::new();
        if self.buffer_log.data.is_empty() {
            return ops;
        }
        if self.buffer_log.pos >= self.buffer_log.data.len() - 1 {
            return ops;
        }
        // skip current tag
        if self.is_buffer_log_op_tag(self.buffer_log.pos) {
            self.buffer_log.pos += 1;
        }
        // replay until tag
        while !self.is_buffer_log_op_tag(self.buffer_log.pos) {
            if self.buffer_log.pos >= self.buffer_log.data.len() - 1 {
                break;
            }
            let op = self.buffer_log.data[self.buffer_log.pos].clone();
            // actual replay
            self.apply_log_operation(&op);
            ops.push(op);
            self.buffer_log.pos += 1;
        }
        ops
    }
}
// helper
use std::path::Path;
// TODO(ceg): handle errors
/// Writes the document's content back to its backing file.
///
/// Strategy: stream every tree node into a temporary "<file>.update" file
/// (throttled while the user is active), then rename it over the original,
/// reopen the file, and patch the mapped-file descriptor/offsets.
pub fn sync_to_storage(doc: &Arc<RwLock<Document>>) {
    // read/copy: create the temporary destination file
    let mut fd = {
        let doc = doc.read();
        if doc.file_name().is_empty() {
            // TODO(ceg): save as pop up/notification
            dbg_println!("cannot save target filename is empty"); // fixed "dsave" typo
            return;
        }
        let tmp_file_name = format!("{}{}", doc.file_name(), ".update"); // TODO(ceg): move to global config
        let path = Path::new(&tmp_file_name);
        // best effort: ignore failure to remove a stale temp file
        let _ = std::fs::remove_file(path);
        let fd = File::create(path);
        if fd.is_err() {
            dbg_println!("cannot save {}", doc.file_name());
            return;
        }
        fd.unwrap()
    };
    dbg_println!("SYNC: fd = {:?}", fd);
    // start at the leftmost node (the one containing offset 0)
    let mut idx = {
        let doc = doc.read();
        let file = doc.buffer.data.read();
        let (node_index, _, _) = file.find_node_by_offset(0);
        node_index
    };
    while idx.is_some() {
        // do not hold the doc.lock more
        {
            let doc = doc.read();
            let file = doc.buffer.data.read();
            let node = &file.pool[idx.unwrap()];
            let mut data = Vec::with_capacity(node.size as usize);
            // SAFETY(review): uninitialized u8 storage, expected to be fully
            // overwritten by do_direct_copy below — confirm that contract.
            unsafe {
                data.set_len(data.capacity());
            };
            if file.fd.is_none() {
                // TODO(ceg): "save as" pop up
                break;
            }
            let orig_fd = { Some(file.fd.as_ref().unwrap().clone()) };
            if let Some(_n) = node.do_direct_copy(&orig_fd, &mut data) {
                let nw = fd.write(&data).unwrap();
                if nw != data.len() {
                    dbg_println!("cannot save {}", doc.file_name());
                    panic!("");
                    // return false;
                }
                // dbg_println!("sync doc('{}') node {}", doc.file_name(), idx.unwrap());
            } else {
                panic!("direct copy failed");
            }
            idx = node.link.next;
        }
        // NB: experimental throttling based on user input freq/rendering
        // TODO <-- user configuration
        if user_is_active() {
            let wait = std::time::Duration::from_millis(16);
            std::thread::sleep(wait);
        }
    }
    // update: swap the temp file in and refresh the mapped-file state
    {
        use std::os::unix::fs::PermissionsExt;
        let mut doc = doc.write();
        // TODO(ceg): use mapped file fd, will panic if file is removed
        let perms = match doc.metadata() {
            Ok(metadata) => metadata.permissions(),
            Err(_) => std::fs::Permissions::from_mode(0o600),
        };
        let tmp_file_name = format!("{}{}", doc.file_name(), ".update"); // TODO(ceg): move '.update' to global config
        {
            // TODO(ceg): large file warning in save ? disable backup ?
            let _tmp_backup_name = format!("{}{}", doc.file_name(), "~");
            // TODO(ceg): move '~' to global config
            // let _ = ::std::fs::rename(&doc.file_name(), &tmp_backup_name);
        }
        let _ = ::std::fs::rename(&tmp_file_name, &doc.file_name());
        // reopen file
        let new_fd = File::open(&doc.file_name()).unwrap();
        // TODO(ceg): handle skip with ReadOnly
        let mapped_file = doc.buffer.data.clone();
        let mut mapped_file = mapped_file.write();
        crate::core::mapped_file::MappedFile::patch_storage_offset_and_file_descriptor(
            &mut mapped_file,
            new_fd,
        );
        // TODO(ceg): check result, handle io results properly
        // set buffer status to : permission denied etc
        let _ = ::std::fs::set_permissions(&doc.file_name(), perms);
        doc.changed = false;
        doc.is_syncing = false;
    }
}
/// Propagates one node's byte counters up through all of its ancestors,
/// adding (`Add`) or saturating-subtracting (`Sub`) them; each visited
/// ancestor is flagged as indexed. No-op when `idx` is `None`.
fn update_byte_index_hierarchy(
    file: &mut MappedFile,
    idx: Option<NodeIndex>,
    op: UpdateHierarchyOp,
) {
    let idx = match idx {
        Some(idx) => idx,
        None => return,
    };
    // snapshot the child's counters before walking the parent chain
    let byte_count = file.pool[idx].byte_count.clone();
    let mut parent = file.pool[idx].link.parent;
    while let Some(p_idx) = parent {
        let p_node = &mut file.pool[p_idx];
        for (i, count) in byte_count.iter().enumerate() {
            match op {
                UpdateHierarchyOp::Add => p_node.byte_count[i] += count,
                UpdateHierarchyOp::Sub => {
                    p_node.byte_count[i] = p_node.byte_count[i].saturating_sub(*count)
                } // TODO(ceg): -=
            }
        }
        p_node.indexed = true;
        parent = p_node.link.parent;
    }
}
/// Reads one tree node's raw bytes via a direct copy from the backing file
/// (or from memory when there is no fd). Returns an empty Vec for `None`,
/// panics if the direct copy fails.
pub fn get_node_data(file: &mut MappedFile, idx: Option<NodeIndex>) -> Vec<u8> {
    if idx.is_none() {
        return vec![];
    }
    let idx = idx.unwrap();
    let node = &mut file.pool[idx];
    let mut data = Vec::with_capacity(node.size as usize);
    // SAFETY(review): exposes uninitialized u8 storage; assumes
    // do_direct_copy fully overwrites `data` — confirm that contract.
    unsafe {
        data.set_len(node.size as usize);
    };
    let orig_fd = if file.fd.is_none() {
        None
    } else {
        Some(file.fd.as_ref().unwrap().clone())
    };
    if let Some(_n) = node.do_direct_copy(&orig_fd, &mut data) {
        //
    } else {
        // TODO(ceg): return error
        panic!("direct copy failed");
    }
    data
}
// call this on new node
/// Counts bytes in a (not yet indexed) node, marks it indexed, and adds its
/// counters to all ancestors.
///
/// NOTE(review): only '\n' is counted here, while `build_index` fills all
/// 256 counters — consumers like `find_nth_byte_offset` read arbitrary byte
/// counters, so this asymmetry looks suspicious; confirm intent.
pub fn build_node_byte_count(mut file: &mut MappedFile, idx: Option<NodeIndex>) {
    if idx.is_none() {
        return;
    }
    let idx = idx.unwrap();
    let node = &mut file.pool[idx];
    let mut data = Vec::with_capacity(node.size as usize);
    // SAFETY(review): uninitialized u8 storage, assumed to be fully
    // overwritten by do_direct_copy — confirm.
    unsafe {
        data.set_len(node.size as usize);
    };
    let orig_fd = if file.fd.is_none() {
        None
    } else {
        Some(file.fd.as_ref().unwrap().clone())
    };
    if let Some(_n) = node.do_direct_copy(&orig_fd, &mut data) {
        //
    } else {
        // TODO(ceg): return error
        panic!("direct copy failed");
    }
    assert!(!node.indexed);
    // node.byte_count = [0;256];
    // count node bytes (no lock)
    for b in data.iter() {
        let byte_idx = *b as usize;
        if *b as char == '\n' {
            node.byte_count[byte_idx] += 1;
        }
    }
    node.indexed = true;
    update_byte_index_hierarchy(&mut file, Some(idx), UpdateHierarchyOp::Add);
}
/// Subtracts a node's byte counters from all its ancestors, then zeroes the
/// node's own counters and marks it un-indexed. No-op when `idx` is `None`
/// or the node was never indexed.
pub fn remove_node_byte_count(file: &mut MappedFile, idx: Option<NodeIndex>) {
    // (removed a needless `mut file` binding and the `&mut file`
    // double-reference the original passed to the helper)
    if idx.is_none() {
        return;
    }
    let idx = idx.unwrap();
    let node = &mut file.pool[idx];
    if !node.indexed {
        return;
    }
    update_byte_index_hierarchy(file, Some(idx), UpdateHierarchyOp::Sub);
    // re-borrow: the hierarchy update needed exclusive access to the pool
    let node = &mut file.pool[idx];
    node.byte_count = [0; 256];
    node.indexed = false;
}
/// Marks a node un-indexed and subtracts its counters from all its
/// ancestors. Unlike `remove_node_byte_count`, the node's own counters are
/// kept — presumably so they can be re-added later; TODO confirm.
/// No-op when `idx` is `None` or the node was never indexed.
pub fn update_node_byte_count(file: &mut MappedFile, idx: Option<NodeIndex>) {
    // (removed a needless `mut file` binding and the `&mut file`
    // double-reference the original passed to the helper)
    if idx.is_none() {
        return;
    }
    let idx = idx.unwrap();
    let node = &mut file.pool[idx];
    if !node.indexed {
        return;
    }
    node.indexed = false;
    update_byte_index_hierarchy(file, Some(idx), UpdateHierarchyOp::Sub);
}
// TODO(ceg): split code to provide index_single_node(nid)
/// Background indexer: walks every leaf node left-to-right, counts all 256
/// byte values per node, propagates the counters up the tree, and notifies
/// subscribers per node and once the document is fully indexed. Cooperates
/// with the UI by yielding while the user is active and honors
/// `abort_indexing`. Locks are taken per node, never across iterations.
pub fn build_index(doc: &Arc<RwLock<Document>>) {
    let mut idx = {
        let doc = doc.read();
        {
            if doc.indexed {
                return;
            }
            let file = doc.buffer.data.read();
            let (node_index, _, _) = file.find_node_by_offset(0);
            node_index
        }
    };
    let t0 = std::time::Instant::now();
    let mut data = vec![];
    while idx != None {
        // read node bytes
        {
            let doc = doc.read();
            if doc.abort_indexing {
                break;
            }
            let file = doc.buffer.data.read();
            let node = &file.pool[idx.unwrap()];
            if node.indexed {
                idx = node.link.next;
                continue;
            }
            data.reserve(node.size as usize);
            // SAFETY(review): uninitialized u8 storage, assumed to be fully
            // overwritten by do_direct_copy — confirm.
            unsafe {
                data.set_len(node.size as usize);
            };
            let orig_fd = if file.fd.is_none() {
                None
            } else {
                Some(file.fd.as_ref().unwrap().clone())
            };
            let t0_read = std::time::Instant::now();
            if let Some(_n) = node.do_direct_copy(&orig_fd, &mut data) {
                dbg_println!(
                    "build index doc('{}') node {} size {}",
                    doc.file_name(),
                    idx.unwrap(),
                    data.len(),
                );
            } else {
                // TODO(ceg): return error
                panic!("direct copy failed");
            }
            let t1_read = std::time::Instant::now();
            dbg_println!("read node time {:?} ms", (t1_read - t0_read).as_millis());
        }
        // count node bytes (no lock)
        let mut byte_count: [u64; 256] = [0; 256];
        for b in data.iter() {
            let byte_idx = *b as usize;
            byte_count[byte_idx] += 1;
        }
        // yield some cpu time
        if user_is_active() {
            let wait = std::time::Duration::from_millis(16);
            std::thread::sleep(wait);
        }
        // update node info (idx)
        {
            let doc = doc.read();
            let mut file = doc.buffer.data.write();
            let node_index = idx.unwrap();
            // save byte counters
            {
                let mut node = &mut file.pool[node_index];
                node.byte_count = byte_count;
                node.indexed = true;
                idx = node.link.next;
            }
            update_byte_index_hierarchy(&mut file, Some(node_index), UpdateHierarchyOp::Add);
        }
        // notify subscribers
        if idx.is_some() {
            let doc = doc.read();
            doc.notify(&DocumentEvent::NodeIndexed {
                node_index: idx.unwrap(),
            });
        }
    }
    let t1 = std::time::Instant::now();
    dbg_println!("index time {:?} ms", (t1 - t0).as_millis());
    {
        // set index status flags
        {
            let mut doc = doc.write();
            if !doc.abort_indexing {
                doc.indexed = true;
            }
            // display root node info
            let file = doc.buffer.data.read();
            if let Some(root_index) = file.root_index() {
                let node = &file.pool[root_index];
                dbg_println!(
                    "{} : Number of lines {}",
                    doc.file_name(),
                    node.byte_count[b'\n' as usize]
                );
            }
        }
        let doc = doc.read();
        if doc.indexed {
            doc.notify(&DocumentEvent::DocumentFullyIndexed {});
        }
    }
}
//
// walk through the binary tree and while looking for the node containing "offset"
// and track byte_index count
// SZ(19) , LF(9)
// _________[ SZ(7+12), LF(3+6) ]____________________
// / \
// __[ 7=SZ(3+4), LF 3=(1+2) ]__ _____[ 12=(5+7), LF 6=(2+4) ]__
// / \ / \
// [SZ(3), LF(1)]={a,LF,b} [SZ(4), LF(2)]={a,LF,LF,b } [5, LF(2)] data{a,LF,b,LF,c} [SZ(7), LF(4)]={a ,LF,LF,b ,Lf,LF,c}
// 0,1 ,2 3, 4, 5,6 7, 8,9,10,11 12,13,14,15,16,17,18
//
//
// return (line_count, offset's node_index)
/// Counts occurrences of `byte_index` in `[0, offset)` by descending the
/// tree: aggregated counters are used for fully-skipped left subtrees, and
/// the final leaf is scanned byte-by-byte. Returns (count, leaf node index),
/// or (0, None) when the tree is empty.
pub fn get_document_byte_count_at_offset(
    doc: &Document,
    byte_index: usize,
    offset: u64,
) -> (u64, Option<usize>) {
    assert!(byte_index < 256);
    let mut total_count = 0;
    // offset relative to the subtree currently being descended
    let mut local_offset = offset;
    // write lock: get_node_data below needs &mut MappedFile
    let mut file = doc.buffer.data.write();
    let mut cur_index = file.root_index();
    while cur_index != None {
        let idx = cur_index.unwrap();
        let p_node = &file.pool[idx];
        let is_leaf = p_node.link.left.is_none() && p_node.link.right.is_none();
        if is_leaf {
            let data = get_node_data(&mut file, Some(idx));
            // count by until local_offset is reached
            for b in data.iter().take(local_offset as usize) {
                if *b as usize == byte_index {
                    total_count += 1;
                }
            }
            return (total_count, cur_index);
        }
        if let Some(left_index) = p_node.link.left {
            let left_node = &file.pool[left_index];
            if local_offset < left_node.size {
                cur_index = Some(left_index);
                continue;
            }
            // whole left subtree precedes the offset: take its counter
            total_count += left_node.byte_count[byte_index];
            local_offset -= left_node.size
        }
        cur_index = p_node.link.right;
    }
    (0, None)
}
/// Returns the total count of `byte_index` in the whole document, taken
/// from the root node's aggregated counters; `None` when the tree is empty.
pub fn get_document_byte_count(doc: &Document, byte_index: usize) -> Option<u64> {
    assert!(byte_index < 256);
    let file = doc.buffer.data.read();
    file.root_index()
        .map(|idx| file.pool[idx].byte_count[byte_index])
}
//
// walk through the binary tree and while looking for the node containing "offset"
// and track byte_index count
// SZ(19) , LF(9)
//                    _________[ SZ(7+12), LF(3+6) ]____________________
//                   /                                                  \
//   __[ 7=SZ(3+4), LF 3=(1+2) ]__                   _____[ 12=(5+7), LF 6=(2+4) ]__
//  /                             \                 /                                \
// [SZ(3), LF(1)]={a,LF,b}   [SZ(4), LF(2)]={a,LF,LF,b }  [5, LF(2)] data{a,LF,b,LF,c}  [SZ(7), LF(4)]={a ,LF,LF,b ,Lf,LF,c}
//  0,1 ,2                    3, 4, 5,6                    7, 8,9,10,11                  12,13,14,15,16,17,18
//
/// Global byte offset of the `index`-th (1-based) occurrence of `byte`,
/// or None when there is no such occurrence (or the tree is not fully indexed).
///
/// Fixes over the previous version:
/// - the validity check used `index >= count`, wrongly rejecting the last
///   (count-th) occurrence;
/// - the check only ran on internal nodes, so a single-leaf tree (or a leaf
///   whose data disagrees with the counters) could return a bogus offset for
///   an invalid `index`.
pub fn find_nth_byte_offset(doc: &Document, byte: u8, index: u64) -> Option<u64> {
    assert!(index > 0);
    let mut index = index;
    // write lock: get_node_data() below needs &mut MappedFile
    let mut file = doc.buffer.data.write();
    let mut global_offset = 0;
    let mut cur_index = file.root_index();
    while cur_index != None {
        let idx = cur_index.unwrap();
        // Validate against the current subtree's counter *before* any leaf
        // scan, so an invalid/over-large index is rejected even when the
        // root is a single leaf.
        let count = file.pool[idx].byte_count[byte as usize];
        if index > count {
            // not fully indexed, or this byte does not occur `index` times
            return None;
        }
        let p_node = &file.pool[idx];
        let is_leaf = p_node.link.left.is_none() && p_node.link.right.is_none();
        if is_leaf {
            let data = get_node_data(&mut file, Some(idx));
            // scan the leaf until the index-th occurrence is reached
            for b in data.iter() {
                if *b == byte {
                    index -= 1;
                    if index == 0 {
                        // global_offset currently points at the matching byte
                        return Some(global_offset);
                    }
                }
                global_offset += 1;
            }
            // counters promised the occurrence was in this leaf but the data
            // disagrees: report "not found" instead of a bogus offset
            return None;
        }
        if let Some(left_index) = p_node.link.left {
            let left_node = &file.pool[left_index];
            let left_byte_count = left_node.byte_count[byte as usize];
            // byte in left sub-tree ?
            if index <= left_byte_count {
                cur_index = Some(left_index);
                continue;
            }
            global_offset += left_node.size; // count skipped offsets
            index -= left_byte_count;
        }
        // byte in right sub-tree
        cur_index = p_node.link.right;
    }
    None
}
#[cfg(test)]
mod tests {
    extern crate rand;
    use super::*;
    use rand::Rng;
    // Benchmark-style smoke test: creates a 2 GiB scratch file and measures
    // sequential read throughput.
    // NOTE(review): writes/reads buffers whose contents are left uninitialized
    // via set_len() -- acceptable for a throughput test only.
    #[test]
    fn doc_read() {
        use std::io::prelude::*;
        use std::os::unix::prelude::FileExt;
        let filename = "/tmp/unl-test-file".to_owned();
        let _ = std::fs::remove_file(&filename);
        {
            println!("create file....");
            let mut file = std::fs::File::create(&filename).unwrap();
            let size = 2 * 1024 * 1024 * 1024;
            let mut buf = Vec::with_capacity(size);
            //buf.resize(size, 0);
            unsafe {
                buf.set_len(size);
            } // faster in debug build
            file.write_all(&buf).unwrap();
            println!("create file....ok");
        }
        println!("read file....");
        let doc = DocumentBuilder::new()
            .document_name("untitled-1")
            .file_name(&filename)
            .internal(false)
            .finalize();
        build_index(doc.as_ref().unwrap());
        let file = std::fs::File::open(&filename).unwrap();
        let doc = doc.as_ref().unwrap().write();
        let doc_size = doc.size() as u64;
        let step = 1024 * 1024;
        let t0_read = std::time::Instant::now();
        let mut prev_time = 0;
        let mut data: Vec<u8> = Vec::with_capacity(step);
        for offset in (0..doc_size).into_iter().step_by(step) {
            // NOTE(review): hard-coded toggle -- `!true` means the else branch
            // (raw FileExt::read_at) always runs; flip to exercise doc.read().
            if !true {
                unsafe {
                    data.set_len(step);
                } // faster in debug build
                // data.clear();
                doc.read(offset, step, &mut data);
            } else {
                unsafe {
                    data.set_len(step);
                } // faster in debug build
                let res = file.read_at(&mut data[0..step], offset);
                match res {
                    Ok(_size) => {
                        //println!("read [{}] @ offset {}", size, offset);
                    }
                    Err(what) => {
                        println!("read error [{}] @ offset {}, what {:?}", step, offset, what);
                    }
                }
            }
            // print throughput once per elapsed second
            let diff = t0_read.elapsed().as_secs();
            if prev_time != diff {
                let bytes_per_seconds = offset / diff;
                println!("bytes per seconds {}", bytes_per_seconds);
                println!("kib per seconds {}", bytes_per_seconds / 1024);
                println!("mib per seconds {}", bytes_per_seconds / (1024 * 1024));
                println!(
                    "gib per seconds {}",
                    bytes_per_seconds / (1024 * 1024 * 1024)
                );
                prev_time = diff;
            }
        }
        // let _ = std::fs::remove_file(&filename);
    }
    // Repeatedly inserts a fixed ~101 KB string, then undoes, redoes and
    // undoes again -- exercises the buffer log over several full cycles.
    #[test]
    fn undo_redo() {
        let doc = DocumentBuilder::new()
            .document_name("untitled-1")
            .internal(false)
            .finalize();
        let mut doc = doc.as_ref().unwrap().write();
        const STR_LEN: usize = 1000;
        let mut s = String::new();
        for _ in 0..STR_LEN {
            s.push_str("0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n");
        }
        const NB_INSERT: usize = 100;
        let max = NB_INSERT;
        for _ in 0..5 {
            println!("start insert test");
            let mut off: u64 = 0;
            for i in 0..max {
                println!("insert ({}/{}) -------", i + 1, max);
                let off_update = doc.insert(off, s.len(), s.as_ref());
                off += off_update as u64;
            }
            println!("doc.size = {}", doc.size());
            println!("start undo test");
            for i in 0..max {
                println!("undo ({}/{}) -------", i + 1, max);
                doc.undo();
            }
            println!("doc.size = {}", doc.size());
            println!("start redo test");
            for i in 0..max {
                println!("redo ({}/{}) -------", i + 1, max);
                doc.redo();
            }
            println!("doc.size = {}", doc.size());
            println!("start undo test (2nd pass)");
            for i in 0..max {
                println!("undo ({}/{}) -------", i + 1, max);
                doc.undo();
            }
            println!("doc.size = {}", doc.size());
        }
    }
    // Same undo/redo cycle as above, but each insert uses a random prefix of
    // the source string, stressing node splits of varying sizes.
    #[test]
    fn doc_random_size_inserts() {
        let doc = DocumentBuilder::new()
            .document_name("untitled-1")
            .internal(false)
            .finalize();
        let mut doc = doc.as_ref().unwrap().write();
        const NB_STR: usize = 10000;
        let mut s = String::new();
        for _ in 0..NB_STR {
            s.push_str("0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n");
        }
        const NB_INSERT: usize = 150;
        let max = NB_INSERT;
        let mut rng = rand::thread_rng();
        for _ in 0..10 {
            println!("start insert test");
            let mut off: u64 = 0;
            for i in 0..max {
                println!("insert ({}/{}) -------", i, max);
                // randomize s.len
                let random_size: usize = rng.gen_range(0, s.len());
                println!("random insert size = {}", random_size);
                let off_update = doc.insert(off, random_size, s.as_ref());
                off += off_update as u64;
            }
            println!("doc.size = {}", doc.size());
            for i in 0..max {
                println!("undo ({}/{}) -------", i + 1, max);
                doc.undo();
            }
            println!("doc.size = {}", doc.size());
            println!("start redo test");
            for i in 0..max {
                println!("redo ({}/{}) -------", i + 1, max);
                doc.redo();
            }
            println!("doc.size = {}", doc.size());
            for i in 0..max {
                println!("undo ({}/{}) -------", i + 1, max);
                doc.undo();
            }
            println!("doc.size = {}", doc.size());
        }
    }
}
fix find_nth_byte_offset (when passed an invalid index argument)
use std::fmt;
use parking_lot::RwLock;
use std::cell::RefCell;
use std::sync::Arc;
use std::sync::Weak;
use std::fs::File;
use std::io::prelude::*;
use std::io::Result;
//
use crate::core::editor::user_is_active;
use crate::core::mapped_file::MappedFile;
use crate::core::mapped_file::MappedFileEvent;
use crate::core::mapped_file::UpdateHierarchyOp;
use crate::core::mapped_file::NodeIndex;
use super::buffer::Buffer;
use super::buffer::OpenMode;
use super::bufferlog::BufferLog;
use super::bufferlog::BufferOperation;
use super::bufferlog::BufferOperationType;
//
/// Opaque document identifier (newtype over a plain `usize`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Id(pub usize);
/// Fluent builder used to configure and create a `Document`
/// (see the `impl DocumentBuilder` setters and `finalize()`).
#[derive(Debug)]
pub struct DocumentBuilder {
    internal: bool,          // editor-internal document flag
    use_buffer_log: bool,    // enable undo/redo logging
    document_name: String,   // user-visible name
    file_name: String,       // backing file path ("" => in-memory buffer)
    mode: OpenMode,          // read-only / read-write open mode
}
/// Holder of a weak back-reference to a Document, intended to forward
/// mapped-file events to it.
/// NOTE(review): no impl is visible in this file -- appears to be scaffolding.
#[derive(Debug)]
struct DocumentMappedFileEventHandler<'a> {
    _doc: Weak<RwLock<Document<'a>>>,
}
fn mapped_file_event_to_document_event(evt: &MappedFileEvent) -> DocumentEvent {
match evt {
MappedFileEvent::NodeChanged { node_index } => DocumentEvent::NodeChanged {
node_index: *node_index,
},
MappedFileEvent::NodeAdded { node_index } => DocumentEvent::NodeAdded {
node_index: *node_index,
},
MappedFileEvent::NodeRemoved { node_index } => DocumentEvent::NodeRemoved {
node_index: *node_index,
},
}
}
/// Fluent setters + `finalize()` constructor.
impl DocumentBuilder {
    /// Defaults: not internal, no buffer log, empty names, read-only mode.
    pub fn new() -> Self {
        Self {
            internal: false,
            use_buffer_log: false,
            document_name: String::new(),
            file_name: String::new(),
            mode: OpenMode::ReadOnly,
        }
    }
    /// Mark the document as editor-internal.
    /// NOTE(review): this flag is currently not forwarded by finalize() -- confirm.
    pub fn internal(&mut self, flag: bool) -> &mut Self {
        self.internal = flag;
        self
    }
    /// Enable/disable undo-redo logging for the built document.
    pub fn use_buffer_log(&mut self, flag: bool) -> &mut Self {
        self.use_buffer_log = flag;
        self
    }
    /// Set the user-visible document name.
    pub fn document_name(&mut self, name: &str) -> &mut Self {
        self.document_name = name.to_string();
        self
    }
    /// Set the backing file path (empty => in-memory buffer).
    pub fn file_name(&mut self, name: &str) -> &mut Self {
        self.file_name = name.to_string();
        self
    }
    /// Set the open mode.
    pub fn mode(&mut self, mode: OpenMode) -> &mut Self {
        self.mode = mode;
        self
    }
    /// Build the shared Document; None when construction fails.
    pub fn finalize<'a>(&self) -> Option<Arc<RwLock<Document<'static>>>> {
        Document::new(
            &self.document_name,
            &self.file_name,
            self.mode.clone(),
            self.use_buffer_log,
        )
    }
}
/// Cached copy of one contiguous document byte range, used to serve reads
/// without walking the underlying tree.
#[derive(Debug)]
pub struct DocumentReadCache {
    start: u64,      // first cached document offset (inclusive)
    end: u64,        // end of the cached range (start == end => empty cache)
    data: Vec<u8>,   // the cached bytes, data.len() == end - start
    revision: usize, // document change counter the cache was filled at
}
impl DocumentReadCache {
    /// Empty cache covering no byte range.
    pub fn new() -> Self {
        DocumentReadCache {
            start: 0,
            end: 0,
            data: vec![],
            revision: 0,
        }
    }
    /// true when the whole [min, max] range lies inside the cached window.
    pub fn contains(&self, min: u64, max: u64) -> bool {
        if min < self.start || min > self.end {
            return false;
        }
        if max < self.start || max > self.end {
            return false;
        }
        true
    }
    /// Serve `nr_bytes` at `offset` from the cache, appending them to `data`.
    ///
    /// Returns None (cache miss) when: the read cache is globally disabled,
    /// the cache was filled at a different document revision, the cache is
    /// empty, or the requested range is not fully covered.
    pub fn read(
        &self,
        offset: u64,
        nr_bytes: usize,
        data: &mut Vec<u8>,
        doc_revision: usize,
    ) -> Option<usize> {
        if !crate::core::use_read_cache() {
            return None;
        }
        // no cache sync yet
        if self.revision != doc_revision {
            return None;
        }
        if self.start == self.end {
            return None;
        }
        if offset < self.start {
            return None;
        }
        if offset + nr_bytes as u64 > self.end {
            return None;
        }
        let idx = (offset - self.start) as usize;
        // One memcpy instead of the previous bounds-checked per-byte push
        // loop (same bytes, same order).
        data.extend_from_slice(&self.data[idx..idx + nr_bytes]);
        Some(nr_bytes)
    }
}
/// Observer interface for document events.
pub trait DocumentEventCb {
    /// Called by Document::notify() for every event, in registration order.
    fn cb(&mut self, doc: &Document, event: &DocumentEvent);
}
/// Events delivered to `DocumentEventCb` subscribers: document lifecycle
/// transitions plus per-node tree/index changes.
#[derive(Debug, Clone)]
pub enum DocumentEvent {
    DocumentAdded,
    DocumentOpened,
    DocumentClosed,
    DocumentRemoved,
    /// Emitted once build_index() has indexed the whole document.
    DocumentFullyIndexed,
    NodeAdded { node_index: usize },
    NodeChanged { node_index: usize },
    NodeRemoved { node_index: usize },
    NodeIndexed { node_index: usize },
}
fn document_event_to_string(evt: &DocumentEvent) -> String {
match evt {
DocumentEvent::DocumentAdded => "Added".to_owned(),
DocumentEvent::DocumentOpened => "Opened".to_owned(),
DocumentEvent::DocumentClosed => "Closed".to_owned(),
DocumentEvent::DocumentRemoved => "Removed".to_owned(),
DocumentEvent::DocumentFullyIndexed => "FullyIndexed".to_owned(),
DocumentEvent::NodeAdded { node_index } => {
format!("NodeAdded idx: {}", node_index)
}
DocumentEvent::NodeChanged { node_index } => {
format!("NodeChanged idx: {}", node_index)
}
DocumentEvent::NodeRemoved { node_index, .. } => {
format!("NodeRemoved idx: {}", node_index)
}
DocumentEvent::NodeIndexed { node_index, .. } => {
format!("NodeIndexed idx: {}", node_index)
}
}
}
/// A document: a (possibly file-backed) buffer plus its undo/redo log,
/// read cache, indexing state and event subscribers.
pub struct Document<'a> {
    pub id: Id,
    pub name: String,
    pub buffer: Buffer<'a>, // TODO(ceg): provide iterator apis ?
    cache: DocumentReadCache,
    pub buffer_log: BufferLog,       // undo/redo operation log
    pub use_buffer_log: bool,        // false => edits are not recorded
    pub changed: bool,               // unsaved modifications pending
    pub is_syncing: bool,            // sync_to_storage() in progress
    pub abort_indexing: bool,        // ask build_index() to stop early
    pub indexed: bool,               // whole document has been indexed
    pub last_tag_time: std::time::Instant, // timestamp of the last Tag log entry
    pub subscribers: Vec<RefCell<Box<dyn DocumentEventCb>>>,
}
impl<'a> fmt::Debug for Document<'a> {
    /// Compact debug output: id + name only (buffer content omitted).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Fix: the name passed to debug_struct() was "Document {}", which
        // rendered as `Document {} { id: .., name: .. }`.
        f.debug_struct("Document")
            .field("id", &self.id)
            .field("name", &self.name)
            .finish()
    }
}
// NB: doc MUST be wrapped in Arc<RwLock<XXX>>
// SAFETY: Document is only ever shared wrapped in Arc<RwLock<..>> (see above),
// so all cross-thread access is serialized by the lock.
// NOTE(review): the RefCell-based subscriber list makes any unsynchronized
// shared access UB -- the RwLock really must be held for every use; confirm.
unsafe impl<'a> Send for Document<'a> {}
unsafe impl<'a> Sync for Document<'a> {}
impl<'a> Document<'a> {
    /// Open `file_name` (or create an in-memory buffer when the name is
    /// empty) and wrap the document for shared multi-threaded access.
    /// Falls back to an empty named buffer -- marking the document changed --
    /// when the underlying file cannot be opened.
    pub fn new(
        document_name: &String,
        file_name: &String,
        mode: OpenMode,
        use_buffer_log: bool,
    ) -> Option<Arc<RwLock<Document<'static>>>> {
        dbg_println!("try open {} {} {:?}", document_name, file_name, mode);
        let buffer = if file_name.is_empty() {
            Buffer::empty(mode.clone())
        } else {
            Buffer::new(&file_name, mode.clone())
        };
        let mut changed = false;
        // fallback
        let buffer = if buffer.is_none() {
            changed = true;
            Buffer::empty_with_name(&document_name, mode.clone())
        } else {
            buffer
        };
        let doc = Document {
            id: Id(0),
            name: document_name.clone(),
            // NOTE(review): unwrap() assumes the fallback buffer never fails -- confirm.
            buffer: buffer.unwrap(),
            cache: DocumentReadCache::new(), // TODO(ceg): have a per view cache or move to View
            buffer_log: BufferLog::new(),
            use_buffer_log,
            abort_indexing: false,
            indexed: false,
            changed,
            is_syncing: false,
            last_tag_time: std::time::Instant::now(),
            subscribers: vec![],
        };
        Some(Arc::new(RwLock::new(doc)))
    }
    /// Refill the internal read cache with document bytes [start, end).
    /// start == end invalidates the cache; panics when start > end.
    pub fn set_cache(&mut self, start: u64, end: u64) {
        if start > end {
            panic!("start {} > end {}", start, end);
        }
        self.cache.start = start;
        self.cache.end = end;
        if start == end {
            return;
        }
        self.cache.data.clear();
        let size = (end - start) as usize;
        let sz = self.buffer.read(start, size, &mut self.cache.data);
        // clamp to what was actually read (short read near end of document)
        self.cache.end = start + sz as u64;
        // NOTE(review): cache.revision is not refreshed here -- after the first
        // buffer change the cache never hits (see DocumentReadCache::read); confirm.
        self.cache.data.shrink_to_fit(); // ?
    }
    /// Build a standalone read cache for [start, end) without touching self.cache.
    pub fn build_cache(&self, start: u64, end: u64) -> DocumentReadCache {
        let mut cache = DocumentReadCache::new(); // TODO ::with_capacity()
        assert!(start <= end);
        cache.start = start;
        cache.end = end;
        if start == end {
            return cache;
        }
        let size = (end - start) as usize;
        let sz = self.buffer.read(start, size, &mut cache.data);
        cache.end = start + sz as u64;
        cache.data.shrink_to_fit(); // ?
        cache
    }
    /// (start, end) byte range currently covered by the internal read cache.
    pub fn get_cache_range(&self) -> (u64, u64) {
        (self.cache.start, self.cache.end)
    }
    /// Path of the backing file ("" for in-memory buffers).
    pub fn file_name(&self) -> String {
        self.buffer.file_name.clone()
    }
    /// Filesystem metadata of the backing file.
    pub fn metadata(&self) -> Result<std::fs::Metadata> {
        self.buffer.metadata()
    }
    /// Current document size in bytes.
    /// NOTE(review): the doc comment previously attached here was a stray copy
    /// of read()'s.
    pub fn size(&self) -> usize {
        self.buffer.size
    }
    /// Number of changes applied to the buffer (used as the cache revision).
    pub fn nr_changes(&self) -> usize {
        self.buffer.nr_changes() as usize
    }
    /// true when [start, end] is fully covered by the internal read cache.
    pub fn is_cached(&self, start: u64, end: u64) -> bool {
        self.cache.contains(start, end)
    }
    /// Prefetch [start, end) into the internal read cache.
    pub fn readahead(&mut self, start: u64, end: u64) {
        self.cache = self.build_cache(start, end)
    }
    /// Dispatch `evt` to every registered subscriber, in registration order.
    pub fn notify(&self, evt: &DocumentEvent) {
        dbg_println!(
            "notify {:?}, nb subscribers {}",
            document_event_to_string(&evt),
            self.subscribers.len()
        );
        for s in self.subscribers.iter() {
            s.borrow_mut().cb(self, evt);
        }
    }
    /// Build the byte counters of one tree node (takes the data write lock).
    pub fn build_node_byte_count(&self, node_index: usize) {
        // let node_info = doc.get_node_info(node_index);
        let mut file = self.buffer.data.write();
        build_node_byte_count(&mut file, Some(node_index));
    }
    /// Drop one node's byte counters and its ancestors' contribution.
    pub fn remove_node_byte_count(&self, node_index: usize) {
        // let node_info = doc.get_node_info(node_index);
        let mut file = self.buffer.data.write();
        remove_node_byte_count(&mut file, Some(node_index));
    }
    /// Mark a node for re-indexing, subtracting its counters from the hierarchy.
    pub fn update_node_byte_count(&self, node_index: usize) {
        // let node_info = doc.get_node_info(node_index);
        let mut file = self.buffer.data.write();
        update_node_byte_count(&mut file, Some(node_index));
    }
    /// Debug helper: print the root node's LF (byte value 10) counter.
    pub fn show_root_node_bytes_stats(&self) {
        // let node_info = doc.get_node_info(node_index);
        let file = self.buffer.data.read();
        if let Some(idx) = file.root_index() {
            let node = &file.pool[idx];
            if !node.indexed {
                return;
            }
            for (i, count) in node.byte_count.iter().enumerate() {
                // only byte value 10 ('\n') is printed
                if i == 10 {
                    dbg_println!("ROOT NODE byte_count[{}] = {}", i, count);
                }
            }
        }
    }
    // TODO(ceg): return cb slot / unregister slot_mask
    /// Register an event callback; returns the new number of subscribers
    /// (computed before the push, so it equals the count after insertion).
    pub fn register_subscriber(&mut self, cb: Box<dyn DocumentEventCb>) -> usize {
        let len = 1 + self.subscribers.len();
        self.subscribers.push(RefCell::new(cb));
        len
    }
    // read ahead
    /// copy the content of the buffer up to 'nr_bytes' into the data Vec
    /// the read bytes are appended to the data Vec
    /// return XXX on error (TODO(ceg): use ioresult)
    /// Tries the internal read cache first, then falls back to the buffer.
    pub fn read(&self, offset: u64, nr_bytes: usize, data: &mut Vec<u8>) -> usize {
        // return self.buffer.read(offset, nr_bytes, data);
        let doc_rev = self.nr_changes();
        if let Some(size) = self.cache.read(offset, nr_bytes, data, doc_rev) {
            //dbg_println!("DATA IN CACHE offset {} size {}", offset, nr_bytes);
            // cache validation checks (disabled: flip to true to cross-check
            // cached bytes against a direct buffer read)
            if false {
                let mut real = vec![];
                self.buffer.read(offset, nr_bytes, &mut real);
                assert!(real.len() == data.len());
                for i in 0..real.len() {
                    assert!(real[i] == data[i]);
                }
            }
            return size;
        }
        // dbg_println!("DATA NOT IN CACHE offset {} size {}", offset, nr_bytes);
        // TODO(ceg): --panic-on-read-cache-miss
        // panic!("");
        self.buffer.read(offset, nr_bytes, data)
    }
    /// copy the content of the buffer up to 'nr_bytes' into the data Vec
    /// the read bytes are appended to the data Vec
    /// return XXX on error (TODO(ceg): use ioresult)
    /// Same as read(), but uses a caller-provided cache instead of self.cache.
    pub fn read_cached(
        &self,
        offset: u64,
        nr_bytes: usize,
        data: &mut Vec<u8>,
        cache: &DocumentReadCache,
    ) -> usize {
        let doc_rev = self.nr_changes();
        if let Some(size) = cache.read(offset, nr_bytes, data, doc_rev) {
            //dbg_println!("DATA IN CACHE offset {} size {}", offset, nr_bytes);
            // cache validation checks (disabled)
            if false {
                let mut real = vec![];
                self.buffer.read(offset, nr_bytes, &mut real);
                assert!(real.len() == data.len());
                for i in 0..real.len() {
                    assert!(real[i] == data[i]);
                }
            }
            return size;
        }
        dbg_println!("DATA NOT IN CACHE offset {} size {}", offset, nr_bytes);
        self.buffer.read(offset, nr_bytes, data) // reread cache
    }
    /// Current position in the undo/redo log.
    pub fn buffer_log_pos(&self) -> usize {
        self.buffer_log.pos
    }
    /// Number of operations recorded in the undo/redo log.
    pub fn buffer_log_count(&self) -> usize {
        self.buffer_log.data.len()
    }
    /// Drop all recorded operations and reset the log position.
    pub fn buffer_log_reset(&mut self) {
        self.buffer_log.data.clear();
        self.buffer_log.pos = 0;
        dbg_println!("bufferlog: cleared");
    }
    /// Record a Tag operation (marks/selections snapshot) in the buffer log.
    /// Returns false when buffer logging is disabled.
    pub fn tag(
        &mut self,
        time: std::time::Instant,
        offset: u64,
        marks_offsets: Vec<u64>,
        selections_offsets: Vec<u64>,
    ) -> bool {
        if !self.use_buffer_log {
            // return log disabled ?
            return false;
        }
        // NOTE(review): duplicate-timestamp filtering is stubbed out (empty body).
        if self.last_tag_time == time {
            // ignore contiguous event ? config
            // return;
        }
        //dbg_println!("// doc.tag(..) offsets = {:?}", marks_offset);
        self.buffer_log.add(
            offset,
            BufferOperationType::Tag {
                time,
                marks_offsets,
                selections_offsets,
            },
            None,
        );
        self.last_tag_time = time;
        true
    }
    /// Marks/selections of the Tag at (or just before) the current log
    /// position; None when the log is empty or not sitting on a Tag.
    pub fn get_tag_offsets(&mut self) -> Option<(Vec<u64>, Vec<u64>)> {
        let dlen = self.buffer_log.data.len();
        if dlen == 0 {
            return None;
        }
        // when positioned past the last entry, inspect the last entry instead
        let pos = if self.buffer_log.pos == dlen {
            self.buffer_log.pos - 1
        } else {
            self.buffer_log.pos
        };
        // get inverted operation
        let op = &self.buffer_log.data[pos];
        match op.op_type {
            BufferOperationType::Tag {
                ref marks_offsets,
                ref selections_offsets,
                ..
            } => {
                Some((marks_offsets.clone(), selections_offsets.clone())) // TODO(ceg): Arc<Vec<u64>>
            }
            _ => None,
        }
    }
    /// Keep the per-node byte counters in sync after buffer mutations.
    pub fn update_hierarchy_from_events(&self, events: &Vec<MappedFileEvent>) {
        for ev in events {
            match ev {
                MappedFileEvent::NodeChanged { node_index } => {
                    self.remove_node_byte_count(*node_index);
                    self.build_node_byte_count(*node_index);
                    let mut file = self.buffer.data.write();
                    // remove prev counts
                    update_byte_index_hierarchy(
                        &mut file,
                        Some(*node_index),
                        UpdateHierarchyOp::Sub,
                    );
                    // rebuild current counters
                    // add new count
                    update_byte_index_hierarchy(
                        &mut file,
                        Some(*node_index),
                        UpdateHierarchyOp::Add,
                    );
                }
                MappedFileEvent::NodeAdded { node_index } => {
                    self.build_node_byte_count(*node_index);
                }
                MappedFileEvent::NodeRemoved { node_index } => {
                    self.remove_node_byte_count(*node_index);
                }
            }
        }
    }
    /// insert the 'data' Vec content in the buffer up to 'nr_bytes'
    /// return the number of written bytes (TODO(ceg): use io::Result)
    pub fn insert(&mut self, offset: u64, nr_bytes: usize, data: &[u8]) -> usize {
        // TODO(ceg): update cache if possible
        self.set_cache(0, 0); // invalidate cache,
        // log insert op
        let mut ins_data = Vec::with_capacity(nr_bytes);
        ins_data.extend(&data[..nr_bytes]);
        if self.use_buffer_log {
            self.buffer_log.add(
                offset,
                BufferOperationType::Insert,
                Some(Arc::new(ins_data)),
            );
        }
        let (sz, events) = self.buffer.insert(offset, nr_bytes, &data[..nr_bytes]);
        if sz > 0 {
            self.changed = true;
        }
        self.update_hierarchy_from_events(&events);
        for ev in &events {
            let ev = mapped_file_event_to_document_event(&ev);
            self.notify(&ev);
        }
        sz
    }
    /// Append `data` at the end of the document; returns the bytes written.
    pub fn append(&mut self, data: &[u8]) -> usize {
        let sz = self.size() as u64;
        self.insert(sz, data.len(), &data)
    }
    /// remove up to 'nr_bytes' from the buffer starting at offset
    /// if removed_data is provided will call self.read(offset, nr_bytes, data)
    /// before remove the bytes
    /*
       TODO(ceg): we want
        - remove the data
        - collect each leaf node impacted
        - update byte index from these nodes
        - call event subscriber
        - cleanup impacted nodes
    */
    pub fn remove(
        &mut self,
        offset: u64,
        nr_bytes: usize,
        removed_data: Option<&mut Vec<u8>>,
    ) -> usize {
        // TODO(ceg): update cache if possible
        self.set_cache(0, 0); // invalidate cache,
        let mut rm_data = Vec::with_capacity(nr_bytes);
        let (nr_bytes_removed, events) = self.buffer.remove(offset, nr_bytes, Some(&mut rm_data));
        if let Some(v) = removed_data {
            v.extend(rm_data.clone());
        }
        if self.use_buffer_log {
            // record the removed bytes so undo can re-insert them
            self.buffer_log
                .add(offset, BufferOperationType::Remove, Some(Arc::new(rm_data)));
        }
        if nr_bytes_removed > 0 {
            self.changed = true;
        }
        self.update_hierarchy_from_events(&events);
        for ev in &events {
            let ev = mapped_file_event_to_document_event(&ev);
            self.notify(&ev);
        }
        nr_bytes_removed
    }
    /// Remove the whole document content; returns the previous size.
    pub fn delete_content(&mut self, removed_data: Option<&mut Vec<u8>>) -> usize {
        let sz = self.size();
        self.remove(0, sz, removed_data);
        sz
    }
    /// Forward search for `data` in [from_offset, to_offset).
    pub fn find(&self, data: &[u8], from_offset: u64, to_offset: Option<u64>) -> Option<u64> {
        self.buffer.find(&data, from_offset, to_offset)
    }
    /// Backward search for `data` ending before `from_offset`.
    pub fn find_reverse(
        &self,
        data: &[u8],
        from_offset: u64,
        to_offset: Option<u64>,
    ) -> Option<u64> {
        self.buffer.find_reverse(&data, from_offset, to_offset)
    }
    // TODO(ceg): return an array of offsets ?
    /// Replay a sequence of log operations in order.
    pub fn apply_operations(&mut self, ops: &[BufferOperation]) {
        for op in ops {
            self.apply_log_operation(op);
        }
    }
    /// Replay one log operation against the buffer and return the resulting
    /// mark offset (end of insert, start of remove, Tag offset unchanged).
    fn apply_log_operation(&mut self, op: &BufferOperation) -> Option<u64> {
        // apply op
        dbg_println!("apply_log_operation");
        op.dump();
        let mark_offset = match op.op_type {
            BufferOperationType::Insert => {
                let sz = self.buffer.size();
                // TODO(ceg): check i/o errors
                let added = if let Some(data) = &op.data {
                    let (_, events) = self.buffer.insert(op.offset, data.len(), &data);
                    self.changed = true;
                    self.update_hierarchy_from_events(&events);
                    for ev in &events {
                        let ev = mapped_file_event_to_document_event(&ev);
                        self.notify(&ev);
                    }
                    data.len() as u64
                } else {
                    0
                };
                // sanity: the buffer must have grown by exactly `added`
                assert_eq!(sz + added as usize, self.buffer.size());
                op.offset + added
            }
            BufferOperationType::Remove => {
                let sz = self.buffer.size();
                // TODO(ceg): check i/o errors
                let _removed = if let Some(data) = &op.data {
                    let (rm, events) = self.buffer.remove(op.offset, data.len(), None);
                    self.changed = true;
                    self.update_hierarchy_from_events(&events);
                    for ev in &events {
                        let ev = mapped_file_event_to_document_event(&ev);
                        self.notify(&ev);
                    }
                    assert_eq!(rm, data.len());
                    rm
                } else {
                    0
                };
                // sanity: the buffer must have shrunk by exactly `_removed`
                assert_eq!(sz - _removed, self.buffer.size());
                op.offset
            }
            BufferOperationType::Tag {
                marks_offsets: _, ..
            } => {
                /* nothing */
                op.offset
            }
        };
        Some(mark_offset)
    }
    /// Undo the previous operation; None when at the beginning of the log.
    pub fn undo(&mut self) -> Option<u64> {
        // read current log position
        let pos = self.buffer_log.pos;
        if pos == 0 {
            return None;
        }
        // apply inverted previous operation
        let op = self.buffer_log.data[pos - 1].invert();
        self.buffer_log.pos -= 1;
        self.apply_log_operation(&op)
    }
    /// Redo the next operation; None when at the end of the log.
    pub fn redo(&mut self) -> Option<u64> {
        // read current log position
        let pos = self.buffer_log.pos;
        if pos == self.buffer_log.data.len() {
            return None;
        }
        // apply next operation
        let op = self.buffer_log.data[pos].clone();
        self.buffer_log.pos += 1;
        self.apply_log_operation(&op)
    }
    /// true when the log entry at `index` is a Tag (false when out of range).
    pub fn is_buffer_log_op_tag(&self, index: usize) -> bool {
        if index >= self.buffer_log.data.len() {
            return false;
        }
        let op = &self.buffer_log.data[index];
        match op.op_type {
            BufferOperationType::Tag { .. } => true,
            _ => false,
        }
    }
    /// Undo backwards until the previous Tag entry (grouped undo);
    /// returns the inverted operations that were applied.
    pub fn undo_until_tag(&mut self) -> Vec<BufferOperation> {
        // read current log position
        let mut ops = Vec::new();
        loop {
            if self.buffer_log.pos == 0 {
                break;
            }
            self.buffer_log.pos -= 1;
            let pos = self.buffer_log.pos;
            // get inverted operation
            let op = &self.buffer_log.data[pos];
            match op.op_type {
                BufferOperationType::Tag { .. } => {
                    // stop if no last tag
                    // (a Tag that is the very last log entry is skipped over)
                    if pos != self.buffer_log.data.len() - 1 {
                        break;
                    }
                }
                _ => {}
            }
            // inverse replay
            let inverted_op = op.invert();
            self.apply_log_operation(&inverted_op);
            ops.push(inverted_op);
        }
        ops
    }
    /// Redo forwards until the next Tag entry (grouped redo);
    /// returns the operations that were replayed.
    pub fn redo_until_tag(&mut self) -> Vec<BufferOperation> {
        let mut ops = Vec::new();
        if self.buffer_log.data.is_empty() {
            return ops;
        }
        if self.buffer_log.pos >= self.buffer_log.data.len() - 1 {
            return ops;
        }
        // skip current tag
        if self.is_buffer_log_op_tag(self.buffer_log.pos) {
            self.buffer_log.pos += 1;
        }
        // replay until tag
        while !self.is_buffer_log_op_tag(self.buffer_log.pos) {
            if self.buffer_log.pos >= self.buffer_log.data.len() - 1 {
                break;
            }
            let op = self.buffer_log.data[self.buffer_log.pos].clone();
            // actual replay
            self.apply_log_operation(&op);
            ops.push(op);
            self.buffer_log.pos += 1;
        }
        ops
    }
}
// helper
use std::path::Path;
// TODO(ceg): handle errors
/// Write the document's current content back to its backing file.
///
/// The content is streamed node-by-node into `<file>.update`, the temporary
/// file is then renamed over the original, permissions are restored, and the
/// mapped file is re-pointed at the fresh descriptor.
/// Throttles itself (16 ms sleeps) while the user is interacting.
pub fn sync_to_storage(doc: &Arc<RwLock<Document>>) {
    // read/copy
    let mut fd = {
        let doc = doc.read();
        if doc.file_name().is_empty() {
            // TODO(ceg): save as pop up/notification
            // (fixed garbled message: was "cannot dsave target filename is empty")
            dbg_println!("cannot save: target filename is empty");
            return;
        }
        let tmp_file_name = format!("{}{}", doc.file_name(), ".update"); // TODO(ceg): move to global config
        let path = Path::new(&tmp_file_name);
        if let Result::Err(_) = std::fs::remove_file(path) {}
        let fd = File::create(path);
        if fd.is_err() {
            dbg_println!("cannot save {}", doc.file_name());
            return;
        }
        fd.unwrap()
    };
    dbg_println!("SYNC: fd = {:?}", fd);
    // locate the first (leftmost) leaf node
    let mut idx = {
        let doc = doc.read();
        let file = doc.buffer.data.read();
        let (node_index, _, _) = file.find_node_by_offset(0);
        node_index
    };
    while idx != None {
        // do not hold the doc.lock more
        {
            let doc = doc.read();
            let file = doc.buffer.data.read();
            let node = &file.pool[idx.unwrap()];
            let mut data = Vec::with_capacity(node.size as usize);
            // SAFETY: the bytes are overwritten by do_direct_copy() below
            unsafe {
                data.set_len(data.capacity());
            };
            if file.fd.is_none() {
                // TODO(ceg): "save as" pop up
                break;
            }
            let orig_fd = { Some(file.fd.as_ref().unwrap().clone()) };
            if let Some(_n) = node.do_direct_copy(&orig_fd, &mut data) {
                let nw = fd.write(&data).unwrap();
                if nw != data.len() {
                    dbg_println!("cannot save {}", doc.file_name());
                    panic!("");
                    // return false;
                }
                // dbg_println!("sync doc('{}') node {}", doc.file_name(), idx.unwrap());
            } else {
                panic!("direct copy failed");
            }
            idx = node.link.next;
        }
        // NB: experimental throttling based on user input freq/rendering
        // TODO <-- user configuration
        if user_is_active() {
            let wait = std::time::Duration::from_millis(16);
            std::thread::sleep(wait);
        }
    }
    // update: swap the temporary file in, restore permissions, re-open
    {
        use std::os::unix::fs::PermissionsExt;
        let mut doc = doc.write();
        // TODO(ceg): use mapped file fd, will panic if file is removed
        let perms = match doc.metadata() {
            Ok(metadata) => metadata.permissions(),
            Err(_) => std::fs::Permissions::from_mode(0o600),
        };
        let tmp_file_name = format!("{}{}", doc.file_name(), ".update"); // TODO(ceg): move '.update' to global config
        {
            // TODO(ceg): large file warning in save ? disable backup ?
            let _tmp_backup_name = format!("{}{}", doc.file_name(), "~");
            // TODO(ceg): move '~' to global config
            // let _ = ::std::fs::rename(&doc.file_name(), &tmp_backup_name);
        }
        let _ = ::std::fs::rename(&tmp_file_name, &doc.file_name());
        // reopen file
        let new_fd = File::open(&doc.file_name()).unwrap();
        // TODO(ceg): handle skip with ReadOnly
        let mapped_file = doc.buffer.data.clone();
        let mut mapped_file = mapped_file.write();
        crate::core::mapped_file::MappedFile::patch_storage_offset_and_file_descriptor(
            &mut mapped_file,
            new_fd,
        );
        // TODO(ceg): check result, handle io results properly
        // set buffer status to : permission denied etc
        let _ = ::std::fs::set_permissions(&doc.file_name(), perms);
        doc.changed = false;
        doc.is_syncing = false;
    }
}
/// Propagate one node's byte counters up to all of its ancestors,
/// either adding (newly indexed node) or subtracting (node being removed
/// or re-indexed) the counters at every level up to the root.
fn update_byte_index_hierarchy(
    file: &mut MappedFile,
    idx: Option<NodeIndex>,
    op: UpdateHierarchyOp,
) {
    if idx.is_none() {
        return;
    }
    let idx = idx.unwrap();
    // get counters
    let node = &mut file.pool[idx];
    let byte_count = node.byte_count.clone();
    let mut p = node.link.parent;
    // walk up to the root, applying the deltas at each ancestor
    while p.is_some() {
        let p_idx = p.unwrap();
        let p_node = &mut file.pool[p_idx];
        for (i, count) in byte_count.iter().enumerate() {
            match op {
                UpdateHierarchyOp::Add => p_node.byte_count[i] += count,
                UpdateHierarchyOp::Sub => {
                    // saturating: protects against transiently inconsistent counters
                    p_node.byte_count[i] = p_node.byte_count[i].saturating_sub(*count)
                } // TODO(ceg): -=
            }
        }
        // NOTE(review): marks every ancestor as indexed even on Sub -- confirm intended.
        p_node.indexed = true;
        p = p_node.link.parent;
    }
}
/// Read one node's full payload into a freshly allocated Vec.
/// Returns an empty Vec for `None`; panics when the underlying copy fails.
pub fn get_node_data(file: &mut MappedFile, idx: Option<NodeIndex>) -> Vec<u8> {
    if idx.is_none() {
        return vec![];
    }
    let idx = idx.unwrap();
    let node = &mut file.pool[idx];
    let mut data = Vec::with_capacity(node.size as usize);
    // SAFETY: the bytes are uninitialized here; do_direct_copy() below is
    // expected to overwrite all node.size bytes before they are read.
    unsafe {
        data.set_len(node.size as usize);
    };
    let orig_fd = if file.fd.is_none() {
        None
    } else {
        Some(file.fd.as_ref().unwrap().clone())
    };
    if let Some(_n) = node.do_direct_copy(&orig_fd, &mut data) {
        //
    } else {
        // TODO(ceg): return error
        panic!("direct copy failed");
    }
    data
}
// call this on new node
/// Compute a freshly added node's per-byte counters from its payload and
/// propagate them to all ancestors. Panics if the node was already indexed
/// or the payload copy fails.
pub fn build_node_byte_count(mut file: &mut MappedFile, idx: Option<NodeIndex>) {
    if idx.is_none() {
        return;
    }
    let idx = idx.unwrap();
    let node = &mut file.pool[idx];
    let mut data = Vec::with_capacity(node.size as usize);
    // SAFETY: do_direct_copy() below overwrites all node.size bytes
    unsafe {
        data.set_len(node.size as usize);
    };
    let orig_fd = if file.fd.is_none() {
        None
    } else {
        Some(file.fd.as_ref().unwrap().clone())
    };
    if let Some(_n) = node.do_direct_copy(&orig_fd, &mut data) {
        //
    } else {
        // TODO(ceg): return error
        panic!("direct copy failed");
    }
    assert!(!node.indexed);
    // node.byte_count = [0;256];
    // count node bytes (no lock)
    // BUG FIX: the previous version only incremented the counter when the
    // byte was '\n', leaving byte_count[] at zero for every other byte value
    // -- inconsistent with build_index(), and breaking
    // get_document_byte_count()/find_nth_byte_offset() for non-LF bytes on
    // nodes indexed through this path. Count every byte value, like
    // build_index() does.
    for b in data.iter() {
        let byte_idx = *b as usize;
        node.byte_count[byte_idx] += 1;
    }
    node.indexed = true;
    update_byte_index_hierarchy(&mut file, Some(idx), UpdateHierarchyOp::Add);
}
/// Un-index a node: subtract its counters from every ancestor, then reset
/// the node's own counters and indexed flag.
/// No-op for `None` or for nodes that were never indexed.
pub fn remove_node_byte_count(file: &mut MappedFile, idx: Option<NodeIndex>) {
    let idx = match idx {
        Some(i) => i,
        None => return,
    };
    if !file.pool[idx].indexed {
        return;
    }
    // drop this node's contribution from the whole ancestor chain first
    update_byte_index_hierarchy(&mut *file, Some(idx), UpdateHierarchyOp::Sub);
    // then clear the node itself
    let node = &mut file.pool[idx];
    node.byte_count = [0; 256];
    node.indexed = false;
}
/// Mark an indexed node for re-indexing: clear its indexed flag and subtract
/// its counters from every ancestor. Unlike remove_node_byte_count(), the
/// node's own counters are left untouched.
/// No-op for `None` or for nodes that were never indexed.
pub fn update_node_byte_count(file: &mut MappedFile, idx: Option<NodeIndex>) {
    let idx = match idx {
        Some(i) => i,
        None => return,
    };
    let node = &mut file.pool[idx];
    if !node.indexed {
        return;
    }
    node.indexed = false;
    update_byte_index_hierarchy(&mut *file, Some(idx), UpdateHierarchyOp::Sub);
}
// TODO(ceg): split code to provide index_single_node(nid)
/// Whole-document background indexer: walks the leaf list, counts every byte
/// value per node, propagates counters up the tree, and notifies subscribers.
/// Cooperative: stops early on doc.abort_indexing and sleeps 16 ms per node
/// while the user is interacting.
pub fn build_index(doc: &Arc<RwLock<Document>>) {
    let mut idx = {
        let doc = doc.read();
        {
            if doc.indexed {
                return;
            }
            // start at the leftmost leaf (offset 0)
            let file = doc.buffer.data.read();
            let (node_index, _, _) = file.find_node_by_offset(0);
            node_index
        }
    };
    let t0 = std::time::Instant::now();
    let mut data = vec![];
    while idx != None {
        // read node bytes
        {
            let doc = doc.read();
            if doc.abort_indexing {
                break;
            }
            let file = doc.buffer.data.read();
            let node = &file.pool[idx.unwrap()];
            if node.indexed {
                // already indexed (e.g. via an edit path): skip to the next leaf
                idx = node.link.next;
                continue;
            }
            data.reserve(node.size as usize);
            // SAFETY: do_direct_copy() below overwrites all node.size bytes
            unsafe {
                data.set_len(node.size as usize);
            };
            let orig_fd = if file.fd.is_none() {
                None
            } else {
                Some(file.fd.as_ref().unwrap().clone())
            };
            let t0_read = std::time::Instant::now();
            if let Some(_n) = node.do_direct_copy(&orig_fd, &mut data) {
                dbg_println!(
                    "build index doc('{}') node {} size {}",
                    doc.file_name(),
                    idx.unwrap(),
                    data.len(),
                );
            } else {
                // TODO(ceg): return error
                panic!("direct copy failed");
            }
            let t1_read = std::time::Instant::now();
            dbg_println!("read node time {:?} ms", (t1_read - t0_read).as_millis());
        }
        // count node bytes (no lock)
        let mut byte_count: [u64; 256] = [0; 256];
        for b in data.iter() {
            let byte_idx = *b as usize;
            byte_count[byte_idx] += 1;
        }
        // yield some cpu time
        if user_is_active() {
            let wait = std::time::Duration::from_millis(16);
            std::thread::sleep(wait);
        }
        // update node info (idx)
        {
            let doc = doc.read();
            let mut file = doc.buffer.data.write();
            let node_index = idx.unwrap();
            // save byte counters
            {
                let mut node = &mut file.pool[node_index];
                node.byte_count = byte_count;
                node.indexed = true;
                // advance to the next leaf
                idx = node.link.next;
            }
            update_byte_index_hierarchy(&mut file, Some(node_index), UpdateHierarchyOp::Add);
        }
        // notify subscribers
        // NOTE(review): idx was already advanced above, so this notifies with
        // the *next* leaf's index rather than the node just indexed
        // (node_index) -- confirm intended.
        if idx.is_some() {
            let doc = doc.read();
            doc.notify(&DocumentEvent::NodeIndexed {
                node_index: idx.unwrap(),
            });
        }
    }
    let t1 = std::time::Instant::now();
    dbg_println!("index time {:?} ms", (t1 - t0).as_millis());
    {
        // set index status flags
        {
            let mut doc = doc.write();
            if !doc.abort_indexing {
                doc.indexed = true;
            }
            // display root node info
            let file = doc.buffer.data.read();
            if let Some(root_index) = file.root_index() {
                let node = &file.pool[root_index];
                dbg_println!(
                    "{} : Number of lines {}",
                    doc.file_name(),
                    node.byte_count[b'\n' as usize]
                );
            }
        }
        let doc = doc.read();
        if doc.indexed {
            doc.notify(&DocumentEvent::DocumentFullyIndexed {});
        }
    }
}
//
// walk through the binary tree and while looking for the node containing "offset"
// and track byte_index count
// SZ(19) , LF(9)
// _________[ SZ(7+12), LF(3+6) ]____________________
// / \
// __[ 7=SZ(3+4), LF 3=(1+2) ]__ _____[ 12=(5+7), LF 6=(2+4) ]__
// / \ / \
// [SZ(3), LF(1)]={a,LF,b} [SZ(4), LF(2)]={a,LF,LF,b } [5, LF(2)] data{a,LF,b,LF,c} [SZ(7), LF(4)]={a ,LF,LF,b ,Lf,LF,c}
// 0,1 ,2 3, 4, 5,6 7, 8,9,10,11 12,13,14,15,16,17,18
//
//
// return (line_count, offset's node_index)
// Counts the occurrences of `byte_index` in the document in the range
// [0, offset) and returns `(count, node_index)` where `node_index` is the
// leaf node containing `offset`.
//
// Internal sub-tree counters are used to skip whole sub-trees; only the final
// leaf is scanned byte by byte.
//
// Returns `(0, None)` when the tree is empty or `offset` lies past the last
// leaf. NOTE(review): in the latter case the partial count accumulated so far
// is discarded — confirm callers never rely on it.
pub fn get_document_byte_count_at_offset(
    doc: &Document,
    byte_index: usize,
    offset: u64,
) -> (u64, Option<usize>) {
    assert!(byte_index < 256);
    let mut total_count = 0;
    let mut local_offset = offset;
    // write lock: get_node_data() below may need mutable access to fault data in
    let mut file = doc.buffer.data.write();
    let mut cur_index = file.root_index();
    while cur_index != None {
        let idx = cur_index.unwrap();
        let p_node = &file.pool[idx];
        let is_leaf = p_node.link.left.is_none() && p_node.link.right.is_none();
        if is_leaf {
            let data = get_node_data(&mut file, Some(idx));
            // count matching bytes strictly before local_offset within this leaf
            for b in data.iter().take(local_offset as usize) {
                if *b as usize == byte_index {
                    total_count += 1;
                }
            }
            return (total_count, cur_index);
        }
        if let Some(left_index) = p_node.link.left {
            let left_node = &file.pool[left_index];
            // offset falls inside the left sub-tree: descend without counting
            if local_offset < left_node.size {
                cur_index = Some(left_index);
                continue;
            }
            // offset is to the right: take the whole left sub-tree's count
            // and make local_offset relative to the right sub-tree
            total_count += left_node.byte_count[byte_index];
            local_offset -= left_node.size
        }
        cur_index = p_node.link.right;
    }
    (0, None)
}
// Total number of occurrences of `byte_index` over the whole document, read
// from the root node's cached counters (no data is scanned).
// Returns None when the document has no root node (empty tree).
pub fn get_document_byte_count(doc: &Document, byte_index: usize) -> Option<u64> {
    assert!(byte_index < 256);
    // read lock is enough: counters are only consulted, never updated here
    let file = doc.buffer.data.read();
    let root = file.root_index()?;
    Some(file.pool[root].byte_count[byte_index])
}
//
// walk through the binary tree looking for the n-th occurrence of a given byte,
// using the per-node byte counters to skip whole sub-trees
// SZ(19) , LF(9)
// _________[ SZ(7+12), LF(3+6) ]____________________
// / \
// __[ 7=SZ(3+4), LF 3=(1+2) ]__ _____[ 12=(5+7), LF 6=(2+4) ]__
// / \ / \
// [SZ(3), LF(1)]={a,LF,b} [SZ(4), LF(2)]={a,LF,LF,b } [5, LF(2)] data{a,LF,b,LF,c} [SZ(7), LF(4)]={a ,LF,LF,b ,Lf,LF,c}
// 0,1 ,2 3, 4, 5,6 7, 8,9,10,11 12,13,14,15,16,17,18
//
// Returns the offset of the `index`-th occurrence (1-based) of `byte` in the
// document, or None when the document contains fewer than `index` occurrences
// (or when the tree is not fully indexed yet, in which case the counters
// undercount).
//
// The per-node byte counters are used to skip whole sub-trees; only the final
// leaf is scanned byte by byte.
pub fn find_nth_byte_offset(doc: &Document, byte: u8, index: u64) -> Option<u64> {
    assert!(index > 0);
    let mut index = index;
    // write lock: get_node_data() below may need mutable access to fault data in
    let mut file = doc.buffer.data.write();
    let mut global_offset = 0;
    let mut cur_index = file.root_index();
    while cur_index != None {
        let idx = cur_index.unwrap();
        let p_node = &file.pool[idx];
        let is_leaf = p_node.link.left.is_none() && p_node.link.right.is_none();
        if is_leaf {
            let data = get_node_data(&mut file, Some(idx));
            // scan the leaf until the remaining count is consumed
            for b in data.iter() {
                if *b == byte {
                    index -= 1;
                    if index == 0 {
                        return Some(global_offset);
                    }
                }
                global_offset += 1;
            }
            return None;
        }
        let count = file.pool[idx].byte_count[byte as usize];
        // Fix: this must be a strict comparison. `index == count` means the
        // requested occurrence is exactly the last one in this sub-tree and is
        // still reachable; the previous `index >= count` wrongly returned None
        // for it (e.g. asking for the document's last newline). Only
        // `index > count` means the byte does not occur often enough
        // (not fully indexed, or this byte does not exist).
        if index > count {
            return None;
        }
        if let Some(left_index) = p_node.link.left {
            let left_node = &file.pool[left_index];
            let left_byte_count = left_node.byte_count[byte as usize];
            // byte in left sub-tree ?
            if index <= left_byte_count {
                cur_index = Some(left_index);
                continue;
            }
            global_offset += left_node.size; // count skipped offsets
            index -= left_byte_count;
        }
        // byte in right sub-tree
        cur_index = p_node.link.right;
    }
    None
}
// Integration-style tests for document build/read/undo/redo.
// NOTE(review): these are benchmarks as much as tests — they print throughput
// numbers and create large temporary files; they are slow by design.
#[cfg(test)]
mod tests {
    extern crate rand;
    use super::*;
    use rand::Rng;
    // Creates a 2 GiB file in /tmp, indexes it, then reads it back in 1 MiB
    // steps while printing throughput.
    // NOTE(review): disk-hungry and slow — consider shrinking or gating.
    #[test]
    fn doc_read() {
        use std::io::prelude::*;
        use std::os::unix::prelude::FileExt;
        let filename = "/tmp/unl-test-file".to_owned();
        let _ = std::fs::remove_file(&filename);
        {
            println!("create file....");
            let mut file = std::fs::File::create(&filename).unwrap();
            let size = 2 * 1024 * 1024 * 1024;
            let mut buf = Vec::with_capacity(size);
            //buf.resize(size, 0);
            // NOTE(review): set_len() without initialization exposes
            // uninitialized memory, which is then read by write_all() — this
            // is undefined behavior; prefer the buf.resize(size, 0) above.
            unsafe {
                buf.set_len(size);
            } // faster in debug build
            file.write_all(&buf).unwrap();
            println!("create file....ok");
        }
        println!("read file....");
        let doc = DocumentBuilder::new()
            .document_name("untitled-1")
            .file_name(&filename)
            .internal(false)
            .finalize();
        build_index(doc.as_ref().unwrap());
        let file = std::fs::File::open(&filename).unwrap();
        let doc = doc.as_ref().unwrap().write();
        let doc_size = doc.size() as u64;
        let step = 1024 * 1024;
        let t0_read = std::time::Instant::now();
        let mut prev_time = 0;
        let mut data: Vec<u8> = Vec::with_capacity(step);
        for offset in (0..doc_size).into_iter().step_by(step) {
            // `if !true`: manual toggle between the Document::read() path and
            // the raw pread (read_at) path for throughput comparison.
            if !true {
                unsafe {
                    data.set_len(step);
                } // faster in debug build
                // data.clear();
                doc.read(offset, step, &mut data);
            } else {
                unsafe {
                    data.set_len(step);
                } // faster in debug build
                let res = file.read_at(&mut data[0..step], offset);
                match res {
                    Ok(_size) => {
                        //println!("read [{}] @ offset {}", size, offset);
                    }
                    Err(what) => {
                        println!("read error [{}] @ offset {}, what {:?}", step, offset, what);
                    }
                }
            }
            // print throughput once per elapsed second
            // (diff starts at 0 == prev_time, so no division by zero below)
            let diff = t0_read.elapsed().as_secs();
            if prev_time != diff {
                let bytes_per_seconds = offset / diff;
                println!("bytes per seconds {}", bytes_per_seconds);
                println!("kib per seconds {}", bytes_per_seconds / 1024);
                println!("mib per seconds {}", bytes_per_seconds / (1024 * 1024));
                println!(
                    "gib per seconds {}",
                    bytes_per_seconds / (1024 * 1024 * 1024)
                );
                prev_time = diff;
            }
        }
        // let _ = std::fs::remove_file(&filename);
    }
    // Repeatedly inserts a large string, then undoes, redoes, and undoes
    // again, printing the document size between phases.
    #[test]
    fn undo_redo() {
        let doc = DocumentBuilder::new()
            .document_name("untitled-1")
            .internal(false)
            .finalize();
        let mut doc = doc.as_ref().unwrap().write();
        const STR_LEN: usize = 1000;
        let mut s = String::new();
        for _ in 0..STR_LEN {
            s.push_str("0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n");
        }
        const NB_INSERT: usize = 100;
        let max = NB_INSERT;
        for _ in 0..5 {
            println!("start insert test");
            let mut off: u64 = 0;
            for i in 0..max {
                println!("insert ({}/{}) -------", i + 1, max);
                let off_update = doc.insert(off, s.len(), s.as_ref());
                off += off_update as u64;
            }
            println!("doc.size = {}", doc.size());
            println!("start undo test");
            for i in 0..max {
                println!("undo ({}/{}) -------", i + 1, max);
                doc.undo();
            }
            println!("doc.size = {}", doc.size());
            println!("start redo test");
            for i in 0..max {
                println!("redo ({}/{}) -------", i + 1, max);
                doc.redo();
            }
            println!("doc.size = {}", doc.size());
            println!("start undo test (2nd pass)");
            for i in 0..max {
                println!("undo ({}/{}) -------", i + 1, max);
                doc.undo();
            }
            println!("doc.size = {}", doc.size());
        }
    }
    // Same as undo_redo but with random insert sizes.
    // NOTE(review): gen_range(lo, hi) is the old rand 0.x two-argument API.
    #[test]
    fn doc_random_size_inserts() {
        let doc = DocumentBuilder::new()
            .document_name("untitled-1")
            .internal(false)
            .finalize();
        let mut doc = doc.as_ref().unwrap().write();
        const NB_STR: usize = 10000;
        let mut s = String::new();
        for _ in 0..NB_STR {
            s.push_str("0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789\n");
        }
        const NB_INSERT: usize = 150;
        let max = NB_INSERT;
        let mut rng = rand::thread_rng();
        for _ in 0..10 {
            println!("start insert test");
            let mut off: u64 = 0;
            for i in 0..max {
                println!("insert ({}/{}) -------", i, max);
                // randomize s.len
                let random_size: usize = rng.gen_range(0, s.len());
                println!("random insert size = {}", random_size);
                let off_update = doc.insert(off, random_size, s.as_ref());
                off += off_update as u64;
            }
            println!("doc.size = {}", doc.size());
            for i in 0..max {
                println!("undo ({}/{}) -------", i + 1, max);
                doc.undo();
            }
            println!("doc.size = {}", doc.size());
            println!("start redo test");
            for i in 0..max {
                println!("redo ({}/{}) -------", i + 1, max);
                doc.redo();
            }
            println!("doc.size = {}", doc.size());
            for i in 0..max {
                println!("undo ({}/{}) -------", i + 1, max);
                doc.undo();
            }
            println!("doc.size = {}", doc.size());
        }
    }
}
|
// rustfmt doesn't do a very good job on nom parser invocations.
#![cfg_attr(rustfmt, rustfmt_skip)]
use crate::core::*;
use crate::types::*;
use crate::parser::envelope;
// Fields shared by all single-part body structures.
struct BodyFields<'a> {
    pub param: BodyParams<'a>,
    pub id: Option<&'a str>,
    pub description: Option<&'a str>,
    pub transfer_encoding: ContentEncoding<'a>,
    pub octets: u32,
}

// body-fields = body-fld-param SP body-fld-id SP body-fld-desc SP
//               body-fld-enc SP body-fld-octets
named!(body_fields<BodyFields>, do_parse!(
    param: body_param >>
    tag!(" ") >>
    // body id refers to the Message-ID / Content-ID header, which per
    // RFC 2822 resolves to all ASCII characters — UTF-8 decoding is safe
    id: nstring_utf8 >>
    tag!(" ") >>
    // Per RFC 2045 section 8, the description should be all ASCII
    description: nstring_utf8 >>
    tag!(" ") >>
    transfer_encoding: body_encoding >>
    tag!(" ") >>
    octets: number >>
    (BodyFields { param, id, description, transfer_encoding, octets })
));

// Optional extension data trailing a single-part body structure.
struct BodyExt1Part<'a> {
    pub md5: Option<&'a str>,
    pub disposition: Option<ContentDisposition<'a>>,
    pub language: Option<Vec<&'a str>>,
    pub location: Option<&'a str>,
    pub extension: Option<BodyExtension<'a>>,
}

// body-ext-1part = body-fld-md5 [SP body-fld-dsp [SP body-fld-lang
//                  [SP body-fld-loc *(SP body-extension)]]]
//                  ; MUST NOT be returned on non-extensible "BODY" fetch
named!(body_ext_1part<BodyExt1Part>, do_parse!(
    // Per RFC 1864, MD5 values are base64-encoded, hence ASCII
    md5: opt_opt!(preceded!(tag!(" "), nstring_utf8)) >>
    disposition: opt_opt!(preceded!(tag!(" "), body_disposition)) >>
    language: opt_opt!(preceded!(tag!(" "), body_lang)) >>
    // Location references a URL, which per RFC 1738 (section 2.2) is ASCII
    location: opt_opt!(preceded!(tag!(" "), nstring_utf8)) >>
    extension: opt!(preceded!(tag!(" "), body_extension)) >>
    (BodyExt1Part { md5, disposition, language, location, extension })
));

// Optional extension data trailing a multipart body structure.
struct BodyExtMPart<'a> {
    pub param: BodyParams<'a>,
    pub disposition: Option<ContentDisposition<'a>>,
    pub language: Option<Vec<&'a str>>,
    pub location: Option<&'a str>,
    pub extension: Option<BodyExtension<'a>>,
}

// body-ext-mpart = body-fld-param [SP body-fld-dsp [SP body-fld-lang
//                  [SP body-fld-loc *(SP body-extension)]]]
//                  ; MUST NOT be returned on non-extensible "BODY" fetch
named!(body_ext_mpart<BodyExtMPart>, do_parse!(
    param: opt_opt!(preceded!(tag!(" "), body_param)) >>
    disposition: opt_opt!(preceded!(tag!(" "), body_disposition)) >>
    language: opt_opt!(preceded!(tag!(" "), body_lang)) >>
    // Location references a URL, which per RFC 1738 (section 2.2) is ASCII
    location: opt_opt!(preceded!(tag!(" "), nstring_utf8)) >>
    extension: opt!(preceded!(tag!(" "), body_extension)) >>
    (BodyExtMPart { param, disposition, language, location, extension })
));
// body-fld-enc: one of the well-known quoted encodings, or any other quoted
// string preserved verbatim as ContentEncoding::Other.
named!(body_encoding<ContentEncoding>, alt!(
    delimited!(char!('"'), alt!(
        map!(tag_no_case!("7BIT"), |_| ContentEncoding::SevenBit) |
        map!(tag_no_case!("8BIT"), |_| ContentEncoding::EightBit) |
        map!(tag_no_case!("BINARY"), |_| ContentEncoding::Binary) |
        map!(tag_no_case!("BASE64"), |_| ContentEncoding::Base64) |
        map!(tag_no_case!("QUOTED-PRINTABLE"), |_| ContentEncoding::QuotedPrintable)
    ), char!('"')) |
    map!(string_utf8, |enc| ContentEncoding::Other(enc))
));

// body-fld-lang: a single (possibly NIL) language tag, or a parenthesized
// non-empty list of tags. RFC 3066 language tags are ASCII-only.
named!(body_lang<Option<Vec<&str>>>, alt!(
    map!(nstring_utf8, |v| v.map(|s| vec![s])) |
    map!(parenthesized_nonempty_list!(string_utf8), Option::from)
));

// body-fld-param: NIL, or a parenthesized non-empty list of key/value
// string pairs.
named!(body_param<BodyParams>, alt!(
    map!(nil, |_| None) |
    map!(parenthesized_nonempty_list!(do_parse!(
        key: string_utf8 >>
        tag!(" ") >>
        val: string_utf8 >>
        ((key, val))
    )), Option::from)
));

// body-extension: a number, an nstring, or a parenthesized list of
// body-extensions (arbitrarily nested).
named!(body_extension<BodyExtension>, alt!(
    map!(number, |n| BodyExtension::Num(n)) |
    map!(nstring_utf8, |s| BodyExtension::Str(s)) |
    map!(parenthesized_nonempty_list!(body_extension), |ext| BodyExtension::List(ext))
));

// body-fld-dsp: NIL, or "(" disposition-type SP body-fld-param ")".
named!(body_disposition<Option<ContentDisposition>>, alt!(
    map!(nil, |_| None) |
    paren_delimited!(do_parse!(
        ty: string_utf8 >>
        tag!(" ") >>
        params: body_param >>
        (Some(ContentDisposition {
            ty,
            params
        }))
    ))
));
// body-type-basic = media-basic SP body-fields [SP body-ext-1part]
// Any media type other than TEXT, MESSAGE/RFC822, or multipart.
named!(body_type_basic<BodyStructure>, do_parse!(
    media_type: string_utf8 >>
    tag!(" ") >>
    media_subtype: string_utf8 >>
    tag!(" ") >>
    fields: body_fields >>
    ext: body_ext_1part >>
    (BodyStructure::Basic {
        common: BodyContentCommon {
            ty: ContentType {
                ty: media_type,
                subtype: media_subtype,
                params: fields.param,
            },
            disposition: ext.disposition,
            language: ext.language,
            location: ext.location,
        },
        other: BodyContentSinglePart {
            id: fields.id,
            md5: ext.md5,
            octets: fields.octets,
            description: fields.description,
            transfer_encoding: fields.transfer_encoding,
        },
        extension: ext.extension,
    })
));

// body-type-text = media-text SP body-fields SP body-fld-lines [SP body-ext-1part]
named!(body_type_text<BodyStructure>, do_parse!(
    tag_no_case!("\"TEXT\"") >>
    tag!(" ") >>
    media_subtype: string_utf8 >>
    tag!(" ") >>
    fields: body_fields >>
    tag!(" ") >>
    lines: number >>
    ext: body_ext_1part >>
    (BodyStructure::Text {
        common: BodyContentCommon {
            ty: ContentType {
                ty: "TEXT",
                subtype: media_subtype,
                params: fields.param,
            },
            disposition: ext.disposition,
            language: ext.language,
            location: ext.location,
        },
        other: BodyContentSinglePart {
            id: fields.id,
            md5: ext.md5,
            octets: fields.octets,
            description: fields.description,
            transfer_encoding: fields.transfer_encoding,
        },
        lines,
        extension: ext.extension,
    })
));

// body-type-msg = "MESSAGE" SP "RFC822" SP body-fields SP envelope
//                 SP body SP body-fld-lines [SP body-ext-1part]
named!(body_type_message<BodyStructure>, do_parse!(
    tag_no_case!("\"MESSAGE\" \"RFC822\"") >>
    tag!(" ") >>
    fields: body_fields >>
    tag!(" ") >>
    envelope: envelope >>
    tag!(" ") >>
    body: body >>
    tag!(" ") >>
    lines: number >>
    ext: body_ext_1part >>
    (BodyStructure::Message {
        common: BodyContentCommon {
            ty: ContentType {
                ty: "MESSAGE",
                subtype: "RFC822",
                params: fields.param,
            },
            disposition: ext.disposition,
            language: ext.language,
            location: ext.location,
        },
        other: BodyContentSinglePart {
            id: fields.id,
            md5: ext.md5,
            octets: fields.octets,
            description: fields.description,
            transfer_encoding: fields.transfer_encoding,
        },
        envelope,
        // boxed: BodyStructure is recursive through Message
        body: Box::new(body),
        lines,
        extension: ext.extension,
    })
));

// body-type-mpart = 1*body SP media-subtype [SP body-ext-mpart]
named!(body_type_multipart<BodyStructure>, do_parse!(
    bodies: many1!(body) >>
    tag!(" ") >>
    media_subtype: string_utf8 >>
    ext: body_ext_mpart >>
    (BodyStructure::Multipart {
        common: BodyContentCommon {
            ty: ContentType {
                ty: "MULTIPART",
                subtype: media_subtype,
                params: ext.param,
            },
            disposition: ext.disposition,
            language: ext.language,
            location: ext.location,
        },
        bodies,
        extension: ext.extension,
    })
));

// body = "(" (body-type-1part / body-type-mpart) ")"
// Text and message are tried before basic since their leading tags are more
// specific; multipart is last because it starts with a nested body.
named!(pub(crate) body<BodyStructure>, paren_delimited!(
    alt!(body_type_text | body_type_message | body_type_basic | body_type_multipart)
));

// "BODYSTRUCTURE" SP body — wraps the parsed body into an AttributeValue.
named!(pub(crate) msg_att_body_structure<AttributeValue>, do_parse!(
    tag_no_case!("BODYSTRUCTURE ") >>
    body: body >>
    (AttributeValue::BodyStructure(body))
));
// Unit tests for the BODYSTRUCTURE parsers. Each test feeds a literal IMAP
// response fragment and matches on the parsed structure; EMPTY asserts that
// the parser consumed all input.
#[cfg(test)]
mod tests {
    use super::*;

    const EMPTY: &[u8] = &[];

    // body-fld-param SP body-fld-id SP body-fld-desc SP body-fld-enc SP body-fld-octets
    const BODY_FIELDS: &str = r#"("foo" "bar") "id" "desc" "7BIT" 1337"#;
    // Expected parse results of the BODY_FIELDS fragment above:
    const BODY_FIELD_PARAM_PAIR: (&str, &str) = ("foo", "bar");
    const BODY_FIELD_ID: Option<&str> = Some("id");
    const BODY_FIELD_DESC: Option<&str> = Some("desc");
    const BODY_FIELD_ENC: ContentEncoding = ContentEncoding::SevenBit;
    const BODY_FIELD_OCTETS: u32 = 1337;

    // Returns a TEXT/PLAIN body fragment and the structure it should parse to.
    fn mock_body_text() -> (String, BodyStructure<'static>) {
        (
            format!(r#"("TEXT" "PLAIN" {} 42)"#, BODY_FIELDS),
            BodyStructure::Text {
                common: BodyContentCommon {
                    ty: ContentType {
                        ty: "TEXT",
                        subtype: "PLAIN",
                        params: Some(vec![BODY_FIELD_PARAM_PAIR]),
                    },
                    disposition: None,
                    language: None,
                    location: None,
                },
                other: BodyContentSinglePart {
                    md5: None,
                    transfer_encoding: BODY_FIELD_ENC,
                    octets: BODY_FIELD_OCTETS,
                    id: BODY_FIELD_ID,
                    description: BODY_FIELD_DESC,
                },
                lines: 42,
                extension: None,
            }
        )
    }

    #[test]
    fn test_body_param_data() {
        assert_matches!(
            body_param(br#"NIL"#),
            Ok((EMPTY, None))
        );
        assert_matches!(
            body_param(br#"("foo" "bar")"#),
            Ok((EMPTY, Some(param))) => {
                assert_eq!(param, vec![("foo", "bar")]);
            }
        );
    }

    #[test]
    fn test_body_lang_data() {
        // single quoted tag becomes a one-element Vec
        assert_matches!(
            body_lang(br#""bob""#),
            Ok((EMPTY, Some(langs))) => {
                assert_eq!(langs, vec!["bob"]);
            }
        );
        assert_matches!(
            body_lang(br#"("one" "two")"#),
            Ok((EMPTY, Some(langs))) => {
                assert_eq!(langs, vec!["one", "two"]);
            }
        );
        assert_matches!(
            body_lang(br#"NIL"#),
            Ok((EMPTY, None))
        );
    }

    #[test]
    fn test_body_extension_data() {
        assert_matches!(
            body_extension(br#""blah""#),
            Ok((EMPTY, BodyExtension::Str(Some("blah"))))
        );
        // NIL maps to Str(None), not to an absent extension
        assert_matches!(
            body_extension(br#"NIL"#),
            Ok((EMPTY, BodyExtension::Str(None)))
        );
        assert_matches!(
            body_extension(br#"("hello")"#),
            Ok((EMPTY, BodyExtension::List(list))) => {
                assert_eq!(list, vec![BodyExtension::Str(Some("hello"))]);
            }
        );
        assert_matches!(
            body_extension(br#"(1337)"#),
            Ok((EMPTY, BodyExtension::List(list))) => {
                assert_eq!(list, vec![BodyExtension::Num(1337)]);
            }
        );
    }

    #[test]
    fn test_body_disposition_data() {
        assert_matches!(
            body_disposition(br#"NIL"#),
            Ok((EMPTY, None))
        );
        assert_matches!(
            body_disposition(br#"("attachment" ("FILENAME" "pages.pdf"))"#),
            Ok((EMPTY, Some(disposition))) => {
                assert_eq!(disposition, ContentDisposition {
                    ty: "attachment",
                    params: Some(vec![
                        ("FILENAME", "pages.pdf")
                    ])
                });
            }
        );
    }

    #[test]
    fn test_body_structure_text() {
        let (body_str, body_struct) = mock_body_text();
        assert_matches!(
            body(body_str.as_bytes()),
            Ok((_, text)) => {
                assert_eq!(text, body_struct);
            }
        );
    }

    // Trailing "NIL NIL NIL NIL" extension fields must parse to the same
    // structure as their complete absence.
    #[test]
    fn test_body_structure_text_with_ext() {
        let body_str = format!(r#"("TEXT" "PLAIN" {} 42 NIL NIL NIL NIL)"#, BODY_FIELDS);
        let (_, text_body_struct) = mock_body_text();
        assert_matches!(
            body(body_str.as_bytes()),
            Ok((_, text)) => {
                assert_eq!(text, text_body_struct)
            }
        );
    }

    #[test]
    fn test_body_structure_basic() {
        const BODY: &[u8] = br#"("APPLICATION" "PDF" ("NAME" "pages.pdf") NIL NIL "BASE64" 38838 NIL ("attachment" ("FILENAME" "pages.pdf")) NIL NIL)"#;
        assert_matches!(
            body(BODY),
            Ok((_, basic)) => {
                assert_eq!(basic, BodyStructure::Basic {
                    common: BodyContentCommon {
                        ty: ContentType {
                            ty: "APPLICATION",
                            subtype: "PDF",
                            params: Some(vec![("NAME", "pages.pdf")])
                        },
                        disposition: Some(ContentDisposition {
                            ty: "attachment",
                            params: Some(vec![("FILENAME", "pages.pdf")])
                        }),
                        language: None,
                        location: None,
                    },
                    other: BodyContentSinglePart {
                        transfer_encoding: ContentEncoding::Base64,
                        octets: 38838,
                        id: None,
                        md5: None,
                        description: None,
                    },
                    extension: None,
                })
            }
        );
    }

    // MESSAGE/RFC822 embeds a full envelope plus a nested body structure.
    #[test]
    fn test_body_structure_message() {
        let (text_body_str, _) = mock_body_text();
        let envelope_str = r#"("Wed, 17 Jul 1996 02:23:25 -0700 (PDT)" "IMAP4rev1 WG mtg summary and minutes" (("Terry Gray" NIL "gray" "cac.washington.edu")) (("Terry Gray" NIL "gray" "cac.washington.edu")) (("Terry Gray" NIL "gray" "cac.washington.edu")) ((NIL NIL "imap" "cac.washington.edu")) ((NIL NIL "minutes" "CNRI.Reston.VA.US") ("John Klensin" NIL "KLENSIN" "MIT.EDU")) NIL NIL "<B27397-0100000@cac.washington.edu>")"#;
        let body_str = format!(r#"("MESSAGE" "RFC822" {} {} {} 42)"#, BODY_FIELDS, envelope_str, text_body_str);
        assert_matches!(
            body(body_str.as_bytes()),
            Ok((_, BodyStructure::Message { .. }))
        );
    }

    #[test]
    fn test_body_structure_multipart() {
        let (text_body_str1, text_body_struct1) = mock_body_text();
        let (text_body_str2, text_body_struct2) = mock_body_text();
        // note: the nested bodies are NOT space-separated in the wire format
        let body_str = format!(
            r#"({}{} "ALTERNATIVE" NIL NIL NIL NIL)"#,
            text_body_str1, text_body_str2
        );
        assert_matches!(
            body(body_str.as_bytes()),
            Ok((_, multipart)) => {
                assert_eq!(multipart, BodyStructure::Multipart {
                    common: BodyContentCommon {
                        ty: ContentType {
                            ty: "MULTIPART",
                            subtype: "ALTERNATIVE",
                            params: None
                        },
                        language: None,
                        location: None,
                        disposition: None,
                    },
                    bodies: vec![
                        text_body_struct1,
                        text_body_struct2,
                    ],
                    extension: None
                });
            }
        );
    }
}
Document choices on character encoding issues (fixes #64)
// rustfmt doesn't do a very good job on nom parser invocations.
#![cfg_attr(rustfmt, rustfmt_skip)]
use crate::core::*;
use crate::types::*;
use crate::parser::envelope;
// Fields shared by all single-part body structures.
struct BodyFields<'a> {
    pub param: BodyParams<'a>,
    pub id: Option<&'a str>,
    pub description: Option<&'a str>,
    pub transfer_encoding: ContentEncoding<'a>,
    pub octets: u32,
}

// body-fields = body-fld-param SP body-fld-id SP body-fld-desc SP
//               body-fld-enc SP body-fld-octets
named!(body_fields<BodyFields>, do_parse!(
    param: body_param >>
    tag!(" ") >>
    // body id seems to refer to the Message-ID or possibly Content-ID header, which
    // by the definition in RFC 2822 seems to resolve to all ASCII characters (through
    // a large amount of indirection which I did not have the patience to fully explore)
    id: nstring_utf8 >>
    tag!(" ") >>
    // Per https://tools.ietf.org/html/rfc2045#section-8, description should be all ASCII
    description: nstring_utf8 >>
    tag!(" ") >>
    transfer_encoding: body_encoding >>
    tag!(" ") >>
    octets: number >>
    (BodyFields { param, id, description, transfer_encoding, octets })
));

// Optional extension data trailing a single-part body structure.
struct BodyExt1Part<'a> {
    pub md5: Option<&'a str>,
    pub disposition: Option<ContentDisposition<'a>>,
    pub language: Option<Vec<&'a str>>,
    pub location: Option<&'a str>,
    pub extension: Option<BodyExtension<'a>>,
}

// body-ext-1part = body-fld-md5 [SP body-fld-dsp [SP body-fld-lang
//                  [SP body-fld-loc *(SP body-extension)]]]
//                  ; MUST NOT be returned on non-extensible
//                  ; "BODY" fetch
named!(body_ext_1part<BodyExt1Part>, do_parse!(
    // Per RFC 1864, MD5 values are base64-encoded
    md5: opt_opt!(preceded!(tag!(" "), nstring_utf8)) >>
    disposition: opt_opt!(preceded!(tag!(" "), body_disposition)) >>
    language: opt_opt!(preceded!(tag!(" "), body_lang)) >>
    // Location appears to reference a URL, which by RFC 1738 (section 2.2) should be ASCII
    location: opt_opt!(preceded!(tag!(" "), nstring_utf8)) >>
    extension: opt!(preceded!(tag!(" "), body_extension)) >>
    (BodyExt1Part { md5, disposition, language, location, extension })
));

// Optional extension data trailing a multipart body structure.
struct BodyExtMPart<'a> {
    pub param: BodyParams<'a>,
    pub disposition: Option<ContentDisposition<'a>>,
    pub language: Option<Vec<&'a str>>,
    pub location: Option<&'a str>,
    pub extension: Option<BodyExtension<'a>>,
}

// body-ext-mpart = body-fld-param [SP body-fld-dsp [SP body-fld-lang
//                  [SP body-fld-loc *(SP body-extension)]]]
//                  ; MUST NOT be returned on non-extensible
//                  ; "BODY" fetch
named!(body_ext_mpart<BodyExtMPart>, do_parse!(
    param: opt_opt!(preceded!(tag!(" "), body_param)) >>
    disposition: opt_opt!(preceded!(tag!(" "), body_disposition)) >>
    language: opt_opt!(preceded!(tag!(" "), body_lang)) >>
    // Location appears to reference a URL, which by RFC 1738 (section 2.2) should be ASCII
    location: opt_opt!(preceded!(tag!(" "), nstring_utf8)) >>
    extension: opt!(preceded!(tag!(" "), body_extension)) >>
    (BodyExtMPart { param, disposition, language, location, extension })
));

// body-fld-enc: one of the well-known quoted encodings, or any other quoted
// string preserved verbatim as ContentEncoding::Other.
named!(body_encoding<ContentEncoding>, alt!(
    delimited!(char!('"'), alt!(
        map!(tag_no_case!("7BIT"), |_| ContentEncoding::SevenBit) |
        map!(tag_no_case!("8BIT"), |_| ContentEncoding::EightBit) |
        map!(tag_no_case!("BINARY"), |_| ContentEncoding::Binary) |
        map!(tag_no_case!("BASE64"), |_| ContentEncoding::Base64) |
        map!(tag_no_case!("QUOTED-PRINTABLE"), |_| ContentEncoding::QuotedPrintable)
    ), char!('"')) |
    map!(string_utf8, |enc| ContentEncoding::Other(enc))
));

named!(body_lang<Option<Vec<&str>>>, alt!(
    // body language seems to refer to RFC 3066 language tags, which should be ASCII-only
    map!(nstring_utf8, |v| v.map(|s| vec![s])) |
    map!(parenthesized_nonempty_list!(string_utf8), Option::from)
));

// body-fld-param: NIL, or a parenthesized non-empty list of key/value
// string pairs.
named!(body_param<BodyParams>, alt!(
    map!(nil, |_| None) |
    map!(parenthesized_nonempty_list!(do_parse!(
        key: string_utf8 >>
        tag!(" ") >>
        val: string_utf8 >>
        ((key, val))
    )), Option::from)
));

named!(body_extension<BodyExtension>, alt!(
    map!(number, |n| BodyExtension::Num(n)) |
    // Cannot find documentation on character encoding for body extension values.
    // So far, assuming UTF-8 seems fine, please report if you run into issues here.
    map!(nstring_utf8, |s| BodyExtension::Str(s)) |
    map!(parenthesized_nonempty_list!(body_extension), |ext| BodyExtension::List(ext))
));

// body-fld-dsp: NIL, or "(" disposition-type SP body-fld-param ")".
named!(body_disposition<Option<ContentDisposition>>, alt!(
    map!(nil, |_| None) |
    paren_delimited!(do_parse!(
        ty: string_utf8 >>
        tag!(" ") >>
        params: body_param >>
        (Some(ContentDisposition {
            ty,
            params
        }))
    ))
));

// body-type-basic = media-basic SP body-fields [SP body-ext-1part]
// Any media type other than TEXT, MESSAGE/RFC822, or multipart.
named!(body_type_basic<BodyStructure>, do_parse!(
    media_type: string_utf8 >>
    tag!(" ") >>
    media_subtype: string_utf8 >>
    tag!(" ") >>
    fields: body_fields >>
    ext: body_ext_1part >>
    (BodyStructure::Basic {
        common: BodyContentCommon {
            ty: ContentType {
                ty: media_type,
                subtype: media_subtype,
                params: fields.param,
            },
            disposition: ext.disposition,
            language: ext.language,
            location: ext.location,
        },
        other: BodyContentSinglePart {
            id: fields.id,
            md5: ext.md5,
            octets: fields.octets,
            description: fields.description,
            transfer_encoding: fields.transfer_encoding,
        },
        extension: ext.extension,
    })
));

// body-type-text = media-text SP body-fields SP body-fld-lines [SP body-ext-1part]
named!(body_type_text<BodyStructure>, do_parse!(
    tag_no_case!("\"TEXT\"") >>
    tag!(" ") >>
    media_subtype: string_utf8 >>
    tag!(" ") >>
    fields: body_fields >>
    tag!(" ") >>
    lines: number >>
    ext: body_ext_1part >>
    (BodyStructure::Text {
        common: BodyContentCommon {
            ty: ContentType {
                ty: "TEXT",
                subtype: media_subtype,
                params: fields.param,
            },
            disposition: ext.disposition,
            language: ext.language,
            location: ext.location,
        },
        other: BodyContentSinglePart {
            id: fields.id,
            md5: ext.md5,
            octets: fields.octets,
            description: fields.description,
            transfer_encoding: fields.transfer_encoding,
        },
        lines,
        extension: ext.extension,
    })
));

// body-type-msg = "MESSAGE" SP "RFC822" SP body-fields SP envelope
//                 SP body SP body-fld-lines [SP body-ext-1part]
named!(body_type_message<BodyStructure>, do_parse!(
    tag_no_case!("\"MESSAGE\" \"RFC822\"") >>
    tag!(" ") >>
    fields: body_fields >>
    tag!(" ") >>
    envelope: envelope >>
    tag!(" ") >>
    body: body >>
    tag!(" ") >>
    lines: number >>
    ext: body_ext_1part >>
    (BodyStructure::Message {
        common: BodyContentCommon {
            ty: ContentType {
                ty: "MESSAGE",
                subtype: "RFC822",
                params: fields.param,
            },
            disposition: ext.disposition,
            language: ext.language,
            location: ext.location,
        },
        other: BodyContentSinglePart {
            id: fields.id,
            md5: ext.md5,
            octets: fields.octets,
            description: fields.description,
            transfer_encoding: fields.transfer_encoding,
        },
        envelope,
        // boxed: BodyStructure is recursive through Message
        body: Box::new(body),
        lines,
        extension: ext.extension,
    })
));

// body-type-mpart = 1*body SP media-subtype [SP body-ext-mpart]
named!(body_type_multipart<BodyStructure>, do_parse!(
    bodies: many1!(body) >>
    tag!(" ") >>
    media_subtype: string_utf8 >>
    ext: body_ext_mpart >>
    (BodyStructure::Multipart {
        common: BodyContentCommon {
            ty: ContentType {
                ty: "MULTIPART",
                subtype: media_subtype,
                params: ext.param,
            },
            disposition: ext.disposition,
            language: ext.language,
            location: ext.location,
        },
        bodies,
        extension: ext.extension,
    })
));

// body = "(" (body-type-1part / body-type-mpart) ")"
// Text and message are tried before basic since their leading tags are more
// specific; multipart is last because it starts with a nested body.
named!(pub(crate) body<BodyStructure>, paren_delimited!(
    alt!(body_type_text | body_type_message | body_type_basic | body_type_multipart)
));

// "BODYSTRUCTURE" SP body — wraps the parsed body into an AttributeValue.
named!(pub(crate) msg_att_body_structure<AttributeValue>, do_parse!(
    tag_no_case!("BODYSTRUCTURE ") >>
    body: body >>
    (AttributeValue::BodyStructure(body))
));
// Unit tests for the BODYSTRUCTURE parsers. Each test feeds a literal IMAP
// response fragment and matches on the parsed structure; EMPTY asserts that
// the parser consumed all input.
#[cfg(test)]
mod tests {
    use super::*;

    const EMPTY: &[u8] = &[];

    // body-fld-param SP body-fld-id SP body-fld-desc SP body-fld-enc SP body-fld-octets
    const BODY_FIELDS: &str = r#"("foo" "bar") "id" "desc" "7BIT" 1337"#;
    // Expected parse results of the BODY_FIELDS fragment above:
    const BODY_FIELD_PARAM_PAIR: (&str, &str) = ("foo", "bar");
    const BODY_FIELD_ID: Option<&str> = Some("id");
    const BODY_FIELD_DESC: Option<&str> = Some("desc");
    const BODY_FIELD_ENC: ContentEncoding = ContentEncoding::SevenBit;
    const BODY_FIELD_OCTETS: u32 = 1337;

    // Returns a TEXT/PLAIN body fragment and the structure it should parse to.
    fn mock_body_text() -> (String, BodyStructure<'static>) {
        (
            format!(r#"("TEXT" "PLAIN" {} 42)"#, BODY_FIELDS),
            BodyStructure::Text {
                common: BodyContentCommon {
                    ty: ContentType {
                        ty: "TEXT",
                        subtype: "PLAIN",
                        params: Some(vec![BODY_FIELD_PARAM_PAIR]),
                    },
                    disposition: None,
                    language: None,
                    location: None,
                },
                other: BodyContentSinglePart {
                    md5: None,
                    transfer_encoding: BODY_FIELD_ENC,
                    octets: BODY_FIELD_OCTETS,
                    id: BODY_FIELD_ID,
                    description: BODY_FIELD_DESC,
                },
                lines: 42,
                extension: None,
            }
        )
    }

    #[test]
    fn test_body_param_data() {
        assert_matches!(
            body_param(br#"NIL"#),
            Ok((EMPTY, None))
        );
        assert_matches!(
            body_param(br#"("foo" "bar")"#),
            Ok((EMPTY, Some(param))) => {
                assert_eq!(param, vec![("foo", "bar")]);
            }
        );
    }

    #[test]
    fn test_body_lang_data() {
        // single quoted tag becomes a one-element Vec
        assert_matches!(
            body_lang(br#""bob""#),
            Ok((EMPTY, Some(langs))) => {
                assert_eq!(langs, vec!["bob"]);
            }
        );
        assert_matches!(
            body_lang(br#"("one" "two")"#),
            Ok((EMPTY, Some(langs))) => {
                assert_eq!(langs, vec!["one", "two"]);
            }
        );
        assert_matches!(
            body_lang(br#"NIL"#),
            Ok((EMPTY, None))
        );
    }

    #[test]
    fn test_body_extension_data() {
        assert_matches!(
            body_extension(br#""blah""#),
            Ok((EMPTY, BodyExtension::Str(Some("blah"))))
        );
        // NIL maps to Str(None), not to an absent extension
        assert_matches!(
            body_extension(br#"NIL"#),
            Ok((EMPTY, BodyExtension::Str(None)))
        );
        assert_matches!(
            body_extension(br#"("hello")"#),
            Ok((EMPTY, BodyExtension::List(list))) => {
                assert_eq!(list, vec![BodyExtension::Str(Some("hello"))]);
            }
        );
        assert_matches!(
            body_extension(br#"(1337)"#),
            Ok((EMPTY, BodyExtension::List(list))) => {
                assert_eq!(list, vec![BodyExtension::Num(1337)]);
            }
        );
    }

    #[test]
    fn test_body_disposition_data() {
        assert_matches!(
            body_disposition(br#"NIL"#),
            Ok((EMPTY, None))
        );
        assert_matches!(
            body_disposition(br#"("attachment" ("FILENAME" "pages.pdf"))"#),
            Ok((EMPTY, Some(disposition))) => {
                assert_eq!(disposition, ContentDisposition {
                    ty: "attachment",
                    params: Some(vec![
                        ("FILENAME", "pages.pdf")
                    ])
                });
            }
        );
    }

    #[test]
    fn test_body_structure_text() {
        let (body_str, body_struct) = mock_body_text();
        assert_matches!(
            body(body_str.as_bytes()),
            Ok((_, text)) => {
                assert_eq!(text, body_struct);
            }
        );
    }

    // Trailing "NIL NIL NIL NIL" extension fields must parse to the same
    // structure as their complete absence.
    #[test]
    fn test_body_structure_text_with_ext() {
        let body_str = format!(r#"("TEXT" "PLAIN" {} 42 NIL NIL NIL NIL)"#, BODY_FIELDS);
        let (_, text_body_struct) = mock_body_text();
        assert_matches!(
            body(body_str.as_bytes()),
            Ok((_, text)) => {
                assert_eq!(text, text_body_struct)
            }
        );
    }

    #[test]
    fn test_body_structure_basic() {
        const BODY: &[u8] = br#"("APPLICATION" "PDF" ("NAME" "pages.pdf") NIL NIL "BASE64" 38838 NIL ("attachment" ("FILENAME" "pages.pdf")) NIL NIL)"#;
        assert_matches!(
            body(BODY),
            Ok((_, basic)) => {
                assert_eq!(basic, BodyStructure::Basic {
                    common: BodyContentCommon {
                        ty: ContentType {
                            ty: "APPLICATION",
                            subtype: "PDF",
                            params: Some(vec![("NAME", "pages.pdf")])
                        },
                        disposition: Some(ContentDisposition {
                            ty: "attachment",
                            params: Some(vec![("FILENAME", "pages.pdf")])
                        }),
                        language: None,
                        location: None,
                    },
                    other: BodyContentSinglePart {
                        transfer_encoding: ContentEncoding::Base64,
                        octets: 38838,
                        id: None,
                        md5: None,
                        description: None,
                    },
                    extension: None,
                })
            }
        );
    }

    // MESSAGE/RFC822 embeds a full envelope plus a nested body structure.
    #[test]
    fn test_body_structure_message() {
        let (text_body_str, _) = mock_body_text();
        let envelope_str = r#"("Wed, 17 Jul 1996 02:23:25 -0700 (PDT)" "IMAP4rev1 WG mtg summary and minutes" (("Terry Gray" NIL "gray" "cac.washington.edu")) (("Terry Gray" NIL "gray" "cac.washington.edu")) (("Terry Gray" NIL "gray" "cac.washington.edu")) ((NIL NIL "imap" "cac.washington.edu")) ((NIL NIL "minutes" "CNRI.Reston.VA.US") ("John Klensin" NIL "KLENSIN" "MIT.EDU")) NIL NIL "<B27397-0100000@cac.washington.edu>")"#;
        let body_str = format!(r#"("MESSAGE" "RFC822" {} {} {} 42)"#, BODY_FIELDS, envelope_str, text_body_str);
        assert_matches!(
            body(body_str.as_bytes()),
            Ok((_, BodyStructure::Message { .. }))
        );
    }

    #[test]
    fn test_body_structure_multipart() {
        let (text_body_str1, text_body_struct1) = mock_body_text();
        let (text_body_str2, text_body_struct2) = mock_body_text();
        // note: the nested bodies are NOT space-separated in the wire format
        let body_str = format!(
            r#"({}{} "ALTERNATIVE" NIL NIL NIL NIL)"#,
            text_body_str1, text_body_str2
        );
        assert_matches!(
            body(body_str.as_bytes()),
            Ok((_, multipart)) => {
                assert_eq!(multipart, BodyStructure::Multipart {
                    common: BodyContentCommon {
                        ty: ContentType {
                            ty: "MULTIPART",
                            subtype: "ALTERNATIVE",
                            params: None
                        },
                        language: None,
                        location: None,
                        disposition: None,
                    },
                    bodies: vec![
                        text_body_struct1,
                        text_body_struct2,
                    ],
                    extension: None
                });
            }
        );
    }
}
|
use std::mem;
use std::sync::Arc;
use std::time::{Duration, Instant};
use node::{EntryId, Node, NodeContext, NodeError};
use futures::future::{self, Future};
use futures::sync::oneshot;
use parking_lot::Mutex;
use boxfuture::{BoxFuture, Boxable};
///
/// A token that uniquely identifies one run of a Node in the Graph. Each run of a Node (via
/// `N::Context::spawn`) gets its own RunToken. When a run finishes, the spawned work's RunToken
/// is compared against the Node's current one: a mismatch (caused by the Node having been
/// `cleared` in the meantime) means the completed work is stale and is discarded. See
/// `Entry::complete` for more information.
///
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) struct RunToken(u32);

impl RunToken {
  /// The token assigned to the very first run of a Node.
  fn initial() -> RunToken {
    RunToken(0)
  }

  /// The token identifying the run that follows this one.
  fn next(self) -> RunToken {
    let RunToken(current) = self;
    RunToken(current + 1)
  }
}
///
/// A token associated with a Node that is bumped whenever the Node's output value has (or might
/// have) changed. When a dependent consumes a dependency at a particular generation, that
/// generation is recorded on the consuming edge and can later be compared to decide whether the
/// inputs of a node have changed.
///
/// Unlike the RunToken (bumped on every re-run of a node), the Generation only advances when the
/// node's output actually changed.
///
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) struct Generation(u32);

impl Generation {
  /// The generation every Node starts at.
  fn initial() -> Generation {
    Generation(0)
  }

  /// The generation following this one.
  fn next(self) -> Generation {
    let Generation(current) = self;
    Generation(current + 1)
  }
}
// The lifecycle of a Node's computation: NotStarted -> Running -> Completed, where `Entry::clear`
// (or an invalidated result in `Entry::complete`) resets a state back to NotStarted, and
// `Entry::dirty` flags a Running/Completed state for re-validation.
#[cfg_attr(feature = "cargo-clippy", allow(type_complexity))]
pub(crate) enum EntryState<N: Node> {
  // A node that has either been explicitly cleared, or has not yet started Running. In this state
  // there is no need for a dirty bit because the RunToken is either in its initial state, or has
  // been explicitly incremented when the node was cleared.
  //
  // The previous_result value is _not_ a valid value for this Entry: rather, it is preserved in
  // order to compute the generation value for this Node by comparing it to the new result the next
  // time the Node runs.
  NotStarted {
    run_token: RunToken,
    generation: Generation,
    previous_result: Option<Result<N::Item, N::Error>>,
  },
  // A node that is running. A running node that has been marked dirty re-runs rather than
  // completing.
  //
  // The `previous_result` value for a Running node is not a valid value. See NotStarted.
  Running {
    run_token: RunToken,
    generation: Generation,
    // When the run was spawned; consumed by `Entry::current_running_duration`.
    start_time: Instant,
    // Callers blocked in `Entry::get` waiting for this run to complete.
    waiters: Vec<oneshot::Sender<Result<(N::Item, Generation), N::Error>>>,
    previous_result: Option<Result<N::Item, N::Error>>,
    dirty: bool,
  },
  // A node that has completed, and then possibly been marked dirty. Because marking a node
  // dirty does not eagerly re-execute any logic, it will stay this way until a caller moves it
  // back to Running.
  Completed {
    run_token: RunToken,
    generation: Generation,
    result: Result<N::Item, N::Error>,
    // Generations of the dependencies consumed by this run, used to validate cleanliness later.
    dep_generations: Vec<Generation>,
    dirty: bool,
  },
}
impl<N: Node> EntryState<N> {
  /// The state every Entry begins in: not yet started, with zeroed tokens and no prior result.
  fn initial() -> EntryState<N> {
    Self::NotStarted {
      run_token: RunToken::initial(),
      generation: Generation::initial(),
      previous_result: None,
    }
  }
}
///
/// Because there are guaranteed to be more edges than nodes in Graphs, we mark cyclic
/// dependencies via a wrapper around the Node (rather than adding a byte to every
/// valid edge).
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) enum EntryKey<N: Node> {
  Valid(N),
  Cyclic(N),
}

impl<N: Node> EntryKey<N> {
  /// Returns the wrapped Node, regardless of whether it was marked cyclic.
  pub(crate) fn content(&self) -> &N {
    match *self {
      EntryKey::Valid(ref n) | EntryKey::Cyclic(ref n) => n,
    }
  }
}
///
/// An Entry and its adjacencies.
///
#[derive(Clone)]
pub struct Entry<N: Node> {
  // TODO: This is a clone of the Node, which is also kept in the `nodes` map. It would be
  // nice to avoid keeping two copies of each Node, but tracking references between the two
  // maps is painful.
  node: EntryKey<N>,
  // Shared, lock-protected state. Because it sits behind an Arc, cloning an Entry yields a
  // handle onto the SAME state rather than an independent copy.
  state: Arc<Mutex<EntryState<N>>>,
}
impl<N: Node> Entry<N> {
///
/// Creates an Entry without starting it. This indirection exists because we cannot know
/// the EntryId of an Entry until after it is stored in the Graph, and we need the EntryId
/// in order to run the Entry.
///
pub(crate) fn new(node: EntryKey<N>) -> Entry<N> {
Entry {
node: node,
state: Arc::new(Mutex::new(EntryState::initial())),
}
}
pub fn node(&self) -> &N {
self.node.content()
}
///
/// If the Future for this Node has already completed, returns a clone of its result.
///
pub fn peek(&self) -> Option<Result<N::Item, N::Error>> {
let state = self.state.lock();
match *state {
EntryState::Completed {
ref result, dirty, ..
}
if !dirty =>
{
Some(result.clone())
}
_ => None,
}
}
///
/// Spawn the execution of the node on an Executor, which will cause it to execute outside of
/// the Graph lock and call back into the graph lock to set the final value.
///
pub(crate) fn run<C>(
context_factory: &C,
entry_key: &EntryKey<N>,
entry_id: EntryId,
run_token: RunToken,
generation: Generation,
previous_dep_generations: Option<Vec<Generation>>,
previous_result: Option<Result<N::Item, N::Error>>,
) -> EntryState<N>
where
C: NodeContext<Node = N>,
{
// Increment the RunToken to uniquely identify this work.
let run_token = run_token.next();
match entry_key {
&EntryKey::Valid(ref n) => {
let context = context_factory.clone_for(entry_id);
let node = n.clone();
context_factory.spawn(future::lazy(move || {
// If we have previous result generations, compare them to all current dependency
// generations (which, if they are dirty, will cause recursive cleaning). If they
// match, we can consider the previous result value to be clean for reuse.
let was_clean = if let Some(previous_dep_generations) = previous_dep_generations {
let context2 = context.clone();
context
.graph()
.dep_generations(entry_id, &context)
.then(move |generation_res| match generation_res {
Ok(ref dep_generations) if dep_generations == &previous_dep_generations => {
// Dependencies have not changed: Node is clean.
Ok(true)
}
_ => {
// If dependency generations mismatched or failed to fetch, clear its
// dependencies and indicate that it should re-run.
context2.graph().clear_deps(entry_id, run_token);
Ok(false)
}
}).to_boxed()
} else {
future::ok(false).to_boxed()
};
// If the Node was clean, complete it. Otherwise, re-run.
was_clean.and_then(move |was_clean| {
if was_clean {
// No dependencies have changed: we can complete the Node without changing its
// previous_result or generation.
context
.graph()
.complete(&context, entry_id, run_token, None);
future::ok(()).to_boxed()
} else {
// The Node needs to (re-)run!
let context2 = context.clone();
node
.run(context)
.then(move |res| {
context2
.graph()
.complete(&context2, entry_id, run_token, Some(res));
Ok(())
}).to_boxed()
}
})
}));
EntryState::Running {
waiters: Vec::new(),
start_time: Instant::now(),
run_token,
generation,
previous_result,
dirty: false,
}
}
&EntryKey::Cyclic(_) => EntryState::Completed {
result: Err(N::Error::cyclic()),
dep_generations: Vec::new(),
run_token,
generation,
dirty: false,
},
}
}
///
/// Returns a Future for the Node's value and Generation.
///
/// The two separate state matches handle two cases: in the first case we simply want to mutate
/// or clone the state, so we take it by reference without swapping it. In the second case, we
/// need to consume the state (which avoids cloning some of the values held there), so we take it
/// by value.
///
pub(crate) fn get<C>(
&mut self,
context: &C,
entry_id: EntryId,
) -> BoxFuture<(N::Item, Generation), N::Error>
where
C: NodeContext<Node = N>,
{
{
let mut state = self.state.lock();
// First check whether the Node is already complete, or is currently running: in both of these
// cases we don't swap the state of the Node.
match &mut *state {
&mut EntryState::Running {
ref mut waiters, ..
} => {
let (send, recv) = oneshot::channel();
waiters.push(send);
return recv
.map_err(|_| N::Error::invalidated())
.flatten()
.to_boxed();
}
&mut EntryState::Completed {
ref result,
generation,
dirty,
..
}
if !dirty =>
{
return future::result(result.clone())
.map(move |res| (res, generation))
.to_boxed();
}
_ => {
// Fall through to the second match.
}
};
// Otherwise, we'll need to swap the state of the Node, so take it by value.
let next_state = match mem::replace(&mut *state, EntryState::initial()) {
EntryState::NotStarted {
run_token,
generation,
previous_result,
} => Self::run(
context,
&self.node,
entry_id,
run_token,
generation,
None,
previous_result,
),
EntryState::Completed {
run_token,
generation,
result,
dep_generations,
dirty,
} => {
assert!(
dirty,
"A clean Node should not reach this point: {:?}",
result
);
// The Node has already completed but is now marked dirty. This indicates that we are the
// first caller to request it since it was marked dirty. We attempt to clean it (which will
// cause it to re-run if the dep_generations mismatch).
Self::run(
context,
&self.node,
entry_id,
run_token,
generation,
Some(dep_generations),
Some(result),
)
}
EntryState::Running { .. } => {
panic!("A Running Node should not reach this point.");
}
};
// Swap in the new state and then recurse.
*state = next_state;
}
self.get(context, entry_id)
}
///
/// Called from the Executor when a Node completes.
///
/// A `result` value of `None` indicates that the Node was found to be clean, and its previous
/// result should be used. This special case exists to avoid 1) cloning the result to call this
/// method, and 2) comparing the current/previous results unnecessarily.
///
/// Takes a &mut InnerGraph to ensure that completing nodes doesn't race with dirtying them.
/// The important relationship being guaranteed here is that if the Graph is calling
/// invalidate_from_roots, it may mark us, or our dependencies, as dirty. We don't want to
/// complete _while_ a batch of nodes are being marked as dirty, and this exclusive access ensures
/// that can't happen.
///
pub(crate) fn complete<C>(
&mut self,
context: &C,
entry_id: EntryId,
result_run_token: RunToken,
dep_generations: Vec<Generation>,
result: Option<Result<N::Item, N::Error>>,
_graph: &mut super::InnerGraph<N>,
) where
C: NodeContext<Node = N>,
{
let mut state = self.state.lock();
// We care about exactly one case: a Running state with the same run_token. All other states
// represent various (legal) race conditions. See `RunToken`'s docs for more information.
match *state {
EntryState::Running { run_token, .. } if result_run_token == run_token => {}
_ => {
// We care about exactly one case: a Running state with the same run_token. All other states
// represent various (legal) race conditions.
return;
}
}
*state = match mem::replace(&mut *state, EntryState::initial()) {
EntryState::Running {
waiters,
run_token,
generation,
previous_result,
dirty,
..
} => {
if result == Some(Err(N::Error::invalidated())) {
// Because it is always ephemeral, invalidation is the only type of Err that we do not
// persist in the Graph. Instead, swap the Node to NotStarted to drop all waiters,
// causing them to also experience invalidation (transitively).
EntryState::NotStarted {
run_token: run_token.next(),
generation,
previous_result,
}
} else if dirty {
// The node was dirtied while it was running. The dep_generations and new result cannot
// be trusted and were never published. We continue to use the previous result.
Self::run(
context,
&self.node,
entry_id,
run_token,
generation,
None,
previous_result,
)
} else {
// If the new result does not match the previous result, the generation increments.
let (generation, next_result) = if let Some(result) = result {
if Some(&result) == previous_result.as_ref() {
// Node was re-executed, but had the same result value.
(generation, result)
} else {
(generation.next(), result)
}
} else {
// Node was marked clean.
// NB: The `expect` here avoids a clone and a comparison: see the method docs.
(
generation,
previous_result.expect("A Node cannot be marked clean without a previous result."),
)
};
// Notify all waiters (ignoring any that have gone away), and then store the value.
// A waiter will go away whenever they drop the `Future` `Receiver` of the value, perhaps
// due to failure of another Future in a `join` or `join_all`, or due to a timeout at the
// root of a request.
for waiter in waiters {
let _ = waiter.send(next_result.clone().map(|res| (res, generation)));
}
EntryState::Completed {
result: next_result,
dep_generations,
run_token,
generation,
dirty: false,
}
}
}
s => s,
};
}
///
/// Get the current Generation of this entry.
///
/// TODO: Consider moving the Generation and RunToken out of the EntryState once we decide what
/// we want the per-Entry locking strategy to be.
///
pub(crate) fn generation(&self) -> Generation {
match *self.state.lock() {
EntryState::NotStarted { generation, .. }
| EntryState::Running { generation, .. }
| EntryState::Completed { generation, .. } => generation,
}
}
///
/// Get the current RunToken of this entry.
///
/// TODO: Consider moving the Generation and RunToken out of the EntryState once we decide what
/// we want the per-Entry locking strategy to be.
///
pub(crate) fn run_token(&self) -> RunToken {
match *self.state.lock() {
EntryState::NotStarted { run_token, .. }
| EntryState::Running { run_token, .. }
| EntryState::Completed { run_token, .. } => run_token,
}
}
///
/// If the Node has started and has not yet completed, returns its runtime.
///
pub(crate) fn current_running_duration(&self, now: Instant) -> Option<Duration> {
match *self.state.lock() {
EntryState::Running { start_time, .. } => Some(now.duration_since(start_time)),
_ => None,
}
}
///
/// Clears the state of this Node, forcing it to be recomputed.
///
pub(crate) fn clear(&mut self) {
let mut state = self.state.lock();
let (run_token, generation, previous_result) =
match mem::replace(&mut *state, EntryState::initial()) {
EntryState::NotStarted {
run_token,
generation,
previous_result,
}
| EntryState::Running {
run_token,
generation,
previous_result,
..
} => (run_token, generation, previous_result),
EntryState::Completed {
run_token,
generation,
result,
..
} => (run_token, generation, Some(result)),
};
// Swap in a state with a new RunToken value, which invalidates any outstanding work.
*state = EntryState::NotStarted {
run_token: run_token.next(),
generation,
previous_result,
};
}
///
/// Dirties this Node, which will cause it to examine its dependencies the next time it is
/// requested, and re-run if any of them have changed generations.
///
/// See comment on complete for information about _graph argument.
///
pub(crate) fn dirty(&mut self, _graph: &mut super::InnerGraph<N>) {
match &mut *self.state.lock() {
&mut EntryState::Running { ref mut dirty, .. }
| &mut EntryState::Completed { ref mut dirty, .. } => {
// Mark dirty.
*dirty = true;
}
&mut EntryState::NotStarted { .. } => {}
}
}
pub(crate) fn format(&self) -> String {
let state = match self.peek() {
Some(Ok(ref nr)) => format!("{:?}", nr),
Some(Err(ref x)) => format!("{:?}", x),
None => "<None>".to_string(),
};
format!("{} == {}", self.node.content().format(), state).replace("\"", "\\\"")
}
}
Add bounds checking to Entry::current_running_duration (#6643)
### Problem
As shown in #6640, once we moved `Entry` locks outside of the `Graph` lock in #6095, it became possible for `heavy_hitters` to race against `Nodes` that are starting outside of the `Graph` lock.
### Solution
Add a bounds check for `Instant::duration_since` in `current_running_duration` to handle the case where a Node is started after we begin computing `heavy_hitters`. While switching to `Instant::elapsed` would "mostly" avoid this case, it would incur additional syscalls without fully avoiding panics.
### Result
Fixes #6640.
use std::mem;
use std::sync::Arc;
use std::time::{Duration, Instant};
use node::{EntryId, Node, NodeContext, NodeError};
use futures::future::{self, Future};
use futures::sync::oneshot;
use parking_lot::Mutex;
use boxfuture::{BoxFuture, Boxable};
///
/// A token that uniquely identifies one run of a Node in the Graph. Each run of a Node (via
/// `N::Context::spawn`) has a different RunToken associated with it. When a run completes, if
/// the current RunToken of its Node no longer matches the RunToken of the spawned work (because
/// the Node was `cleared`), the work is discarded. See `Entry::complete` for more information.
///
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) struct RunToken(u32);

impl RunToken {
  /// The token assigned to a Node that has never run.
  fn initial() -> Self {
    Self(0)
  }

  /// The token for the next run of this Node.
  fn next(self) -> Self {
    Self(self.0 + 1)
  }
}
///
/// A token associated with a Node that is incremented whenever its output value has (or might
/// have) changed. When a dependent consumes a dependency at a particular generation, that
/// generation is recorded on the consuming edge, and can later used to determine whether the
/// inputs to a node have changed.
///
/// Unlike the RunToken (which is incremented whenever a node re-runs), the Generation is only
/// incremented when the output of a node has changed.
///
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) struct Generation(u32);

impl Generation {
  /// The generation of a Node whose output has never changed.
  fn initial() -> Self {
    Self(0)
  }

  /// The generation to publish after the Node's output changes.
  fn next(self) -> Self {
    Self(self.0 + 1)
  }
}
// The lifecycle of a Node's computation: NotStarted -> Running -> Completed, where `Entry::clear`
// (or an invalidated result in `Entry::complete`) resets a state back to NotStarted, and
// `Entry::dirty` flags a Running/Completed state for re-validation.
#[cfg_attr(feature = "cargo-clippy", allow(type_complexity))]
pub(crate) enum EntryState<N: Node> {
  // A node that has either been explicitly cleared, or has not yet started Running. In this state
  // there is no need for a dirty bit because the RunToken is either in its initial state, or has
  // been explicitly incremented when the node was cleared.
  //
  // The previous_result value is _not_ a valid value for this Entry: rather, it is preserved in
  // order to compute the generation value for this Node by comparing it to the new result the next
  // time the Node runs.
  NotStarted {
    run_token: RunToken,
    generation: Generation,
    previous_result: Option<Result<N::Item, N::Error>>,
  },
  // A node that is running. A running node that has been marked dirty re-runs rather than
  // completing.
  //
  // The `previous_result` value for a Running node is not a valid value. See NotStarted.
  Running {
    run_token: RunToken,
    generation: Generation,
    // When the run was spawned; consumed by `Entry::current_running_duration`.
    start_time: Instant,
    // Callers blocked in `Entry::get` waiting for this run to complete.
    waiters: Vec<oneshot::Sender<Result<(N::Item, Generation), N::Error>>>,
    previous_result: Option<Result<N::Item, N::Error>>,
    dirty: bool,
  },
  // A node that has completed, and then possibly been marked dirty. Because marking a node
  // dirty does not eagerly re-execute any logic, it will stay this way until a caller moves it
  // back to Running.
  Completed {
    run_token: RunToken,
    generation: Generation,
    result: Result<N::Item, N::Error>,
    // Generations of the dependencies consumed by this run, used to validate cleanliness later.
    dep_generations: Vec<Generation>,
    dirty: bool,
  },
}
impl<N: Node> EntryState<N> {
  /// The state every Entry begins in: not yet started, with zeroed tokens and no prior result.
  fn initial() -> EntryState<N> {
    Self::NotStarted {
      run_token: RunToken::initial(),
      generation: Generation::initial(),
      previous_result: None,
    }
  }
}
///
/// Because there are guaranteed to be more edges than nodes in Graphs, we mark cyclic
/// dependencies via a wrapper around the Node (rather than adding a byte to every
/// valid edge).
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) enum EntryKey<N: Node> {
  Valid(N),
  Cyclic(N),
}

impl<N: Node> EntryKey<N> {
  /// Returns the wrapped Node, regardless of whether it was marked cyclic.
  pub(crate) fn content(&self) -> &N {
    match *self {
      EntryKey::Valid(ref n) | EntryKey::Cyclic(ref n) => n,
    }
  }
}
///
/// An Entry and its adjacencies.
///
#[derive(Clone)]
pub struct Entry<N: Node> {
  // TODO: This is a clone of the Node, which is also kept in the `nodes` map. It would be
  // nice to avoid keeping two copies of each Node, but tracking references between the two
  // maps is painful.
  node: EntryKey<N>,
  // Shared, lock-protected state. Because it sits behind an Arc, cloning an Entry yields a
  // handle onto the SAME state rather than an independent copy.
  state: Arc<Mutex<EntryState<N>>>,
}
impl<N: Node> Entry<N> {
  ///
  /// Creates an Entry without starting it. This indirection exists because we cannot know
  /// the EntryId of an Entry until after it is stored in the Graph, and we need the EntryId
  /// in order to run the Entry.
  ///
  pub(crate) fn new(node: EntryKey<N>) -> Entry<N> {
    Entry {
      node: node,
      state: Arc::new(Mutex::new(EntryState::initial())),
    }
  }

  /// Returns the Node that this Entry represents.
  pub fn node(&self) -> &N {
    self.node.content()
  }

  ///
  /// If the Future for this Node has already completed, returns a clone of its result.
  ///
  pub fn peek(&self) -> Option<Result<N::Item, N::Error>> {
    let state = self.state.lock();
    match *state {
      EntryState::Completed {
        ref result, dirty, ..
      }
        if !dirty =>
      {
        Some(result.clone())
      }
      _ => None,
    }
  }

  ///
  /// Spawn the execution of the node on an Executor, which will cause it to execute outside of
  /// the Graph lock and call back into the graph lock to set the final value.
  ///
  pub(crate) fn run<C>(
    context_factory: &C,
    entry_key: &EntryKey<N>,
    entry_id: EntryId,
    run_token: RunToken,
    generation: Generation,
    previous_dep_generations: Option<Vec<Generation>>,
    previous_result: Option<Result<N::Item, N::Error>>,
  ) -> EntryState<N>
  where
    C: NodeContext<Node = N>,
  {
    // Increment the RunToken to uniquely identify this work.
    let run_token = run_token.next();
    match entry_key {
      &EntryKey::Valid(ref n) => {
        let context = context_factory.clone_for(entry_id);
        let node = n.clone();
        context_factory.spawn(future::lazy(move || {
          // If we have previous result generations, compare them to all current dependency
          // generations (which, if they are dirty, will cause recursive cleaning). If they
          // match, we can consider the previous result value to be clean for reuse.
          let was_clean = if let Some(previous_dep_generations) = previous_dep_generations {
            let context2 = context.clone();
            context
              .graph()
              .dep_generations(entry_id, &context)
              .then(move |generation_res| match generation_res {
                Ok(ref dep_generations) if dep_generations == &previous_dep_generations => {
                  // Dependencies have not changed: Node is clean.
                  Ok(true)
                }
                _ => {
                  // If dependency generations mismatched or failed to fetch, clear its
                  // dependencies and indicate that it should re-run.
                  context2.graph().clear_deps(entry_id, run_token);
                  Ok(false)
                }
              }).to_boxed()
          } else {
            future::ok(false).to_boxed()
          };

          // If the Node was clean, complete it. Otherwise, re-run.
          was_clean.and_then(move |was_clean| {
            if was_clean {
              // No dependencies have changed: we can complete the Node without changing its
              // previous_result or generation.
              context
                .graph()
                .complete(&context, entry_id, run_token, None);
              future::ok(()).to_boxed()
            } else {
              // The Node needs to (re-)run!
              let context2 = context.clone();
              node
                .run(context)
                .then(move |res| {
                  context2
                    .graph()
                    .complete(&context2, entry_id, run_token, Some(res));
                  Ok(())
                }).to_boxed()
            }
          })
        }));
        EntryState::Running {
          waiters: Vec::new(),
          start_time: Instant::now(),
          run_token,
          generation,
          previous_result,
          dirty: false,
        }
      }
      // Cyclic nodes are never spawned: they complete immediately with a cyclic error.
      &EntryKey::Cyclic(_) => EntryState::Completed {
        result: Err(N::Error::cyclic()),
        dep_generations: Vec::new(),
        run_token,
        generation,
        dirty: false,
      },
    }
  }

  ///
  /// Returns a Future for the Node's value and Generation.
  ///
  /// The two separate state matches handle two cases: in the first case we simply want to mutate
  /// or clone the state, so we take it by reference without swapping it. In the second case, we
  /// need to consume the state (which avoids cloning some of the values held there), so we take it
  /// by value.
  ///
  pub(crate) fn get<C>(
    &mut self,
    context: &C,
    entry_id: EntryId,
  ) -> BoxFuture<(N::Item, Generation), N::Error>
  where
    C: NodeContext<Node = N>,
  {
    {
      let mut state = self.state.lock();

      // First check whether the Node is already complete, or is currently running: in both of these
      // cases we don't swap the state of the Node.
      match &mut *state {
        &mut EntryState::Running {
          ref mut waiters, ..
        } => {
          let (send, recv) = oneshot::channel();
          waiters.push(send);
          return recv
            .map_err(|_| N::Error::invalidated())
            .flatten()
            .to_boxed();
        }
        &mut EntryState::Completed {
          ref result,
          generation,
          dirty,
          ..
        }
          if !dirty =>
        {
          return future::result(result.clone())
            .map(move |res| (res, generation))
            .to_boxed();
        }
        _ => {
          // Fall through to the second match.
        }
      };

      // Otherwise, we'll need to swap the state of the Node, so take it by value.
      let next_state = match mem::replace(&mut *state, EntryState::initial()) {
        EntryState::NotStarted {
          run_token,
          generation,
          previous_result,
        } => Self::run(
          context,
          &self.node,
          entry_id,
          run_token,
          generation,
          None,
          previous_result,
        ),
        EntryState::Completed {
          run_token,
          generation,
          result,
          dep_generations,
          dirty,
        } => {
          assert!(
            dirty,
            "A clean Node should not reach this point: {:?}",
            result
          );
          // The Node has already completed but is now marked dirty. This indicates that we are the
          // first caller to request it since it was marked dirty. We attempt to clean it (which will
          // cause it to re-run if the dep_generations mismatch).
          Self::run(
            context,
            &self.node,
            entry_id,
            run_token,
            generation,
            Some(dep_generations),
            Some(result),
          )
        }
        EntryState::Running { .. } => {
          panic!("A Running Node should not reach this point.");
        }
      };

      // Swap in the new state and then recurse. NB: at most one level of recursion occurs, because
      // the new state is either Running or Completed(clean), both handled by the first match above.
      *state = next_state;
    }
    self.get(context, entry_id)
  }

  ///
  /// Called from the Executor when a Node completes.
  ///
  /// A `result` value of `None` indicates that the Node was found to be clean, and its previous
  /// result should be used. This special case exists to avoid 1) cloning the result to call this
  /// method, and 2) comparing the current/previous results unnecessarily.
  ///
  /// Takes a &mut InnerGraph to ensure that completing nodes doesn't race with dirtying them.
  /// The important relationship being guaranteed here is that if the Graph is calling
  /// invalidate_from_roots, it may mark us, or our dependencies, as dirty. We don't want to
  /// complete _while_ a batch of nodes are being marked as dirty, and this exclusive access ensures
  /// that can't happen.
  ///
  pub(crate) fn complete<C>(
    &mut self,
    context: &C,
    entry_id: EntryId,
    result_run_token: RunToken,
    dep_generations: Vec<Generation>,
    result: Option<Result<N::Item, N::Error>>,
    _graph: &mut super::InnerGraph<N>,
  ) where
    C: NodeContext<Node = N>,
  {
    let mut state = self.state.lock();

    // We care about exactly one case: a Running state with the same run_token. All other states
    // represent various (legal) race conditions. See `RunToken`'s docs for more information.
    match *state {
      EntryState::Running { run_token, .. } if result_run_token == run_token => {}
      _ => {
        // We care about exactly one case: a Running state with the same run_token. All other states
        // represent various (legal) race conditions.
        return;
      }
    }

    // Consume the Running state (the guard above ensured that we are Running with a matching
    // token) and compute the next state.
    *state = match mem::replace(&mut *state, EntryState::initial()) {
      EntryState::Running {
        waiters,
        run_token,
        generation,
        previous_result,
        dirty,
        ..
      } => {
        if result == Some(Err(N::Error::invalidated())) {
          // Because it is always ephemeral, invalidation is the only type of Err that we do not
          // persist in the Graph. Instead, swap the Node to NotStarted to drop all waiters,
          // causing them to also experience invalidation (transitively).
          EntryState::NotStarted {
            run_token: run_token.next(),
            generation,
            previous_result,
          }
        } else if dirty {
          // The node was dirtied while it was running. The dep_generations and new result cannot
          // be trusted and were never published. We continue to use the previous result.
          Self::run(
            context,
            &self.node,
            entry_id,
            run_token,
            generation,
            None,
            previous_result,
          )
        } else {
          // If the new result does not match the previous result, the generation increments.
          let (generation, next_result) = if let Some(result) = result {
            if Some(&result) == previous_result.as_ref() {
              // Node was re-executed, but had the same result value.
              (generation, result)
            } else {
              (generation.next(), result)
            }
          } else {
            // Node was marked clean.
            // NB: The `expect` here avoids a clone and a comparison: see the method docs.
            (
              generation,
              previous_result.expect("A Node cannot be marked clean without a previous result."),
            )
          };
          // Notify all waiters (ignoring any that have gone away), and then store the value.
          // A waiter will go away whenever they drop the `Future` `Receiver` of the value, perhaps
          // due to failure of another Future in a `join` or `join_all`, or due to a timeout at the
          // root of a request.
          for waiter in waiters {
            let _ = waiter.send(next_result.clone().map(|res| (res, generation)));
          }
          EntryState::Completed {
            result: next_result,
            dep_generations,
            run_token,
            generation,
            dirty: false,
          }
        }
      }
      s => s,
    };
  }

  ///
  /// Get the current Generation of this entry.
  ///
  /// TODO: Consider moving the Generation and RunToken out of the EntryState once we decide what
  /// we want the per-Entry locking strategy to be.
  ///
  pub(crate) fn generation(&self) -> Generation {
    match *self.state.lock() {
      EntryState::NotStarted { generation, .. }
      | EntryState::Running { generation, .. }
      | EntryState::Completed { generation, .. } => generation,
    }
  }

  ///
  /// Get the current RunToken of this entry.
  ///
  /// TODO: Consider moving the Generation and RunToken out of the EntryState once we decide what
  /// we want the per-Entry locking strategy to be.
  ///
  pub(crate) fn run_token(&self) -> RunToken {
    match *self.state.lock() {
      EntryState::NotStarted { run_token, .. }
      | EntryState::Running { run_token, .. }
      | EntryState::Completed { run_token, .. } => run_token,
    }
  }

  ///
  /// If the Node has started and has not yet completed, returns its runtime.
  ///
  pub(crate) fn current_running_duration(&self, now: Instant) -> Option<Duration> {
    match *self.state.lock() {
      EntryState::Running { start_time, .. } =>
      // NB: `Instant::duration_since` panics if the end time is before the start time, which can
      // happen when starting a Node races against a caller creating their Instant.
      {
        Some(if start_time < now {
          now.duration_since(start_time)
        } else {
          Duration::from_secs(0)
        })
      }
      _ => None,
    }
  }

  ///
  /// Clears the state of this Node, forcing it to be recomputed.
  ///
  pub(crate) fn clear(&mut self) {
    let mut state = self.state.lock();

    // Preserve the previous result (if any) so that the next run can compute a Generation by
    // comparing against it.
    let (run_token, generation, previous_result) =
      match mem::replace(&mut *state, EntryState::initial()) {
        EntryState::NotStarted {
          run_token,
          generation,
          previous_result,
        }
        | EntryState::Running {
          run_token,
          generation,
          previous_result,
          ..
        } => (run_token, generation, previous_result),
        EntryState::Completed {
          run_token,
          generation,
          result,
          ..
        } => (run_token, generation, Some(result)),
      };

    // Swap in a state with a new RunToken value, which invalidates any outstanding work.
    *state = EntryState::NotStarted {
      run_token: run_token.next(),
      generation,
      previous_result,
    };
  }

  ///
  /// Dirties this Node, which will cause it to examine its dependencies the next time it is
  /// requested, and re-run if any of them have changed generations.
  ///
  /// See comment on complete for information about _graph argument.
  ///
  pub(crate) fn dirty(&mut self, _graph: &mut super::InnerGraph<N>) {
    match &mut *self.state.lock() {
      &mut EntryState::Running { ref mut dirty, .. }
      | &mut EntryState::Completed { ref mut dirty, .. } => {
        // Mark dirty.
        *dirty = true;
      }
      // NotStarted needs no dirty bit: its RunToken already invalidates stale work.
      &mut EntryState::NotStarted { .. } => {}
    }
  }

  /// Renders this Node and its current result as a single line, escaping double quotes so the
  /// output can be embedded inside a quoted string.
  pub(crate) fn format(&self) -> String {
    let state = match self.peek() {
      Some(Ok(ref nr)) => format!("{:?}", nr),
      Some(Err(ref x)) => format!("{:?}", x),
      None => "<None>".to_string(),
    };
    format!("{} == {}", self.node.content().format(), state).replace("\"", "\\\"")
  }
}
|
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
// #[PerformanceCriticalPath]
use std::cmp::PartialOrd;
use std::collections::VecDeque;
use std::ops::{Add, AddAssign, Sub, SubAssign};
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::sync::mpsc::{self, Receiver, RecvTimeoutError, SyncSender};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use std::u64;
use collections::HashMap;
use engine_rocks::FlowInfo;
use engine_traits::{CFNamesExt, FlowControlFactorsExt};
use num_traits::cast::{AsPrimitive, FromPrimitive};
use rand::Rng;
use tikv_util::time::{Instant, Limiter};
use crate::storage::config::FlowControlConfig;
use crate::storage::metrics::*;
// Interval of the flow checker's periodic tick. (Assumption: consumed by the checker loop,
// which is not visible in this chunk — confirm against `FlowChecker::start`.)
const TICK_DURATION: Duration = Duration::from_millis(1000);

// Discard ratios are stored in an AtomicU32 scaled by this factor; see
// `FlowController::should_drop`, which uses it as the denominator for `gen_ratio`.
const RATIO_SCALE_FACTOR: u32 = 10_000_000;
// Multiplicative factors for tightening/relaxing the throttle; the decrease ratio is the
// exact inverse of the increase ratio, so one step of each cancels out.
const K_INC_SLOWDOWN_RATIO: f64 = 0.8;
const K_DEC_SLOWDOWN_RATIO: f64 = 1.0 / K_INC_SLOWDOWN_RATIO;
// Clamp bounds for the computed throttle speed, in bytes per second.
const MIN_THROTTLE_SPEED: f64 = 16.0 * 1024.0; // 16KB
const MAX_THROTTLE_SPEED: f64 = 200.0 * 1024.0 * 1024.0; // 200MB

const EMA_FACTOR: f64 = 0.6; // EMA stands for Exponential Moving Average
// Direction of change detected in a monitored flow statistic. (Consumers are outside this
// chunk — confirm usage in the FlowChecker.)
#[derive(Eq, PartialEq, Debug)]
enum Trend {
    Increasing,
    Decreasing,
    // No clear direction.
    NoTrend,
}
/// Flow controller is used to throttle the write rate at scheduler level, aiming
/// to substitute the write stall mechanism of RocksDB. It improves on it in two points:
///   * throttle at scheduler, so raftstore and apply won't be blocked anymore
///   * better control on the throttle rate to avoid QPS drop under heavy write
///
/// When write stall happens, the max speed of write rate max_delayed_write_rate
/// is limited to 16MB/s by default which doesn't take real disk ability into
/// account. It may underestimate the disk's throughput, such that 16MB/s is too small
/// at once, causing a very large jitter on the write duration.
/// Also, it decreases the delayed write rate further if the factors still exceed
/// the threshold. So under heavy write load, the write rate may be throttled to
/// a very low rate from time to time, causing QPS drop eventually.
///
/// For compaction pending bytes, we use discardable ratio to do flow control
/// which is a separate mechanism from throttle speed. Compaction pending bytes is
/// an approximate value, usually, changes up and down dramatically, so it's unwise
/// to map compaction pending bytes to a specified throttle speed. Instead,
/// mapping it from soft limit to hard limit as 0% to 100% discardable ratio. With
/// this, there must be a point that foreground write rate is equal to the
/// background compaction pending bytes consuming rate so that compaction pending
/// bytes is kept around a steady level.
///
/// Here is a brief flow showing where the mechanism works:
/// grpc -> check should drop(discardable ratio) -> limiter -> async write to raftstore
pub struct FlowController {
    // Probability of rejecting an incoming write, scaled by RATIO_SCALE_FACTOR
    // (see `should_drop`). Updated by the background FlowChecker thread.
    discard_ratio: Arc<AtomicU32>,
    // Rate limiter applied to scheduler writes via `consume`/`unconsume`.
    limiter: Arc<Limiter>,
    enabled: Arc<AtomicBool>,
    // Channel to the background FlowChecker thread; `None` for the test-only `empty()` controller.
    tx: Option<SyncSender<Msg>>,
    // Join handle of the FlowChecker thread, joined on drop.
    handle: Option<std::thread::JoinHandle<()>>,
}
// Control messages sent from `FlowController` to the background FlowChecker thread.
enum Msg {
    // Ask the checker thread to exit (sent from `Drop`).
    Close,
    Enable,
    Disable,
}
impl Drop for FlowController {
    fn drop(&mut self) {
        // Nothing to shut down if the background checker was never started
        // (e.g. a controller built via `empty()`).
        let handle = match self.handle.take() {
            Some(handle) => handle,
            None => return,
        };
        // Ask the checker thread to exit; if that fails there is no point joining.
        if let Some(Err(e)) = self.tx.as_ref().map(|tx| tx.send(Msg::Close)) {
            error!("send quit message for flow controller failed"; "err" => ?e);
            return;
        }
        // Wait for the checker thread to finish.
        if let Err(e) = handle.join() {
            error!("join flow controller failed"; "err" => ?e);
        }
    }
}
impl FlowController {
    /// Builds an inert controller: no background checker thread, throttling
    /// disabled, limiter unlimited. Only for tests.
    pub fn empty() -> Self {
        Self {
            discard_ratio: Arc::new(AtomicU32::new(0)),
            limiter: Arc::new(Limiter::new(f64::INFINITY)),
            enabled: Arc::new(AtomicBool::new(false)),
            tx: None,
            handle: None,
        }
    }
    /// Builds a controller and spawns its background `FlowChecker` thread,
    /// which consumes engine events from `flow_info_receiver`. The initial
    /// enable state is taken from `config.enable` and seeded into the checker
    /// before it starts.
    pub fn new<E: CFNamesExt + FlowControlFactorsExt + Send + 'static>(
        config: &FlowControlConfig,
        engine: E,
        flow_info_receiver: Receiver<FlowInfo>,
    ) -> Self {
        let limiter = Arc::new(
            <Limiter>::builder(f64::INFINITY)
                .refill(Duration::from_millis(1))
                .build(),
        );
        let discard_ratio = Arc::new(AtomicU32::new(0));
        let checker = FlowChecker::new(config, engine, discard_ratio.clone(), limiter.clone());
        let (tx, rx) = mpsc::sync_channel(5);
        // Queue the initial enable/disable state for the checker thread.
        let initial = if config.enable {
            Msg::Enable
        } else {
            Msg::Disable
        };
        tx.send(initial).unwrap();
        let handle = checker.start(rx, flow_info_receiver);
        Self {
            discard_ratio,
            limiter,
            enabled: Arc::new(AtomicBool::new(config.enable)),
            tx: Some(tx),
            handle: Some(handle),
        }
    }
    /// Randomly decides whether an incoming write should be rejected; the
    /// probability is discard_ratio / RATIO_SCALE_FACTOR.
    pub fn should_drop(&self) -> bool {
        let ratio = self.discard_ratio.load(Ordering::Relaxed);
        rand::thread_rng().gen_ratio(ratio, RATIO_SCALE_FACTOR)
    }
    /// Current discard ratio as a float in [0, 1].
    #[cfg(test)]
    pub fn discard_ratio(&self) -> f64 {
        let scaled = self.discard_ratio.load(Ordering::Relaxed);
        f64::from(scaled) / f64::from(RATIO_SCALE_FACTOR)
    }
    /// Charges `bytes` against the limiter, returning how long the caller
    /// should delay the write.
    pub fn consume(&self, bytes: usize) -> Duration {
        self.limiter.consume_duration(bytes)
    }
    /// Refunds `bytes` previously charged against the limiter.
    pub fn unconsume(&self, bytes: usize) {
        self.limiter.unconsume(bytes);
    }
    /// Total bytes charged since the limiter statistics were last reset.
    #[cfg(test)]
    pub fn total_bytes_consumed(&self) -> usize {
        self.limiter.total_bytes_consumed()
    }
    /// Flips the enable flag and forwards the change to the checker thread.
    pub fn enable(&self, enable: bool) {
        self.enabled.store(enable, Ordering::Relaxed);
        if let Some(tx) = &self.tx {
            let msg = if enable { Msg::Enable } else { Msg::Disable };
            tx.send(msg).unwrap();
        }
    }
    /// Whether flow control is currently enabled.
    pub fn enabled(&self) -> bool {
        self.enabled.load(Ordering::Relaxed)
    }
    /// Overrides the limiter speed directly; test hook only.
    #[cfg(test)]
    pub fn set_speed_limit(&self, speed_limit: f64) {
        self.limiter.set_speed_limit(speed_limit);
    }
    /// True when no throttling is applied at all.
    pub fn is_unlimited(&self) -> bool {
        self.limiter.speed_limit() == f64::INFINITY
    }
}
// Records older than this many seconds are evicted from a `Smoother`'s
// sliding window regardless of its capacity.
const SMOOTHER_STALE_RECORD_THRESHOLD: f64 = 300.0; // 5min
// Smoother is a sliding window used to provide steadier flow statistics.
// `CAP` bounds the number of records; records older than
// SMOOTHER_STALE_RECORD_THRESHOLD seconds are evicted too (keeping >= 2).
struct Smoother<T, const CAP: usize>
where
    T: Default
        + Add<Output = T>
        + Sub<Output = T>
        + AddAssign
        + SubAssign
        + PartialOrd
        + AsPrimitive<f64>
        + FromPrimitive,
{
    // (value, observation time) pairs, oldest at the front.
    records: VecDeque<(T, Instant)>,
    // Running sum of the values currently held in `records`.
    total: T,
}
impl<T, const CAP: usize> Default for Smoother<T, CAP>
where
    T: Default
        + Add<Output = T>
        + Sub<Output = T>
        + AddAssign
        + SubAssign
        + PartialOrd
        + AsPrimitive<f64>
        + FromPrimitive,
{
    /// Builds an empty window with room for `CAP` records pre-allocated.
    fn default() -> Self {
        let records = VecDeque::with_capacity(CAP);
        let total = T::default();
        Self { records, total }
    }
}
impl<T, const CAP: usize> Smoother<T, CAP>
where
    T: Default
        + Add<Output = T>
        + Sub<Output = T>
        + AddAssign
        + SubAssign
        + PartialOrd
        + AsPrimitive<f64>
        + FromPrimitive,
{
    /// Pushes a new record into the window, evicting the oldest record when
    /// the window is at capacity, then prunes stale records.
    pub fn observe(&mut self, record: T) {
        if self.records.len() == CAP {
            let v = self.records.pop_front().unwrap().0;
            self.total -= v;
        }
        self.total += record;
        self.records.push_back((record, Instant::now_coarse()));
        self.remove_stale_records();
    }
    /// Drops records older than SMOOTHER_STALE_RECORD_THRESHOLD seconds.
    fn remove_stale_records(&mut self) {
        // make sure there are two records left at least
        while self.records.len() > 2 {
            if self.records.front().unwrap().1.saturating_elapsed_secs()
                > SMOOTHER_STALE_RECORD_THRESHOLD
            {
                let v = self.records.pop_front().unwrap().0;
                self.total -= v;
            } else {
                break;
            }
        }
    }
    /// Latest observed value, or `T::default()` when the window is empty.
    pub fn get_recent(&self) -> T {
        if self.records.is_empty() {
            return T::default();
        }
        self.records.back().unwrap().0
    }
    /// Arithmetic mean over the window; 0.0 when empty.
    pub fn get_avg(&self) -> f64 {
        if self.records.is_empty() {
            return 0.0;
        }
        self.total.as_() / self.records.len() as f64
    }
    /// Maximum value in the window.
    /// NOTE: panics if values are incomparable (e.g. NaN for a float `T`).
    pub fn get_max(&self) -> T {
        if self.records.is_empty() {
            return T::default();
        }
        self.records
            .iter()
            .max_by(|a, b| a.0.partial_cmp(&b.0).unwrap())
            .unwrap()
            .0
    }
    /// 90th percentile of the window (nearest-rank on a sorted copy).
    /// Takes `&self` (previously `&mut self`): sorting happens on a temporary
    /// `Vec` of references, so no mutable access to the window is needed.
    pub fn get_percentile_90(&self) -> T {
        if self.records.is_empty() {
            return FromPrimitive::from_u64(0).unwrap();
        }
        let mut v: Vec<_> = self.records.iter().collect();
        v.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
        v[((self.records.len() - 1) as f64 * 0.90) as usize].0
    }
    /// Reports whether the window is trending up or down by comparing the
    /// sums of its older and newer halves (equal sizes, middle element of an
    /// odd-length window excluded), with a tolerance of 2.
    pub fn trend(&self) -> Trend {
        if self.records.len() <= 1 {
            return Trend::NoTrend;
        }
        // calculate the average of left and right parts; since both halves
        // hold the same number of records, comparing the sums is equivalent.
        let half = self.records.len() / 2;
        let mut left = T::default();
        let mut right = T::default();
        for (i, r) in self.records.iter().enumerate() {
            if i < half {
                left += r.0;
            } else if self.records.len() - i - 1 < half {
                right += r.0;
            }
        }
        // decide if there is a trend by the two averages
        // adding 2 here is to give a tolerance
        if right > left + FromPrimitive::from_u64(2).unwrap() {
            Trend::Increasing
        } else if left > right + FromPrimitive::from_u64(2).unwrap() {
            Trend::Decreasing
        } else {
            Trend::NoTrend
        }
    }
}
// CFFlowChecker records some statistics and states related to one CF.
// These statistics fall into five categories:
//   * memtable
//   * L0 files
//   * L0 production flow (flush flow)
//   * L0 consumption flow (compaction read flow of L0)
//   * pending compaction bytes
// And all of them are collected from the hook of RocksDB's event listener.
struct CFFlowChecker {
    // Memtable related.
    // Sliding window over recent numbers of immutable memtables.
    last_num_memtables: Smoother<u64, 20>,
    // Speed (in MB/s units, see `on_memtable_change`) borrowed from the
    // limiter while memtable throttling tightens; repaid when it ends.
    memtable_debt: f64,
    // True while the throttle speed was initialized because of memtables.
    memtable_init_speed: bool,
    // L0 files related
    // a few records of number of L0 files right after flush or L0 compaction
    // As we know, after flush the number of L0 files must increase by 1,
    // whereas, after L0 compaction the number of L0 files must decrease a lot
    // considering L0 compactions nearly includes all L0 files in a round.
    // So to evaluate the accumulation of L0 files, here only records the number
    // of L0 files right after L0 compactions.
    long_term_num_l0_files: Smoother<u64, 20>,
    // L0 production flow related: bytes flushed since the last flow sample.
    last_flush_bytes: u64,
    last_flush_bytes_time: Instant,
    short_term_l0_production_flow: Smoother<u64, 10>,
    // L0 consumption flow related: L0 compaction-read bytes since last sample.
    last_l0_bytes: u64,
    last_l0_bytes_time: Instant,
    short_term_l0_consumption_flow: Smoother<u64, 3>,
    // Pending compaction bytes related (values are stored in log2 space).
    long_term_pending_bytes: Smoother<f64, 60>,
    // Pending-bytes level recorded before an unsafe destroy range, used to
    // detect a jump caused by the destroy itself.
    pending_bytes_before_unsafe_destroy_range: Option<f64>,
    // On start related markers. Because after restart, the memtable, l0 files
    // and compaction pending bytes may be high on start. If throttle on start
    // at once, it may get a low throttle speed as initialization cause it may
    // has no write flow after restart. So use the markers to make sure only
    // throttled after the memtable, l0 files and compaction pending bytes
    // go beyond the threshold again.
    on_start_memtable: bool,
    on_start_l0_files: bool,
    on_start_pending_bytes: bool,
}
impl Default for CFFlowChecker {
    /// All statistics start empty; the three `on_start_*` markers begin as
    /// `true` so throttling is suppressed until each factor first drops below
    /// its threshold (see the struct-level comment).
    fn default() -> Self {
        Self {
            // memtable statistics
            last_num_memtables: Smoother::default(),
            memtable_debt: 0.0,
            memtable_init_speed: false,
            // L0 file statistics
            long_term_num_l0_files: Smoother::default(),
            short_term_l0_production_flow: Smoother::default(),
            last_flush_bytes: 0,
            last_flush_bytes_time: Instant::now_coarse(),
            short_term_l0_consumption_flow: Smoother::default(),
            last_l0_bytes: 0,
            last_l0_bytes_time: Instant::now_coarse(),
            // pending compaction bytes statistics
            long_term_pending_bytes: Smoother::default(),
            pending_bytes_before_unsafe_destroy_range: None,
            // on-start markers
            on_start_memtable: true,
            on_start_l0_files: true,
            on_start_pending_bytes: true,
        }
    }
}
struct FlowChecker<E: CFNamesExt + FlowControlFactorsExt + Send + 'static> {
    // Pending compaction bytes limits in raw bytes; compared in log2 space
    // inside `on_pending_compaction_bytes_change`.
    soft_pending_compaction_bytes_limit: u64,
    hard_pending_compaction_bytes_limit: u64,
    // Throttle when the average number of immutable memtables exceeds this.
    memtables_threshold: u64,
    // Throttle when the recent number of L0 files exceeds this.
    l0_files_threshold: u64,
    // CFFlowChecker for each CF.
    cf_checkers: HashMap<String, CFFlowChecker>,
    // Record which CF is taking control of throttling, the throttle speed is
    // decided based on the statistics of the throttle CF. If the multiple CFs
    // exceed the threshold, choose the larger one.
    throttle_cf: Option<String>,
    // Discard ratio is decided by pending compaction bytes, it's the ratio to
    // drop write requests(return ServerIsBusy to TiDB) randomly.
    discard_ratio: Arc<AtomicU32>,
    engine: E,
    // Shared with `FlowController::consume`.
    limiter: Arc<Limiter>,
    // Records the foreground write flow at scheduler level of last few seconds.
    write_flow_recorder: Smoother<u64, 30>,
    // When the write flow was last sampled (see `update_statistics`).
    last_record_time: Instant,
    // Last throttle speed before throttling was lifted; reused as the seed
    // when L0 throttling starts again.
    last_speed: f64,
    // True between BeforeUnsafeDestroyRange and AfterUnsafeDestroyRange.
    wait_for_destroy_range_finish: bool,
}
impl<E: CFNamesExt + FlowControlFactorsExt + Send + 'static> FlowChecker<E> {
    /// Creates a checker with one `CFFlowChecker` per column family of the
    /// engine. `discard_ratio` and `limiter` are shared with the owning
    /// `FlowController`.
    pub fn new(
        config: &FlowControlConfig,
        engine: E,
        discard_ratio: Arc<AtomicU32>,
        limiter: Arc<Limiter>,
    ) -> Self {
        let cf_checkers = engine
            .cf_names()
            .into_iter()
            .map(|cf| (cf.to_owned(), CFFlowChecker::default()))
            .collect();
        Self {
            soft_pending_compaction_bytes_limit: config.soft_pending_compaction_bytes_limit.0,
            hard_pending_compaction_bytes_limit: config.hard_pending_compaction_bytes_limit.0,
            memtables_threshold: config.memtables_threshold,
            l0_files_threshold: config.l0_files_threshold,
            engine,
            discard_ratio,
            limiter,
            write_flow_recorder: Smoother::default(),
            cf_checkers,
            throttle_cf: None,
            last_record_time: Instant::now_coarse(),
            last_speed: 0.0,
            wait_for_destroy_range_finish: false,
        }
    }
    /// Spawns the "flow-checker" thread. It reacts to `FlowInfo` events from
    /// the engine and to control messages from `rx`, and wakes up at least
    /// every TICK_DURATION to refresh the write-flow statistics.
    fn start(self, rx: Receiver<Msg>, flow_info_receiver: Receiver<FlowInfo>) -> JoinHandle<()> {
        Builder::new()
            .name(thd_name!("flow-checker"))
            .spawn(move || {
                tikv_alloc::add_thread_memory_accessor();
                let mut checker = self;
                let mut deadline = std::time::Instant::now();
                let mut enabled = true;
                loop {
                    // Control messages take effect before the next flow event.
                    match rx.try_recv() {
                        Ok(Msg::Close) => break,
                        Ok(Msg::Disable) => {
                            enabled = false;
                            checker.reset_statistics();
                        }
                        Ok(Msg::Enable) => {
                            enabled = true;
                        }
                        Err(_) => {}
                    }
                    // Statistics are collected even when disabled; only the
                    // throttling reactions are gated on `enabled`.
                    match flow_info_receiver.recv_deadline(deadline) {
                        Ok(FlowInfo::L0(cf, l0_bytes)) => {
                            checker.collect_l0_consumption_stats(&cf, l0_bytes);
                            if enabled {
                                checker.on_l0_change(cf)
                            }
                        }
                        Ok(FlowInfo::L0Intra(cf, diff_bytes)) => {
                            if diff_bytes > 0 {
                                // Intra L0 merges some deletion records, so regard it as a L0 compaction.
                                checker.collect_l0_consumption_stats(&cf, diff_bytes);
                                if enabled {
                                    checker.on_l0_change(cf);
                                }
                            }
                        }
                        Ok(FlowInfo::Flush(cf, flush_bytes)) => {
                            checker.collect_l0_production_stats(&cf, flush_bytes);
                            if enabled {
                                checker.on_memtable_change(&cf);
                                checker.on_l0_change(cf)
                            }
                        }
                        Ok(FlowInfo::Compaction(cf)) => {
                            if enabled {
                                checker.on_pending_compaction_bytes_change(cf);
                            }
                        }
                        Ok(FlowInfo::BeforeUnsafeDestroyRange) => {
                            if !enabled {
                                continue;
                            }
                            checker.wait_for_destroy_range_finish = true;
                            let soft = (checker.soft_pending_compaction_bytes_limit as f64).log2();
                            // Remember the pending-bytes level of every CF
                            // currently below the soft limit so that a jump
                            // caused by the destroy can be detected later.
                            for cf_checker in checker.cf_checkers.values_mut() {
                                let v = cf_checker.long_term_pending_bytes.get_avg();
                                if v <= soft {
                                    cf_checker.pending_bytes_before_unsafe_destroy_range = Some(v);
                                }
                            }
                        }
                        Ok(FlowInfo::AfterUnsafeDestroyRange) => {
                            if !enabled {
                                continue;
                            }
                            checker.wait_for_destroy_range_finish = false;
                            for (cf, cf_checker) in &mut checker.cf_checkers {
                                if let Some(before) =
                                    cf_checker.pending_bytes_before_unsafe_destroy_range
                                {
                                    let soft =
                                        (checker.soft_pending_compaction_bytes_limit as f64).log2();
                                    let after = (checker
                                        .engine
                                        .get_cf_pending_compaction_bytes(cf)
                                        .unwrap_or(None)
                                        .unwrap_or(0)
                                        as f64)
                                        .log2();
                                    // NOTE(review): the value was recorded above
                                    // under `v <= soft`, so exact equality with
                                    // `soft` would trip this assert — presumably
                                    // float equality never occurs here; verify.
                                    assert!(before < soft);
                                    if after >= soft {
                                        // there is a pending bytes jump
                                        SCHED_THROTTLE_ACTION_COUNTER
                                            .with_label_values(&[cf, "pending_bytes_jump"])
                                            .inc();
                                    } else {
                                        cf_checker.pending_bytes_before_unsafe_destroy_range = None;
                                    }
                                }
                            }
                        }
                        Err(RecvTimeoutError::Timeout) => {
                            // Periodic tick: refresh statistics and schedule
                            // the next deadline.
                            checker.update_statistics();
                            deadline = std::time::Instant::now() + TICK_DURATION;
                        }
                        Err(e) => {
                            error!("failed to receive compaction info {:?}", e);
                        }
                    }
                }
                tikv_alloc::remove_thread_memory_accessor();
            })
            .unwrap()
    }
    /// Clears all metrics gauges, lifts throttling entirely and resets the
    /// discard ratio; called when flow control is disabled.
    fn reset_statistics(&mut self) {
        SCHED_L0_TARGET_FLOW_GAUGE.set(0);
        for cf in self.cf_checkers.keys() {
            SCHED_THROTTLE_CF_GAUGE.with_label_values(&[cf]).set(0);
            SCHED_PENDING_COMPACTION_BYTES_GAUGE
                .with_label_values(&[cf])
                .set(0);
            SCHED_MEMTABLE_GAUGE.with_label_values(&[cf]).set(0);
            SCHED_L0_GAUGE.with_label_values(&[cf]).set(0);
            SCHED_L0_AVG_GAUGE.with_label_values(&[cf]).set(0);
            SCHED_L0_FLOW_GAUGE.with_label_values(&[cf]).set(0);
            SCHED_FLUSH_FLOW_GAUGE.with_label_values(&[cf]).set(0);
        }
        SCHED_WRITE_FLOW_GAUGE.set(0);
        SCHED_THROTTLE_FLOW_GAUGE.set(0);
        self.limiter.set_speed_limit(f64::INFINITY);
        SCHED_DISCARD_RATIO_GAUGE.set(0);
        self.discard_ratio.store(0, Ordering::Relaxed);
    }
    /// Refreshes the throttle-CF gauges and samples the foreground write flow
    /// since the last tick into `write_flow_recorder`.
    fn update_statistics(&mut self) {
        if let Some(throttle_cf) = self.throttle_cf.as_ref() {
            SCHED_THROTTLE_CF_GAUGE
                .with_label_values(&[throttle_cf])
                .set(1);
            for cf in self.cf_checkers.keys() {
                if cf != throttle_cf {
                    SCHED_THROTTLE_CF_GAUGE.with_label_values(&[cf]).set(0);
                }
            }
        } else {
            for cf in self.cf_checkers.keys() {
                SCHED_THROTTLE_CF_GAUGE.with_label_values(&[cf]).set(0);
            }
        }
        // calculate foreground write flow
        let dur = self.last_record_time.saturating_elapsed_secs();
        if dur < f64::EPSILON {
            // Interval too short to produce a meaningful rate.
            return;
        }
        let rate = self.limiter.total_bytes_consumed() as f64 / dur;
        // don't record those write rate of 0.
        // For closed loop system, if all the requests are delayed(assume > 1s),
        // then in the next second, the write rate would be 0. But it doesn't
        // reflect the real write rate, so just ignore it.
        if self.limiter.total_bytes_consumed() != 0 {
            self.write_flow_recorder.observe(rate as u64);
        }
        SCHED_WRITE_FLOW_GAUGE.set(rate as i64);
        self.last_record_time = Instant::now_coarse();
        self.limiter.reset_statistics();
    }
    /// Maps the (log2) pending compaction bytes of `cf` to a discard ratio:
    /// 0 below the soft limit, rising towards 1 at the hard limit, smoothed
    /// with an EMA because pending bytes fluctuate heavily.
    fn on_pending_compaction_bytes_change(&mut self, cf: String) {
        let hard = (self.hard_pending_compaction_bytes_limit as f64).log2();
        let soft = (self.soft_pending_compaction_bytes_limit as f64).log2();
        // Because pending compaction bytes changes dramatically, take the
        // logarithm of pending compaction bytes to make the values fall into
        // a relative small range
        let num = (self
            .engine
            .get_cf_pending_compaction_bytes(&cf)
            .unwrap_or(None)
            .unwrap_or(0) as f64)
            .log2();
        let checker = self.cf_checkers.get_mut(&cf).unwrap();
        checker.long_term_pending_bytes.observe(num);
        SCHED_PENDING_COMPACTION_BYTES_GAUGE
            .with_label_values(&[&cf])
            .set((checker.long_term_pending_bytes.get_avg() * RATIO_SCALE_FACTOR as f64) as i64);
        // do special check on start, see the comment of the variable definition for detail.
        if checker.on_start_pending_bytes {
            if num < soft || checker.long_term_pending_bytes.trend() == Trend::Increasing {
                // the write is accumulating, still need to throttle
                checker.on_start_pending_bytes = false;
            } else {
                // still on start, should not throttle now
                return;
            }
        }
        let pending_compaction_bytes = checker.long_term_pending_bytes.get_avg();
        // While an unsafe destroy range is in flight (or its jump is still
        // unresolved), don't let the inflated pending bytes raise the ratio.
        let ignore = if let Some(before) = checker.pending_bytes_before_unsafe_destroy_range {
            if pending_compaction_bytes <= before && !self.wait_for_destroy_range_finish {
                checker.pending_bytes_before_unsafe_destroy_range = None;
            }
            true
        } else {
            false
        };
        // Only the CF with the highest recent pending bytes drives the ratio.
        for checker in self.cf_checkers.values() {
            if num < checker.long_term_pending_bytes.get_recent() {
                return;
            }
        }
        let mut ratio = if pending_compaction_bytes < soft || ignore {
            0
        } else {
            let new_ratio = (pending_compaction_bytes - soft) / (hard - soft);
            let old_ratio = self.discard_ratio.load(Ordering::Relaxed);
            // Because pending compaction bytes changes up and down, so using
            // EMA(Exponential Moving Average) to smooth it.
            (if old_ratio != 0 {
                EMA_FACTOR * (old_ratio as f64 / RATIO_SCALE_FACTOR as f64)
                    + (1.0 - EMA_FACTOR) * new_ratio
            } else if new_ratio > 0.01 {
                // Start from a small ratio instead of jumping straight up.
                0.01
            } else {
                new_ratio
            } * RATIO_SCALE_FACTOR as f64) as u32
        };
        // NOTE(review): the gauge is set before clamping, so it can
        // momentarily report a value larger than RATIO_SCALE_FACTOR.
        SCHED_DISCARD_RATIO_GAUGE.set(ratio as i64);
        if ratio > RATIO_SCALE_FACTOR {
            ratio = RATIO_SCALE_FACTOR;
        }
        self.discard_ratio.store(ratio, Ordering::Relaxed);
    }
    /// Adjusts the throttle speed according to the number of immutable
    /// memtables of `cf`: seeds a speed from the recent write flow when the
    /// threshold is first exceeded, then nudges it by ±1MB/s per change
    /// (tracked in `memtable_debt`) while throttled.
    fn on_memtable_change(&mut self, cf: &str) {
        let num_memtables = self
            .engine
            .get_cf_num_immutable_mem_table(cf)
            .unwrap_or(None)
            .unwrap_or(0);
        let checker = self.cf_checkers.get_mut(cf).unwrap();
        SCHED_MEMTABLE_GAUGE
            .with_label_values(&[cf])
            .set(num_memtables as i64);
        let prev = checker.last_num_memtables.get_recent();
        checker.last_num_memtables.observe(num_memtables);
        // do special check on start, see the comment of the variable definition for detail.
        if checker.on_start_memtable {
            if num_memtables < self.memtables_threshold
                || checker.last_num_memtables.trend() == Trend::Increasing
            {
                // the write is accumulating, still need to throttle
                checker.on_start_memtable = false;
            } else {
                // still on start, should not throttle now
                return;
            }
        }
        // Only the CF with the most immutable memtables drives the decision.
        for c in self.cf_checkers.values() {
            if num_memtables < c.last_num_memtables.get_recent() {
                return;
            }
        }
        let checker = self.cf_checkers.get_mut(cf).unwrap();
        let is_throttled = self.limiter.speed_limit() != f64::INFINITY;
        let should_throttle =
            checker.last_num_memtables.get_avg() > self.memtables_threshold as f64;
        let throttle = if !is_throttled && should_throttle {
            SCHED_THROTTLE_ACTION_COUNTER
                .with_label_values(&[cf, "memtable_init"])
                .inc();
            // Seed the initial speed from the p90 of the recent write flow.
            let x = self.write_flow_recorder.get_percentile_90();
            if x == 0 {
                f64::INFINITY
            } else {
                checker.memtable_init_speed = true;
                self.throttle_cf = Some(cf.to_string());
                x as f64
            }
        } else if is_throttled && (!should_throttle || num_memtables < self.memtables_threshold) {
            // should not throttle memtable
            if checker.memtable_init_speed {
                checker.memtable_init_speed = false;
                f64::INFINITY
            } else {
                // Repay the accumulated debt when leaving memtable throttling.
                let speed = self.limiter.speed_limit() + checker.memtable_debt * 1024.0 * 1024.0;
                checker.memtable_debt = 0.0;
                speed
            }
        } else if is_throttled && should_throttle {
            // should throttle
            let diff = match num_memtables.cmp(&prev) {
                std::cmp::Ordering::Greater => {
                    checker.memtable_debt += 1.0;
                    -1.0
                }
                std::cmp::Ordering::Less => {
                    checker.memtable_debt -= 1.0;
                    1.0
                }
                std::cmp::Ordering::Equal => {
                    // keep, do nothing
                    0.0
                }
            };
            self.limiter.speed_limit() + diff * 1024.0 * 1024.0
        } else {
            f64::INFINITY
        };
        self.update_speed_limit(throttle);
    }
    /// Records L0 compaction-read bytes and the current number of L0 files
    /// for `cf`; pure statistics collection, no throttling decision.
    fn collect_l0_consumption_stats(&mut self, cf: &str, l0_bytes: u64) {
        let num_l0_files = self
            .engine
            .get_cf_num_files_at_level(cf, 0)
            .unwrap_or(None)
            .unwrap_or(0);
        let checker = self.cf_checkers.get_mut(cf).unwrap();
        checker.last_l0_bytes += l0_bytes;
        checker.long_term_num_l0_files.observe(num_l0_files);
        SCHED_L0_GAUGE
            .with_label_values(&[cf])
            .set(num_l0_files as i64);
        SCHED_L0_AVG_GAUGE
            .with_label_values(&[cf])
            .set(checker.long_term_num_l0_files.get_avg() as i64);
    }
    /// Records flush bytes and the current number of L0 files for `cf`, and
    /// refreshes the flush/L0 flow gauges at most once every 5 seconds.
    fn collect_l0_production_stats(&mut self, cf: &str, flush_bytes: u64) {
        let num_l0_files = self
            .engine
            .get_cf_num_files_at_level(cf, 0)
            .unwrap_or(None)
            .unwrap_or(0);
        let checker = self.cf_checkers.get_mut(cf).unwrap();
        checker.last_flush_bytes += flush_bytes;
        checker.long_term_num_l0_files.observe(num_l0_files);
        SCHED_L0_GAUGE
            .with_label_values(&[cf])
            .set(num_l0_files as i64);
        SCHED_L0_AVG_GAUGE
            .with_label_values(&[cf])
            .set(checker.long_term_num_l0_files.get_avg() as i64);
        if checker.last_flush_bytes_time.saturating_elapsed_secs() > 5.0 {
            // update flush flow
            let flush_flow = checker.last_flush_bytes as f64
                / checker.last_flush_bytes_time.saturating_elapsed_secs();
            checker
                .short_term_l0_production_flow
                .observe(flush_flow as u64);
            SCHED_FLUSH_FLOW_GAUGE
                .with_label_values(&[cf])
                .set(checker.short_term_l0_production_flow.get_avg() as i64);
            // update l0 flow
            if checker.last_l0_bytes != 0 {
                let l0_flow = checker.last_l0_bytes as f64
                    / checker.last_l0_bytes_time.saturating_elapsed_secs();
                checker.last_l0_bytes_time = Instant::now_coarse();
                checker
                    .short_term_l0_consumption_flow
                    .observe(l0_flow as u64);
                SCHED_L0_FLOW_GAUGE
                    .with_label_values(&[cf])
                    .set(checker.short_term_l0_consumption_flow.get_avg() as i64);
            }
            checker.last_flush_bytes_time = Instant::now_coarse();
            checker.last_l0_bytes = 0;
            checker.last_flush_bytes = 0;
        }
    }
    // Check the number of l0 files to decide whether need to adjust target flow.
    /// Seeds a throttle speed when the L0 threshold is first exceeded,
    /// multiplies it by K_INC_SLOWDOWN_RATIO while the threshold stays
    /// exceeded, and lifts it (saving `last_speed`) once it recovers.
    fn on_l0_change(&mut self, cf: String) {
        let checker = self.cf_checkers.get_mut(&cf).unwrap();
        let num_l0_files = checker.long_term_num_l0_files.get_recent();
        // do special check on start, see the comment of the variable definition for detail.
        if checker.on_start_l0_files {
            if num_l0_files < self.l0_files_threshold
                || checker.long_term_num_l0_files.trend() == Trend::Increasing
            {
                // the write is accumulating, still need to throttle
                checker.on_start_l0_files = false;
            } else {
                // still on start, should not throttle now
                return;
            }
        }
        if let Some(throttle_cf) = self.throttle_cf.as_ref() {
            if &cf != throttle_cf {
                // to avoid throttle cf changes back and forth, only change it
                // when the other is much higher.
                if num_l0_files
                    > self.cf_checkers[throttle_cf]
                        .long_term_num_l0_files
                        .get_max()
                        + 4
                {
                    SCHED_THROTTLE_ACTION_COUNTER
                        .with_label_values(&[&cf, "change_throttle_cf"])
                        .inc();
                    self.throttle_cf = Some(cf.clone());
                } else {
                    return;
                }
            }
        }
        let checker = self.cf_checkers.get_mut(&cf).unwrap();
        // A memtable-initialized speed takes precedence; don't fight it here.
        if checker.memtable_init_speed {
            return;
        }
        let is_throttled = self.limiter.speed_limit() != f64::INFINITY;
        let should_throttle = checker.long_term_num_l0_files.get_recent() > self.l0_files_threshold;
        let throttle = if !is_throttled && should_throttle {
            SCHED_THROTTLE_ACTION_COUNTER
                .with_label_values(&[&cf, "init"])
                .inc();
            self.throttle_cf = Some(cf.clone());
            // Reuse the speed of the last throttling round when available,
            // otherwise seed from the p90 of the recent write flow.
            let x = if self.last_speed < f64::EPSILON {
                self.write_flow_recorder.get_percentile_90() as f64
            } else {
                self.last_speed
            };
            if x < f64::EPSILON { f64::INFINITY } else { x }
        } else if is_throttled && should_throttle {
            self.limiter.speed_limit() * K_INC_SLOWDOWN_RATIO
        } else if is_throttled && !should_throttle {
            self.last_speed = self.limiter.speed_limit() * K_DEC_SLOWDOWN_RATIO;
            f64::INFINITY
        } else {
            f64::INFINITY
        };
        self.update_speed_limit(throttle)
    }
    /// Clamps `throttle` to [MIN_THROTTLE_SPEED, MAX_THROTTLE_SPEED] — above
    /// the max, throttling is lifted entirely — then updates the gauge and
    /// the shared limiter.
    fn update_speed_limit(&mut self, mut throttle: f64) {
        if throttle < MIN_THROTTLE_SPEED {
            throttle = MIN_THROTTLE_SPEED;
        }
        if throttle > MAX_THROTTLE_SPEED {
            self.throttle_cf = None;
            throttle = f64::INFINITY;
        }
        SCHED_THROTTLE_FLOW_GAUGE.set(if throttle == f64::INFINITY {
            0
        } else {
            throttle as i64
        });
        self.limiter.set_speed_limit(throttle)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use engine_traits::Result;
    use std::sync::atomic::AtomicU64;
    // Engine stub whose flow-control factors are set directly by the tests.
    #[derive(Clone)]
    struct EngineStub(Arc<EngineStubInner>);
    struct EngineStubInner {
        pub pending_compaction_bytes: AtomicU64,
        pub num_l0_files: AtomicU64,
        pub num_memtable_files: AtomicU64,
    }
    impl EngineStub {
        fn new() -> Self {
            Self(Arc::new(EngineStubInner {
                pending_compaction_bytes: AtomicU64::new(0),
                num_l0_files: AtomicU64::new(0),
                num_memtable_files: AtomicU64::new(0),
            }))
        }
    }
    impl CFNamesExt for EngineStub {
        fn cf_names(&self) -> Vec<&str> {
            vec!["default"]
        }
    }
    impl FlowControlFactorsExt for EngineStub {
        fn get_cf_num_files_at_level(&self, _cf: &str, _level: usize) -> Result<Option<u64>> {
            Ok(Some(self.0.num_l0_files.load(Ordering::Relaxed)))
        }
        fn get_cf_num_immutable_mem_table(&self, _cf: &str) -> Result<Option<u64>> {
            Ok(Some(self.0.num_memtable_files.load(Ordering::Relaxed)))
        }
        fn get_cf_pending_compaction_bytes(&self, _cf: &str) -> Result<Option<u64>> {
            Ok(Some(
                self.0.pending_compaction_bytes.load(Ordering::Relaxed),
            ))
        }
    }
    #[test]
    fn test_flow_controller_basic() {
        let stub = EngineStub::new();
        let (_tx, rx) = mpsc::channel();
        let flow_controller = FlowController::new(&FlowControlConfig::default(), stub, rx);
        // enable flow controller
        assert_eq!(flow_controller.enabled(), true);
        assert_eq!(flow_controller.should_drop(), false);
        assert_eq!(flow_controller.is_unlimited(), true);
        assert_eq!(flow_controller.consume(0), Duration::ZERO);
        assert_eq!(flow_controller.consume(1000), Duration::ZERO);
        // disable flow controller
        flow_controller.enable(false);
        assert_eq!(flow_controller.enabled(), false);
        // re-enable flow controller
        flow_controller.enable(true);
        assert_eq!(flow_controller.enabled(), true);
        assert_eq!(flow_controller.should_drop(), false);
        assert_eq!(flow_controller.is_unlimited(), true);
        assert_eq!(flow_controller.consume(1), Duration::ZERO);
    }
    #[test]
    fn test_flow_controller_memtable() {
        let stub = EngineStub::new();
        // Zero-capacity channel: every `send` below is a rendezvous, so when
        // it returns, the checker has consumed the previous event. The extra
        // `L0Intra(_, 0)` sends are no-ops used purely as such barriers.
        let (tx, rx) = mpsc::sync_channel(0);
        let flow_controller = FlowController::new(&FlowControlConfig::default(), stub.clone(), rx);
        assert_eq!(flow_controller.consume(2000), Duration::ZERO);
        // Wait for a statistics tick to clear the consumed-bytes counter.
        loop {
            if flow_controller.total_bytes_consumed() == 0 {
                break;
            }
            std::thread::sleep(TICK_DURATION);
        }
        // exceeds the threshold on start
        stub.0.num_memtable_files.store(8, Ordering::Relaxed);
        tx.send(FlowInfo::Flush("default".to_string(), 0)).unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert_eq!(flow_controller.should_drop(), false);
        // on start check forbids flow control
        assert_eq!(flow_controller.is_unlimited(), true);
        // once falls below the threshold, pass the on start check
        stub.0.num_memtable_files.store(1, Ordering::Relaxed);
        tx.send(FlowInfo::Flush("default".to_string(), 0)).unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        // not throttle when the average of the sliding window doesn't exceed the threshold
        stub.0.num_memtable_files.store(6, Ordering::Relaxed);
        tx.send(FlowInfo::Flush("default".to_string(), 0)).unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert_eq!(flow_controller.should_drop(), false);
        assert_eq!(flow_controller.is_unlimited(), true);
        // the average of sliding window exceeds the threshold
        stub.0.num_memtable_files.store(6, Ordering::Relaxed);
        tx.send(FlowInfo::Flush("default".to_string(), 0)).unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert_eq!(flow_controller.should_drop(), false);
        assert_eq!(flow_controller.is_unlimited(), false);
        assert_ne!(flow_controller.consume(2000), Duration::ZERO);
        // not throttle once the number of memtables falls below the threshold
        stub.0.num_memtable_files.store(1, Ordering::Relaxed);
        tx.send(FlowInfo::Flush("default".to_string(), 0)).unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert_eq!(flow_controller.should_drop(), false);
        assert_eq!(flow_controller.is_unlimited(), true);
    }
    #[test]
    fn test_flow_controller_l0() {
        let stub = EngineStub::new();
        // Zero-capacity channel; see test_flow_controller_memtable for why
        // the trailing L0Intra(_, 0) sends act as synchronization barriers.
        let (tx, rx) = mpsc::sync_channel(0);
        let flow_controller = FlowController::new(&FlowControlConfig::default(), stub.clone(), rx);
        assert_eq!(flow_controller.consume(2000), Duration::ZERO);
        loop {
            if flow_controller.total_bytes_consumed() == 0 {
                break;
            }
            std::thread::sleep(TICK_DURATION);
        }
        // exceeds the threshold
        stub.0.num_l0_files.store(30, Ordering::Relaxed);
        tx.send(FlowInfo::L0("default".to_string(), 0)).unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert_eq!(flow_controller.should_drop(), false);
        // on start check forbids flow control
        assert_eq!(flow_controller.is_unlimited(), true);
        // once fall below the threshold, pass the on start check
        stub.0.num_l0_files.store(10, Ordering::Relaxed);
        tx.send(FlowInfo::L0("default".to_string(), 0)).unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        // exceeds the threshold, throttle now
        stub.0.num_l0_files.store(30, Ordering::Relaxed);
        tx.send(FlowInfo::L0("default".to_string(), 0)).unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert_eq!(flow_controller.should_drop(), false);
        assert_eq!(flow_controller.is_unlimited(), false);
        assert_ne!(flow_controller.consume(2000), Duration::ZERO);
    }
    #[test]
    fn test_flow_controller_pending_compaction_bytes() {
        let stub = EngineStub::new();
        // Zero-capacity channel; trailing L0Intra(_, 0) sends are barriers.
        let (tx, rx) = mpsc::sync_channel(0);
        let flow_controller = FlowController::new(&FlowControlConfig::default(), stub.clone(), rx);
        // exceeds the threshold
        stub.0
            .pending_compaction_bytes
            .store(1000 * 1024 * 1024 * 1024, Ordering::Relaxed);
        tx.send(FlowInfo::Compaction("default".to_string()))
            .unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        // on start check forbids flow control
        assert!(flow_controller.discard_ratio() < f64::EPSILON);
        // once fall below the threshold, pass the on start check
        stub.0
            .pending_compaction_bytes
            .store(100 * 1024 * 1024 * 1024, Ordering::Relaxed);
        tx.send(FlowInfo::Compaction("default".to_string()))
            .unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        stub.0
            .pending_compaction_bytes
            .store(1000 * 1024 * 1024 * 1024, Ordering::Relaxed);
        tx.send(FlowInfo::Compaction("default".to_string()))
            .unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert!(flow_controller.discard_ratio() > f64::EPSILON);
        stub.0
            .pending_compaction_bytes
            .store(1024 * 1024 * 1024, Ordering::Relaxed);
        tx.send(FlowInfo::Compaction("default".to_string()))
            .unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert!(flow_controller.discard_ratio() < f64::EPSILON);
        // pending compaction bytes jump after unsafe destroy range
        tx.send(FlowInfo::BeforeUnsafeDestroyRange).unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert!(flow_controller.discard_ratio() < f64::EPSILON);
        // during unsafe destroy range, pending compaction bytes may change
        stub.0
            .pending_compaction_bytes
            .store(1024 * 1024 * 1024, Ordering::Relaxed);
        tx.send(FlowInfo::Compaction("default".to_string()))
            .unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert!(flow_controller.discard_ratio() < f64::EPSILON);
        stub.0
            .pending_compaction_bytes
            .store(10000000 * 1024 * 1024 * 1024, Ordering::Relaxed);
        tx.send(FlowInfo::Compaction("default".to_string()))
            .unwrap();
        tx.send(FlowInfo::AfterUnsafeDestroyRange).unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert!(flow_controller.discard_ratio() < f64::EPSILON);
        // unfreeze the control
        stub.0
            .pending_compaction_bytes
            .store(1024 * 1024, Ordering::Relaxed);
        tx.send(FlowInfo::Compaction("default".to_string()))
            .unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert!(flow_controller.discard_ratio() < f64::EPSILON);
        stub.0
            .pending_compaction_bytes
            .store(1000000000 * 1024 * 1024 * 1024, Ordering::Relaxed);
        tx.send(FlowInfo::Compaction("default".to_string()))
            .unwrap();
        tx.send(FlowInfo::L0Intra("default".to_string(), 0))
            .unwrap();
        assert!(flow_controller.discard_ratio() > f64::EPSILON);
    }
    #[test]
    fn test_smoother() {
        // Capacity 5: of the 7 observations only the last 5 remain.
        let mut smoother = Smoother::<u64, 5>::default();
        smoother.observe(1);
        smoother.observe(6);
        smoother.observe(2);
        smoother.observe(3);
        smoother.observe(4);
        smoother.observe(5);
        smoother.observe(0);
        assert!((smoother.get_avg() - 2.8).abs() < f64::EPSILON);
        assert_eq!(smoother.get_recent(), 0);
        assert_eq!(smoother.get_max(), 5);
        assert_eq!(smoother.get_percentile_90(), 4);
        assert_eq!(smoother.trend(), Trend::NoTrend);
        let mut smoother = Smoother::<f64, 5>::default();
        smoother.observe(1.0);
        smoother.observe(6.0);
        smoother.observe(2.0);
        smoother.observe(3.0);
        smoother.observe(4.0);
        smoother.observe(5.0);
        smoother.observe(9.0);
        assert!((smoother.get_avg() - 4.6).abs() < f64::EPSILON);
        assert!((smoother.get_recent() - 9.0).abs() < f64::EPSILON);
        assert!((smoother.get_max() - 9.0).abs() < f64::EPSILON);
        assert!((smoother.get_percentile_90() - 5.0).abs() < f64::EPSILON);
        assert_eq!(smoother.trend(), Trend::Increasing);
    }
}
flow controller: consider the time factor when evaluating the trend (#11531)
* flow controller: consider the time range when evaluating the trend, close #11530
Signed-off-by: zhangjinpeng1987 <zhangjinpeng@pingcap.com>
* fix return syntax
Signed-off-by: zhangjinpeng1987 <zhangjinpeng@pingcap.com>
* adjust the smoother capacity by time
Signed-off-by: zhangjinpeng1987 <zhangjinpeng@pingcap.com>
* comments: remove tail space
Signed-off-by: zhangjinpeng1987 <zhangjinpeng@pingcap.com>
* address comments: consider time span and also consider records count
Signed-off-by: zhangjinpeng1987 <zhangjinpeng@pingcap.com>
* tests: correct the test time range
Signed-off-by: zhangjinpeng1987 <zhangjinpeng@pingcap.com>
* fix tests
Signed-off-by: zhangjinpeng1987 <zhangjinpeng@pingcap.com>
Co-authored-by: Ti Chi Robot <040e6455cde1c91e876ec337e785ce913eacba35@tidb.io>
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
// #[PerformanceCriticalPath]
use std::cmp::PartialOrd;
use std::collections::VecDeque;
use std::ops::{Add, AddAssign, Sub, SubAssign};
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::sync::mpsc::{self, Receiver, RecvTimeoutError, SyncSender};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use std::u64;
use collections::HashMap;
use engine_rocks::FlowInfo;
use engine_traits::{CFNamesExt, FlowControlFactorsExt};
use num_traits::cast::{AsPrimitive, FromPrimitive};
use rand::Rng;
use tikv_util::time::{Instant, Limiter};
use crate::storage::config::FlowControlConfig;
use crate::storage::metrics::*;
// How often the checker thread wakes up to refresh statistics when no flow
// event arrives.
const TICK_DURATION: Duration = Duration::from_millis(1000);
// The discard ratio is stored as an integer scaled by this factor
// (RATIO_SCALE_FACTOR corresponds to a ratio of 1.0).
const RATIO_SCALE_FACTOR: u32 = 10_000_000;
// Multiplier applied to the speed limit each step while throttling tightens.
const K_INC_SLOWDOWN_RATIO: f64 = 0.8;
// Inverse multiplier used when throttling loosens.
const K_DEC_SLOWDOWN_RATIO: f64 = 1.0 / K_INC_SLOWDOWN_RATIO;
// The throttle speed is clamped to [MIN_THROTTLE_SPEED, MAX_THROTTLE_SPEED];
// above the max, throttling is lifted entirely (see `update_speed_limit`).
const MIN_THROTTLE_SPEED: f64 = 16.0 * 1024.0; // 16KB
const MAX_THROTTLE_SPEED: f64 = 200.0 * 1024.0 * 1024.0; // 200MB
// Weight of the old value when smoothing the discard ratio.
// EMA stands for Exponential Moving Average.
const EMA_FACTOR: f64 = 0.6;
/// Direction of change observed over a `Smoother`'s sliding window, used to
/// decide whether a statistic is still accumulating or draining.
#[derive(Eq, PartialEq, Debug)]
enum Trend {
    Increasing,
    Decreasing,
    // No clear direction, or not enough (or too stale) data to judge.
    NoTrend,
}
/// Flow controller is used to throttle the write rate at scheduler level, aiming
/// to substitute the write stall mechanism of RocksDB. It features in two points:
/// * throttle at scheduler, so raftstore and apply won't be blocked anymore
/// * better control on the throttle rate to avoid QPS drop under heavy write
///
/// When write stall happens, the max speed of write rate max_delayed_write_rate
/// is limited to 16MB/s by default which doesn't take real disk ability into
/// account. It may underestimate the disk's throughput that 16MB/s is too small
/// at once, causing a very large jitter on the write duration.
/// Also, it decreases the delayed write rate further if the factors still exceed
/// the threshold. So under heavy write load, the write rate may be throttled to
/// a very low rate from time to time, causing QPS drop eventually.
///
/// For compaction pending bytes, we use discardable ratio to do flow control
/// which is a mechanism separate from throttle speed. Compaction pending bytes is
/// an approximate value, usually, changes up and down dramatically, so it's unwise
/// to map compaction pending bytes to a specified throttle speed. Instead,
/// mapping it from soft limit to hard limit as 0% to 100% discardable ratio. With
/// this, there must be a point that foreground write rate is equal to the
/// background compaction pending bytes consuming rate so that compaction pending
/// bytes is kept around a steady level.
///
/// Here is a brief flow showing where the mechanism works:
/// grpc -> check should drop(discardable ratio) -> limiter -> async write to raftstore
pub struct FlowController {
    // Scaled discard ratio shared with the background checker; see
    // RATIO_SCALE_FACTOR.
    discard_ratio: Arc<AtomicU32>,
    // Rate limiter shared with the background checker, which adjusts its
    // speed limit.
    limiter: Arc<Limiter>,
    // Cached enable flag so `enabled()` doesn't need to ask the checker.
    enabled: Arc<AtomicBool>,
    // Channel to the background checker thread; None for the test-only
    // `empty()` controller.
    tx: Option<SyncSender<Msg>>,
    // Join handle of the background checker thread; None for `empty()`.
    handle: Option<std::thread::JoinHandle<()>>,
}
// Control messages sent from `FlowController` to the background checker
// thread.
enum Msg {
    Close,
    Enable,
    Disable,
}
impl Drop for FlowController {
    /// Shuts down the background checker thread, if one was spawned: sends
    /// `Msg::Close` and joins the thread, logging (but not panicking) on
    /// failure.
    fn drop(&mut self) {
        // Nothing to do for the test-only `empty()` controller.
        let handle = match self.handle.take() {
            Some(h) => h,
            None => return,
        };
        // Ask the checker loop to exit before joining it.
        if let Some(tx) = self.tx.as_ref() {
            if let Err(e) = tx.send(Msg::Close) {
                error!("send quit message for flow controller failed"; "err" => ?e);
                return;
            }
        }
        if let Err(e) = handle.join() {
            error!("join flow controller failed"; "err" => ?e);
        }
    }
}
impl FlowController {
    /// Builds an inert controller: no background thread, unlimited speed,
    /// zero discard ratio.
    // only for test
    pub fn empty() -> Self {
        Self {
            tx: None,
            handle: None,
            enabled: Arc::new(AtomicBool::new(false)),
            limiter: Arc::new(Limiter::new(f64::INFINITY)),
            discard_ratio: Arc::new(AtomicU32::new(0)),
        }
    }

    /// Spawns the background `FlowChecker` thread and returns a controller
    /// wired to it through the shared limiter and discard ratio.
    pub fn new<E: CFNamesExt + FlowControlFactorsExt + Send + 'static>(
        config: &FlowControlConfig,
        engine: E,
        flow_info_receiver: Receiver<FlowInfo>,
    ) -> Self {
        let limiter = Arc::new(
            <Limiter>::builder(f64::INFINITY)
                .refill(Duration::from_millis(1))
                .build(),
        );
        let discard_ratio = Arc::new(AtomicU32::new(0));
        let checker = FlowChecker::new(config, engine, discard_ratio.clone(), limiter.clone());
        let (tx, rx) = mpsc::sync_channel(5);
        // Deliver the initial enable state before the checker starts looping.
        let initial = if config.enable {
            Msg::Enable
        } else {
            Msg::Disable
        };
        tx.send(initial).unwrap();
        Self {
            discard_ratio,
            limiter,
            enabled: Arc::new(AtomicBool::new(config.enable)),
            tx: Some(tx),
            handle: Some(checker.start(rx, flow_info_receiver)),
        }
    }

    /// Randomly decides whether an incoming write should be rejected, with
    /// probability equal to the current discard ratio.
    pub fn should_drop(&self) -> bool {
        let ratio = self.discard_ratio.load(Ordering::Relaxed);
        rand::thread_rng().gen_ratio(ratio, RATIO_SCALE_FACTOR)
    }

    // Current discard ratio as a float in [0.0, 1.0].
    #[cfg(test)]
    pub fn discard_ratio(&self) -> f64 {
        f64::from(self.discard_ratio.load(Ordering::Relaxed)) / f64::from(RATIO_SCALE_FACTOR)
    }

    /// Charges `bytes` against the rate limiter; returns how long the caller
    /// should delay the write.
    pub fn consume(&self, bytes: usize) -> Duration {
        self.limiter.consume_duration(bytes)
    }

    /// Returns previously charged budget to the limiter.
    pub fn unconsume(&self, bytes: usize) {
        self.limiter.unconsume(bytes);
    }

    #[cfg(test)]
    pub fn total_bytes_consumed(&self) -> usize {
        self.limiter.total_bytes_consumed()
    }

    /// Switches flow control on/off and notifies the checker thread.
    pub fn enable(&self, enable: bool) {
        self.enabled.store(enable, Ordering::Relaxed);
        if let Some(tx) = &self.tx {
            let msg = if enable { Msg::Enable } else { Msg::Disable };
            tx.send(msg).unwrap();
        }
    }

    /// Whether flow control is currently enabled.
    pub fn enabled(&self) -> bool {
        self.enabled.load(Ordering::Relaxed)
    }

    #[cfg(test)]
    pub fn set_speed_limit(&self, speed_limit: f64) {
        self.limiter.set_speed_limit(speed_limit);
    }

    /// True when no throttling is in effect (infinite speed limit).
    pub fn is_unlimited(&self) -> bool {
        self.limiter.speed_limit() == f64::INFINITY
    }
}
// Records older than this many seconds are evicted from a Smoother window.
const SMOOTHER_STALE_RECORD_THRESHOLD: u64 = 300; // 5min
// Minimum time span (in seconds) a window must cover before `trend()` is
// evaluated on a time basis.
const SMOOTHER_TIME_RANGE_THRESHOLD: u64 = 60; // 1min
// Smoother is a sliding window used to provide steadier flow statistics.
// Const parameters:
// * CAP: maximum number of records kept in the window.
// * STALE_DUR: records older than this many seconds are dropped.
// * MIN_TIME_SPAN: when non-zero, `trend()` requires the records to span at
//   least this many seconds and splits them by time instead of by count.
struct Smoother<T, const CAP: usize, const STALE_DUR: u64, const MIN_TIME_SPAN: u64>
where
    T: Default
        + Add<Output = T>
        + Sub<Output = T>
        + AddAssign
        + SubAssign
        + PartialOrd
        + AsPrimitive<f64>
        + FromPrimitive,
{
    // (value, observation time) pairs, oldest at the front.
    records: VecDeque<(T, Instant)>,
    // Running sum of the values currently in `records`.
    total: T,
}
impl<T, const CAP: usize, const STALE_DUR: u64, const MIN_TIME_SPAN: u64> Default
for Smoother<T, CAP, STALE_DUR, MIN_TIME_SPAN>
where
T: Default
+ Add<Output = T>
+ Sub<Output = T>
+ AddAssign
+ SubAssign
+ PartialOrd
+ AsPrimitive<f64>
+ FromPrimitive,
{
fn default() -> Self {
Self {
records: VecDeque::with_capacity(CAP),
total: Default::default(),
}
}
}
impl<T, const CAP: usize, const STALE_DUR: u64, const MIN_TIME_SPAN: u64>
    Smoother<T, CAP, STALE_DUR, MIN_TIME_SPAN>
where
    T: Default
        + Add<Output = T>
        + Sub<Output = T>
        + AddAssign
        + SubAssign
        + PartialOrd
        + AsPrimitive<f64>
        + FromPrimitive,
{
    /// Records `record` with the current (coarse) timestamp.
    pub fn observe(&mut self, record: T) {
        self.observe_with_time(record, Instant::now_coarse());
    }

    /// Records `record` observed at `time`, evicting the oldest entry when
    /// the window is already at capacity, then drops stale entries.
    pub fn observe_with_time(&mut self, record: T, time: Instant) {
        if self.records.len() == CAP {
            let v = self.records.pop_front().unwrap().0;
            self.total -= v;
        }
        self.total += record;
        self.records.push_back((record, time));
        self.remove_stale_records();
    }

    // Evicts entries older than STALE_DUR seconds from the front of the
    // window, but always keeps at least two records so averages and trends
    // stay meaningful.
    fn remove_stale_records(&mut self) {
        // make sure there are two records left at least
        while self.records.len() > 2 {
            if self.records.front().unwrap().1.saturating_elapsed_secs() > STALE_DUR as f64 {
                let v = self.records.pop_front().unwrap().0;
                self.total -= v;
            } else {
                break;
            }
        }
    }

    /// The most recently observed value, or `T::default()` when empty.
    pub fn get_recent(&self) -> T {
        if self.records.is_empty() {
            return T::default();
        }
        self.records.back().unwrap().0
    }

    /// Arithmetic mean over the window, or 0.0 when empty.
    pub fn get_avg(&self) -> f64 {
        if self.records.is_empty() {
            return 0.0;
        }
        self.total.as_() / self.records.len() as f64
    }

    /// Maximum value in the window, or `T::default()` when empty.
    pub fn get_max(&self) -> T {
        if self.records.is_empty() {
            return T::default();
        }
        self.records
            .iter()
            .max_by(|a, b| a.0.partial_cmp(&b.0).unwrap())
            .unwrap()
            .0
    }

    /// 90th-percentile value of the window, or `T::default()` when empty.
    /// Sorts a temporary vec of references, so `&self` suffices.
    pub fn get_percentile_90(&self) -> T {
        if self.records.is_empty() {
            // Zero for the numeric types used here, consistent with
            // get_recent()/get_max().
            return T::default();
        }
        let mut v: Vec<_> = self.records.iter().collect();
        // Stability is irrelevant when picking a percentile; unstable sort
        // avoids the allocation a stable sort needs.
        v.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
        v[((self.records.len() - 1) as f64 * 0.90) as usize].0
    }

    /// Judges whether the recorded values are increasing or decreasing by
    /// comparing the averages of the older and newer halves of the window,
    /// split either by time (MIN_TIME_SPAN > 0) or by record count.
    pub fn trend(&self) -> Trend {
        if self.records.len() <= 1 {
            return Trend::NoTrend;
        }
        // If the latest record is too old, no trend
        if self.records.back().unwrap().1.saturating_elapsed_secs() > STALE_DUR as f64 {
            return Trend::NoTrend;
        }
        let (mut left, mut left_cnt) = (T::default(), 0);
        let (mut right, mut right_cnt) = (T::default(), 0);
        // The time span matters
        if MIN_TIME_SPAN > 0 {
            // If the records don't cover a long enough time span, no trend
            let time_span = self.records.front().unwrap().1.saturating_elapsed_secs()
                - self.records.back().unwrap().1.saturating_elapsed_secs();
            if time_span < MIN_TIME_SPAN as f64 {
                return Trend::NoTrend;
            }
            // Split the records into the older half (left) and the newer
            // half (right) by the middle of the time range.
            // NOTE(review): the midpoint is measured as elapsed-from-now,
            // which assumes the newest record is recent; the staleness check
            // above makes that hold in practice — confirm if STALE_DUR is
            // ever set much larger than the observation cadence.
            for r in self.records.iter() {
                let elapsed_secs = r.1.saturating_elapsed_secs();
                if elapsed_secs > time_span / 2.0 {
                    left += r.0;
                    left_cnt += 1;
                } else {
                    right += r.0;
                    right_cnt += 1;
                }
            }
        } else {
            let half = self.records.len() / 2;
            for (i, r) in self.records.iter().enumerate() {
                if i < half {
                    left += r.0;
                    left_cnt += 1;
                } else {
                    right += r.0;
                    right_cnt += 1;
                }
            }
        }
        // Decide if there is a trend by the two averages.
        // Adding 2 here is to give a tolerance.
        // If either half ends up empty its average is NaN, both comparisons
        // below are false, and NoTrend is returned — the safe answer.
        let (l_avg, r_avg) = (left.as_() / left_cnt as f64, right.as_() / right_cnt as f64);
        if r_avg > l_avg + 2.0 {
            return Trend::Increasing;
        }
        if l_avg > r_avg + 2.0 {
            return Trend::Decreasing;
        }
        Trend::NoTrend
    }
}
// CFFlowChecker records some statistics and states related to one CF.
// These statistics fall into five categories:
// * memtable
// * L0 files
// * L0 production flow (flush flow)
// * L0 consumption flow (compaction read flow of L0)
// * pending compaction bytes
// And all of them are collected from the hook of RocksDB's event listener.
struct CFFlowChecker {
    // Memtable related
    last_num_memtables: Smoother<u64, 20, SMOOTHER_STALE_RECORD_THRESHOLD, 0>,
    // Accumulated MB/s owed back to the limiter while throttling on
    // memtables; repaid when throttling stops.
    memtable_debt: f64,
    // True while the current throttle speed was initialized because of
    // memtable pressure.
    memtable_init_speed: bool,
    // L0 files related
    // a few records of number of L0 files right after flush or L0 compaction
    // As we know, after flush the number of L0 files must increase by 1,
    // whereas, after L0 compaction the number of L0 files must decrease a lot
    // considering L0 compactions nearly includes all L0 files in a round.
    // So to evaluate the accumulation of L0 files, here only records the number
    // of L0 files right after L0 compactions.
    long_term_num_l0_files: Smoother<u64, 20, SMOOTHER_STALE_RECORD_THRESHOLD, 0>,
    // L0 production flow related
    last_flush_bytes: u64,
    last_flush_bytes_time: Instant,
    short_term_l0_production_flow: Smoother<u64, 10, SMOOTHER_STALE_RECORD_THRESHOLD, 0>,
    // L0 consumption flow related
    last_l0_bytes: u64,
    last_l0_bytes_time: Instant,
    short_term_l0_consumption_flow: Smoother<u64, 3, SMOOTHER_STALE_RECORD_THRESHOLD, 0>,
    // Pending compaction bytes related
    // When the write flow is about 100MB/s, we observed that the compaction ops
    // is about 2.5, it means there are 750 compaction events in 5 minutes.
    long_term_pending_bytes:
        Smoother<f64, 1024, SMOOTHER_STALE_RECORD_THRESHOLD, SMOOTHER_TIME_RANGE_THRESHOLD>,
    // log2 of pending compaction bytes snapshotted before an unsafe destroy
    // range, used to detect the pending-bytes jump it can cause.
    pending_bytes_before_unsafe_destroy_range: Option<f64>,
    // On start related markers. Because after restart, the memtable, l0 files
    // and compaction pending bytes may be high on start. If throttle on start
    // at once, it may get a low throttle speed as initialization because it may
    // have no write flow after restart. So use the markers to make sure only
    // throttled after the memtable, l0 files and compaction pending bytes
    // go beyond the threshold again.
    on_start_memtable: bool,
    on_start_l0_files: bool,
    on_start_pending_bytes: bool,
}
impl Default for CFFlowChecker {
    /// A fresh per-CF checker: empty windows, zeroed counters, and every
    /// on-start marker set so throttling is suppressed until the factors
    /// first fall below their thresholds.
    fn default() -> Self {
        let now = Instant::now_coarse();
        Self {
            last_num_memtables: Smoother::default(),
            long_term_num_l0_files: Smoother::default(),
            long_term_pending_bytes: Smoother::default(),
            short_term_l0_production_flow: Smoother::default(),
            short_term_l0_consumption_flow: Smoother::default(),
            memtable_debt: 0.0,
            memtable_init_speed: false,
            last_flush_bytes: 0,
            last_flush_bytes_time: now,
            last_l0_bytes: 0,
            last_l0_bytes_time: now,
            pending_bytes_before_unsafe_destroy_range: None,
            on_start_memtable: true,
            on_start_l0_files: true,
            on_start_pending_bytes: true,
        }
    }
}
// The background worker that turns RocksDB flow events into a throttle
// speed (via `limiter`) and a discard ratio (via `discard_ratio`).
struct FlowChecker<E: CFNamesExt + FlowControlFactorsExt + Send + 'static> {
    // log-scale soft/hard limits for pending compaction bytes (raw bytes
    // here; log2 is taken where they are compared).
    soft_pending_compaction_bytes_limit: u64,
    hard_pending_compaction_bytes_limit: u64,
    memtables_threshold: u64,
    l0_files_threshold: u64,
    // CFFlowChecker for each CF.
    cf_checkers: HashMap<String, CFFlowChecker>,
    // Record which CF is taking control of throttling, the throttle speed is
    // decided based on the statistics of the throttle CF. If the multiple CFs
    // exceed the threshold, choose the larger one.
    throttle_cf: Option<String>,
    // Discard ratio is decided by pending compaction bytes, it's the ratio to
    // drop write requests(return ServerIsBusy to TiDB) randomly.
    discard_ratio: Arc<AtomicU32>,
    engine: E,
    limiter: Arc<Limiter>,
    // Records the foreground write flow at scheduler level of last few seconds.
    write_flow_recorder: Smoother<u64, 30, SMOOTHER_STALE_RECORD_THRESHOLD, 0>,
    // When the write flow was last sampled; used to turn consumed bytes into
    // a rate.
    last_record_time: Instant,
    // Last throttle speed before throttling was released, reused as the
    // starting speed for the next throttle round.
    last_speed: f64,
    // True between BeforeUnsafeDestroyRange and AfterUnsafeDestroyRange.
    wait_for_destroy_range_finish: bool,
}
impl<E: CFNamesExt + FlowControlFactorsExt + Send + 'static> FlowChecker<E> {
    /// Builds a checker from `config`, creating one `CFFlowChecker` per
    /// column family reported by `engine`.
    pub fn new(
        config: &FlowControlConfig,
        engine: E,
        discard_ratio: Arc<AtomicU32>,
        limiter: Arc<Limiter>,
    ) -> Self {
        let cf_checkers = engine
            .cf_names()
            .into_iter()
            .map(|cf| (cf.to_owned(), CFFlowChecker::default()))
            .collect();
        Self {
            soft_pending_compaction_bytes_limit: config.soft_pending_compaction_bytes_limit.0,
            hard_pending_compaction_bytes_limit: config.hard_pending_compaction_bytes_limit.0,
            memtables_threshold: config.memtables_threshold,
            l0_files_threshold: config.l0_files_threshold,
            engine,
            discard_ratio,
            limiter,
            write_flow_recorder: Smoother::default(),
            cf_checkers,
            throttle_cf: None,
            last_record_time: Instant::now_coarse(),
            last_speed: 0.0,
            wait_for_destroy_range_finish: false,
        }
    }
    /// Spawns the checker thread. The loop drains control messages
    /// (enable/disable/close) without blocking, then waits for the next
    /// `FlowInfo` event up to `deadline`; on timeout it refreshes the write
    /// flow statistics and pushes the deadline forward by TICK_DURATION.
    /// Note the initial deadline is "now", so the first iteration takes the
    /// Timeout branch immediately and schedules the first real tick.
    fn start(self, rx: Receiver<Msg>, flow_info_receiver: Receiver<FlowInfo>) -> JoinHandle<()> {
        Builder::new()
            .name(thd_name!("flow-checker"))
            .spawn(move || {
                tikv_alloc::add_thread_memory_accessor();
                let mut checker = self;
                let mut deadline = std::time::Instant::now();
                let mut enabled = true;
                loop {
                    // Non-blocking: apply any pending control message first.
                    match rx.try_recv() {
                        Ok(Msg::Close) => break,
                        Ok(Msg::Disable) => {
                            enabled = false;
                            checker.reset_statistics();
                        }
                        Ok(Msg::Enable) => {
                            enabled = true;
                        }
                        Err(_) => {}
                    }
                    // Statistics are always collected, but throttle/ratio
                    // adjustments only happen while enabled.
                    match flow_info_receiver.recv_deadline(deadline) {
                        Ok(FlowInfo::L0(cf, l0_bytes)) => {
                            checker.collect_l0_consumption_stats(&cf, l0_bytes);
                            if enabled {
                                checker.on_l0_change(cf)
                            }
                        }
                        Ok(FlowInfo::L0Intra(cf, diff_bytes)) => {
                            if diff_bytes > 0 {
                                // Intra L0 merges some deletion records, so regard it as a L0 compaction.
                                checker.collect_l0_consumption_stats(&cf, diff_bytes);
                                if enabled {
                                    checker.on_l0_change(cf);
                                }
                            }
                        }
                        Ok(FlowInfo::Flush(cf, flush_bytes)) => {
                            checker.collect_l0_production_stats(&cf, flush_bytes);
                            if enabled {
                                checker.on_memtable_change(&cf);
                                checker.on_l0_change(cf)
                            }
                        }
                        Ok(FlowInfo::Compaction(cf)) => {
                            if enabled {
                                checker.on_pending_compaction_bytes_change(cf);
                            }
                        }
                        Ok(FlowInfo::BeforeUnsafeDestroyRange) => {
                            if !enabled {
                                continue;
                            }
                            checker.wait_for_destroy_range_finish = true;
                            // Snapshot the pending bytes of CFs still under
                            // the soft limit, to detect the jump afterwards.
                            let soft = (checker.soft_pending_compaction_bytes_limit as f64).log2();
                            for cf_checker in checker.cf_checkers.values_mut() {
                                let v = cf_checker.long_term_pending_bytes.get_avg();
                                if v <= soft {
                                    cf_checker.pending_bytes_before_unsafe_destroy_range = Some(v);
                                }
                            }
                        }
                        Ok(FlowInfo::AfterUnsafeDestroyRange) => {
                            if !enabled {
                                continue;
                            }
                            checker.wait_for_destroy_range_finish = false;
                            for (cf, cf_checker) in &mut checker.cf_checkers {
                                if let Some(before) =
                                    cf_checker.pending_bytes_before_unsafe_destroy_range
                                {
                                    let soft =
                                        (checker.soft_pending_compaction_bytes_limit as f64).log2();
                                    let after = (checker
                                        .engine
                                        .get_cf_pending_compaction_bytes(cf)
                                        .unwrap_or(None)
                                        .unwrap_or(0)
                                        as f64)
                                        .log2();
                                    assert!(before < soft);
                                    if after >= soft {
                                        // there is a pending bytes jump
                                        SCHED_THROTTLE_ACTION_COUNTER
                                            .with_label_values(&[cf, "pending_bytes_jump"])
                                            .inc();
                                    } else {
                                        cf_checker.pending_bytes_before_unsafe_destroy_range = None;
                                    }
                                }
                            }
                        }
                        Err(RecvTimeoutError::Timeout) => {
                            checker.update_statistics();
                            deadline = std::time::Instant::now() + TICK_DURATION;
                        }
                        Err(e) => {
                            error!("failed to receive compaction info {:?}", e);
                        }
                    }
                }
                tikv_alloc::remove_thread_memory_accessor();
            })
            .unwrap()
    }
    /// Zeroes all metrics gauges and lifts throttling entirely (infinite
    /// speed limit, zero discard ratio). Called when flow control is
    /// disabled.
    fn reset_statistics(&mut self) {
        SCHED_L0_TARGET_FLOW_GAUGE.set(0);
        for cf in self.cf_checkers.keys() {
            SCHED_THROTTLE_CF_GAUGE.with_label_values(&[cf]).set(0);
            SCHED_PENDING_COMPACTION_BYTES_GAUGE
                .with_label_values(&[cf])
                .set(0);
            SCHED_MEMTABLE_GAUGE.with_label_values(&[cf]).set(0);
            SCHED_L0_GAUGE.with_label_values(&[cf]).set(0);
            SCHED_L0_AVG_GAUGE.with_label_values(&[cf]).set(0);
            SCHED_L0_FLOW_GAUGE.with_label_values(&[cf]).set(0);
            SCHED_FLUSH_FLOW_GAUGE.with_label_values(&[cf]).set(0);
        }
        SCHED_WRITE_FLOW_GAUGE.set(0);
        SCHED_THROTTLE_FLOW_GAUGE.set(0);
        self.limiter.set_speed_limit(f64::INFINITY);
        SCHED_DISCARD_RATIO_GAUGE.set(0);
        self.discard_ratio.store(0, Ordering::Relaxed);
    }
    /// Periodic tick: refreshes the throttle-CF gauges and samples the
    /// foreground write flow (bytes consumed from the limiter since the
    /// last tick, divided by the elapsed time).
    fn update_statistics(&mut self) {
        if let Some(throttle_cf) = self.throttle_cf.as_ref() {
            SCHED_THROTTLE_CF_GAUGE
                .with_label_values(&[throttle_cf])
                .set(1);
            for cf in self.cf_checkers.keys() {
                if cf != throttle_cf {
                    SCHED_THROTTLE_CF_GAUGE.with_label_values(&[cf]).set(0);
                }
            }
        } else {
            for cf in self.cf_checkers.keys() {
                SCHED_THROTTLE_CF_GAUGE.with_label_values(&[cf]).set(0);
            }
        }
        // calculate foreground write flow
        let dur = self.last_record_time.saturating_elapsed_secs();
        if dur < f64::EPSILON {
            return;
        }
        let rate = self.limiter.total_bytes_consumed() as f64 / dur;
        // don't record those write rate of 0.
        // For closed loop system, if all the requests are delayed(assume > 1s),
        // then in the next second, the write rate would be 0. But it doesn't
        // reflect the real write rate, so just ignore it.
        if self.limiter.total_bytes_consumed() != 0 {
            self.write_flow_recorder.observe(rate as u64);
        }
        SCHED_WRITE_FLOW_GAUGE.set(rate as i64);
        self.last_record_time = Instant::now_coarse();
        self.limiter.reset_statistics();
    }
    /// Recomputes the discard ratio from `cf`'s pending compaction bytes:
    /// 0 below the soft limit, scaling towards 1 at the hard limit, smoothed
    /// with an EMA because pending bytes swing up and down.
    fn on_pending_compaction_bytes_change(&mut self, cf: String) {
        let hard = (self.hard_pending_compaction_bytes_limit as f64).log2();
        let soft = (self.soft_pending_compaction_bytes_limit as f64).log2();
        // Because pending compaction bytes changes dramatically, take the
        // logarithm of pending compaction bytes to make the values fall into
        // a relative small range
        let num = (self
            .engine
            .get_cf_pending_compaction_bytes(&cf)
            .unwrap_or(None)
            .unwrap_or(0) as f64)
            .log2();
        let checker = self.cf_checkers.get_mut(&cf).unwrap();
        checker.long_term_pending_bytes.observe(num);
        SCHED_PENDING_COMPACTION_BYTES_GAUGE
            .with_label_values(&[&cf])
            .set((checker.long_term_pending_bytes.get_avg() * RATIO_SCALE_FACTOR as f64) as i64);
        // do special check on start, see the comment of the variable definition for detail.
        if checker.on_start_pending_bytes {
            if num < soft || checker.long_term_pending_bytes.trend() == Trend::Increasing {
                // the write is accumulating, still need to throttle
                checker.on_start_pending_bytes = false;
            } else {
                // still on start, should not throttle now
                return;
            }
        }
        let pending_compaction_bytes = checker.long_term_pending_bytes.get_avg();
        // During/after an unsafe destroy range, ignore the ratio adjustment
        // until pending bytes drop back to the pre-destroy level.
        let ignore = if let Some(before) = checker.pending_bytes_before_unsafe_destroy_range {
            if pending_compaction_bytes <= before && !self.wait_for_destroy_range_finish {
                checker.pending_bytes_before_unsafe_destroy_range = None;
            }
            true
        } else {
            false
        };
        // Only the CF with the largest recent pending bytes drives the ratio.
        for checker in self.cf_checkers.values() {
            if num < checker.long_term_pending_bytes.get_recent() {
                return;
            }
        }
        let mut ratio = if pending_compaction_bytes < soft || ignore {
            0
        } else {
            let new_ratio = (pending_compaction_bytes - soft) / (hard - soft);
            let old_ratio = self.discard_ratio.load(Ordering::Relaxed);
            // Because pending compaction bytes changes up and down, so using
            // EMA(Exponential Moving Average) to smooth it.
            (if old_ratio != 0 {
                EMA_FACTOR * (old_ratio as f64 / RATIO_SCALE_FACTOR as f64)
                    + (1.0 - EMA_FACTOR) * new_ratio
            } else if new_ratio > 0.01 {
                // Start from a small ratio instead of jumping straight to
                // the computed one.
                0.01
            } else {
                new_ratio
            } * RATIO_SCALE_FACTOR as f64) as u32
        };
        // NOTE(review): the gauge is set before the clamp below, so it can
        // briefly report a value above RATIO_SCALE_FACTOR — confirm whether
        // that is intentional.
        SCHED_DISCARD_RATIO_GAUGE.set(ratio as i64);
        if ratio > RATIO_SCALE_FACTOR {
            ratio = RATIO_SCALE_FACTOR;
        }
        self.discard_ratio.store(ratio, Ordering::Relaxed);
    }
    /// Adjusts the throttle speed based on `cf`'s immutable memtable count:
    /// initializes the speed from the recent write flow when the average
    /// crosses the threshold, then nudges it by ±1MB/s per memtable change
    /// while throttled, repaying the accumulated "debt" when released.
    fn on_memtable_change(&mut self, cf: &str) {
        let num_memtables = self
            .engine
            .get_cf_num_immutable_mem_table(cf)
            .unwrap_or(None)
            .unwrap_or(0);
        let checker = self.cf_checkers.get_mut(cf).unwrap();
        SCHED_MEMTABLE_GAUGE
            .with_label_values(&[cf])
            .set(num_memtables as i64);
        let prev = checker.last_num_memtables.get_recent();
        checker.last_num_memtables.observe(num_memtables);
        // do special check on start, see the comment of the variable definition for detail.
        if checker.on_start_memtable {
            if num_memtables < self.memtables_threshold
                || checker.last_num_memtables.trend() == Trend::Increasing
            {
                // the write is accumulating, still need to throttle
                checker.on_start_memtable = false;
            } else {
                // still on start, should not throttle now
                return;
            }
        }
        // Only the CF with the most memtables drives the throttle.
        for c in self.cf_checkers.values() {
            if num_memtables < c.last_num_memtables.get_recent() {
                return;
            }
        }
        let checker = self.cf_checkers.get_mut(cf).unwrap();
        let is_throttled = self.limiter.speed_limit() != f64::INFINITY;
        let should_throttle =
            checker.last_num_memtables.get_avg() > self.memtables_threshold as f64;
        let throttle = if !is_throttled && should_throttle {
            // Start throttling: seed the speed from the 90th percentile of
            // the recent foreground write flow.
            SCHED_THROTTLE_ACTION_COUNTER
                .with_label_values(&[cf, "memtable_init"])
                .inc();
            let x = self.write_flow_recorder.get_percentile_90();
            if x == 0 {
                // No usable write-flow sample yet; don't throttle.
                f64::INFINITY
            } else {
                checker.memtable_init_speed = true;
                self.throttle_cf = Some(cf.to_string());
                x as f64
            }
        } else if is_throttled && (!should_throttle || num_memtables < self.memtables_threshold) {
            // should not throttle memtable
            if checker.memtable_init_speed {
                checker.memtable_init_speed = false;
                f64::INFINITY
            } else {
                // Repay the debt accumulated while tightening below.
                let speed = self.limiter.speed_limit() + checker.memtable_debt * 1024.0 * 1024.0;
                checker.memtable_debt = 0.0;
                speed
            }
        } else if is_throttled && should_throttle {
            // should throttle
            let diff = match num_memtables.cmp(&prev) {
                std::cmp::Ordering::Greater => {
                    // Memtables growing: tighten by 1MB/s and record the debt.
                    checker.memtable_debt += 1.0;
                    -1.0
                }
                std::cmp::Ordering::Less => {
                    checker.memtable_debt -= 1.0;
                    1.0
                }
                std::cmp::Ordering::Equal => {
                    // keep, do nothing
                    0.0
                }
            };
            self.limiter.speed_limit() + diff * 1024.0 * 1024.0
        } else {
            f64::INFINITY
        };
        self.update_speed_limit(throttle);
    }
fn collect_l0_consumption_stats(&mut self, cf: &str, l0_bytes: u64) {
let num_l0_files = self
.engine
.get_cf_num_files_at_level(cf, 0)
.unwrap_or(None)
.unwrap_or(0);
let checker = self.cf_checkers.get_mut(cf).unwrap();
checker.last_l0_bytes += l0_bytes;
checker.long_term_num_l0_files.observe(num_l0_files);
SCHED_L0_GAUGE
.with_label_values(&[cf])
.set(num_l0_files as i64);
SCHED_L0_AVG_GAUGE
.with_label_values(&[cf])
.set(checker.long_term_num_l0_files.get_avg() as i64);
}
    /// Accumulates flushed bytes for `cf` and, at most every 5 seconds,
    /// converts the accumulated flush (production) and L0-compaction
    /// (consumption) bytes into flow rates for the short-term windows.
    fn collect_l0_production_stats(&mut self, cf: &str, flush_bytes: u64) {
        let num_l0_files = self
            .engine
            .get_cf_num_files_at_level(cf, 0)
            .unwrap_or(None)
            .unwrap_or(0);
        let checker = self.cf_checkers.get_mut(cf).unwrap();
        checker.last_flush_bytes += flush_bytes;
        checker.long_term_num_l0_files.observe(num_l0_files);
        SCHED_L0_GAUGE
            .with_label_values(&[cf])
            .set(num_l0_files as i64);
        SCHED_L0_AVG_GAUGE
            .with_label_values(&[cf])
            .set(checker.long_term_num_l0_files.get_avg() as i64);
        if checker.last_flush_bytes_time.saturating_elapsed_secs() > 5.0 {
            // update flush flow
            let flush_flow = checker.last_flush_bytes as f64
                / checker.last_flush_bytes_time.saturating_elapsed_secs();
            checker
                .short_term_l0_production_flow
                .observe(flush_flow as u64);
            SCHED_FLUSH_FLOW_GAUGE
                .with_label_values(&[cf])
                .set(checker.short_term_l0_production_flow.get_avg() as i64);
            // update l0 flow
            if checker.last_l0_bytes != 0 {
                let l0_flow = checker.last_l0_bytes as f64
                    / checker.last_l0_bytes_time.saturating_elapsed_secs();
                checker.last_l0_bytes_time = Instant::now_coarse();
                checker
                    .short_term_l0_consumption_flow
                    .observe(l0_flow as u64);
                SCHED_L0_FLOW_GAUGE
                    .with_label_values(&[cf])
                    .set(checker.short_term_l0_consumption_flow.get_avg() as i64);
            }
            checker.last_flush_bytes_time = Instant::now_coarse();
            checker.last_l0_bytes = 0;
            checker.last_flush_bytes = 0;
        }
    }
    // Check the number of l0 files to decide whether need to adjust target flow
    /// Adjusts the throttle speed based on `cf`'s L0 file count: starts
    /// throttling (seeded from the last speed or the recent write flow) when
    /// the count exceeds the threshold, tightens multiplicatively while it
    /// stays above, and releases when it falls back below.
    fn on_l0_change(&mut self, cf: String) {
        let checker = self.cf_checkers.get_mut(&cf).unwrap();
        let num_l0_files = checker.long_term_num_l0_files.get_recent();
        // do special check on start, see the comment of the variable definition for detail.
        if checker.on_start_l0_files {
            if num_l0_files < self.l0_files_threshold
                || checker.long_term_num_l0_files.trend() == Trend::Increasing
            {
                // the write is accumulating, still need to throttle
                checker.on_start_l0_files = false;
            } else {
                // still on start, should not throttle now
                return;
            }
        }
        if let Some(throttle_cf) = self.throttle_cf.as_ref() {
            if &cf != throttle_cf {
                // to avoid throttle cf changes back and forth, only change it
                // when the other is much higher.
                if num_l0_files
                    > self.cf_checkers[throttle_cf]
                        .long_term_num_l0_files
                        .get_max()
                        + 4
                {
                    SCHED_THROTTLE_ACTION_COUNTER
                        .with_label_values(&[&cf, "change_throttle_cf"])
                        .inc();
                    self.throttle_cf = Some(cf.clone());
                } else {
                    return;
                }
            }
        }
        let checker = self.cf_checkers.get_mut(&cf).unwrap();
        // Memtable-initialized throttling takes precedence.
        if checker.memtable_init_speed {
            return;
        }
        let is_throttled = self.limiter.speed_limit() != f64::INFINITY;
        let should_throttle = checker.long_term_num_l0_files.get_recent() > self.l0_files_threshold;
        let throttle = if !is_throttled && should_throttle {
            SCHED_THROTTLE_ACTION_COUNTER
                .with_label_values(&[&cf, "init"])
                .inc();
            self.throttle_cf = Some(cf.clone());
            // Prefer resuming from the speed used in the previous throttle
            // round; otherwise seed from the recent write flow.
            let x = if self.last_speed < f64::EPSILON {
                self.write_flow_recorder.get_percentile_90() as f64
            } else {
                self.last_speed
            };
            if x < f64::EPSILON { f64::INFINITY } else { x }
        } else if is_throttled && should_throttle {
            self.limiter.speed_limit() * K_INC_SLOWDOWN_RATIO
        } else if is_throttled && !should_throttle {
            // Remember a slightly relaxed speed for the next round, then
            // release the throttle.
            self.last_speed = self.limiter.speed_limit() * K_DEC_SLOWDOWN_RATIO;
            f64::INFINITY
        } else {
            f64::INFINITY
        };
        self.update_speed_limit(throttle)
    }
fn update_speed_limit(&mut self, mut throttle: f64) {
if throttle < MIN_THROTTLE_SPEED {
throttle = MIN_THROTTLE_SPEED;
}
if throttle > MAX_THROTTLE_SPEED {
self.throttle_cf = None;
throttle = f64::INFINITY;
}
SCHED_THROTTLE_FLOW_GAUGE.set(if throttle == f64::INFINITY {
0
} else {
throttle as i64
});
self.limiter.set_speed_limit(throttle)
}
}
#[cfg(test)]
mod tests {
use super::*;
use engine_traits::Result;
use std::sync::atomic::AtomicU64;
    // Test double for the engine: flow-control factors backed by atomics so
    // tests can adjust them while the checker thread is running.
    #[derive(Clone)]
    struct EngineStub(Arc<EngineStubInner>);
    struct EngineStubInner {
        pub pending_compaction_bytes: AtomicU64,
        pub num_l0_files: AtomicU64,
        pub num_memtable_files: AtomicU64,
    }
    impl EngineStub {
        // Stub engine with every factor initialized to zero.
        fn new() -> Self {
            Self(Arc::new(EngineStubInner {
                pending_compaction_bytes: AtomicU64::new(0),
                num_l0_files: AtomicU64::new(0),
                num_memtable_files: AtomicU64::new(0),
            }))
        }
    }
    impl CFNamesExt for EngineStub {
        // A single "default" CF is enough for these tests.
        fn cf_names(&self) -> Vec<&str> {
            vec!["default"]
        }
    }
    impl FlowControlFactorsExt for EngineStub {
        // All factors ignore the CF/level arguments and just report the
        // values set by the test.
        fn get_cf_num_files_at_level(&self, _cf: &str, _level: usize) -> Result<Option<u64>> {
            Ok(Some(self.0.num_l0_files.load(Ordering::Relaxed)))
        }
        fn get_cf_num_immutable_mem_table(&self, _cf: &str) -> Result<Option<u64>> {
            Ok(Some(self.0.num_memtable_files.load(Ordering::Relaxed)))
        }
        fn get_cf_pending_compaction_bytes(&self, _cf: &str) -> Result<Option<u64>> {
            Ok(Some(
                self.0.pending_compaction_bytes.load(Ordering::Relaxed),
            ))
        }
    }
#[test]
fn test_flow_controller_basic() {
let stub = EngineStub::new();
let (_tx, rx) = mpsc::channel();
let flow_controller = FlowController::new(&FlowControlConfig::default(), stub, rx);
// enable flow controller
assert_eq!(flow_controller.enabled(), true);
assert_eq!(flow_controller.should_drop(), false);
assert_eq!(flow_controller.is_unlimited(), true);
assert_eq!(flow_controller.consume(0), Duration::ZERO);
assert_eq!(flow_controller.consume(1000), Duration::ZERO);
// disable flow controller
flow_controller.enable(false);
assert_eq!(flow_controller.enabled(), false);
// re-enable flow controller
flow_controller.enable(true);
assert_eq!(flow_controller.enabled(), true);
assert_eq!(flow_controller.should_drop(), false);
assert_eq!(flow_controller.is_unlimited(), true);
assert_eq!(flow_controller.consume(1), Duration::ZERO);
}
#[test]
fn test_flow_controller_memtable() {
let stub = EngineStub::new();
let (tx, rx) = mpsc::sync_channel(0);
let flow_controller = FlowController::new(&FlowControlConfig::default(), stub.clone(), rx);
assert_eq!(flow_controller.consume(2000), Duration::ZERO);
loop {
if flow_controller.total_bytes_consumed() == 0 {
break;
}
std::thread::sleep(TICK_DURATION);
}
// exceeds the threshold on start
stub.0.num_memtable_files.store(8, Ordering::Relaxed);
tx.send(FlowInfo::Flush("default".to_string(), 0)).unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert_eq!(flow_controller.should_drop(), false);
// on start check forbids flow control
assert_eq!(flow_controller.is_unlimited(), true);
// once falls below the threshold, pass the on start check
stub.0.num_memtable_files.store(1, Ordering::Relaxed);
tx.send(FlowInfo::Flush("default".to_string(), 0)).unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
// not throttle when the average of the sliding window doesn't exceeds the threshold
stub.0.num_memtable_files.store(6, Ordering::Relaxed);
tx.send(FlowInfo::Flush("default".to_string(), 0)).unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert_eq!(flow_controller.should_drop(), false);
assert_eq!(flow_controller.is_unlimited(), true);
// the average of sliding window exceeds the threshold
stub.0.num_memtable_files.store(6, Ordering::Relaxed);
tx.send(FlowInfo::Flush("default".to_string(), 0)).unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert_eq!(flow_controller.should_drop(), false);
assert_eq!(flow_controller.is_unlimited(), false);
assert_ne!(flow_controller.consume(2000), Duration::ZERO);
// not throttle once the number of memtables falls below the threshold
stub.0.num_memtable_files.store(1, Ordering::Relaxed);
tx.send(FlowInfo::Flush("default".to_string(), 0)).unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert_eq!(flow_controller.should_drop(), false);
assert_eq!(flow_controller.is_unlimited(), true);
}
#[test]
fn test_flow_controller_l0() {
let stub = EngineStub::new();
let (tx, rx) = mpsc::sync_channel(0);
let flow_controller = FlowController::new(&FlowControlConfig::default(), stub.clone(), rx);
assert_eq!(flow_controller.consume(2000), Duration::ZERO);
loop {
if flow_controller.total_bytes_consumed() == 0 {
break;
}
std::thread::sleep(TICK_DURATION);
}
// exceeds the threshold
stub.0.num_l0_files.store(30, Ordering::Relaxed);
tx.send(FlowInfo::L0("default".to_string(), 0)).unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert_eq!(flow_controller.should_drop(), false);
// on start check forbids flow control
assert_eq!(flow_controller.is_unlimited(), true);
// once fall below the threshold, pass the on start check
stub.0.num_l0_files.store(10, Ordering::Relaxed);
tx.send(FlowInfo::L0("default".to_string(), 0)).unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
// exceeds the threshold, throttle now
stub.0.num_l0_files.store(30, Ordering::Relaxed);
tx.send(FlowInfo::L0("default".to_string(), 0)).unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert_eq!(flow_controller.should_drop(), false);
assert_eq!(flow_controller.is_unlimited(), false);
assert_ne!(flow_controller.consume(2000), Duration::ZERO);
}
// Verifies that the FlowController throttles (raises discard_ratio above zero)
// based on the engine's reported pending compaction bytes, driven through the
// FlowInfo channel.
//
// NOTE(review): `sync_channel(0)` is a rendezvous channel, so each `send`
// blocks until the controller has received the message; the trailing
// `FlowInfo::L0Intra` send after each `Compaction` message appears to act as a
// synchronization point so the previous message is fully processed before the
// assertion runs — confirm against FlowController's event loop.
#[test]
fn test_flow_controller_pending_compaction_bytes() {
let stub = EngineStub::new();
let (tx, rx) = mpsc::sync_channel(0);
let flow_controller = FlowController::new(&FlowControlConfig::default(), stub.clone(), rx);
// exceeds the threshold
stub.0
.pending_compaction_bytes
.store(1000 * 1024 * 1024 * 1024, Ordering::Relaxed);
tx.send(FlowInfo::Compaction("default".to_string()))
.unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
// on start check forbids flow control
assert!(flow_controller.discard_ratio() < f64::EPSILON);
// once fall below the threshold, pass the on start check
stub.0
.pending_compaction_bytes
.store(100 * 1024 * 1024 * 1024, Ordering::Relaxed);
tx.send(FlowInfo::Compaction("default".to_string()))
.unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
// exceeding the threshold again now triggers throttling
stub.0
.pending_compaction_bytes
.store(1000 * 1024 * 1024 * 1024, Ordering::Relaxed);
tx.send(FlowInfo::Compaction("default".to_string()))
.unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert!(flow_controller.discard_ratio() > f64::EPSILON);
// dropping back below the threshold releases the throttle
stub.0
.pending_compaction_bytes
.store(1024 * 1024 * 1024, Ordering::Relaxed);
tx.send(FlowInfo::Compaction("default".to_string()))
.unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert!(flow_controller.discard_ratio() < f64::EPSILON);
// pending compaction bytes jump after unsafe destroy range
tx.send(FlowInfo::BeforeUnsafeDestroyRange).unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert!(flow_controller.discard_ratio() < f64::EPSILON);
// during unsafe destroy range, pending compaction bytes may change
stub.0
.pending_compaction_bytes
.store(1024 * 1024 * 1024, Ordering::Relaxed);
tx.send(FlowInfo::Compaction("default".to_string()))
.unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert!(flow_controller.discard_ratio() < f64::EPSILON);
// even a huge spike during the destroy-range window must not throttle
stub.0
.pending_compaction_bytes
.store(10000000 * 1024 * 1024 * 1024, Ordering::Relaxed);
tx.send(FlowInfo::Compaction("default".to_string()))
.unwrap();
tx.send(FlowInfo::AfterUnsafeDestroyRange).unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert!(flow_controller.discard_ratio() < f64::EPSILON);
// unfreeze the control
stub.0
.pending_compaction_bytes
.store(1024 * 1024, Ordering::Relaxed);
tx.send(FlowInfo::Compaction("default".to_string()))
.unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert!(flow_controller.discard_ratio() < f64::EPSILON);
// after unfreezing, exceeding the threshold throttles again
stub.0
.pending_compaction_bytes
.store(1000000000 * 1024 * 1024 * 1024, Ordering::Relaxed);
tx.send(FlowInfo::Compaction("default".to_string()))
.unwrap();
tx.send(FlowInfo::L0Intra("default".to_string(), 0))
.unwrap();
assert!(flow_controller.discard_ratio() > f64::EPSILON);
}
// Exercises the Smoother's aggregate accessors (avg / recent / max /
// 90th percentile) and trend detection, for both integer and float samples,
// with a 5-entry window and no time-range threshold (last const param = 0).
#[test]
fn test_smoother() {
// Window size is 5: after seven observations the first two (1, 6) have been
// evicted, leaving {2, 3, 4, 5, 0} — avg 2.8, max 5, p90 4, most recent 0.
let mut smoother = Smoother::<u64, 5, SMOOTHER_STALE_RECORD_THRESHOLD, 0>::default();
smoother.observe(1);
smoother.observe(6);
smoother.observe(2);
smoother.observe(3);
smoother.observe(4);
smoother.observe(5);
smoother.observe(0);
assert!((smoother.get_avg() - 2.8).abs() < f64::EPSILON);
assert_eq!(smoother.get_recent(), 0);
assert_eq!(smoother.get_max(), 5);
assert_eq!(smoother.get_percentile_90(), 4);
assert_eq!(smoother.trend(), Trend::NoTrend);
// Same shape with f64 samples; window holds {2, 3, 4, 5, 9} — avg 4.6,
// and the rising tail yields an increasing trend.
let mut smoother = Smoother::<f64, 5, SMOOTHER_STALE_RECORD_THRESHOLD, 0>::default();
smoother.observe(1.0);
smoother.observe(6.0);
smoother.observe(2.0);
smoother.observe(3.0);
smoother.observe(4.0);
smoother.observe(5.0);
smoother.observe(9.0);
assert!((smoother.get_avg() - 4.6).abs() < f64::EPSILON);
assert!((smoother.get_recent() - 9.0).abs() < f64::EPSILON);
assert!((smoother.get_max() - 9.0).abs() < f64::EPSILON);
assert!((smoother.get_percentile_90() - 5.0).abs() < f64::EPSILON);
assert_eq!(smoother.trend(), Trend::Increasing);
}
// Exercises Smoother::trend() time-window handling via observe_with_time:
// records are split into a "left" (old) and "right" (recent) range by
// SMOOTHER_TIME_RANGE_THRESHOLD, and records older than
// SMOOTHER_STALE_RECORD_THRESHOLD are considered stale.
#[test]
fn test_smoother_trend() {
// The time range is not enough
let mut smoother = Smoother::<
u64,
6,
SMOOTHER_STALE_RECORD_THRESHOLD,
SMOOTHER_TIME_RANGE_THRESHOLD,
>::default();
let now = Instant::now_coarse();
// All records fall inside the time-range threshold, so no trend is derivable.
smoother.observe_with_time(
1,
now.sub(Duration::from_secs(SMOOTHER_TIME_RANGE_THRESHOLD - 1)),
);
smoother.observe_with_time(
1,
now.sub(Duration::from_secs(SMOOTHER_TIME_RANGE_THRESHOLD - 2)),
);
smoother.observe_with_time(
1,
now.sub(Duration::from_secs(SMOOTHER_TIME_RANGE_THRESHOLD - 3)),
);
smoother.observe_with_time(4, now.sub(Duration::from_secs(2)));
smoother.observe_with_time(4, now.sub(Duration::from_secs(1)));
smoother.observe_with_time(4, now);
assert_eq!(smoother.trend(), Trend::NoTrend);
// Incresing trend, the left range contains 3 records, the right range contains 1 records.
let mut smoother = Smoother::<
f64,
6,
SMOOTHER_STALE_RECORD_THRESHOLD,
SMOOTHER_TIME_RANGE_THRESHOLD,
>::default();
smoother.observe_with_time(
1.0,
now.sub(Duration::from_secs(SMOOTHER_TIME_RANGE_THRESHOLD + 1)),
);
smoother.observe_with_time(
1.0,
now.sub(Duration::from_secs(SMOOTHER_TIME_RANGE_THRESHOLD)),
);
smoother.observe_with_time(
1.0,
now.sub(Duration::from_secs(SMOOTHER_TIME_RANGE_THRESHOLD - 1)),
);
smoother.observe_with_time(4.0, now);
assert_eq!(smoother.trend(), Trend::Increasing);
// Decreasing trend, the left range contains 1 records, the right range contains 3 records.
let mut smoother = Smoother::<
f32,
6,
SMOOTHER_STALE_RECORD_THRESHOLD,
SMOOTHER_TIME_RANGE_THRESHOLD,
>::default();
smoother.observe_with_time(
4.0,
now.sub(Duration::from_secs(SMOOTHER_TIME_RANGE_THRESHOLD + 1)),
);
smoother.observe_with_time(1.0, now.sub(Duration::from_secs(2)));
smoother.observe_with_time(2.0, now.sub(Duration::from_secs(1)));
smoother.observe_with_time(1.0, now);
assert_eq!(smoother.trend(), Trend::Decreasing);
// No trend, the left range contains 1 records, the right range contains 3 records.
let mut smoother = Smoother::<
f32,
6,
SMOOTHER_STALE_RECORD_THRESHOLD,
SMOOTHER_TIME_RANGE_THRESHOLD,
>::default();
smoother.observe_with_time(
1.0,
now.sub(Duration::from_secs(SMOOTHER_TIME_RANGE_THRESHOLD + 1)),
);
smoother.observe_with_time(1.0, now.sub(Duration::from_secs(2)));
smoother.observe_with_time(3.0, now.sub(Duration::from_secs(1)));
smoother.observe_with_time(2.0, now);
assert_eq!(smoother.trend(), Trend::NoTrend);
// No trend, because the latest record is too old
let mut smoother = Smoother::<
u32,
6,
SMOOTHER_STALE_RECORD_THRESHOLD,
SMOOTHER_TIME_RANGE_THRESHOLD,
>::default();
smoother.observe_with_time(
1,
now.sub(Duration::from_secs(SMOOTHER_STALE_RECORD_THRESHOLD + 1)),
);
assert_eq!(smoother.trend(), Trend::NoTrend);
}
}
|
use instructions::tlb;
use registers::control::Cr3;
use structures::paging::page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags};
use structures::paging::{NotGiantPageSize, Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB};
use ux::u9;
use VirtAddr;
/// This type represents a page whose mapping has changed in the page table.
///
/// The old mapping might be still cached in the translation lookaside buffer (TLB), so it needs
/// to be flushed from the TLB before it's accessed. This type is returned from function that
/// change the mapping of a page to ensure that the TLB flush is not forgotten.
#[must_use = "Page Table changes must be flushed or ignored."]
pub struct MapperFlush<S: PageSize>(Page<S>);
impl<S: PageSize> MapperFlush<S> {
/// Create a new flush promise
// Crate-private: only the mapping operations in this module hand these out.
fn new(page: Page<S>) -> Self {
MapperFlush(page)
}
/// Flush the page from the TLB to ensure that the newest mapping is used.
// Consumes self, discharging the #[must_use] obligation on this type.
pub fn flush(self) {
tlb::flush(self.0.start_address());
}
/// Don't flush the TLB and silence the “must be used” warning.
pub fn ignore(self) {}
}
/// A trait for common page table operations.
// Implemented per supported page size (see the Size4KiB/Size2MiB/Size1GiB
// impls below), so the size-specific table walk is selected statically.
pub trait Mapper<S: PageSize> {
/// Creates a new mapping in the page table.
///
/// This function might need additional physical frames to create new page tables. These
/// frames are allocated from the `allocator` argument. At most three frames are required.
// `allocator` returns `None` when no frame is available, which surfaces as
// `MapToError::FrameAllocationFailed`.
fn map_to<A>(
&mut self,
page: Page<S>,
frame: PhysFrame<S>,
flags: PageTableFlags,
allocator: A,
) -> Result<MapperFlush<S>, MapToError>
where
A: FnMut() -> Option<PhysFrame>;
/// Removes a mapping from the page table.
///
/// If this function is successful, it deallocates the mapped frame via the passed `allocator`.
// `allocator` is FnOnce: it is invoked at most once, with the unmapped frame.
fn unmap<A>(&mut self, page: Page<S>, allocator: A) -> Result<MapperFlush<S>, UnmapError>
where
A: FnOnce(PhysFrame<S>);
/// Updates the flags of an existing mapping.
fn update_flags(
&mut self,
page: Page<S>,
flags: PageTableFlags,
) -> Result<MapperFlush<S>, FlagUpdateError>;
/// Return the frame that the specified page is mapped to.
fn translate_page(&self, page: Page<S>) -> Option<PhysFrame<S>>;
}
// A level-4 page table accessed through a recursive P4 entry, which lets the
// lower-level tables be reached via fixed virtual addresses (see the
// p3_page/p2_page/p1_page helpers at the bottom of this file).
pub struct RecursivePageTable<'a> {
p4: &'a mut PageTable,
// Index of the P4 entry that points back at the P4 table itself.
recursive_index: u9,
}
/// An error indicating that the passed page table is not recursively mapped.
///
/// Returned from `RecursivePageTable::new`.
#[derive(Debug)]
pub struct NotRecursivelyMapped;
// Errors returned from `Mapper::map_to`.
#[derive(Debug)]
pub enum MapToError {
// The `allocator` closure returned `None`.
FrameAllocationFailed,
// An upper-level entry had HUGE_PAGE set where a next-level table was expected.
EntryWithInvalidFlagsPresent,
// The target entry is already occupied.
PageAlreadyInUse,
}
// Errors returned from `Mapper::unmap`. The flags of the offending entry are
// carried in `EntryWithInvalidFlagsPresent`.
#[derive(Debug)]
pub enum UnmapError {
EntryWithInvalidFlagsPresent(PageTableFlags),
PageNotMapped,
// The entry's address was not a valid frame start address.
InvalidFrameAddressInPageTable,
}
// Errors returned from `Mapper::update_flags`.
#[derive(Debug)]
pub enum FlagUpdateError {
PageNotMapped,
}
impl<'a> RecursivePageTable<'a> {
/// Creates a new RecursivePageTable from the passed level 4 PageTable.
///
/// The page table must be recursively mapped, that means:
///
/// - The page table must have one recursive entry, i.e. an entry that points to the table
/// itself.
/// - The reference must use that “loop”, i.e. be of the form `0o_xxx_xxx_xxx_xxx_0000`
/// where `xxx` is the recursive entry.
/// - The page table must be active, i.e. the CR3 register must contain its physical address.
///
/// Otherwise `Err(NotRecursivelyMapped)` is returned.
pub fn new(table: &'a mut PageTable) -> Result<Self, NotRecursivelyMapped> {
// Derive the recursive index from the reference's own virtual address:
// a properly recursive reference repeats the same index at every level.
let page = Page::containing_address(VirtAddr::new(table as *const _ as u64));
let recursive_index = page.p4_index();
if page.p3_index() != recursive_index
|| page.p2_index() != recursive_index
|| page.p1_index() != recursive_index
{
return Err(NotRecursivelyMapped);
}
// The table must be the active one: its recursive entry's frame must be
// the frame currently loaded in CR3.
if Ok(Cr3::read().0) != table[recursive_index].frame() {
return Err(NotRecursivelyMapped);
}
Ok(RecursivePageTable {
p4: table,
recursive_index,
})
}
/// Creates a new RecursivePageTable without performing any checks.
///
/// The `recursive_index` parameter must be the index of the recursively mapped entry.
pub unsafe fn new_unchecked(table: &'a mut PageTable, recursive_index: u9) -> Self {
RecursivePageTable {
p4: table,
recursive_index,
}
}
// Maps `frame` to the virtual page with the same address. Convenience
// wrapper over `Mapper::map_to`; works for any page size this type
// implements `Mapper` for.
pub fn identity_map<A, S>(
&mut self,
frame: PhysFrame<S>,
flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<S>, MapToError>
where
A: FnMut() -> Option<PhysFrame>,
S: PageSize,
Self: Mapper<S>,
{
let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
self.map_to(page, frame, flags, allocator)
}
// Returns the next-level table behind `entry`, allocating and zeroing a
// fresh frame for it when the entry is unused. Fails if the allocator is
// exhausted or the entry is a huge-page mapping (no next table exists).
fn create_next_table<'b, A>(
entry: &'b mut PageTableEntry,
next_table_page: Page,
mut allocator: A,
) -> Result<&'b mut PageTable, MapToError>
where
A: FnMut() -> Option<PhysFrame>,
{
use structures::paging::PageTableFlags as Flags;
let created;
if entry.is_unused() {
if let Some(frame) = allocator() {
entry.set_frame(frame, Flags::PRESENT | Flags::WRITABLE);
created = true;
} else {
return Err(MapToError::FrameAllocationFailed);
}
} else {
created = false;
}
if entry.flags().contains(Flags::HUGE_PAGE) {
return Err(MapToError::EntryWithInvalidFlagsPresent);
}
// The table is reachable at `next_table_page` through the recursive
// mapping; the entry above was just verified/initialized, so the
// dereference targets a mapped page table.
let page_table_ptr = next_table_page.start_address().as_mut_ptr();
let page_table: &mut PageTable = unsafe { &mut *(page_table_ptr) };
// Newly allocated frames contain garbage; clear before use.
if created {
page_table.zero();
}
Ok(page_table)
}
}
// 1 GiB pages terminate the walk at the P3 level: the P3 entry maps the frame
// directly and must carry the HUGE_PAGE flag.
impl<'a> Mapper<Size1GiB> for RecursivePageTable<'a> {
fn map_to<A>(
&mut self,
page: Page<Size1GiB>,
frame: PhysFrame<Size1GiB>,
flags: PageTableFlags,
mut allocator: A,
) -> Result<MapperFlush<Size1GiB>, MapToError>
where
A: FnMut() -> Option<PhysFrame>,
{
use structures::paging::PageTableFlags as Flags;
let p4 = &mut self.p4;
let p3_page = p3_page(page, self.recursive_index);
let p3 = Self::create_next_table(&mut p4[page.p4_index()], p3_page, &mut allocator)?;
if !p3[page.p3_index()].is_unused() {
return Err(MapToError::PageAlreadyInUse);
}
// HUGE_PAGE is forced so the entry is interpreted as a 1 GiB mapping.
p3[page.p3_index()].set_addr(frame.start_address(), flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn unmap<A>(
&mut self,
page: Page<Size1GiB>,
allocator: A,
) -> Result<MapperFlush<Size1GiB>, UnmapError>
where
A: FnOnce(PhysFrame<Size1GiB>),
{
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
// Validate the P4 entry before dereferencing the recursive P3 address.
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p4_entry.flags()),
})?;
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &mut p3[page.p3_index()];
let flags = p3_entry.flags();
if !flags.contains(PageTableFlags::PRESENT) {
return Err(UnmapError::PageNotMapped);
}
// A 1 GiB mapping must have HUGE_PAGE set; otherwise this entry points
// at a P2 table, not a frame.
if !flags.contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::EntryWithInvalidFlagsPresent(p3_entry.flags()));
}
let frame = PhysFrame::from_start_address(p3_entry.addr())
.map_err(|()| UnmapError::InvalidFrameAddressInPageTable)?;
// Hand the frame back before clearing the entry.
allocator(frame);
p3_entry.set_unused();
Ok(MapperFlush::new(page))
}
fn update_flags(
&mut self,
page: Page<Size1GiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size1GiB>, FlagUpdateError> {
use structures::paging::PageTableFlags as Flags;
let p4 = &mut self.p4;
if p4[page.p4_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
if p3[page.p3_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
// HUGE_PAGE must stay set or the entry would stop being a 1 GiB mapping.
p3[page.p3_index()].set_flags(flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn translate_page(&self, page: Page<Size1GiB>) -> Option<PhysFrame<Size1GiB>> {
let p4 = &self.p4;
if p4[page.p4_index()].is_unused() {
return None;
}
let p3 = unsafe { &*(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.is_unused() {
return None;
}
PhysFrame::from_start_address(p3_entry.addr()).ok()
}
}
// 2 MiB pages terminate the walk at the P2 level: the P2 entry maps the frame
// directly and must carry the HUGE_PAGE flag.
impl<'a> Mapper<Size2MiB> for RecursivePageTable<'a> {
fn map_to<A>(
&mut self,
page: Page<Size2MiB>,
frame: PhysFrame<Size2MiB>,
flags: PageTableFlags,
mut allocator: A,
) -> Result<MapperFlush<Size2MiB>, MapToError>
where
A: FnMut() -> Option<PhysFrame>,
{
use structures::paging::PageTableFlags as Flags;
let p4 = &mut self.p4;
// Walk/create P3 then P2 through the recursive mapping.
let p3_page = p3_page(page, self.recursive_index);
let p3 = Self::create_next_table(&mut p4[page.p4_index()], p3_page, &mut allocator)?;
let p2_page = p2_page(page, self.recursive_index);
let p2 = Self::create_next_table(&mut p3[page.p3_index()], p2_page, &mut allocator)?;
if !p2[page.p2_index()].is_unused() {
return Err(MapToError::PageAlreadyInUse);
}
// HUGE_PAGE is forced so the entry is interpreted as a 2 MiB mapping.
p2[page.p2_index()].set_addr(frame.start_address(), flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn unmap<A>(
&mut self,
page: Page<Size2MiB>,
allocator: A,
) -> Result<MapperFlush<Size2MiB>, UnmapError>
where
A: FnOnce(PhysFrame<Size2MiB>),
{
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
// Validate each level before dereferencing the next recursive address.
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p4_entry.flags()),
})?;
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
p3_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p3_entry.flags()),
})?;
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
let p2_entry = &mut p2[page.p2_index()];
let flags = p2_entry.flags();
if !flags.contains(PageTableFlags::PRESENT) {
return Err(UnmapError::PageNotMapped);
}
// A 2 MiB mapping must have HUGE_PAGE set; otherwise this entry points
// at a P1 table, not a frame.
if !flags.contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::EntryWithInvalidFlagsPresent(p2_entry.flags()));
}
let frame = PhysFrame::from_start_address(p2_entry.addr())
.map_err(|()| UnmapError::InvalidFrameAddressInPageTable)?;
// Hand the frame back before clearing the entry.
allocator(frame);
p2_entry.set_unused();
Ok(MapperFlush::new(page))
}
fn update_flags(
&mut self,
page: Page<Size2MiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size2MiB>, FlagUpdateError> {
use structures::paging::PageTableFlags as Flags;
let p4 = &mut self.p4;
if p4[page.p4_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
if p3[page.p3_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
if p2[page.p2_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
// HUGE_PAGE must stay set or the entry would stop being a 2 MiB mapping.
p2[page.p2_index()].set_flags(flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn translate_page(&self, page: Page<Size2MiB>) -> Option<PhysFrame<Size2MiB>> {
let p4 = &self.p4;
if p4[page.p4_index()].is_unused() {
return None;
}
let p3 = unsafe { &*(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.is_unused() {
return None;
}
let p2 = unsafe { &*(p2_ptr(page, self.recursive_index)) };
let p2_entry = &p2[page.p2_index()];
if p2_entry.is_unused() {
return None;
}
PhysFrame::from_start_address(p2_entry.addr()).ok()
}
}
// 4 KiB pages use the full four-level walk: P4 -> P3 -> P2 -> P1, with the P1
// entry mapping the frame (no HUGE_PAGE flag involved).
impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
    /// Maps `page` to `frame`, creating any missing P3/P2/P1 tables via
    /// `allocator` through the recursive mapping.
    fn map_to<A>(
        &mut self,
        page: Page<Size4KiB>,
        frame: PhysFrame<Size4KiB>,
        flags: PageTableFlags,
        mut allocator: A,
    ) -> Result<MapperFlush<Size4KiB>, MapToError>
    where
        A: FnMut() -> Option<PhysFrame>,
    {
        let p4 = &mut self.p4;

        let p3_page = p3_page(page, self.recursive_index);
        let p3 = Self::create_next_table(&mut p4[page.p4_index()], p3_page, &mut allocator)?;
        let p2_page = p2_page(page, self.recursive_index);
        let p2 = Self::create_next_table(&mut p3[page.p3_index()], p2_page, &mut allocator)?;
        let p1_page = p1_page(page, self.recursive_index);
        let p1 = Self::create_next_table(&mut p2[page.p2_index()], p1_page, &mut allocator)?;

        if !p1[page.p1_index()].is_unused() {
            return Err(MapToError::PageAlreadyInUse);
        }
        p1[page.p1_index()].set_frame(frame, flags);

        Ok(MapperFlush::new(page))
    }

    /// Removes the mapping for `page` and hands the freed frame to `allocator`.
    ///
    /// Each level is validated with `frame()` before the next recursive
    /// address is dereferenced; a `HugeFrame` error reports the flags of the
    /// entry that failed.
    fn unmap<A>(
        &mut self,
        page: Page<Size4KiB>,
        allocator: A,
    ) -> Result<MapperFlush<Size4KiB>, UnmapError>
    where
        A: FnOnce(PhysFrame<Size4KiB>),
    {
        let p4 = &mut self.p4;
        let p4_entry = &p4[page.p4_index()];
        p4_entry.frame().map_err(|err| match err {
            FrameError::FrameNotPresent => UnmapError::PageNotMapped,
            FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p4_entry.flags()),
        })?;

        let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
        let p3_entry = &p3[page.p3_index()];
        p3_entry.frame().map_err(|err| match err {
            FrameError::FrameNotPresent => UnmapError::PageNotMapped,
            FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p3_entry.flags()),
        })?;

        let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
        let p2_entry = &p2[page.p2_index()];
        p2_entry.frame().map_err(|err| match err {
            FrameError::FrameNotPresent => UnmapError::PageNotMapped,
            // BUGFIX: previously reported p3_entry.flags() here; the failing
            // entry is the P2 entry, so report its flags.
            FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p2_entry.flags()),
        })?;

        let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
        let p1_entry = &mut p1[page.p1_index()];
        let frame = p1_entry.frame().map_err(|err| match err {
            FrameError::FrameNotPresent => UnmapError::PageNotMapped,
            // BUGFIX: likewise, report the P1 entry's flags, not the P3 entry's.
            FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p1_entry.flags()),
        })?;

        // Hand the frame back before clearing the entry.
        allocator(frame);
        p1_entry.set_unused();
        Ok(MapperFlush::new(page))
    }

    /// Replaces the flags of the existing P1 mapping for `page`.
    fn update_flags(
        &mut self,
        page: Page<Size4KiB>,
        flags: PageTableFlags,
    ) -> Result<MapperFlush<Size4KiB>, FlagUpdateError> {
        let p4 = &mut self.p4;
        if p4[page.p4_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }
        let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
        if p3[page.p3_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }
        let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
        if p2[page.p2_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }
        let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
        if p1[page.p1_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }
        p1[page.p1_index()].set_flags(flags);
        Ok(MapperFlush::new(page))
    }

    /// Walks all four levels read-only and returns the mapped frame, if any.
    fn translate_page(&self, page: Page<Size4KiB>) -> Option<PhysFrame<Size4KiB>> {
        let p4 = &self.p4;
        if p4[page.p4_index()].is_unused() {
            return None;
        }
        let p3 = unsafe { &*(p3_ptr(page, self.recursive_index)) };
        let p3_entry = &p3[page.p3_index()];
        if p3_entry.is_unused() {
            return None;
        }
        let p2 = unsafe { &*(p2_ptr(page, self.recursive_index)) };
        let p2_entry = &p2[page.p2_index()];
        if p2_entry.is_unused() {
            return None;
        }
        let p1 = unsafe { &*(p1_ptr(page, self.recursive_index)) };
        let p1_entry = &p1[page.p1_index()];
        if p1_entry.is_unused() {
            return None;
        }
        PhysFrame::from_start_address(p1_entry.addr()).ok()
    }
}
// Pointer to the P3 table for `page`, reachable through the recursive mapping.
fn p3_ptr<S: PageSize>(page: Page<S>, recursive_index: u9) -> *mut PageTable {
p3_page(page, recursive_index).start_address().as_mut_ptr()
}
// Virtual page of the P3 table: three recursive hops collapse the walk so the
// final translation lands on the P3 table selected by `page.p4_index()`.
fn p3_page<S: PageSize>(page: Page<S>, recursive_index: u9) -> Page {
Page::from_page_table_indices(
recursive_index,
recursive_index,
recursive_index,
page.p4_index(),
)
}
// Pointer to the P2 table for `page` (only meaningful for non-1GiB pages).
fn p2_ptr<S: NotGiantPageSize>(page: Page<S>, recursive_index: u9) -> *mut PageTable {
p2_page(page, recursive_index).start_address().as_mut_ptr()
}
// Virtual page of the P2 table: two recursive hops, then p4/p3 indices.
fn p2_page<S: NotGiantPageSize>(page: Page<S>, recursive_index: u9) -> Page {
Page::from_page_table_indices(
recursive_index,
recursive_index,
page.p4_index(),
page.p3_index(),
)
}
// Pointer to the P1 table for `page` (4 KiB pages only).
fn p1_ptr(page: Page<Size4KiB>, recursive_index: u9) -> *mut PageTable {
p1_page(page, recursive_index).start_address().as_mut_ptr()
}
// Virtual page of the P1 table: one recursive hop, then p4/p3/p2 indices.
fn p1_page(page: Page<Size4KiB>, recursive_index: u9) -> Page {
Page::from_page_table_indices(
recursive_index,
page.p4_index(),
page.p3_index(),
page.p2_index(),
)
}
Move `identity_map` from `RecursivePageTable` to the `Mapper` trait as a provided (default) method, so every `Mapper` implementation gets it for free.
use instructions::tlb;
use registers::control::Cr3;
use structures::paging::page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags};
use structures::paging::{NotGiantPageSize, Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB};
use ux::u9;
use VirtAddr;
/// This type represents a page whose mapping has changed in the page table.
///
/// The old mapping might be still cached in the translation lookaside buffer (TLB), so it needs
/// to be flushed from the TLB before it's accessed. This type is returned from function that
/// change the mapping of a page to ensure that the TLB flush is not forgotten.
#[must_use = "Page Table changes must be flushed or ignored."]
pub struct MapperFlush<S: PageSize>(Page<S>);
impl<S: PageSize> MapperFlush<S> {
/// Create a new flush promise
// Crate-private: only the mapping operations in this module hand these out.
fn new(page: Page<S>) -> Self {
MapperFlush(page)
}
/// Flush the page from the TLB to ensure that the newest mapping is used.
// Consumes self, discharging the #[must_use] obligation on this type.
pub fn flush(self) {
tlb::flush(self.0.start_address());
}
/// Don't flush the TLB and silence the “must be used” warning.
pub fn ignore(self) {}
}
/// A trait for common page table operations.
// Implemented per supported page size (4 KiB / 2 MiB / 1 GiB), so the
// size-specific table walk is selected statically.
pub trait Mapper<S: PageSize> {
    /// Creates a new mapping in the page table.
    ///
    /// This function might need additional physical frames to create new page tables. These
    /// frames are allocated from the `allocator` argument. At most three frames are required.
    fn map_to<A>(
        &mut self,
        page: Page<S>,
        frame: PhysFrame<S>,
        flags: PageTableFlags,
        allocator: A,
    ) -> Result<MapperFlush<S>, MapToError>
    where
        A: FnMut() -> Option<PhysFrame>;

    /// Removes a mapping from the page table.
    ///
    /// If this function is successful, it deallocates the mapped frame via the passed `allocator`.
    fn unmap<A>(&mut self, page: Page<S>, allocator: A) -> Result<MapperFlush<S>, UnmapError>
    where
        A: FnOnce(PhysFrame<S>);

    /// Updates the flags of an existing mapping.
    fn update_flags(
        &mut self,
        page: Page<S>,
        flags: PageTableFlags,
    ) -> Result<MapperFlush<S>, FlagUpdateError>;

    /// Return the frame that the specified page is mapped to.
    fn translate_page(&self, page: Page<S>) -> Option<PhysFrame<S>>;

    /// Maps the given frame to the virtual page with the same address.
    ///
    /// Provided method that delegates to [`Mapper::map_to`]. The previous
    /// `S: PageSize` and `Self: Mapper<S>` bounds were redundant (both already
    /// hold for every method of this trait) and have been removed.
    fn identity_map<A>(
        &mut self,
        frame: PhysFrame<S>,
        flags: PageTableFlags,
        allocator: &mut A,
    ) -> Result<MapperFlush<S>, MapToError>
    where
        A: FnMut() -> Option<PhysFrame>,
    {
        let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
        self.map_to(page, frame, flags, allocator)
    }
}
// A level-4 page table accessed through a recursive P4 entry, which lets the
// lower-level tables be reached via fixed virtual addresses (see the
// p3_page/p2_page/p1_page helpers at the bottom of this file).
pub struct RecursivePageTable<'a> {
p4: &'a mut PageTable,
// Index of the P4 entry that points back at the P4 table itself.
recursive_index: u9,
}
/// An error indicating that the passed page table is not recursively mapped.
///
/// Returned from `RecursivePageTable::new`.
#[derive(Debug)]
pub struct NotRecursivelyMapped;
// Errors returned from `Mapper::map_to`.
#[derive(Debug)]
pub enum MapToError {
// The `allocator` closure returned `None`.
FrameAllocationFailed,
// An upper-level entry had HUGE_PAGE set where a next-level table was expected.
EntryWithInvalidFlagsPresent,
// The target entry is already occupied.
PageAlreadyInUse,
}
// Errors returned from `Mapper::unmap`. The flags of the offending entry are
// carried in `EntryWithInvalidFlagsPresent`.
#[derive(Debug)]
pub enum UnmapError {
EntryWithInvalidFlagsPresent(PageTableFlags),
PageNotMapped,
// The entry's address was not a valid frame start address.
InvalidFrameAddressInPageTable,
}
// Errors returned from `Mapper::update_flags`.
#[derive(Debug)]
pub enum FlagUpdateError {
PageNotMapped,
}
impl<'a> RecursivePageTable<'a> {
/// Creates a new RecursivePageTable from the passed level 4 PageTable.
///
/// The page table must be recursively mapped, that means:
///
/// - The page table must have one recursive entry, i.e. an entry that points to the table
/// itself.
/// - The reference must use that “loop”, i.e. be of the form `0o_xxx_xxx_xxx_xxx_0000`
/// where `xxx` is the recursive entry.
/// - The page table must be active, i.e. the CR3 register must contain its physical address.
///
/// Otherwise `Err(NotRecursivelyMapped)` is returned.
pub fn new(table: &'a mut PageTable) -> Result<Self, NotRecursivelyMapped> {
// Derive the recursive index from the reference's own virtual address:
// a properly recursive reference repeats the same index at every level.
let page = Page::containing_address(VirtAddr::new(table as *const _ as u64));
let recursive_index = page.p4_index();
if page.p3_index() != recursive_index
|| page.p2_index() != recursive_index
|| page.p1_index() != recursive_index
{
return Err(NotRecursivelyMapped);
}
// The table must be the active one: its recursive entry's frame must be
// the frame currently loaded in CR3.
if Ok(Cr3::read().0) != table[recursive_index].frame() {
return Err(NotRecursivelyMapped);
}
Ok(RecursivePageTable {
p4: table,
recursive_index,
})
}
/// Creates a new RecursivePageTable without performing any checks.
///
/// The `recursive_index` parameter must be the index of the recursively mapped entry.
pub unsafe fn new_unchecked(table: &'a mut PageTable, recursive_index: u9) -> Self {
RecursivePageTable {
p4: table,
recursive_index,
}
}
// Returns the next-level table behind `entry`, allocating and zeroing a
// fresh frame for it when the entry is unused. Fails if the allocator is
// exhausted or the entry is a huge-page mapping (no next table exists).
fn create_next_table<'b, A>(
entry: &'b mut PageTableEntry,
next_table_page: Page,
mut allocator: A,
) -> Result<&'b mut PageTable, MapToError>
where
A: FnMut() -> Option<PhysFrame>,
{
use structures::paging::PageTableFlags as Flags;
let created;
if entry.is_unused() {
if let Some(frame) = allocator() {
entry.set_frame(frame, Flags::PRESENT | Flags::WRITABLE);
created = true;
} else {
return Err(MapToError::FrameAllocationFailed);
}
} else {
created = false;
}
if entry.flags().contains(Flags::HUGE_PAGE) {
return Err(MapToError::EntryWithInvalidFlagsPresent);
}
// The table is reachable at `next_table_page` through the recursive
// mapping; the entry above was just verified/initialized, so the
// dereference targets a mapped page table.
let page_table_ptr = next_table_page.start_address().as_mut_ptr();
let page_table: &mut PageTable = unsafe { &mut *(page_table_ptr) };
// Newly allocated frames contain garbage; clear before use.
if created {
page_table.zero();
}
Ok(page_table)
}
}
// 1 GiB pages terminate the walk at the P3 level: the P3 entry maps the frame
// directly and must carry the HUGE_PAGE flag.
impl<'a> Mapper<Size1GiB> for RecursivePageTable<'a> {
fn map_to<A>(
&mut self,
page: Page<Size1GiB>,
frame: PhysFrame<Size1GiB>,
flags: PageTableFlags,
mut allocator: A,
) -> Result<MapperFlush<Size1GiB>, MapToError>
where
A: FnMut() -> Option<PhysFrame>,
{
use structures::paging::PageTableFlags as Flags;
let p4 = &mut self.p4;
let p3_page = p3_page(page, self.recursive_index);
let p3 = Self::create_next_table(&mut p4[page.p4_index()], p3_page, &mut allocator)?;
if !p3[page.p3_index()].is_unused() {
return Err(MapToError::PageAlreadyInUse);
}
// HUGE_PAGE is forced so the entry is interpreted as a 1 GiB mapping.
p3[page.p3_index()].set_addr(frame.start_address(), flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn unmap<A>(
&mut self,
page: Page<Size1GiB>,
allocator: A,
) -> Result<MapperFlush<Size1GiB>, UnmapError>
where
A: FnOnce(PhysFrame<Size1GiB>),
{
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
// Validate the P4 entry before dereferencing the recursive P3 address.
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p4_entry.flags()),
})?;
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &mut p3[page.p3_index()];
let flags = p3_entry.flags();
if !flags.contains(PageTableFlags::PRESENT) {
return Err(UnmapError::PageNotMapped);
}
// A 1 GiB mapping must have HUGE_PAGE set; otherwise this entry points
// at a P2 table, not a frame.
if !flags.contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::EntryWithInvalidFlagsPresent(p3_entry.flags()));
}
let frame = PhysFrame::from_start_address(p3_entry.addr())
.map_err(|()| UnmapError::InvalidFrameAddressInPageTable)?;
// Hand the frame back before clearing the entry.
allocator(frame);
p3_entry.set_unused();
Ok(MapperFlush::new(page))
}
fn update_flags(
&mut self,
page: Page<Size1GiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size1GiB>, FlagUpdateError> {
use structures::paging::PageTableFlags as Flags;
let p4 = &mut self.p4;
if p4[page.p4_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
if p3[page.p3_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
// HUGE_PAGE must stay set or the entry would stop being a 1 GiB mapping.
p3[page.p3_index()].set_flags(flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn translate_page(&self, page: Page<Size1GiB>) -> Option<PhysFrame<Size1GiB>> {
let p4 = &self.p4;
if p4[page.p4_index()].is_unused() {
return None;
}
let p3 = unsafe { &*(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.is_unused() {
return None;
}
PhysFrame::from_start_address(p3_entry.addr()).ok()
}
}
// 2 MiB pages terminate the walk at the P2 level: the P2 entry maps the frame
// directly and must carry the HUGE_PAGE flag.
impl<'a> Mapper<Size2MiB> for RecursivePageTable<'a> {
fn map_to<A>(
&mut self,
page: Page<Size2MiB>,
frame: PhysFrame<Size2MiB>,
flags: PageTableFlags,
mut allocator: A,
) -> Result<MapperFlush<Size2MiB>, MapToError>
where
A: FnMut() -> Option<PhysFrame>,
{
use structures::paging::PageTableFlags as Flags;
let p4 = &mut self.p4;
// Walk/create P3 then P2 through the recursive mapping.
let p3_page = p3_page(page, self.recursive_index);
let p3 = Self::create_next_table(&mut p4[page.p4_index()], p3_page, &mut allocator)?;
let p2_page = p2_page(page, self.recursive_index);
let p2 = Self::create_next_table(&mut p3[page.p3_index()], p2_page, &mut allocator)?;
if !p2[page.p2_index()].is_unused() {
return Err(MapToError::PageAlreadyInUse);
}
// HUGE_PAGE is forced so the entry is interpreted as a 2 MiB mapping.
p2[page.p2_index()].set_addr(frame.start_address(), flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn unmap<A>(
&mut self,
page: Page<Size2MiB>,
allocator: A,
) -> Result<MapperFlush<Size2MiB>, UnmapError>
where
A: FnOnce(PhysFrame<Size2MiB>),
{
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
// Validate each level before dereferencing the next recursive address.
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p4_entry.flags()),
})?;
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
p3_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p3_entry.flags()),
})?;
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
let p2_entry = &mut p2[page.p2_index()];
let flags = p2_entry.flags();
if !flags.contains(PageTableFlags::PRESENT) {
return Err(UnmapError::PageNotMapped);
}
// A 2 MiB mapping must have HUGE_PAGE set; otherwise this entry points
// at a P1 table, not a frame.
if !flags.contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::EntryWithInvalidFlagsPresent(p2_entry.flags()));
}
let frame = PhysFrame::from_start_address(p2_entry.addr())
.map_err(|()| UnmapError::InvalidFrameAddressInPageTable)?;
// Hand the frame back before clearing the entry.
allocator(frame);
p2_entry.set_unused();
Ok(MapperFlush::new(page))
}
fn update_flags(
&mut self,
page: Page<Size2MiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size2MiB>, FlagUpdateError> {
use structures::paging::PageTableFlags as Flags;
let p4 = &mut self.p4;
if p4[page.p4_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
if p3[page.p3_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
if p2[page.p2_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
// HUGE_PAGE must stay set or the entry would stop being a 2 MiB mapping.
p2[page.p2_index()].set_flags(flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn translate_page(&self, page: Page<Size2MiB>) -> Option<PhysFrame<Size2MiB>> {
let p4 = &self.p4;
if p4[page.p4_index()].is_unused() {
return None;
}
let p3 = unsafe { &*(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.is_unused() {
return None;
}
let p2 = unsafe { &*(p2_ptr(page, self.recursive_index)) };
let p2_entry = &p2[page.p2_index()];
if p2_entry.is_unused() {
return None;
}
PhysFrame::from_start_address(p2_entry.addr()).ok()
}
}
impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
// Maps `page` to `frame` with the full four-level walk (P4 -> P3 -> P2 -> P1),
// creating any missing intermediate tables via `allocator`.
fn map_to<A>(
&mut self,
page: Page<Size4KiB>,
frame: PhysFrame<Size4KiB>,
flags: PageTableFlags,
mut allocator: A,
) -> Result<MapperFlush<Size4KiB>, MapToError>
where
A: FnMut() -> Option<PhysFrame>,
{
let p4 = &mut self.p4;
let p3_page = p3_page(page, self.recursive_index);
let p3 = Self::create_next_table(&mut p4[page.p4_index()], p3_page, &mut allocator)?;
let p2_page = p2_page(page, self.recursive_index);
let p2 = Self::create_next_table(&mut p3[page.p3_index()], p2_page, &mut allocator)?;
let p1_page = p1_page(page, self.recursive_index);
let p1 = Self::create_next_table(&mut p2[page.p2_index()], p1_page, &mut allocator)?;
if !p1[page.p1_index()].is_unused() {
return Err(MapToError::PageAlreadyInUse);
}
// 4 KiB mappings live in the P1 entry; no HUGE_PAGE flag involved.
p1[page.p1_index()].set_frame(frame, flags);
Ok(MapperFlush::new(page))
}
fn unmap<A>(
&mut self,
page: Page<Size4KiB>,
allocator: A,
) -> Result<MapperFlush<Size4KiB>, UnmapError>
where
A: FnOnce(PhysFrame<Size4KiB>),
{
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p4_entry.flags()),
})?;
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
p3_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p3_entry.flags()),
})?;
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
let p2_entry = &p2[page.p2_index()];
p2_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p3_entry.flags()),
})?;
let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
let p1_entry = &mut p1[page.p1_index()];
let frame = p1_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::EntryWithInvalidFlagsPresent(p3_entry.flags()),
})?;
allocator(frame);
p1_entry.set_unused();
Ok(MapperFlush::new(page))
}
fn update_flags(
&mut self,
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size4KiB>, FlagUpdateError> {
let p4 = &mut self.p4;
if p4[page.p4_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
if p3[page.p3_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
if p2[page.p2_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
if p1[page.p1_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p1[page.p1_index()].set_flags(flags);
Ok(MapperFlush::new(page))
}
fn translate_page(&self, page: Page<Size4KiB>) -> Option<PhysFrame<Size4KiB>> {
let p4 = &self.p4;
if p4[page.p4_index()].is_unused() {
return None;
}
let p3 = unsafe { &*(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.is_unused() {
return None;
}
let p2 = unsafe { &*(p2_ptr(page, self.recursive_index)) };
let p2_entry = &p2[page.p2_index()];
if p2_entry.is_unused() {
return None;
}
let p1 = unsafe { &*(p1_ptr(page, self.recursive_index)) };
let p1_entry = &p1[page.p1_index()];
if p1_entry.is_unused() {
return None;
}
PhysFrame::from_start_address(p1_entry.addr()).ok()
}
}
/// Raw pointer to the P3 table reachable for `page` through the recursive
/// P4 entry.
fn p3_ptr<S: PageSize>(page: Page<S>, recursive_index: u9) -> *mut PageTable {
    let table_page = p3_page(page, recursive_index);
    table_page.start_address().as_mut_ptr()
}
/// Virtual page of the P3 table for `page` under the recursive mapping:
/// indices (r, r, r, p4_index).
fn p3_page<S: PageSize>(page: Page<S>, recursive_index: u9) -> Page {
    let r = recursive_index;
    Page::from_page_table_indices(r, r, r, page.p4_index())
}
/// Raw pointer to the P2 table reachable for `page` through the recursive
/// P4 entry.
fn p2_ptr<S: NotGiantPageSize>(page: Page<S>, recursive_index: u9) -> *mut PageTable {
    let table_page = p2_page(page, recursive_index);
    table_page.start_address().as_mut_ptr()
}
/// Virtual page of the P2 table for `page` under the recursive mapping:
/// indices (r, r, p4_index, p3_index).
fn p2_page<S: NotGiantPageSize>(page: Page<S>, recursive_index: u9) -> Page {
    let r = recursive_index;
    Page::from_page_table_indices(r, r, page.p4_index(), page.p3_index())
}
/// Raw pointer to the P1 table reachable for `page` through the recursive
/// P4 entry.
fn p1_ptr(page: Page<Size4KiB>, recursive_index: u9) -> *mut PageTable {
    let table_page = p1_page(page, recursive_index);
    table_page.start_address().as_mut_ptr()
}
/// Virtual page of the P1 table for `page` under the recursive mapping:
/// indices (r, p4_index, p3_index, p2_index).
fn p1_page(page: Page<Size4KiB>, recursive_index: u9) -> Page {
    let r = recursive_index;
    Page::from_page_table_indices(r, page.p4_index(), page.p3_index(), page.p2_index())
}
// ---------------------------------------------------------------------------
use std::collections::BTreeMap;
use std::io::prelude::*;
use byteorder::{ReadBytesExt, BigEndian};
use super::Value;
use super::super::deserialize::{DecoderError, DecodeResult};
use super::super::marker as m;
/// Decode a single PackStream value from `reader`.
pub fn from_reader<'a, R: Read + 'a>(reader: &mut R) -> DecodeResult<Value> {
    Builder::new(reader).build()
}
/// Low-level events produced while scanning marker bytes. Sized variants
/// carry the count of bytes/items that follow in the stream.
enum ParserEvent {
    Null,
    True,
    False,
    Integer(i64),
    Float(f64),
    /// byte length of the UTF-8 payload that follows
    String(usize),
    /// number of child values that follow
    List(usize),
    /// number of key/value pairs that follow
    Map(usize),
    /// structure signature byte plus number of fields that follow
    Struct(u8, usize),
}
// Short alias so match arms read as `ev::Null`, `ev::List(n)`, etc.
use self::ParserEvent as ev;
// Result of decoding one marker byte (plus any trailing length/signature bytes).
type ParserEventResult = DecodeResult<ParserEvent>;
/// Streaming PackStream decoder: reads marker bytes from `reader` and builds
/// `Value`s on an explicit stack (container parsers pop their children back off).
pub struct Builder<'a, R: Read + 'a> {
    reader: &'a mut R,
    stack: Vec<Value>,
}
impl<'a, R: Read + 'a> Builder<'a, R> {
    /// Create a builder over `reader` with an empty value stack.
    pub fn new(reader: &'a mut R) -> Self {
        Builder {
            reader: reader,
            stack: Vec::new(),
        }
    }

    /// Decode one value from the stream; an empty stream yields `Value::Null`.
    pub fn build(&mut self) -> DecodeResult<Value> {
        try!(self.parse());
        Ok(self.stack.pop().unwrap_or(Value::Null))
    }

    /// Read the next marker and push the decoded value onto the stack.
    /// Container events recurse: each child is parsed and popped back off.
    /// A clean EOF before the marker byte is treated as "nothing to do".
    pub fn parse(&mut self) -> DecodeResult<()> {
        let mut buf = [0u8; 1];
        let bytes_read = try!(self.reader.read(&mut buf));
        if bytes_read == 0 {
            return Ok(())
        }
        match self.read_next(buf[0]) {
            Ok(e) => match e {
                ev::Null => self.stack.push(Value::Null),
                ev::True => self.stack.push(Value::Boolean(true)),
                ev::False => self.stack.push(Value::Boolean(false)),
                ev::Integer(v) => self.stack.push(Value::Integer(v)),
                ev::Float(v) => self.stack.push(Value::Float(v)),
                ev::String(size) => {
                    let value = try!(self.read_string(size));
                    self.stack.push(Value::String(value));
                },
                ev::List(size) => {
                    let values = {
                        let mut values = vec![];
                        for _ in 0..size {
                            try!(self.parse());
                            match self.stack.pop() {
                                Some(v) => values.push(v),
                                _ => return Err(DecoderError::UnexpectedEOF)
                            }
                        }
                        values
                    };
                    self.stack.push(Value::List(values));
                },
                ev::Map(size) => {
                    // `size` pairs => 2 * size parsed items, alternating key/value.
                    let size = size * 2;
                    let values = {
                        let mut cur_key: String = String::new();
                        let mut values: BTreeMap<String, Value> = BTreeMap::new();
                        for i in 1..(size + 1) {
                            try!(self.parse());
                            match self.stack.pop() {
                                // Odd positions are keys and must be strings.
                                Some(Value::String(ref k)) if i % 2 != 0 => cur_key = k.to_owned(),
                                Some(ref v) if i % 2 != 0 => return Err(DecoderError::UnexpectedInput(
                                    "Map key".to_owned(), format!("{:?}", v)
                                )),
                                Some(v) => {
                                    // NOTE(review): an empty string is rejected as a map
                                    // key here — confirm empty keys are indeed invalid.
                                    if cur_key.is_empty() {
                                        return Err(DecoderError::UnexpectedInput(
                                            "Map key".to_owned(), "None".to_owned()
                                        ))
                                    }
                                    values.insert(cur_key.clone(), v);
                                },
                                _ => return Err(DecoderError::UnexpectedEOF),
                            }
                        }
                        values
                    };
                    self.stack.push(Value::Map(values));
                },
                ev::Struct(s, size) => {
                    let values = {
                        let mut values = vec![];
                        for _ in 0..size {
                            try!(self.parse());
                            match self.stack.pop() {
                                Some(v) => values.push(v),
                                _ => return Err(DecoderError::UnexpectedEOF)
                            }
                        }
                        values
                    };
                    self.stack.push(Value::Structure(s, values));
                },
            },
            Err(e) => return Err(From::from(e))
        };
        Ok(())
    }

    /// Translate a marker byte into a parser event, reading any trailing
    /// length/signature bytes the marker implies.
    fn read_next(&mut self, marker: u8) -> ParserEventResult {
        match marker {
            m::NULL => Ok(ev::Null),
            m::TRUE => Ok(ev::True),
            m::FALSE => Ok(ev::False),
            // Tiny positive/negative ints are encoded in the marker itself.
            v @ 0x00...0x7F => Ok(ev::Integer(v as i64)),
            v @ 0xF0...0xFF => Ok(ev::Integer(((v | 0b1111_0000) as i8) as i64)),
            m::INT_8 => self.read_int(8),
            m::INT_16 => self.read_int(16),
            m::INT_32 => self.read_int(32),
            m::INT_64 => self.read_int(64),
            m::FLOAT => self.reader.read_f64::<BigEndian>().map(
                |v| ev::Float(v)).map_err(From::from),
            // Tiny containers carry their size in the marker's low nibble.
            v @ 0x80...0x8F => Ok(ev::String((v & 0b0000_1111) as usize)),
            m::STRING_8 => self.read_len(8).map(|v| ev::String(v)),
            m::STRING_16 => self.read_len(16).map(|v| ev::String(v)),
            m::STRING_32 => self.read_len(32).map(|v| ev::String(v)),
            v @ 0x90...0x9F => Ok(ev::List((v & 0b0000_1111) as usize)),
            m::LIST_8 => self.read_len(8).map(|v| ev::List(v)),
            m::LIST_16 => self.read_len(16).map(|v| ev::List(v)),
            m::LIST_32 => self.read_len(32).map(|v| ev::List(v)),
            v @ 0xA0...0xAF => Ok(ev::Map((v & 0b0000_1111) as usize)),
            m::MAP_8 => self.read_len(8).map(|v| ev::Map(v)),
            m::MAP_16 => self.read_len(16).map(|v| ev::Map(v)),
            m::MAP_32 => self.read_len(32).map(|v| ev::Map(v)),
            v @ 0xB0...0xBF => self.reader.read_u8().map(
                |s| ev::Struct(s, (v & 0b0000_1111) as usize)).map_err(From::from),
            m::STRUCT_8 => self.read_len(8)
                .map_err(From::from)
                .and_then(|size| self.reader.read_u8()
                    .map(|sig| ev::Struct(sig, size))
                    .map_err(From::from)),
            m::STRUCT_16 => self.read_len(16)
                .map_err(From::from)
                .and_then(|size| self.reader.read_u8()
                    .map(|sig| ev::Struct(sig, size))
                    .map_err(From::from)),
            _ => unreachable!()
        }
    }

    /// Read a big-endian signed integer of `size` bits.
    fn read_int(&mut self, size: u8) -> ParserEventResult {
        match size {
            8 => self.reader.read_i8().map(|v| ev::Integer(v as i64)).map_err(From::from),
            16 => self.reader.read_i16::<BigEndian>().map(|v| ev::Integer(v as i64)).map_err(From::from),
            32 => self.reader.read_i32::<BigEndian>().map(|v| ev::Integer(v as i64)).map_err(From::from),
            64 => self.reader.read_i64::<BigEndian>().map(|v| ev::Integer(v as i64)).map_err(From::from),
            _ => unreachable!(),
        }
    }

    /// Read a big-endian unsigned length field of `size` bits.
    fn read_len(&mut self, size: usize) -> DecodeResult<usize> {
        match size {
            8 => self.reader.read_u8().map(|v| v as usize).map_err(From::from),
            16 => self.reader.read_u16::<BigEndian>().map(|v| v as usize).map_err(From::from),
            32 => self.reader.read_u32::<BigEndian>().map(|v| v as usize).map_err(From::from),
            _ => unreachable!(),
        }
    }

    /// Read exactly `size` bytes and convert them to a UTF-8 `String`.
    ///
    /// BUGFIX: the previous implementation called `Read::read` without
    /// checking the returned count, so a short read silently produced
    /// zero-padded or truncated data, and its chunk loop was sized with
    /// `f32` math that loses precision for large payloads. `read_exact`
    /// fills the buffer completely or returns an error.
    fn read_string(&mut self, size: usize) -> DecodeResult<String> {
        let mut store = vec![0u8; size];
        try!(self.reader.read_exact(&mut store));
        String::from_utf8(store).map_err(From::from)
    }
}
#[cfg(test)]
mod tests {
    use std::collections::BTreeMap;
    use std::string::String;
    use std::io::Cursor;
    use super::from_reader;
    use super::super::Value;
    use ::v1::packstream::marker as m;

    #[test]
    fn decode_nil() {
        let mut input = Cursor::new(vec![0xC0]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Null, result);
    }

    #[test]
    fn decode_bool() {
        let mut input = Cursor::new(vec![0xC3]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Boolean(true), result);
        let mut input = Cursor::new(vec![0xC2]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Boolean(false), result);
    }

    // Integer 64
    #[test]
    fn decode_int64_positive() {
        let mut input = Cursor::new(vec![m::INT_64, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Integer(m::RANGE_POS_INT_64.1), result);
    }

    #[test]
    fn decode_int64_negative() {
        let mut input = Cursor::new(vec![m::INT_64, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Integer(m::RANGE_NEG_INT_64.0), result);
    }

    // Integer 32
    #[test]
    fn decode_int32_positive() {
        let mut input = Cursor::new(vec![m::INT_32, 0x7F, 0xFF, 0xFF, 0xFF]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Integer(m::RANGE_POS_INT_32.1), result);
    }

    #[test]
    fn decode_int32_negative() {
        let mut input = Cursor::new(vec![m::INT_32, 0x80, 0x00, 0x00, 0x00]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Integer(m::RANGE_NEG_INT_32.0), result);
    }

    // Integer 16
    #[test]
    fn decode_int16_positive() {
        let mut input = Cursor::new(vec![m::INT_16, 0x7F, 0xFF]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Integer(m::RANGE_POS_INT_16.1), result);
    }

    #[test]
    fn decode_int16_negative() {
        let mut input = Cursor::new(vec![m::INT_16, 0x80, 0x00]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Integer(m::RANGE_NEG_INT_16.0), result);
    }

    // Integer 8
    #[test]
    fn decode_int8_positive() {
        let mut input = Cursor::new(vec![0x7F]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Integer(m::RANGE_TINY_INT.1), result);
    }

    #[test]
    fn decode_int8_negative() {
        let mut input = Cursor::new(vec![m::INT_8, 0x80]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Integer(m::RANGE_NEG_INT_8.0), result);
        let mut input = Cursor::new(vec![0xF0]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Integer(m::RANGE_TINY_INT.0), result);
    }

    #[test]
    fn decode_float_positive() {
        let mut input = Cursor::new(vec![m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Float(1.1), result);
    }

    #[test]
    fn decode_float_negative() {
        let mut input = Cursor::new(vec![m::FLOAT, 0xBF, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::Float(-1.1), result);
    }

    // Re-enabled: these were commented out, leaving STRING_32/STRING_16
    // decoding completely untested.
    #[test]
    fn decode_string32() {
        let size = 70_000;
        let mut input = Cursor::new((0..size).fold(
            vec![m::STRING_32, 0x00, 0x01, 0x11, 0x70],
            |mut acc, _| { acc.push(b'A'); acc }
        ));
        let expected = (0..size).fold(String::new(), |mut acc, _| { acc.push('A'); acc });
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::String(expected), result);
    }

    #[test]
    fn decode_string16() {
        let size = 5_000;
        let mut input = Cursor::new((0..size).fold(
            vec![m::STRING_16, 0x13, 0x88],
            |mut acc, _| { acc.push(b'A'); acc }
        ));
        let expected = (0..size).fold(String::new(), |mut acc, _| { acc.push('A'); acc });
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::String(expected), result);
    }

    #[test]
    fn decode_string8() {
        let size = 100;
        let mut input = Cursor::new((0..size).fold(
            vec![m::STRING_8, 0x64],
            |mut acc, _| { acc.push(b'A'); acc }
        ));
        let expected = (0..size).fold(String::new(), |mut acc, _| { acc.push('A'); acc });
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::String(expected), result);
    }

    #[test]
    fn decode_tiny_string() {
        // Exclusive upper bound raised to 0x90 so the largest tiny-string
        // marker (0x8F, size 15) is covered too.
        for marker in 0x80..0x90 {
            let size = marker - m::TINY_STRING_NIBBLE;
            let mut input = Cursor::new((0..size).fold(
                vec![marker],
                |mut acc, _| { acc.push(b'A'); acc }
            ));
            let expected = (0..size).fold(String::new(), |mut acc, _| { acc.push('A'); acc });
            let result = from_reader(&mut input).unwrap();
            assert_eq!(Value::String(expected), result);
        }
    }

    #[test]
    fn decode_char() {
        // +1 so 'Z' itself is included (half-open range previously skipped it).
        for c in b'A'..(b'Z' + 1) {
            let mut input = Cursor::new(vec![0x81, c]);
            let result = from_reader(&mut input).unwrap();
            assert_eq!(Value::String(format!("{}", c as char)), result);
        }
    }

    #[test]
    fn decode_list32() {
        let size = 70_000;
        let mut input = Cursor::new((0..size).fold(
            vec![m::LIST_32, 0x00, 0x01, 0x11, 0x70],
            |mut acc, _| { acc.push(0x01); acc }
        ));
        let expected = Value::List(vec![Value::Integer(1); size]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_list16() {
        let size = 5_000;
        let mut input = Cursor::new((0..size).fold(
            vec![m::LIST_16, 0x13, 0x88],
            |mut acc, _| { acc.push(0x01); acc }
        ));
        let expected = Value::List(vec![Value::Integer(1); size]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_list8() {
        let size = 200;
        let mut input = Cursor::new((0..size).fold(
            vec![m::LIST_8, 0xC8],
            |mut acc, _| { acc.push(0x01); acc }
        ));
        let expected = Value::List(vec![Value::Integer(1); size]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_tiny_list() {
        // Exclusive upper bound raised to 0xA0 so the largest tiny-list
        // marker (0x9F, size 15) is covered too.
        for marker in 0x90..0xA0 {
            let size = (marker - m::TINY_LIST_NIBBLE) as usize;
            let mut input = Cursor::new((0..size).fold(
                vec![marker],
                |mut acc, _| { acc.push(0x01); acc }
            ));
            let expected = Value::List(vec![Value::Integer(1); size]);
            let result = from_reader(&mut input).unwrap();
            assert_eq!(expected, result);
        }
    }

    #[test]
    fn decode_list_of_string() {
        let size = 3;
        let mut input = Cursor::new(
            vec![m::TINY_LIST_NIBBLE + size as u8,
                 m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
                 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
                 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
                 0x77, 0x78, 0x79, 0x7A,
                 m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
                 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
                 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
                 0x77, 0x78, 0x79, 0x7A,
                 m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
                 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
                 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
                 0x77, 0x78, 0x79, 0x7A]
        );
        let result = from_reader(&mut input).unwrap();
        let expected = Value::List(vec![Value::String("abcdefghijklmnopqrstuvwxyz".to_owned()); size]);
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_list_of_int() {
        let size = 3;
        let mut input = Cursor::new(
            vec![m::TINY_LIST_NIBBLE + size as u8,
                 m::INT_16, 0x7D, 0x00,
                 m::INT_16, 0x7D, 0x00,
                 m::INT_16, 0x7D, 0x00]
        );
        let result = from_reader(&mut input).unwrap();
        let expected = Value::List(vec![Value::Integer(32_000); size]);
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_list_of_float() {
        let size = 3;
        let mut input = Cursor::new(
            vec![m::TINY_LIST_NIBBLE + size as u8,
                 m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
                 m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
                 m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A]
        );
        let result = from_reader(&mut input).unwrap();
        let expected = Value::List(vec![Value::Float(1.1); size]);
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_list_of_bool() {
        let size = 4;
        let mut input = Cursor::new(
            vec![m::TINY_LIST_NIBBLE + size as u8,
                 m::TRUE, m::FALSE, m::TRUE, m::FALSE]
        );
        let result = from_reader(&mut input).unwrap();
        let expected = Value::List(vec![Value::Boolean(true),
                                        Value::Boolean(false),
                                        Value::Boolean(true),
                                        Value::Boolean(false)]);
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_mixed_list() {
        let size = 3;
        let mut input = Cursor::new(
            vec![m::TINY_LIST_NIBBLE + size as u8,
                 0x01,
                 m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
                 m::TINY_STRING_NIBBLE + 1, 0x41]
        );
        let result = from_reader(&mut input).unwrap();
        let expected = Value::List(vec![Value::Integer(1),
                                        Value::Float(1.1),
                                        Value::String("A".to_owned())]);
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_map32() {
        let size = 70_000;
        let mut input = Cursor::new((0..size).fold(
            vec![m::MAP_32, 0x00, 0x01, 0x11, 0x70],
            |mut acc, i| {
                let b1 = 48 + ((i % 100000) / 10000) as u8;
                let b2 = 48 + ((i % 10000) / 1000) as u8;
                let b3 = 48 + ((i % 1000) / 100) as u8;
                let b4 = 48 + ((i % 100) / 10) as u8;
                let b5 = 48 + (i % 10) as u8;
                acc.extend([0x85, b1, b2, b3, b4, b5, 0x01].iter());
                acc
            }
        ));
        let expected = Value::Map((0..size).fold(
            BTreeMap::<String, Value>::new(),
            |mut acc, i| { acc.insert(format!("{:05}", i), Value::Integer(1)); acc }
        ));
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_map16() {
        let size = 5_000;
        let mut input = Cursor::new((0..size).fold(
            vec![m::MAP_16, 0x13, 0x88],
            |mut acc, i| {
                let b1 = 48 + ((i % 10000) / 1000) as u8;
                let b2 = 48 + ((i % 1000) / 100) as u8;
                let b3 = 48 + ((i % 100) / 10) as u8;
                let b4 = 48 + (i % 10) as u8;
                acc.extend([0x84, b1, b2, b3, b4, 0x01].iter());
                acc
            }
        ));
        let expected = Value::Map((0..size).fold(
            BTreeMap::<String, Value>::new(),
            |mut acc, i| { acc.insert(format!("{:04}", i), Value::Integer(1)); acc }
        ));
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_map8() {
        let size = 200;
        let mut input = Cursor::new((0..size).fold(
            vec![m::MAP_8, 0xC8],
            |mut acc, i| {
                let b1 = 48 + ((i % 1000) / 100) as u8;
                let b2 = 48 + ((i % 100) / 10) as u8;
                let b3 = 48 + (i % 10) as u8;
                acc.extend([0x83, b1, b2, b3, 0x01].iter());
                acc
            }
        ));
        let expected = Value::Map((0..size).fold(
            BTreeMap::<String, Value>::new(),
            |mut acc, i| { acc.insert(format!("{:03}", i), Value::Integer(1)); acc }
        ));
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_tiny_map() {
        let size = 3;
        let mut input = Cursor::new((0..size).fold(
            vec![m::TINY_MAP_NIBBLE + size],
            |mut acc, i| {
                acc.extend([0x81, 0x30 + i].iter());
                acc.push(0x01);
                acc
            }
        ));
        let expected = Value::Map((0..size).fold(
            BTreeMap::<String, Value>::new(),
            |mut acc, i| { acc.insert(format!("{}", i), Value::Integer(1)); acc }
        ));
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_map_of_string() {
        let size = 3;
        let mut input = Cursor::new(
            vec![m::TINY_MAP_NIBBLE + size,
                 0x81, 0x41,
                 m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
                 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
                 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
                 0x77, 0x78, 0x79, 0x7A,
                 0x81, 0x42,
                 m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
                 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
                 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
                 0x77, 0x78, 0x79, 0x7A,
                 0x81, 0x43,
                 m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
                 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
                 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
                 0x77, 0x78, 0x79, 0x7A]
        );
        let expected = {
            let mut expected: BTreeMap<String, Value> = BTreeMap::new();
            expected.insert("A".to_owned(), Value::String("abcdefghijklmnopqrstuvwxyz".to_owned()));
            expected.insert("B".to_owned(), Value::String("abcdefghijklmnopqrstuvwxyz".to_owned()));
            expected.insert("C".to_owned(), Value::String("abcdefghijklmnopqrstuvwxyz".to_owned()));
            Value::Map(expected)
        };
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_map_of_int() {
        let size = 3;
        let mut input = Cursor::new(
            vec![m::TINY_MAP_NIBBLE + size,
                 0x81, 0x41, m::INT_16, 0x7D, 0x00,
                 0x81, 0x42, m::INT_16, 0x7D, 0x00,
                 0x81, 0x43, m::INT_16, 0x7D, 0x00]
        );
        let expected = {
            let mut expected: BTreeMap<String, Value> = BTreeMap::new();
            expected.insert("A".to_owned(), Value::Integer(32_000));
            expected.insert("B".to_owned(), Value::Integer(32_000));
            expected.insert("C".to_owned(), Value::Integer(32_000));
            Value::Map(expected)
        };
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_map_of_float() {
        let size = 3;
        let mut input = Cursor::new(
            vec![m::TINY_MAP_NIBBLE + size,
                 0x81, 0x41, m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
                 0x81, 0x42, m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
                 0x81, 0x43, m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A]
        );
        let expected = {
            let mut expected: BTreeMap<String, Value> = BTreeMap::new();
            expected.insert("A".to_owned(), Value::Float(1.1));
            expected.insert("B".to_owned(), Value::Float(1.1));
            expected.insert("C".to_owned(), Value::Float(1.1));
            Value::Map(expected)
        };
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_map_of_bool() {
        let size = 4;
        let mut input = Cursor::new(
            vec![m::TINY_MAP_NIBBLE + size,
                 0x81, 0x41, m::TRUE,
                 0x81, 0x42, m::FALSE,
                 0x81, 0x43, m::TRUE,
                 0x81, 0x44, m::FALSE]
        );
        let expected = {
            let mut expected: BTreeMap<String, Value> = BTreeMap::new();
            expected.insert("A".to_owned(), Value::Boolean(true));
            expected.insert("B".to_owned(), Value::Boolean(false));
            expected.insert("C".to_owned(), Value::Boolean(true));
            expected.insert("D".to_owned(), Value::Boolean(false));
            Value::Map(expected)
        };
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_tiny_structure() {
        let mut input = Cursor::new(vec![m::TINY_STRUCT_NIBBLE + 0x03, 0x22,
                                         0x01,
                                         m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
                                         0x81, 0x41
        ]);
        let expected = Value::Structure(0x22, vec![Value::Integer(1),
                                                   Value::Float(1.1),
                                                   Value::String("A".to_owned())]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_structure8() {
        let size = 16;
        let mut input = Cursor::new((0..size).fold(
            vec![m::STRUCT_8, 0x10, 0x22],
            |mut acc, _| { acc.push(0x01); acc }
        ));
        let expected = Value::Structure(0x22, vec![Value::Integer(1); size]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }

    #[test]
    fn decode_structure16() {
        let size = 256;
        let mut input = Cursor::new((0..size).fold(
            vec![m::STRUCT_16, 0x01, 0x00, 0x22],
            |mut acc, _| { acc.push(0x01); acc }
        ));
        let expected = Value::Structure(0x22, vec![Value::Integer(1); size]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }
}
// add missing string tests
use std::collections::BTreeMap;
use std::io::prelude::*;
use byteorder::{ReadBytesExt, BigEndian};
use super::Value;
use super::super::deserialize::{DecoderError, DecodeResult};
use super::super::marker as m;
/// Decode a single PackStream value from `reader`.
pub fn from_reader<'a, R: Read + 'a>(reader: &mut R) -> DecodeResult<Value> {
    Builder::new(reader).build()
}
/// Low-level events produced while scanning marker bytes. Sized variants
/// carry the count of bytes/items that follow in the stream.
enum ParserEvent {
    Null,
    True,
    False,
    Integer(i64),
    Float(f64),
    /// byte length of the UTF-8 payload that follows
    String(usize),
    /// number of child values that follow
    List(usize),
    /// number of key/value pairs that follow
    Map(usize),
    /// structure signature byte plus number of fields that follow
    Struct(u8, usize),
}
// Short alias so match arms read as `ev::Null`, `ev::List(n)`, etc.
use self::ParserEvent as ev;
// Result of decoding one marker byte (plus any trailing length/signature bytes).
type ParserEventResult = DecodeResult<ParserEvent>;
/// Streaming PackStream decoder: reads marker bytes from `reader` and builds
/// `Value`s on an explicit stack (container parsers pop their children back off).
pub struct Builder<'a, R: Read + 'a> {
    reader: &'a mut R,
    stack: Vec<Value>,
}
impl<'a, R: Read + 'a> Builder<'a, R> {
    /// Create a builder over `reader` with an empty value stack.
    pub fn new(reader: &'a mut R) -> Self {
        Builder {
            reader: reader,
            stack: Vec::new(),
        }
    }

    /// Decode one value from the stream; an empty stream yields `Value::Null`.
    pub fn build(&mut self) -> DecodeResult<Value> {
        try!(self.parse());
        Ok(self.stack.pop().unwrap_or(Value::Null))
    }

    /// Read the next marker and push the decoded value onto the stack.
    /// Container events recurse: each child is parsed and popped back off.
    /// A clean EOF before the marker byte is treated as "nothing to do".
    pub fn parse(&mut self) -> DecodeResult<()> {
        let mut buf = [0u8; 1];
        let bytes_read = try!(self.reader.read(&mut buf));
        if bytes_read == 0 {
            return Ok(())
        }
        match self.read_next(buf[0]) {
            Ok(e) => match e {
                ev::Null => self.stack.push(Value::Null),
                ev::True => self.stack.push(Value::Boolean(true)),
                ev::False => self.stack.push(Value::Boolean(false)),
                ev::Integer(v) => self.stack.push(Value::Integer(v)),
                ev::Float(v) => self.stack.push(Value::Float(v)),
                ev::String(size) => {
                    let value = try!(self.read_string(size));
                    self.stack.push(Value::String(value));
                },
                ev::List(size) => {
                    let values = {
                        let mut values = vec![];
                        for _ in 0..size {
                            try!(self.parse());
                            match self.stack.pop() {
                                Some(v) => values.push(v),
                                _ => return Err(DecoderError::UnexpectedEOF)
                            }
                        }
                        values
                    };
                    self.stack.push(Value::List(values));
                },
                ev::Map(size) => {
                    // `size` pairs => 2 * size parsed items, alternating key/value.
                    let size = size * 2;
                    let values = {
                        let mut cur_key: String = String::new();
                        let mut values: BTreeMap<String, Value> = BTreeMap::new();
                        for i in 1..(size + 1) {
                            try!(self.parse());
                            match self.stack.pop() {
                                // Odd positions are keys and must be strings.
                                Some(Value::String(ref k)) if i % 2 != 0 => cur_key = k.to_owned(),
                                Some(ref v) if i % 2 != 0 => return Err(DecoderError::UnexpectedInput(
                                    "Map key".to_owned(), format!("{:?}", v)
                                )),
                                Some(v) => {
                                    // NOTE(review): an empty string is rejected as a map
                                    // key here — confirm empty keys are indeed invalid.
                                    if cur_key.is_empty() {
                                        return Err(DecoderError::UnexpectedInput(
                                            "Map key".to_owned(), "None".to_owned()
                                        ))
                                    }
                                    values.insert(cur_key.clone(), v);
                                },
                                _ => return Err(DecoderError::UnexpectedEOF),
                            }
                        }
                        values
                    };
                    self.stack.push(Value::Map(values));
                },
                ev::Struct(s, size) => {
                    let values = {
                        let mut values = vec![];
                        for _ in 0..size {
                            try!(self.parse());
                            match self.stack.pop() {
                                Some(v) => values.push(v),
                                _ => return Err(DecoderError::UnexpectedEOF)
                            }
                        }
                        values
                    };
                    self.stack.push(Value::Structure(s, values));
                },
            },
            Err(e) => return Err(From::from(e))
        };
        Ok(())
    }

    /// Translate a marker byte into a parser event, reading any trailing
    /// length/signature bytes the marker implies.
    fn read_next(&mut self, marker: u8) -> ParserEventResult {
        match marker {
            m::NULL => Ok(ev::Null),
            m::TRUE => Ok(ev::True),
            m::FALSE => Ok(ev::False),
            // Tiny positive/negative ints are encoded in the marker itself.
            v @ 0x00...0x7F => Ok(ev::Integer(v as i64)),
            v @ 0xF0...0xFF => Ok(ev::Integer(((v | 0b1111_0000) as i8) as i64)),
            m::INT_8 => self.read_int(8),
            m::INT_16 => self.read_int(16),
            m::INT_32 => self.read_int(32),
            m::INT_64 => self.read_int(64),
            m::FLOAT => self.reader.read_f64::<BigEndian>().map(
                |v| ev::Float(v)).map_err(From::from),
            // Tiny containers carry their size in the marker's low nibble.
            v @ 0x80...0x8F => Ok(ev::String((v & 0b0000_1111) as usize)),
            m::STRING_8 => self.read_len(8).map(|v| ev::String(v)),
            m::STRING_16 => self.read_len(16).map(|v| ev::String(v)),
            m::STRING_32 => self.read_len(32).map(|v| ev::String(v)),
            v @ 0x90...0x9F => Ok(ev::List((v & 0b0000_1111) as usize)),
            m::LIST_8 => self.read_len(8).map(|v| ev::List(v)),
            m::LIST_16 => self.read_len(16).map(|v| ev::List(v)),
            m::LIST_32 => self.read_len(32).map(|v| ev::List(v)),
            v @ 0xA0...0xAF => Ok(ev::Map((v & 0b0000_1111) as usize)),
            m::MAP_8 => self.read_len(8).map(|v| ev::Map(v)),
            m::MAP_16 => self.read_len(16).map(|v| ev::Map(v)),
            m::MAP_32 => self.read_len(32).map(|v| ev::Map(v)),
            v @ 0xB0...0xBF => self.reader.read_u8().map(
                |s| ev::Struct(s, (v & 0b0000_1111) as usize)).map_err(From::from),
            m::STRUCT_8 => self.read_len(8)
                .map_err(From::from)
                .and_then(|size| self.reader.read_u8()
                    .map(|sig| ev::Struct(sig, size))
                    .map_err(From::from)),
            m::STRUCT_16 => self.read_len(16)
                .map_err(From::from)
                .and_then(|size| self.reader.read_u8()
                    .map(|sig| ev::Struct(sig, size))
                    .map_err(From::from)),
            _ => unreachable!()
        }
    }

    /// Read a big-endian signed integer of `size` bits.
    fn read_int(&mut self, size: u8) -> ParserEventResult {
        match size {
            8 => self.reader.read_i8().map(|v| ev::Integer(v as i64)).map_err(From::from),
            16 => self.reader.read_i16::<BigEndian>().map(|v| ev::Integer(v as i64)).map_err(From::from),
            32 => self.reader.read_i32::<BigEndian>().map(|v| ev::Integer(v as i64)).map_err(From::from),
            64 => self.reader.read_i64::<BigEndian>().map(|v| ev::Integer(v as i64)).map_err(From::from),
            _ => unreachable!(),
        }
    }

    /// Read a big-endian unsigned length field of `size` bits.
    fn read_len(&mut self, size: usize) -> DecodeResult<usize> {
        match size {
            8 => self.reader.read_u8().map(|v| v as usize).map_err(From::from),
            16 => self.reader.read_u16::<BigEndian>().map(|v| v as usize).map_err(From::from),
            32 => self.reader.read_u32::<BigEndian>().map(|v| v as usize).map_err(From::from),
            _ => unreachable!(),
        }
    }

    /// Read exactly `size` bytes and convert them to a UTF-8 `String`.
    ///
    /// BUGFIX: the previous implementation called `Read::read` without
    /// checking the returned count, so a short read silently produced
    /// zero-padded or truncated data, and its chunk loop was sized with
    /// `f32` math that loses precision for large payloads. `read_exact`
    /// fills the buffer completely or returns an error.
    fn read_string(&mut self, size: usize) -> DecodeResult<String> {
        let mut store = vec![0u8; size];
        try!(self.reader.read_exact(&mut store));
        String::from_utf8(store).map_err(From::from)
    }
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use std::string::String;
use std::io::Cursor;
use super::from_reader;
use super::super::Value;
use ::v1::packstream::marker as m;
#[test]
fn decode_nil() {
let mut input = Cursor::new(vec![0xC0]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Null, result);
}
#[test]
fn decode_bool() {
let mut input = Cursor::new(vec![0xC3]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Boolean(true), result);
let mut input = Cursor::new(vec![0xC2]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Boolean(false), result);
}
// Integer 64
// The INT_64 marker byte is followed by an 8-byte big-endian two's-complement payload.
#[test]
fn decode_int64_positive() {
// Payload 0x7FFF_FFFF_FFFF_FFFF is i64::MAX, the top of the positive INT_64 range.
let mut input = Cursor::new(vec![m::INT_64, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Integer(m::RANGE_POS_INT_64.1), result);
}
#[test]
fn decode_int64_negative() {
// Payload 0x8000_0000_0000_0000 is i64::MIN, the bottom of the negative INT_64 range.
let mut input = Cursor::new(vec![m::INT_64, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Integer(m::RANGE_NEG_INT_64.0), result);
}
// Integer 32
#[test]
fn decode_int32_positive() {
// i32::MAX as 4 big-endian payload bytes.
let mut input = Cursor::new(vec![m::INT_32, 0x7F, 0xFF, 0xFF, 0xFF]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Integer(m::RANGE_POS_INT_32.1), result);
}
#[test]
fn decode_int32_negative() {
// i32::MIN as 4 big-endian payload bytes.
let mut input = Cursor::new(vec![m::INT_32, 0x80, 0x00, 0x00, 0x00]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Integer(m::RANGE_NEG_INT_32.0), result);
}
// Integer 16
#[test]
fn decode_int16_positive() {
// i16::MAX as 2 big-endian payload bytes.
let mut input = Cursor::new(vec![m::INT_16, 0x7F, 0xFF]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Integer(m::RANGE_POS_INT_16.1), result);
}
#[test]
fn decode_int16_negative() {
// i16::MIN as 2 big-endian payload bytes.
let mut input = Cursor::new(vec![m::INT_16, 0x80, 0x00]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Integer(m::RANGE_NEG_INT_16.0), result);
}
// Integer 8
#[test]
fn decode_int8_positive() {
// NOTE(review): despite the name, 0x7F here is a bare tiny-int byte (no INT_8
// marker); small positive integers are encoded inline, so this asserts the
// tiny-int maximum rather than an INT_8 value.
let mut input = Cursor::new(vec![0x7F]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Integer(m::RANGE_TINY_INT.1), result);
}
#[test]
fn decode_int8_negative() {
// INT_8 marker followed by 0x80: the most negative one-byte value.
let mut input = Cursor::new(vec![m::INT_8, 0x80]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Integer(m::RANGE_NEG_INT_8.0), result);
// 0xF0 is a bare (marker-less) tiny int: the bottom of the tiny-int range.
let mut input = Cursor::new(vec![0xF0]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Integer(m::RANGE_TINY_INT.0), result);
}
#[test]
fn decode_float_positive() {
// FLOAT marker followed by the 8-byte big-endian IEEE-754 encoding of 1.1.
let mut input = Cursor::new(vec![m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Float(1.1), result);
}
#[test]
fn decode_float_negative() {
// Same payload with the sign bit set: -1.1.
let mut input = Cursor::new(vec![m::FLOAT, 0xBF, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A]);
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::Float(-1.1), result);
}
#[test]
fn decode_string32() {
// STRING_32 carries a 4-byte big-endian length: 0x0001_1170 == 70_000.
let size = 70_000;
let mut input = Cursor::new((0..size).fold(
vec![m::STRING_32, 0x00, 0x01, 0x11, 0x70],
|mut acc, _| { acc.push(b'A'); acc }
));
// Expected value is the same run of 'A's as a Rust String.
let expected = (0..size).fold(String::new(), |mut acc, _| { acc.push('A'); acc });
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::String(expected), result);
}
#[test]
fn decode_string16() {
// STRING_16 carries a 2-byte big-endian length: 0x1388 == 5_000.
let size = 5_000;
let mut input = Cursor::new((0..size).fold(
vec![m::STRING_16, 0x13, 0x88],
|mut acc, _| { acc.push(b'A'); acc }
));
let expected = (0..size).fold(String::new(), |mut acc, _| { acc.push('A'); acc });
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::String(expected), result);
}
#[test]
fn decode_string8() {
// STRING_8 carries a 1-byte length: 0xC8 == 200.
let size = 200;
let mut input = Cursor::new((0..size).fold(
vec![m::STRING_8, 0xC8],
|mut acc, _| { acc.push(b'A'); acc }
));
let expected = (0..size).fold(String::new(), |mut acc, _| { acc.push('A'); acc });
let result = from_reader(&mut input).unwrap();
assert_eq!(Value::String(expected), result);
}
/// Tiny strings use markers 0x80..=0x8F, with the length (0..=15) in the low
/// nibble. The upper bound must be the exclusive 0x90 so that marker 0x8F —
/// the maximum tiny-string length of 15 — is exercised too; the previous
/// `0x80..0x8F` range silently skipped it.
#[test]
fn decode_tiny_string() {
    for marker in 0x80..0x90 {
        let size = marker - m::TINY_STRING_NIBBLE;
        // Marker byte followed by `size` 'A' bytes.
        let mut input = Cursor::new((0..size).fold(
            vec![marker],
            |mut acc, _| { acc.push(b'A'); acc }
        ));
        let expected = (0..size).fold(String::new(), |mut acc, _| { acc.push('A'); acc });
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::String(expected), result);
    }
}
/// Decodes every ASCII uppercase letter as a one-character tiny string
/// (marker 0x81 = tiny string of length 1). The bound is `b'Z' + 1` because
/// a half-open range is used; the previous `b'A'..b'Z'` never tested 'Z'.
#[test]
fn decode_char() {
    for c in b'A'..(b'Z' + 1) {
        let mut input = Cursor::new(vec![0x81, c]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(Value::String(format!("{}", c as char)), result);
    }
}
#[test]
fn decode_list32() {
// LIST_32 carries a 4-byte big-endian element count: 0x0001_1170 == 70_000.
// Each element is the bare tiny int 0x01.
let size = 70_000;
let mut input = Cursor::new((0..size).fold(
vec![m::LIST_32, 0x00, 0x01, 0x11, 0x70],
|mut acc, _| { acc.push(0x01); acc }
));
let expected = Value::List(vec![Value::Integer(1); size]);
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_list16() {
// LIST_16 carries a 2-byte big-endian element count: 0x1388 == 5_000.
let size = 5_000;
let mut input = Cursor::new((0..size).fold(
vec![m::LIST_16, 0x13, 0x88],
|mut acc, _| { acc.push(0x01); acc }
));
let expected = Value::List(vec![Value::Integer(1); size]);
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_list8() {
// LIST_8 carries a 1-byte element count: 0xC8 == 200.
let size = 200;
let mut input = Cursor::new((0..size).fold(
vec![m::LIST_8, 0xC8],
|mut acc, _| { acc.push(0x01); acc }
));
let expected = Value::List(vec![Value::Integer(1); size]);
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
/// Tiny lists use markers 0x90..=0x9F, with the element count (0..=15) in the
/// low nibble. The upper bound must be the exclusive 0xA0 so that marker 0x9F
/// — the maximum tiny-list length of 15 — is exercised too; the previous
/// `0x90..0x9F` range silently skipped it.
#[test]
fn decode_tiny_list() {
    for marker in 0x90..0xA0 {
        let size = (marker - m::TINY_LIST_NIBBLE) as usize;
        // Marker byte followed by `size` bare tiny-int 0x01 elements.
        let mut input = Cursor::new((0..size).fold(
            vec![marker],
            |mut acc, _| { acc.push(0x01); acc }
        ));
        let expected = Value::List(vec![Value::Integer(1); size]);
        let result = from_reader(&mut input).unwrap();
        assert_eq!(expected, result);
    }
}
#[test]
fn decode_list_of_string() {
// Tiny list of three STRING_8 values; 0x1A == 26, and 0x61..0x7A are the
// ASCII bytes 'a'..'z'.
let size = 3;
let mut input = Cursor::new(
vec![m::TINY_LIST_NIBBLE + size as u8,
m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
0x77, 0x78, 0x79, 0x7A,
m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
0x77, 0x78, 0x79, 0x7A,
m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
0x77, 0x78, 0x79, 0x7A]
);
let result = from_reader(&mut input).unwrap();
let expected = Value::List(vec![Value::String("abcdefghijklmnopqrstuvwxyz".to_owned()); size]);
assert_eq!(expected, result);
}
#[test]
fn decode_list_of_int() {
// Tiny list of three INT_16 values; 0x7D00 big-endian == 32_000.
let size = 3;
let mut input = Cursor::new(
vec![m::TINY_LIST_NIBBLE + size as u8,
m::INT_16, 0x7D, 0x00,
m::INT_16, 0x7D, 0x00,
m::INT_16, 0x7D, 0x00]
);
let result = from_reader(&mut input).unwrap();
let expected = Value::List(vec![Value::Integer(32_000); size]);
assert_eq!(expected, result);
}
#[test]
fn decode_list_of_float() {
// Tiny list of three FLOAT values (IEEE-754 big-endian 1.1).
let size = 3;
let mut input = Cursor::new(
vec![m::TINY_LIST_NIBBLE + size as u8,
m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A]
);
let result = from_reader(&mut input).unwrap();
let expected = Value::List(vec![Value::Float(1.1); size]);
assert_eq!(expected, result);
}
#[test]
fn decode_list_of_bool() {
// Tiny list of four single-byte boolean markers.
let size = 4;
let mut input = Cursor::new(
vec![m::TINY_LIST_NIBBLE + size as u8,
m::TRUE, m::FALSE, m::TRUE, m::FALSE]
);
let result = from_reader(&mut input).unwrap();
let expected = Value::List(vec![Value::Boolean(true),
Value::Boolean(false),
Value::Boolean(true),
Value::Boolean(false)]);
assert_eq!(expected, result);
}
#[test]
fn decode_mixed_list() {
// Heterogeneous tiny list: a bare tiny int, a FLOAT, and a tiny string "A".
let size = 3;
let mut input = Cursor::new(
vec![m::TINY_LIST_NIBBLE + size as u8,
0x01,
m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
m::TINY_STRING_NIBBLE + 1, 0x41]
);
let result = from_reader(&mut input).unwrap();
let expected = Value::List(vec![Value::Integer(1),
Value::Float(1.1),
Value::String("A".to_owned())]);
assert_eq!(expected, result);
}
#[test]
fn decode_map32() {
// MAP_32 carries a 4-byte big-endian entry count: 0x0001_1170 == 70_000.
// Each entry is a 5-char tiny string key ("00000".."69999", built digit by
// digit from ASCII '0' == 48) mapped to the bare tiny int 0x01.
let size = 70_000;
let mut input = Cursor::new((0..size).fold(
vec![m::MAP_32, 0x00, 0x01, 0x11, 0x70],
|mut acc, i| {
let b1 = 48 + ((i % 100000) / 10000) as u8;
let b2 = 48 + ((i % 10000) / 1000) as u8;
let b3 = 48 + ((i % 1000) / 100) as u8;
let b4 = 48 + ((i % 100) / 10) as u8;
let b5 = 48 + (i % 10) as u8;
// 0x85 == tiny string of length 5.
acc.extend([0x85, b1, b2, b3, b4, b5, 0x01].iter());
acc
}
));
// format!("{:05}") produces the same zero-padded keys as the byte builder.
let expected = Value::Map((0..size).fold(
BTreeMap::<String, Value>::new(),
|mut acc, i| { acc.insert(format!("{:05}", i), Value::Integer(1)); acc }
));
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_map16() {
// MAP_16 carries a 2-byte big-endian entry count: 0x1388 == 5_000.
// Keys are 4-digit zero-padded tiny strings.
let size = 5_000;
let mut input = Cursor::new((0..size).fold(
vec![m::MAP_16, 0x13, 0x88],
|mut acc, i| {
let b1 = 48 + ((i % 10000) / 1000) as u8;
let b2 = 48 + ((i % 1000) / 100) as u8;
let b3 = 48 + ((i % 100) / 10) as u8;
let b4 = 48 + (i % 10) as u8;
// 0x84 == tiny string of length 4.
acc.extend([0x84, b1, b2, b3, b4, 0x01].iter());
acc
}
));
let expected = Value::Map((0..size).fold(
BTreeMap::<String, Value>::new(),
|mut acc, i| { acc.insert(format!("{:04}", i), Value::Integer(1)); acc }
));
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_map8() {
// MAP_8 carries a 1-byte entry count: 0xC8 == 200.
// Keys are 3-digit zero-padded tiny strings.
let size = 200;
let mut input = Cursor::new((0..size).fold(
vec![m::MAP_8, 0xC8],
|mut acc, i| {
let b1 = 48 + ((i % 1000) / 100) as u8;
let b2 = 48 + ((i % 100) / 10) as u8;
let b3 = 48 + (i % 10) as u8;
// 0x83 == tiny string of length 3.
acc.extend([0x83, b1, b2, b3, 0x01].iter());
acc
}
));
let expected = Value::Map((0..size).fold(
BTreeMap::<String, Value>::new(),
|mut acc, i| { acc.insert(format!("{:03}", i), Value::Integer(1)); acc }
));
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_tiny_map() {
// Tiny map with the entry count in the marker's low nibble. Keys are the
// single-digit tiny strings "0".."2" (0x30 == ASCII '0').
let size = 3;
let mut input = Cursor::new((0..size).fold(
vec![m::TINY_MAP_NIBBLE + size],
|mut acc, i| {
acc.extend([0x81, 0x30 + i].iter());
acc.push(0x01);
acc
}
));
let expected = Value::Map((0..size).fold(
BTreeMap::<String, Value>::new(),
|mut acc, i| { acc.insert(format!("{}", i), Value::Integer(1)); acc }
));
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_map_of_string() {
// Tiny map of three entries; keys are the tiny strings "A", "B", "C"
// (0x81 == tiny string of length 1) and each value is a STRING_8 of the
// 26-byte lowercase alphabet.
let size = 3;
let mut input = Cursor::new(
vec![m::TINY_MAP_NIBBLE + size,
0x81, 0x41,
m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
0x77, 0x78, 0x79, 0x7A,
0x81, 0x42,
m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
0x77, 0x78, 0x79, 0x7A,
0x81, 0x43,
m::STRING_8, 0x1A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
0x77, 0x78, 0x79, 0x7A]
);
let expected = {
let mut expected: BTreeMap<String, Value> = BTreeMap::new();
expected.insert("A".to_owned(), Value::String("abcdefghijklmnopqrstuvwxyz".to_owned()));
expected.insert("B".to_owned(), Value::String("abcdefghijklmnopqrstuvwxyz".to_owned()));
expected.insert("C".to_owned(), Value::String("abcdefghijklmnopqrstuvwxyz".to_owned()));
Value::Map(expected)
};
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_map_of_int() {
// Tiny map of three entries mapping "A"/"B"/"C" to INT_16 0x7D00 == 32_000.
let size = 3;
let mut input = Cursor::new(
vec![m::TINY_MAP_NIBBLE + size,
0x81, 0x41, m::INT_16, 0x7D, 0x00,
0x81, 0x42, m::INT_16, 0x7D, 0x00,
0x81, 0x43, m::INT_16, 0x7D, 0x00]
);
let expected = {
let mut expected: BTreeMap<String, Value> = BTreeMap::new();
expected.insert("A".to_owned(), Value::Integer(32_000));
expected.insert("B".to_owned(), Value::Integer(32_000));
expected.insert("C".to_owned(), Value::Integer(32_000));
Value::Map(expected)
};
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_map_of_float() {
// Tiny map of three entries mapping "A"/"B"/"C" to the FLOAT 1.1.
let size = 3;
let mut input = Cursor::new(
vec![m::TINY_MAP_NIBBLE + size,
0x81, 0x41, m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
0x81, 0x42, m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
0x81, 0x43, m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A]
);
let expected = {
let mut expected: BTreeMap<String, Value> = BTreeMap::new();
expected.insert("A".to_owned(), Value::Float(1.1));
expected.insert("B".to_owned(), Value::Float(1.1));
expected.insert("C".to_owned(), Value::Float(1.1));
Value::Map(expected)
};
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_map_of_bool() {
// Tiny map of four entries mapping "A".."D" to alternating booleans.
let size = 4;
let mut input = Cursor::new(
vec![m::TINY_MAP_NIBBLE + size,
0x81, 0x41, m::TRUE,
0x81, 0x42, m::FALSE,
0x81, 0x43, m::TRUE,
0x81, 0x44, m::FALSE]
);
let expected = {
let mut expected: BTreeMap<String, Value> = BTreeMap::new();
expected.insert("A".to_owned(), Value::Boolean(true));
expected.insert("B".to_owned(), Value::Boolean(false));
expected.insert("C".to_owned(), Value::Boolean(true));
expected.insert("D".to_owned(), Value::Boolean(false));
Value::Map(expected)
};
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_tiny_structure() {
// Tiny structure with the field count in the marker's low nibble, followed
// by the one-byte signature (0x22) and then the fields: a bare tiny int,
// a FLOAT, and the tiny string "A".
let mut input = Cursor::new(vec![m::TINY_STRUCT_NIBBLE + 0x03, 0x22,
0x01,
m::FLOAT, 0x3F, 0xF1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9A,
0x81, 0x41
]);
let expected = Value::Structure(0x22, vec![Value::Integer(1),
Value::Float(1.1),
Value::String("A".to_owned())]);
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_structure8() {
// STRUCT_8 carries a 1-byte field count (0x10 == 16) and then the
// signature byte 0x22; each field is the bare tiny int 0x01.
let size = 16;
let mut input = Cursor::new((0..size).fold(
vec![m::STRUCT_8, 0x10, 0x22],
|mut acc, _| { acc.push(0x01); acc }
));
let expected = Value::Structure(0x22, vec![Value::Integer(1); size]);
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
#[test]
fn decode_structure16() {
// STRUCT_16 carries a 2-byte big-endian field count (0x0100 == 256) and
// then the signature byte 0x22.
let size = 256;
let mut input = Cursor::new((0..size).fold(
vec![m::STRUCT_16, 0x01, 0x00, 0x22],
|mut acc, _| { acc.push(0x01); acc }
));
let expected = Value::Structure(0x22, vec![Value::Integer(1); size]);
let result = from_reader(&mut input).unwrap();
assert_eq!(expected, result);
}
}
|
use crate::text::{Attributes, Text};
use crate::widgets::{Widget, WidgetStream};
use alsa::mixer::{SelemChannelId, SelemId};
use alsa::{self, Mixer, PollDescriptors};
use anyhow::{anyhow, Context, Result};
use std::os::unix::io::AsRawFd;
use std::os::unix::io::RawFd;
use std::pin::Pin;
use std::task::Poll;
use tokio::io::unix::AsyncFd;
use tokio_stream::{Stream, StreamExt};
/// Shows the current volume of the default ALSA output.
///
/// This widget shows the current volume of the default ALSA output, or '`M`' if
/// the output is muted.
///
/// The widget uses `alsa-lib` to receive events when the volume changes,
/// avoiding expensive polling. If you do not have `alsa-lib` installed, you
/// can disable the `volume-widget` feature on the `cnx` crate to avoid
/// compiling this widget.
pub struct Volume {
// Pango text attributes (font/colors/padding) used to render the widget.
attr: Attributes,
}
impl Volume {
/// Creates a new Volume widget.
///
/// Creates a new `Volume` widget, whose text will be displayed
/// with the given [`Attributes`].
///
/// The [`Cnx`] instance is borrowed during construction in order to get
/// access to handles of its event loop. However, it is not borrowed for the
/// lifetime of the widget. See the [`cnx_add_widget!()`] for more discussion
/// about the lifetime of the borrow.
///
/// [`Attributes`]: ../text/struct.Attributes.html
/// [`Cnx`]: ../struct.Cnx.html
/// [`cnx_add_widget!()`]: ../macro.cnx_add_widget.html
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate cnx;
/// #
/// # use cnx::*;
/// # use cnx::text::*;
/// # use cnx::widgets::*;
/// # use anyhow::Result;
/// #
/// # fn run() -> Result<()> {
/// let attr = Attributes {
/// font: Font::new("SourceCodePro 21"),
/// fg_color: Color::white(),
/// bg_color: None,
/// padding: Padding::new(8.0, 8.0, 0.0, 0.0),
/// };
///
/// let mut cnx = Cnx::new(Position::Top);
/// cnx.add_widget(volume::Volume::new(attr.clone()));
/// # Ok(())
/// # }
/// # fn main() { run().unwrap(); }
/// ```
pub fn new(attr: Attributes) -> Volume {
Volume { attr }
}
}
// https://github.com/mjkillough/cnx/blob/92c24238be541c75d88181208862505739be33fd/src/widgets/volume.rs
impl Widget for Volume {
// Builds the widget's text stream: one Text item per ALSA event, showing
// either the volume percentage or a mute indicator.
fn into_stream(self: Box<Self>) -> Result<WidgetStream> {
let mixer_name = "default";
// We don't attempt to use the same mixer to listen for events and to
// recompute the mixer state (in the callback below) as the Mixer seems
// to cache the state from when it was created. It's relatively cheap
// create a new mixer each time we get an event though.
let mixer = Mixer::new(mixer_name, true)
.with_context(|| format!("Failed to open ALSA mixer: {}", mixer_name))?;
let stream = AlsaEventStream::new(mixer)?.map(move |()| {
// FrontLeft has special meaning in ALSA and is the channel
// that's used when the mixer is mono.
let channel = SelemChannelId::FrontLeft;
// Fresh mixer per event — see the comment above about state caching.
let mixer = Mixer::new(mixer_name, true)?;
let master = mixer.find_selem(&SelemId::new("Master", 0))
.ok_or_else(|| anyhow!("Couldn't open Master channel"))?;
// A playback switch value of 0 means the output is muted.
let mute = master.get_playback_switch(channel)? == 0;
let text = if !mute {
let volume = master.get_playback_volume(channel)?;
let (min, max) = master.get_playback_volume_range();
// NOTE(review): divides by the (min, max) span without subtracting
// `min` from `volume` — correct only when min == 0; confirm.
let percentage = (volume as f64 / (max as f64 - min as f64)) * 100.0;
format!("<span foreground=\"#808080\">[</span>🔈 {:.0}%<span foreground=\"#808080\">]</span>", percentage)
} else {
"🔇".to_owned()
};
Ok(vec![Text {
attr: self.attr.clone(),
text,
stretch: false,
markup: true, // the <span> tags above are Pango markup
}])
});
Ok(Box::pin(stream))
}
}
// Newtype so the ALSA mixer's poll descriptor can be registered with tokio.
struct AlsaEvented(Mixer);
impl AlsaEvented {
fn mixer(&self) -> &Mixer {
&self.0
}
// Raw poll fds that alsa-lib asks us to watch (via PollDescriptors::get).
fn fds(&self) -> Vec<RawFd> {
self.0.get().map_or(vec![], |vec_poll| {
vec_poll.iter().map(|pollfd| pollfd.fd).collect()
})
}
}
// Async stream that wakes whenever the mixer's fd signals activity.
struct AlsaEventStream {
poll: AsyncFd<AlsaEvented>,
// True until the first poll; see AlsaEventStream::new.
initial: bool,
}
impl AsRawFd for AlsaEvented {
fn as_raw_fd(&self) -> RawFd {
// NOTE(review): only the first fd is registered with tokio; this assumes
// the mixer exposes exactly one poll descriptor — confirm for setups
// where alsa-lib reports several.
self.fds()
.into_iter()
.next()
.expect("volume: as_raw_fd empty")
}
}
impl AlsaEventStream {
fn new(mixer: Mixer) -> Result<AlsaEventStream> {
Ok(AlsaEventStream {
poll: AsyncFd::new(AlsaEvented(mixer))?,
// The first few calls to poll() need to process any existing events.
// We don't know what state the fds are in when we give them to tokio
// and it's edge-triggered.
initial: true,
})
}
}
impl Stream for AlsaEventStream {
    // We don't bother yielding the events and just yield unit each time we get
    // an event. This stream is used only to get woken up when the ALSA state
    // changes - the caller is expected to requery all necessary state when
    // it receives a new item from the stream.
    type Item = ();
    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context,
    ) -> Poll<Option<Self::Item>> {
        // Always assume we're ready initially, so that we can clear the
        // state of the fds.
        // Do a poll with a timeout of 0 to figure out exactly which fds were
        // woken up, followed by a call to revents() which clears the pending
        // events. We don't actually care what the events are - we're just
        // using it as a wake-up so we can check the volume again.
        if self.initial {
            let mixer = self.poll.get_ref().mixer();
            let _poll_result = alsa::poll::poll_all(&[mixer], 0);
            self.initial = false;
            return Poll::Ready(Some(()));
        }
        // All events have been consumed - tell Tokio we're interested in waiting
        // for more again.
        match self.poll.poll_read_ready(cx) {
            Poll::Ready(Ok(mut r)) => {
                let mixer = self.poll.get_ref().mixer();
                // Zero-timeout poll plus handle_events() acknowledges the
                // pending events so the fd stops signalling readiness.
                let _poll_result = alsa::poll::poll_all(&[mixer], 0);
                let _result = mixer.handle_events();
                r.clear_ready();
                Poll::Ready(Some(()))
            }
            // An fd error is unrecoverable here: terminate the stream by
            // yielding None instead of busy-yielding Some(()) forever on a
            // broken descriptor (which re-polls an errored fd in a tight loop).
            Poll::Ready(Err(_)) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}
Send `None` on error, so the stream terminates instead of repeatedly yielding on a failed poll descriptor
use crate::text::{Attributes, Text};
use crate::widgets::{Widget, WidgetStream};
use alsa::mixer::{SelemChannelId, SelemId};
use alsa::{self, Mixer, PollDescriptors};
use anyhow::{anyhow, Context, Result};
use std::os::unix::io::AsRawFd;
use std::os::unix::io::RawFd;
use std::pin::Pin;
use std::task::Poll;
use tokio::io::unix::AsyncFd;
use tokio_stream::{Stream, StreamExt};
/// Shows the current volume of the default ALSA output.
///
/// This widget shows the current volume of the default ALSA output, or '`M`' if
/// the output is muted.
///
/// The widget uses `alsa-lib` to receive events when the volume changes,
/// avoiding expensive polling. If you do not have `alsa-lib` installed, you
/// can disable the `volume-widget` feature on the `cnx` crate to avoid
/// compiling this widget.
pub struct Volume {
// Pango text attributes (font/colors/padding) used to render the widget.
attr: Attributes,
}
impl Volume {
/// Creates a new Volume widget.
///
/// Creates a new `Volume` widget, whose text will be displayed
/// with the given [`Attributes`].
///
/// The [`Cnx`] instance is borrowed during construction in order to get
/// access to handles of its event loop. However, it is not borrowed for the
/// lifetime of the widget. See the [`cnx_add_widget!()`] for more discussion
/// about the lifetime of the borrow.
///
/// [`Attributes`]: ../text/struct.Attributes.html
/// [`Cnx`]: ../struct.Cnx.html
/// [`cnx_add_widget!()`]: ../macro.cnx_add_widget.html
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate cnx;
/// #
/// # use cnx::*;
/// # use cnx::text::*;
/// # use cnx::widgets::*;
/// # use anyhow::Result;
/// #
/// # fn run() -> Result<()> {
/// let attr = Attributes {
/// font: Font::new("SourceCodePro 21"),
/// fg_color: Color::white(),
/// bg_color: None,
/// padding: Padding::new(8.0, 8.0, 0.0, 0.0),
/// };
///
/// let mut cnx = Cnx::new(Position::Top);
/// cnx.add_widget(volume::Volume::new(attr.clone()));
/// # Ok(())
/// # }
/// # fn main() { run().unwrap(); }
/// ```
pub fn new(attr: Attributes) -> Volume {
Volume { attr }
}
}
// https://github.com/mjkillough/cnx/blob/92c24238be541c75d88181208862505739be33fd/src/widgets/volume.rs
impl Widget for Volume {
// Builds the widget's text stream: one Text item per ALSA event, showing
// either the volume percentage or a mute indicator.
fn into_stream(self: Box<Self>) -> Result<WidgetStream> {
let mixer_name = "default";
// We don't attempt to use the same mixer to listen for events and to
// recompute the mixer state (in the callback below) as the Mixer seems
// to cache the state from when it was created. It's relatively cheap
// create a new mixer each time we get an event though.
let mixer = Mixer::new(mixer_name, true)
.with_context(|| format!("Failed to open ALSA mixer: {}", mixer_name))?;
let stream = AlsaEventStream::new(mixer)?.map(move |()| {
// FrontLeft has special meaning in ALSA and is the channel
// that's used when the mixer is mono.
let channel = SelemChannelId::FrontLeft;
// Fresh mixer per event — see the comment above about state caching.
let mixer = Mixer::new(mixer_name, true)?;
let master = mixer.find_selem(&SelemId::new("Master", 0))
.ok_or_else(|| anyhow!("Couldn't open Master channel"))?;
// A playback switch value of 0 means the output is muted.
let mute = master.get_playback_switch(channel)? == 0;
let text = if !mute {
let volume = master.get_playback_volume(channel)?;
let (min, max) = master.get_playback_volume_range();
// NOTE(review): divides by the (min, max) span without subtracting
// `min` from `volume` — correct only when min == 0; confirm.
let percentage = (volume as f64 / (max as f64 - min as f64)) * 100.0;
format!("<span foreground=\"#808080\">[</span>🔈 {:.0}%<span foreground=\"#808080\">]</span>", percentage)
} else {
"🔇".to_owned()
};
Ok(vec![Text {
attr: self.attr.clone(),
text,
stretch: false,
markup: true, // the <span> tags above are Pango markup
}])
});
Ok(Box::pin(stream))
}
}
// Newtype so the ALSA mixer's poll descriptor can be registered with tokio.
struct AlsaEvented(Mixer);
impl AlsaEvented {
fn mixer(&self) -> &Mixer {
&self.0
}
// Raw poll fds that alsa-lib asks us to watch (via PollDescriptors::get).
fn fds(&self) -> Vec<RawFd> {
self.0.get().map_or(vec![], |vec_poll| {
vec_poll.iter().map(|pollfd| pollfd.fd).collect()
})
}
}
// Async stream that wakes whenever the mixer's fd signals activity.
struct AlsaEventStream {
poll: AsyncFd<AlsaEvented>,
// True until the first poll; see AlsaEventStream::new.
initial: bool,
}
impl AsRawFd for AlsaEvented {
fn as_raw_fd(&self) -> RawFd {
// NOTE(review): only the first fd is registered with tokio; this assumes
// the mixer exposes exactly one poll descriptor — confirm for setups
// where alsa-lib reports several.
self.fds()
.into_iter()
.next()
.expect("volume: as_raw_fd empty")
}
}
impl AlsaEventStream {
fn new(mixer: Mixer) -> Result<AlsaEventStream> {
Ok(AlsaEventStream {
poll: AsyncFd::new(AlsaEvented(mixer))?,
// The first few calls to poll() need to process any existing events.
// We don't know what state the fds are in when we give them to tokio
// and it's edge-triggered.
initial: true,
})
}
}
impl Stream for AlsaEventStream {
// We don't bother yielding the events and just yield unit each time we get
// an event. This stream is used only to get woken up when the ALSA state
// changes - the caller is expected to requery all necessary state when
// it receives a new item from the stream.
type Item = ();
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context,
) -> Poll<Option<Self::Item>> {
// Always assume we're ready initially, so that we can clear the
// state of the fds.
// Do a poll with a timeout of 0 to figure out exactly which fds were
// woken up, followed by a call to revents() which clears the pending
// events. We don't actually care what the events are - we're just
// using it as a wake-up so we can check the volume again.
if self.initial {
let mixer = self.poll.get_ref().mixer();
let _poll_result = alsa::poll::poll_all(&[mixer], 0);
self.initial = false;
return Poll::Ready(Some(()));
}
// All events have been consumed - tell Tokio we're interested in waiting
// for more again.
match self.poll.poll_read_ready(cx) {
Poll::Ready(Ok(mut r)) => {
let mixer = self.poll.get_ref().mixer();
// Zero-timeout poll plus handle_events() acknowledges the pending
// events so the fd stops signalling readiness.
let _poll_result = alsa::poll::poll_all(&[mixer], 0);
let _result = mixer.handle_events();
r.clear_ready();
Poll::Ready(Some(()))
}
// An fd error is unrecoverable: yield None to terminate the stream.
Poll::Ready(Err(_)) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
}
}
}
|
// Copyright (c) 2016-2019 Fabian Schuiki
mod common;
use common::*;
#[test]
fn empty_module() {
    // Don't unwrap: `init()` returns Err if a global logger is already
    // installed (e.g. by another test in the same process), and that failure
    // is harmless here — logging is only a convenience for this test.
    let _ = simple_logger::init();
    let sess = Session::new();
    let ast = parse("module foo; endmodule");
    let gcx = GlobalContext::new(&sess);
    let cx = Context::new(&gcx);
    cx.add_root_nodes(&ast).unwrap();
    // Look up the compiled module by name and lower it to LLHD assembly.
    let m = cx.find_global_item("foo".into()).unwrap();
    let code = cx.generate_code(m.into()).unwrap();
    let asm = module_to_string(&code);
    eprintln!("{}", asm.trim());
    // An empty module lowers to an entity with no ports and an empty body.
    assert_eq!(asm.trim(), "entity @foo () () {\n}");
}
svlog: refactor codegen tests to share a `compile_module` helper
// Copyright (c) 2016-2019 Fabian Schuiki
mod common;
use common::*;
/// Compile the module named `name` in the piece of Verilog source `code` and
/// return its LLHD assembly as a string.
///
/// Panics (via `unwrap`) if parsing, elaboration, or code generation fails —
/// acceptable in a test helper.
fn compile_module(name: &str, code: &str) -> String {
    // Explicitly discard the Result: `init()` fails if a logger was already
    // installed by an earlier test, which is harmless. The previous
    // `simple_logger::init().is_ok();` silently discarded the bool as a bare
    // statement, which reads like a forgotten assertion.
    let _ = simple_logger::init();
    let sess = Session::new();
    let ast = parse(code);
    let gcx = GlobalContext::new(&sess);
    let cx = Context::new(&gcx);
    cx.add_root_nodes(&ast).unwrap();
    let item = cx.find_global_item(name.into()).unwrap();
    // Use a distinct name instead of shadowing the `code` parameter.
    let module = cx.generate_code(item.into()).unwrap();
    module_to_string(&module)
}
#[test]
fn empty_module() {
    // An empty module must lower to an entity with no ports and an empty body.
    let llhd = compile_module("foo", "module foo; endmodule");
    assert_eq!(llhd.trim(), "entity @foo () () {\n}");
}
// #[test]
// fn module_with_trivial_ports() {
// assert_eq!(
// compile_module("foo", "module foo (input bit a, output bit z); endmodule").trim(),
// "entity @foo ($i1 %a) ($i1 %b) {\n}"
// );
// }
|
import std::map::{hashmap, str_hash};
import libc::{c_uint, c_int};
import lib::llvm::llvm;
import syntax::codemap;
import codemap::span;
import lib::llvm::{ValueRef, TypeRef, BasicBlockRef, BuilderRef, ModuleRef};
import lib::llvm::{Opcode, IntPredicate, RealPredicate, True, False,
CallConv, TypeKind, AtomicBinOp, AtomicOrdering};
import common::*;
import driver::session::session;
// Returns the crate-shared LLVM IR builder, positioned at the end of `cx`'s
// basic block. Every Build* wrapper below obtains its builder through this.
fn B(cx: block) -> BuilderRef {
let b = cx.fcx.ccx.builder.B;
llvm::LLVMPositionBuilderAtEnd(b, cx.llbb);
ret b;
}
// Records one emitted LLVM instruction of the given `category` in the
// per-crate stats table, keyed by a cycle-compressed version of the current
// codegen context path. No-op unless -Z count-llvm-insns is enabled.
fn count_insn(cx: block, category: ~str) {
if cx.ccx().sess.count_llvm_insns() {
// h: category-path -> count; v: current context path as a string stack.
let h = cx.ccx().stats.llvm_insns;
let v = cx.ccx().stats.llvm_insn_ctxt;
// Build version of path with cycles removed.
// Pass 1: scan table mapping str -> rightmost pos.
let mm = str_hash();
let len = vec::len(*v);
let mut i = 0u;
while i < len {
mm.insert(copy v[i], i);
i += 1u;
}
// Pass 2: concat strings for each elt, skipping
// forwards over any cycles by advancing to rightmost
// occurrence of each element in path.
let mut s = ~".";
i = 0u;
while i < len {
let e = v[i];
i = mm.get(e);
s += ~"/";
s += e;
i += 1u;
}
s += ~"/";
s += category;
// Increment the counter for this path/category key (default 0).
let n = alt h.find(s) { some(n) { n } _ { 0u } };
h.insert(s, n+1u);
}
}
// The difference between a block being unreachable and being terminated is
// somewhat obscure, and has to do with error checking. When a block is
// terminated, we're saying that trying to add any further statements in the
// block is an error. On the other hand, if something is unreachable, that
// means that the block was terminated in some way that we don't want to check
// for (fail/break/ret statements, call to diverging functions, etc), and
// further instructions to the block should simply be ignored.
// Emits `ret void` and marks the block terminated; no-op if unreachable.
fn RetVoid(cx: block) {
if cx.unreachable { ret; }
assert (!cx.terminated);
cx.terminated = true;
count_insn(cx, ~"retvoid");
llvm::LLVMBuildRetVoid(B(cx));
}
// Emits `ret V` and marks the block terminated; no-op if unreachable.
fn Ret(cx: block, V: ValueRef) {
if cx.unreachable { ret; }
assert (!cx.terminated);
cx.terminated = true;
count_insn(cx, ~"ret");
llvm::LLVMBuildRet(B(cx), V);
}
// Emits an aggregate-return of all values in RetVals.
// NOTE(review): unlike its siblings this does not call count_insn, so it is
// invisible to -Z count-llvm-insns — confirm whether that is intentional.
fn AggregateRet(cx: block, RetVals: ~[ValueRef]) {
if cx.unreachable { ret; }
assert (!cx.terminated);
cx.terminated = true;
unsafe {
llvm::LLVMBuildAggregateRet(B(cx), vec::unsafe::to_ptr(RetVals),
RetVals.len() as c_uint);
}
}
// Emits an unconditional branch to Dest and marks the block terminated.
fn Br(cx: block, Dest: BasicBlockRef) {
if cx.unreachable { ret; }
assert (!cx.terminated);
cx.terminated = true;
count_insn(cx, ~"br");
llvm::LLVMBuildBr(B(cx), Dest);
}
// Emits a conditional branch on `If` to Then/Else and terminates the block.
fn CondBr(cx: block, If: ValueRef, Then: BasicBlockRef,
Else: BasicBlockRef) {
if cx.unreachable { ret; }
assert (!cx.terminated);
cx.terminated = true;
count_insn(cx, ~"condbr");
llvm::LLVMBuildCondBr(B(cx), If, Then, Else);
}
// Emits a switch on V with the given default and expected case count;
// returns the switch instruction so cases can be added via AddCase. If the
// block is unreachable an undef placeholder is returned instead.
fn Switch(cx: block, V: ValueRef, Else: BasicBlockRef, NumCases: uint)
-> ValueRef {
if cx.unreachable { ret _Undef(V); }
assert !cx.terminated;
cx.terminated = true;
ret llvm::LLVMBuildSwitch(B(cx), V, Else, NumCases as c_uint);
}
// Adds a case to a switch built by Switch(); silently ignores the undef
// placeholder produced for unreachable blocks.
fn AddCase(S: ValueRef, OnVal: ValueRef, Dest: BasicBlockRef) {
if llvm::LLVMIsUndef(S) == lib::llvm::True { ret; }
llvm::LLVMAddCase(S, OnVal, Dest);
}
// Emits an indirect branch through Addr with NumDests possible targets.
fn IndirectBr(cx: block, Addr: ValueRef, NumDests: uint) {
if cx.unreachable { ret; }
assert (!cx.terminated);
cx.terminated = true;
count_insn(cx, ~"indirectbr");
llvm::LLVMBuildIndirectBr(B(cx), Addr, NumDests as c_uint);
}
// This is a really awful way to get a zero-length c-string, but better (and a
// lot more efficient) than doing str::as_c_str("", ...) every time.
// Returns a pointer to a zero-length C string by pointing at a constant uint
// whose first byte is 0 — relies on cnull living for the program's lifetime.
fn noname() -> *libc::c_char unsafe {
const cnull: uint = 0u;
ret unsafe::reinterpret_cast(ptr::addr_of(cnull));
}
// Emits an `invoke` of Fn with Args, continuing at Then on normal return and
// unwinding to Catch; terminates the block.
fn Invoke(cx: block, Fn: ValueRef, Args: ~[ValueRef],
Then: BasicBlockRef, Catch: BasicBlockRef) {
if cx.unreachable { ret; }
assert (!cx.terminated);
cx.terminated = true;
#debug["Invoke(%s with arguments (%s))",
val_str(cx.ccx().tn, Fn),
str::connect(vec::map(Args, |a| val_str(cx.ccx().tn, a)),
~", ")];
unsafe {
count_insn(cx, ~"invoke");
llvm::LLVMBuildInvoke(B(cx), Fn, vec::unsafe::to_ptr(Args),
Args.len() as c_uint, Then, Catch,
noname());
}
}
// Same as Invoke, but marks the call with the LLVM fast calling convention.
fn FastInvoke(cx: block, Fn: ValueRef, Args: ~[ValueRef],
Then: BasicBlockRef, Catch: BasicBlockRef) {
if cx.unreachable { ret; }
assert (!cx.terminated);
cx.terminated = true;
unsafe {
count_insn(cx, ~"fastinvoke");
let v = llvm::LLVMBuildInvoke(B(cx), Fn, vec::unsafe::to_ptr(Args),
Args.len() as c_uint,
Then, Catch, noname());
lib::llvm::SetInstructionCallConv(v, lib::llvm::FastCallConv);
}
}
// Marks the block unreachable; emits an `unreachable` instruction only if
// the block has not already been terminated (see the comment above RetVoid).
fn Unreachable(cx: block) {
if cx.unreachable { ret; }
cx.unreachable = true;
if !cx.terminated {
count_insn(cx, ~"unreachable");
llvm::LLVMBuildUnreachable(B(cx));
}
}
// Returns an undef constant of val's type; used as a placeholder result when
// emitting into an unreachable block.
fn _Undef(val: ValueRef) -> ValueRef {
ret llvm::LLVMGetUndef(val_ty(val));
}
/* Arithmetic */
// Each arithmetic wrapper below returns undef (of the LHS type) when the
// block is unreachable, otherwise counts the instruction and emits it.
fn Add(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"add");
ret llvm::LLVMBuildAdd(B(cx), LHS, RHS, noname());
}
// Add with no-signed-wrap semantics (signed overflow is undefined).
fn NSWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"nswadd");
ret llvm::LLVMBuildNSWAdd(B(cx), LHS, RHS, noname());
}
// Add with no-unsigned-wrap semantics.
fn NUWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"nuwadd");
ret llvm::LLVMBuildNUWAdd(B(cx), LHS, RHS, noname());
}
// Floating-point addition.
fn FAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"fadd");
ret llvm::LLVMBuildFAdd(B(cx), LHS, RHS, noname());
}
// Integer subtraction.
fn Sub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"sub");
ret llvm::LLVMBuildSub(B(cx), LHS, RHS, noname());
}
// Subtraction with no-signed-wrap semantics.
fn NSWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    // Stats category fixed: was the typo ~"nwsub", which split this
    // instruction's counts away from the nsw* naming scheme used elsewhere.
    count_insn(cx, ~"nswsub");
    ret llvm::LLVMBuildNSWSub(B(cx), LHS, RHS, noname());
}
// Subtraction with no-unsigned-wrap semantics.
fn NUWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"nuwsub");
ret llvm::LLVMBuildNUWSub(B(cx), LHS, RHS, noname());
}
// Floating-point subtraction.
fn FSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    // Stats category fixed: was ~"sub", which merged fsub counts into the
    // integer Sub counter (compare FAdd/FMul/FDiv, which use their own keys).
    count_insn(cx, ~"fsub");
    ret llvm::LLVMBuildFSub(B(cx), LHS, RHS, noname());
}
// Integer multiplication.
fn Mul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"mul");
ret llvm::LLVMBuildMul(B(cx), LHS, RHS, noname());
}
// Multiplication with no-signed-wrap semantics.
fn NSWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"nswmul");
ret llvm::LLVMBuildNSWMul(B(cx), LHS, RHS, noname());
}
// Multiplication with no-unsigned-wrap semantics.
fn NUWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"nuwmul");
ret llvm::LLVMBuildNUWMul(B(cx), LHS, RHS, noname());
}
// Floating-point multiplication.
fn FMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"fmul");
ret llvm::LLVMBuildFMul(B(cx), LHS, RHS, noname());
}
// Unsigned integer division.
fn UDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"udiv");
ret llvm::LLVMBuildUDiv(B(cx), LHS, RHS, noname());
}
// Signed integer division.
fn SDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
if cx.unreachable { ret _Undef(LHS); }
count_insn(cx, ~"sdiv");
ret llvm::LLVMBuildSDiv(B(cx), LHS, RHS, noname());
}
// Signed division known to be exact (no remainder).
fn ExactSDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    // Stats category fixed: was the typo ~"extractsdiv".
    count_insn(cx, ~"exactsdiv");
    ret llvm::LLVMBuildExactSDiv(B(cx), LHS, RHS, noname());
}
// Remaining binary arithmetic/bitwise wrappers: float divide, remainders,
// shifts, and bitwise logic. Same unreachable-block convention as above.
fn FDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"fdiv");
    ret llvm::LLVMBuildFDiv(B(cx), LHS, RHS, noname());
}
fn URem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"urem");
    ret llvm::LLVMBuildURem(B(cx), LHS, RHS, noname());
}
fn SRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"srem");
    ret llvm::LLVMBuildSRem(B(cx), LHS, RHS, noname());
}
fn FRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"frem");
    ret llvm::LLVMBuildFRem(B(cx), LHS, RHS, noname());
}
fn Shl(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"shl");
    ret llvm::LLVMBuildShl(B(cx), LHS, RHS, noname());
}
fn LShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"lshr");
    ret llvm::LLVMBuildLShr(B(cx), LHS, RHS, noname());
}
fn AShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"ashr");
    ret llvm::LLVMBuildAShr(B(cx), LHS, RHS, noname());
}
fn And(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"and");
    ret llvm::LLVMBuildAnd(B(cx), LHS, RHS, noname());
}
fn Or(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"or");
    ret llvm::LLVMBuildOr(B(cx), LHS, RHS, noname());
}
fn Xor(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"xor");
    ret llvm::LLVMBuildXor(B(cx), LHS, RHS, noname());
}
// Generic binary op (caller supplies the LLVM opcode) and the unary
// negation/complement wrappers.
fn BinOp(cx: block, Op: Opcode, LHS: ValueRef, RHS: ValueRef) ->
    ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"binop");
    ret llvm::LLVMBuildBinOp(B(cx), Op, LHS, RHS, noname());
}
fn Neg(cx: block, V: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    count_insn(cx, ~"neg");
    ret llvm::LLVMBuildNeg(B(cx), V, noname());
}
fn NSWNeg(cx: block, V: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    count_insn(cx, ~"nswneg");
    ret llvm::LLVMBuildNSWNeg(B(cx), V, noname());
}
fn NUWNeg(cx: block, V: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    count_insn(cx, ~"nuwneg");
    ret llvm::LLVMBuildNUWNeg(B(cx), V, noname());
}
fn FNeg(cx: block, V: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    count_insn(cx, ~"fneg");
    ret llvm::LLVMBuildFNeg(B(cx), V, noname());
}
fn Not(cx: block, V: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    count_insn(cx, ~"not");
    ret llvm::LLVMBuildNot(B(cx), V, noname());
}
/* Memory */
// Memory wrappers. On unreachable blocks these return an undef of an
// appropriately-typed pointer instead of emitting an instruction.
fn Malloc(cx: block, Ty: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
    count_insn(cx, ~"malloc");
    ret llvm::LLVMBuildMalloc(B(cx), Ty, noname());
}
fn ArrayMalloc(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
    count_insn(cx, ~"arraymalloc");
    ret llvm::LLVMBuildArrayMalloc(B(cx), Ty, Val, noname());
}
fn Alloca(cx: block, Ty: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(Ty)); }
    count_insn(cx, ~"alloca");
    ret llvm::LLVMBuildAlloca(B(cx), Ty, noname());
}
fn ArrayAlloca(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(Ty)); }
    count_insn(cx, ~"arrayalloca");
    ret llvm::LLVMBuildArrayAlloca(B(cx), Ty, Val, noname());
}
fn Free(cx: block, PointerVal: ValueRef) {
    if cx.unreachable { ret; }
    count_insn(cx, ~"free");
    llvm::LLVMBuildFree(B(cx), PointerVal);
}
fn Load(cx: block, PointerVal: ValueRef) -> ValueRef {
    let ccx = cx.fcx.ccx;
    if cx.unreachable {
        // Fabricate an undef of a plausible loaded type: the element type
        // for arrays, otherwise the target's int type.
        let ty = val_ty(PointerVal);
        let eltty = if llvm::LLVMGetTypeKind(ty) == lib::llvm::Array {
            llvm::LLVMGetElementType(ty) } else { ccx.int_type };
        ret llvm::LLVMGetUndef(eltty);
    }
    count_insn(cx, ~"load");
    ret llvm::LLVMBuildLoad(B(cx), PointerVal, noname());
}
fn Store(cx: block, Val: ValueRef, Ptr: ValueRef) {
    if cx.unreachable { ret; }
    #debug["Store %s -> %s",
           val_str(cx.ccx().tn, Val),
           val_str(cx.ccx().tn, Ptr)];
    count_insn(cx, ~"store");
    llvm::LLVMBuildStore(B(cx), Val, Ptr);
}
fn GEP(cx: block, Pointer: ValueRef, Indices: ~[ValueRef]) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); }
    unsafe {
        count_insn(cx, ~"gep");
        ret llvm::LLVMBuildGEP(B(cx), Pointer, vec::unsafe::to_ptr(Indices),
                               Indices.len() as c_uint, noname());
    }
}
// Simple wrapper around GEP that takes an array of ints and wraps them
// in C_i32()
fn GEPi(cx: block, base: ValueRef, ixs: ~[uint]) -> ValueRef {
    let mut v: ~[ValueRef] = ~[];
    for vec::each(ixs) |i| { vec::push(v, C_i32(i as i32)); }
    count_insn(cx, ~"gepi");
    // Note: emits an inbounds GEP, not a plain GEP.
    ret InBoundsGEP(cx, base, v);
}
fn InBoundsGEP(cx: block, Pointer: ValueRef, Indices: ~[ValueRef]) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); }
    unsafe {
        count_insn(cx, ~"inboundsgep");
        ret llvm::LLVMBuildInBoundsGEP(B(cx), Pointer,
                                       vec::unsafe::to_ptr(Indices),
                                       Indices.len() as c_uint,
                                       noname());
    }
}
fn StructGEP(cx: block, Pointer: ValueRef, Idx: uint) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); }
    count_insn(cx, ~"structgep");
    ret llvm::LLVMBuildStructGEP(B(cx), Pointer, Idx as c_uint, noname());
}
fn GlobalString(cx: block, _Str: *libc::c_char) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
    count_insn(cx, ~"globalstring");
    ret llvm::LLVMBuildGlobalString(B(cx), _Str, noname());
}
fn GlobalStringPtr(cx: block, _Str: *libc::c_char) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
    count_insn(cx, ~"globalstringptr");
    ret llvm::LLVMBuildGlobalStringPtr(B(cx), _Str, noname());
}
/* Casts */
// Cast wrappers. On unreachable blocks each returns an undef of the
// destination type instead of emitting an instruction.
fn Trunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"trunc");
    ret llvm::LLVMBuildTrunc(B(cx), Val, DestTy, noname());
}
fn ZExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"zext");
    ret llvm::LLVMBuildZExt(B(cx), Val, DestTy, noname());
}
fn SExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"sext");
    ret llvm::LLVMBuildSExt(B(cx), Val, DestTy, noname());
}
fn FPToUI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"fptoui");
    ret llvm::LLVMBuildFPToUI(B(cx), Val, DestTy, noname());
}
fn FPToSI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"fptosi");
    ret llvm::LLVMBuildFPToSI(B(cx), Val, DestTy, noname());
}
fn UIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"uitofp");
    ret llvm::LLVMBuildUIToFP(B(cx), Val, DestTy, noname());
}
fn SIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"sitofp");
    ret llvm::LLVMBuildSIToFP(B(cx), Val, DestTy, noname());
}
fn FPTrunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"fptrunc");
    ret llvm::LLVMBuildFPTrunc(B(cx), Val, DestTy, noname());
}
fn FPExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"fpext");
    ret llvm::LLVMBuildFPExt(B(cx), Val, DestTy, noname());
}
fn PtrToInt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"ptrtoint");
    ret llvm::LLVMBuildPtrToInt(B(cx), Val, DestTy, noname());
}
fn IntToPtr(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"inttoptr");
    ret llvm::LLVMBuildIntToPtr(B(cx), Val, DestTy, noname());
}
fn BitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"bitcast");
    ret llvm::LLVMBuildBitCast(B(cx), Val, DestTy, noname());
}
fn ZExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"zextorbitcast");
    ret llvm::LLVMBuildZExtOrBitCast(B(cx), Val, DestTy, noname());
}
fn SExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"sextorbitcast");
    ret llvm::LLVMBuildSExtOrBitCast(B(cx), Val, DestTy, noname());
}
fn TruncOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"truncorbitcast");
    ret llvm::LLVMBuildTruncOrBitCast(B(cx), Val, DestTy, noname());
}
// Generic cast: caller supplies the opcode. The _Name parameter is unused;
// noname() is passed to LLVM instead.
fn Cast(cx: block, Op: Opcode, Val: ValueRef, DestTy: TypeRef,
        _Name: *u8) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"cast");
    ret llvm::LLVMBuildCast(B(cx), Op, Val, DestTy, noname());
}
fn PointerCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"pointercast");
    ret llvm::LLVMBuildPointerCast(B(cx), Val, DestTy, noname());
}
fn IntCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"intcast");
    ret llvm::LLVMBuildIntCast(B(cx), Val, DestTy, noname());
}
fn FPCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"fpcast");
    ret llvm::LLVMBuildFPCast(B(cx), Val, DestTy, noname());
}
/* Comparisons */
// Comparison wrappers; both produce an i1, so the unreachable-block path
// returns an undef i1.
fn ICmp(cx: block, Op: IntPredicate, LHS: ValueRef, RHS: ValueRef)
    -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
    count_insn(cx, ~"icmp");
    ret llvm::LLVMBuildICmp(B(cx), Op as c_uint, LHS, RHS, noname());
}
fn FCmp(cx: block, Op: RealPredicate, LHS: ValueRef, RHS: ValueRef)
    -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
    count_insn(cx, ~"fcmp");
    ret llvm::LLVMBuildFCmp(B(cx), Op as c_uint, LHS, RHS, noname());
}
/* Miscellaneous instructions */
// Phi construction: EmptyPhi creates a phi with no incoming edges; Phi
// creates one and registers matched (value, block) pairs immediately.
fn EmptyPhi(cx: block, Ty: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(Ty); }
    count_insn(cx, ~"emptyphi");
    ret llvm::LLVMBuildPhi(B(cx), Ty, noname());
}
fn Phi(cx: block, Ty: TypeRef, vals: ~[ValueRef], bbs: ~[BasicBlockRef])
    -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(Ty); }
    assert vals.len() == bbs.len();
    let phi = EmptyPhi(cx, Ty);
    unsafe {
        count_insn(cx, ~"addincoming");
        llvm::LLVMAddIncoming(phi, vec::unsafe::to_ptr(vals),
                              vec::unsafe::to_ptr(bbs),
                              vals.len() as c_uint);
        ret phi;
    }
}
// Add one incoming edge to an existing phi. A phi that is actually an
// undef (produced on an unreachable block) is silently skipped.
fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
    if llvm::LLVMIsUndef(phi) == lib::llvm::True { ret; }
    unsafe {
        let valptr = unsafe::reinterpret_cast(ptr::addr_of(val))；
        let bbptr = unsafe::reinterpret_cast(ptr::addr_of(bb));
        llvm::LLVMAddIncoming(phi, valptr, bbptr, 1 as c_uint);
    }
}
// Produce an undef standing in for the result of a call to Fn, used on
// unreachable blocks where no call is emitted.
fn _UndefReturn(cx: block, Fn: ValueRef) -> ValueRef {
    let ccx = cx.fcx.ccx;
    let ty = val_ty(Fn);
    // NOTE(review): comparing the type kind against lib::llvm::Integer
    // before calling LLVMGetReturnType looks suspicious for a function
    // value — confirm whether Function kind was intended.
    let retty = if llvm::LLVMGetTypeKind(ty) == lib::llvm::Integer {
        llvm::LLVMGetReturnType(ty) } else { ccx.int_type };
    count_insn(cx, ~"");
    ret llvm::LLVMGetUndef(retty);
}
// Attach a human-readable comment (with source span) to the generated
// assembly, when -Z no-asm-comments is not set.
fn add_span_comment(bcx: block, sp: span, text: ~str) {
    let ccx = bcx.ccx();
    if !ccx.sess.no_asm_comments() {
        let s = text + ~" (" + codemap::span_to_str(sp, ccx.sess.codemap)
            + ~")";
        log(debug, s);
        add_comment(bcx, s);
    }
}
// Emit `text` as an inline-asm comment instruction in the current block.
fn add_comment(bcx: block, text: ~str) {
    let ccx = bcx.ccx();
    if !ccx.sess.no_asm_comments() {
        // '$' is meaningful in asm templates, so strip it first.
        let sanitized = str::replace(text, ~"$", ~"");
        let comment_text = ~"# " + sanitized;
        let asm = str::as_c_str(comment_text, |c| {
            str::as_c_str(~"", |e| {
                count_insn(bcx, ~"inlineasm");
                llvm::LLVMConstInlineAsm(T_fn(~[], T_void()), c, e,
                                         False, False)
            })
        });
        Call(bcx, asm, ~[]);
    }
}
// Call wrappers. On unreachable blocks each returns an undef of the
// callee's (guessed) return type via _UndefReturn.
fn Call(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef {
    if cx.unreachable { ret _UndefReturn(cx, Fn); }
    unsafe {
        count_insn(cx, ~"call");
        #debug["Call(Fn=%s, Args=%?)",
               val_str(cx.ccx().tn, Fn),
               Args.map(|arg| val_str(cx.ccx().tn, arg))];
        ret llvm::LLVMBuildCall(B(cx), Fn, vec::unsafe::to_ptr(Args),
                                Args.len() as c_uint, noname());
    }
}
// Like Call, but marks the instruction with the fast calling convention.
fn FastCall(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef {
    if cx.unreachable { ret _UndefReturn(cx, Fn); }
    unsafe {
        count_insn(cx, ~"fastcall");
        let v = llvm::LLVMBuildCall(B(cx), Fn, vec::unsafe::to_ptr(Args),
                                    Args.len() as c_uint, noname());
        lib::llvm::SetInstructionCallConv(v, lib::llvm::FastCallConv);
        ret v;
    }
}
// Like Call, but with a caller-supplied calling convention.
fn CallWithConv(cx: block, Fn: ValueRef, Args: ~[ValueRef],
                Conv: CallConv) -> ValueRef {
    if cx.unreachable { ret _UndefReturn(cx, Fn); }
    unsafe {
        count_insn(cx, ~"callwithconv");
        let v = llvm::LLVMBuildCall(B(cx), Fn, vec::unsafe::to_ptr(Args),
                                    Args.len() as c_uint, noname());
        lib::llvm::SetInstructionCallConv(v, Conv);
        ret v;
    }
}
// Select, vararg access, and vector/aggregate element wrappers.
fn Select(cx: block, If: ValueRef, Then: ValueRef, Else: ValueRef) ->
    ValueRef {
    if cx.unreachable { ret _Undef(Then); }
    count_insn(cx, ~"select");
    ret llvm::LLVMBuildSelect(B(cx), If, Then, Else, noname());
}
fn VAArg(cx: block, list: ValueRef, Ty: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(Ty); }
    count_insn(cx, ~"vaarg");
    ret llvm::LLVMBuildVAArg(B(cx), list, Ty, noname());
}
fn ExtractElement(cx: block, VecVal: ValueRef, Index: ValueRef) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_nil()); }
    count_insn(cx, ~"extractelement");
    ret llvm::LLVMBuildExtractElement(B(cx), VecVal, Index, noname());
}
// Note: the insert/shuffle wrappers below discard the resulting value.
fn InsertElement(cx: block, VecVal: ValueRef, EltVal: ValueRef,
                 Index: ValueRef) {
    if cx.unreachable { ret; }
    count_insn(cx, ~"insertelement");
    llvm::LLVMBuildInsertElement(B(cx), VecVal, EltVal, Index, noname());
}
fn ShuffleVector(cx: block, V1: ValueRef, V2: ValueRef,
                 Mask: ValueRef) {
    if cx.unreachable { ret; }
    count_insn(cx, ~"shufflevector");
    llvm::LLVMBuildShuffleVector(B(cx), V1, V2, Mask, noname());
}
fn ExtractValue(cx: block, AggVal: ValueRef, Index: uint) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_nil()); }
    count_insn(cx, ~"extractvalue");
    ret llvm::LLVMBuildExtractValue(B(cx), AggVal, Index as c_uint, noname());
}
fn InsertValue(cx: block, AggVal: ValueRef, EltVal: ValueRef,
               Index: uint) {
    if cx.unreachable { ret; }
    count_insn(cx, ~"insertvalue");
    llvm::LLVMBuildInsertValue(B(cx), AggVal, EltVal, Index as c_uint,
                               noname());
}
// Null tests, pointer difference, and a call to the llvm.trap intrinsic.
fn IsNull(cx: block, Val: ValueRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
    count_insn(cx, ~"isnull");
    ret llvm::LLVMBuildIsNull(B(cx), Val, noname());
}
fn IsNotNull(cx: block, Val: ValueRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
    count_insn(cx, ~"isnotnull");
    ret llvm::LLVMBuildIsNotNull(B(cx), Val, noname());
}
fn PtrDiff(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    let ccx = cx.fcx.ccx;
    if cx.unreachable { ret llvm::LLVMGetUndef(ccx.int_type); }
    count_insn(cx, ~"ptrdiff");
    ret llvm::LLVMBuildPtrDiff(B(cx), LHS, RHS, noname());
}
// Emit a call to llvm.trap; the intrinsic must already be declared in the
// current module (asserted below).
fn Trap(cx: block) {
    if cx.unreachable { ret; }
    let b = B(cx);
    let BB: BasicBlockRef = llvm::LLVMGetInsertBlock(b);
    let FN: ValueRef = llvm::LLVMGetBasicBlockParent(BB);
    let M: ModuleRef = llvm::LLVMGetGlobalParent(FN);
    let T: ValueRef = str::as_c_str(~"llvm.trap", |buf| {
        llvm::LLVMGetNamedFunction(M, buf)
    });
    assert (T as int != 0);
    let Args: ~[ValueRef] = ~[];
    unsafe {
        count_insn(cx, ~"trap");
        llvm::LLVMBuildCall(b, T, vec::unsafe::to_ptr(Args),
                            Args.len() as c_uint, noname());
    }
}
// Exception-handling and atomic wrappers.
// LandingPad must be emitted into a live block (asserted, not skipped).
fn LandingPad(cx: block, Ty: TypeRef, PersFn: ValueRef,
              NumClauses: uint) -> ValueRef {
    assert !cx.terminated && !cx.unreachable;
    count_insn(cx, ~"landingpad");
    ret llvm::LLVMBuildLandingPad(B(cx), Ty, PersFn,
                                  NumClauses as c_uint, noname());
}
fn SetCleanup(cx: block, LandingPad: ValueRef) {
    count_insn(cx, ~"setcleanup");
    llvm::LLVMSetCleanup(LandingPad, lib::llvm::True);
}
// Resume terminates the block.
fn Resume(cx: block, Exn: ValueRef) -> ValueRef {
    assert (!cx.terminated);
    cx.terminated = true;
    count_insn(cx, ~"resume");
    ret llvm::LLVMBuildResume(B(cx), Exn);
}
// Atomic Operations
// NOTE(review): unlike the other wrappers this neither guards against
// cx.unreachable nor calls count_insn — confirm whether intentional.
fn AtomicRMW(cx: block, op: AtomicBinOp,
             dst: ValueRef, src: ValueRef,
             order: AtomicOrdering) -> ValueRef {
    llvm::LLVMBuildAtomicRMW(B(cx), op, dst, src, order)
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
// Fix formatting of multi-line blocks in asm-comments
import std::map::{hashmap, str_hash};
import libc::{c_uint, c_int};
import lib::llvm::llvm;
import syntax::codemap;
import codemap::span;
import lib::llvm::{ValueRef, TypeRef, BasicBlockRef, BuilderRef, ModuleRef};
import lib::llvm::{Opcode, IntPredicate, RealPredicate, True, False,
CallConv, TypeKind, AtomicBinOp, AtomicOrdering};
import common::*;
import driver::session::session;
// Return the shared builder, positioned at the end of cx's basic block.
// Every wrapper below goes through this, so the builder is always
// re-positioned before use.
fn B(cx: block) -> BuilderRef {
    let b = cx.fcx.ccx.builder.B;
    llvm::LLVMPositionBuilderAtEnd(b, cx.llbb);
    ret b;
}
// Record one emitted instruction of `category` under the current
// (cycle-compressed) codegen context path, when -Z count-llvm-insns is on.
fn count_insn(cx: block, category: ~str) {
    if cx.ccx().sess.count_llvm_insns() {
        let h = cx.ccx().stats.llvm_insns;      // category-path -> count
        let v = cx.ccx().stats.llvm_insn_ctxt;  // current context path
        // Build version of path with cycles removed.
        // Pass 1: scan table mapping str -> rightmost pos.
        let mm = str_hash();
        let len = vec::len(*v);
        let mut i = 0u;
        while i < len {
            mm.insert(copy v[i], i);
            i += 1u;
        }
        // Pass 2: concat strings for each elt, skipping
        // forwards over any cycles by advancing to rightmost
        // occurrence of each element in path.
        let mut s = ~".";
        i = 0u;
        while i < len {
            let e = v[i];
            i = mm.get(e);
            s += ~"/";
            s += e;
            i += 1u;
        }
        s += ~"/";
        s += category;
        // Bump the counter for this path, starting from 0 if unseen.
        let n = alt h.find(s) { some(n) { n } _ { 0u } };
        h.insert(s, n+1u);
    }
}
// The difference between a block being unreachable and being terminated is
// somewhat obscure, and has to do with error checking. When a block is
// terminated, we're saying that trying to add any further statements in the
// block is an error. On the other hand, if something is unreachable, that
// means that the block was terminated in some way that we don't want to check
// for (fail/break/ret statements, call to diverging functions, etc), and
// further instructions to the block should simply be ignored.
// Return terminators. Each marks the block terminated; emitting into an
// already-terminated block is an assertion failure, while an unreachable
// block is silently skipped (see comment above this section).
fn RetVoid(cx: block) {
    if cx.unreachable { ret; }
    assert (!cx.terminated);
    cx.terminated = true;
    count_insn(cx, ~"retvoid");
    llvm::LLVMBuildRetVoid(B(cx));
}
fn Ret(cx: block, V: ValueRef) {
    if cx.unreachable { ret; }
    assert (!cx.terminated);
    cx.terminated = true;
    count_insn(cx, ~"ret");
    llvm::LLVMBuildRet(B(cx), V);
}
fn AggregateRet(cx: block, RetVals: ~[ValueRef]) {
if cx.unreachable { ret; }
assert (!cx.terminated);
cx.terminated = true;
unsafe {
llvm::LLVMBuildAggregateRet(B(cx), vec::unsafe::to_ptr(RetVals),
RetVals.len() as c_uint);
}
}
// Unconditional and conditional branch terminators.
fn Br(cx: block, Dest: BasicBlockRef) {
    if cx.unreachable { ret; }
    assert (!cx.terminated);
    cx.terminated = true;
    count_insn(cx, ~"br");
    llvm::LLVMBuildBr(B(cx), Dest);
}
fn CondBr(cx: block, If: ValueRef, Then: BasicBlockRef,
          Else: BasicBlockRef) {
    if cx.unreachable { ret; }
    assert (!cx.terminated);
    cx.terminated = true;
    count_insn(cx, ~"condbr");
    llvm::LLVMBuildCondBr(B(cx), If, Then, Else);
}
// Switch terminator; cases are attached afterwards via AddCase. Returns
// the switch instruction (or an undef of V's type on an unreachable
// block, which AddCase then skips).
fn Switch(cx: block, V: ValueRef, Else: BasicBlockRef, NumCases: uint)
    -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    assert !cx.terminated;
    cx.terminated = true;
    // Consistency fix: all other terminators record themselves via
    // count_insn; Switch previously went uncounted.
    count_insn(cx, ~"switch");
    ret llvm::LLVMBuildSwitch(B(cx), V, Else, NumCases as c_uint);
}
// Attach a case to a Switch; an undef "switch" (from an unreachable
// block) is silently skipped.
fn AddCase(S: ValueRef, OnVal: ValueRef, Dest: BasicBlockRef) {
    if llvm::LLVMIsUndef(S) == lib::llvm::True { ret; }
    llvm::LLVMAddCase(S, OnVal, Dest);
}
fn IndirectBr(cx: block, Addr: ValueRef, NumDests: uint) {
    if cx.unreachable { ret; }
    assert (!cx.terminated);
    cx.terminated = true;
    count_insn(cx, ~"indirectbr");
    llvm::LLVMBuildIndirectBr(B(cx), Addr, NumDests as c_uint);
}
// This is a really awful way to get a zero-length c-string, but better (and a
// lot more efficient) than doing str::as_c_str("", ...) every time.
fn noname() -> *libc::c_char unsafe {
    // Reinterprets the address of a zero uint as a NUL-terminated string.
    const cnull: uint = 0u;
    ret unsafe::reinterpret_cast(ptr::addr_of(cnull));
}
// Invoke terminators (calls with an exceptional edge) and helpers.
fn Invoke(cx: block, Fn: ValueRef, Args: ~[ValueRef],
          Then: BasicBlockRef, Catch: BasicBlockRef) {
    if cx.unreachable { ret; }
    assert (!cx.terminated);
    cx.terminated = true;
    #debug["Invoke(%s with arguments (%s))",
           val_str(cx.ccx().tn, Fn),
           str::connect(vec::map(Args, |a| val_str(cx.ccx().tn, a)),
                        ~", ")];
    unsafe {
        count_insn(cx, ~"invoke");
        llvm::LLVMBuildInvoke(B(cx), Fn, vec::unsafe::to_ptr(Args),
                              Args.len() as c_uint, Then, Catch,
                              noname());
    }
}
// Invoke with the fast calling convention.
fn FastInvoke(cx: block, Fn: ValueRef, Args: ~[ValueRef],
              Then: BasicBlockRef, Catch: BasicBlockRef) {
    if cx.unreachable { ret; }
    assert (!cx.terminated);
    cx.terminated = true;
    unsafe {
        count_insn(cx, ~"fastinvoke");
        let v = llvm::LLVMBuildInvoke(B(cx), Fn, vec::unsafe::to_ptr(Args),
                                      Args.len() as c_uint,
                                      Then, Catch, noname());
        lib::llvm::SetInstructionCallConv(v, lib::llvm::FastCallConv);
    }
}
// Mark the block unreachable; only emits the instruction if the block has
// not already been terminated.
fn Unreachable(cx: block) {
    if cx.unreachable { ret; }
    cx.unreachable = true;
    if !cx.terminated {
        count_insn(cx, ~"unreachable");
        llvm::LLVMBuildUnreachable(B(cx));
    }
}
// Undef of the same type as `val`; the stand-in result for skipped
// instructions on unreachable blocks.
fn _Undef(val: ValueRef) -> ValueRef {
    ret llvm::LLVMGetUndef(val_ty(val));
}
/* Arithmetic */
// Addition/subtraction wrappers; unreachable blocks yield an undef of the
// LHS operand's type.
fn Add(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"add");
    ret llvm::LLVMBuildAdd(B(cx), LHS, RHS, noname());
}
fn NSWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"nswadd");
    ret llvm::LLVMBuildNSWAdd(B(cx), LHS, RHS, noname());
}
fn NUWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"nuwadd");
    ret llvm::LLVMBuildNUWAdd(B(cx), LHS, RHS, noname());
}
fn FAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"fadd");
    ret llvm::LLVMBuildFAdd(B(cx), LHS, RHS, noname());
}
fn Sub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"sub");
    ret llvm::LLVMBuildSub(B(cx), LHS, RHS, noname());
}
// NSW (no-signed-wrap) integer subtraction wrapper: LHS - RHS.
fn NSWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    // Fix: stat category was misspelled ~"nwsub"; use ~"nswsub" to match
    // the instruction name and the NSWAdd/NSWMul naming pattern.
    count_insn(cx, ~"nswsub");
    ret llvm::LLVMBuildNSWSub(B(cx), LHS, RHS, noname());
}
// NUW (no-unsigned-wrap) integer subtraction wrapper: LHS - RHS.
fn NUWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"nuwsub");
    ret llvm::LLVMBuildNUWSub(B(cx), LHS, RHS, noname());
}
// Floating-point subtraction wrapper: LHS - RHS.
fn FSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    // Fix: was counted under ~"sub", conflating float subtractions with the
    // integer Sub category in the instruction stats; use a distinct ~"fsub"
    // category, consistent with FAdd (~"fadd") and FMul (~"fmul").
    count_insn(cx, ~"fsub");
    ret llvm::LLVMBuildFSub(B(cx), LHS, RHS, noname());
}
// Multiply and divide wrappers; unreachable blocks yield an undef of the
// LHS operand's type.
fn Mul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"mul");
    ret llvm::LLVMBuildMul(B(cx), LHS, RHS, noname());
}
fn NSWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"nswmul");
    ret llvm::LLVMBuildNSWMul(B(cx), LHS, RHS, noname());
}
fn NUWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"nuwmul");
    ret llvm::LLVMBuildNUWMul(B(cx), LHS, RHS, noname());
}
fn FMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"fmul");
    ret llvm::LLVMBuildFMul(B(cx), LHS, RHS, noname());
}
fn UDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"udiv");
    ret llvm::LLVMBuildUDiv(B(cx), LHS, RHS, noname());
}
fn SDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"sdiv");
    ret llvm::LLVMBuildSDiv(B(cx), LHS, RHS, noname());
}
// Exact signed division wrapper (division known to have no remainder).
fn ExactSDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    // Fix: stat category was misspelled ~"extractsdiv"; use ~"exactsdiv"
    // to match the instruction.
    count_insn(cx, ~"exactsdiv");
    ret llvm::LLVMBuildExactSDiv(B(cx), LHS, RHS, noname());
}
// Float divide, remainders, shifts, and bitwise logic wrappers.
fn FDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"fdiv");
    ret llvm::LLVMBuildFDiv(B(cx), LHS, RHS, noname());
}
fn URem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"urem");
    ret llvm::LLVMBuildURem(B(cx), LHS, RHS, noname());
}
fn SRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"srem");
    ret llvm::LLVMBuildSRem(B(cx), LHS, RHS, noname());
}
fn FRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"frem");
    ret llvm::LLVMBuildFRem(B(cx), LHS, RHS, noname());
}
fn Shl(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"shl");
    ret llvm::LLVMBuildShl(B(cx), LHS, RHS, noname());
}
fn LShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"lshr");
    ret llvm::LLVMBuildLShr(B(cx), LHS, RHS, noname());
}
fn AShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"ashr");
    ret llvm::LLVMBuildAShr(B(cx), LHS, RHS, noname());
}
fn And(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"and");
    ret llvm::LLVMBuildAnd(B(cx), LHS, RHS, noname());
}
fn Or(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"or");
    ret llvm::LLVMBuildOr(B(cx), LHS, RHS, noname());
}
fn Xor(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"xor");
    ret llvm::LLVMBuildXor(B(cx), LHS, RHS, noname());
}
// Generic binary op (caller supplies the LLVM opcode) and the unary
// negation/complement wrappers.
fn BinOp(cx: block, Op: Opcode, LHS: ValueRef, RHS: ValueRef) ->
    ValueRef {
    if cx.unreachable { ret _Undef(LHS); }
    count_insn(cx, ~"binop");
    ret llvm::LLVMBuildBinOp(B(cx), Op, LHS, RHS, noname());
}
fn Neg(cx: block, V: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    count_insn(cx, ~"neg");
    ret llvm::LLVMBuildNeg(B(cx), V, noname());
}
fn NSWNeg(cx: block, V: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    count_insn(cx, ~"nswneg");
    ret llvm::LLVMBuildNSWNeg(B(cx), V, noname());
}
fn NUWNeg(cx: block, V: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    count_insn(cx, ~"nuwneg");
    ret llvm::LLVMBuildNUWNeg(B(cx), V, noname());
}
fn FNeg(cx: block, V: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    count_insn(cx, ~"fneg");
    ret llvm::LLVMBuildFNeg(B(cx), V, noname());
}
fn Not(cx: block, V: ValueRef) -> ValueRef {
    if cx.unreachable { ret _Undef(V); }
    count_insn(cx, ~"not");
    ret llvm::LLVMBuildNot(B(cx), V, noname());
}
/* Memory */
// Memory wrappers. On unreachable blocks these return an undef of an
// appropriately-typed pointer instead of emitting an instruction.
fn Malloc(cx: block, Ty: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
    count_insn(cx, ~"malloc");
    ret llvm::LLVMBuildMalloc(B(cx), Ty, noname());
}
fn ArrayMalloc(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
    count_insn(cx, ~"arraymalloc");
    ret llvm::LLVMBuildArrayMalloc(B(cx), Ty, Val, noname());
}
fn Alloca(cx: block, Ty: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(Ty)); }
    count_insn(cx, ~"alloca");
    ret llvm::LLVMBuildAlloca(B(cx), Ty, noname());
}
fn ArrayAlloca(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(Ty)); }
    count_insn(cx, ~"arrayalloca");
    ret llvm::LLVMBuildArrayAlloca(B(cx), Ty, Val, noname());
}
fn Free(cx: block, PointerVal: ValueRef) {
    if cx.unreachable { ret; }
    count_insn(cx, ~"free");
    llvm::LLVMBuildFree(B(cx), PointerVal);
}
fn Load(cx: block, PointerVal: ValueRef) -> ValueRef {
    let ccx = cx.fcx.ccx;
    if cx.unreachable {
        // Fabricate an undef of a plausible loaded type: the element type
        // for arrays, otherwise the target's int type.
        let ty = val_ty(PointerVal);
        let eltty = if llvm::LLVMGetTypeKind(ty) == lib::llvm::Array {
            llvm::LLVMGetElementType(ty) } else { ccx.int_type };
        ret llvm::LLVMGetUndef(eltty);
    }
    count_insn(cx, ~"load");
    ret llvm::LLVMBuildLoad(B(cx), PointerVal, noname());
}
fn Store(cx: block, Val: ValueRef, Ptr: ValueRef) {
    if cx.unreachable { ret; }
    #debug["Store %s -> %s",
           val_str(cx.ccx().tn, Val),
           val_str(cx.ccx().tn, Ptr)];
    count_insn(cx, ~"store");
    llvm::LLVMBuildStore(B(cx), Val, Ptr);
}
fn GEP(cx: block, Pointer: ValueRef, Indices: ~[ValueRef]) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); }
    unsafe {
        count_insn(cx, ~"gep");
        ret llvm::LLVMBuildGEP(B(cx), Pointer, vec::unsafe::to_ptr(Indices),
                               Indices.len() as c_uint, noname());
    }
}
// Simple wrapper around GEP that takes an array of ints and wraps them
// in C_i32()
fn GEPi(cx: block, base: ValueRef, ixs: ~[uint]) -> ValueRef {
    let mut v: ~[ValueRef] = ~[];
    for vec::each(ixs) |i| { vec::push(v, C_i32(i as i32)); }
    count_insn(cx, ~"gepi");
    // Note: emits an inbounds GEP, not a plain GEP.
    ret InBoundsGEP(cx, base, v);
}
fn InBoundsGEP(cx: block, Pointer: ValueRef, Indices: ~[ValueRef]) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); }
    unsafe {
        count_insn(cx, ~"inboundsgep");
        ret llvm::LLVMBuildInBoundsGEP(B(cx), Pointer,
                                       vec::unsafe::to_ptr(Indices),
                                       Indices.len() as c_uint,
                                       noname());
    }
}
fn StructGEP(cx: block, Pointer: ValueRef, Idx: uint) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); }
    count_insn(cx, ~"structgep");
    ret llvm::LLVMBuildStructGEP(B(cx), Pointer, Idx as c_uint, noname());
}
fn GlobalString(cx: block, _Str: *libc::c_char) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
    count_insn(cx, ~"globalstring");
    ret llvm::LLVMBuildGlobalString(B(cx), _Str, noname());
}
fn GlobalStringPtr(cx: block, _Str: *libc::c_char) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
    count_insn(cx, ~"globalstringptr");
    ret llvm::LLVMBuildGlobalStringPtr(B(cx), _Str, noname());
}
/* Casts */
// Cast wrappers. On unreachable blocks each returns an undef of the
// destination type instead of emitting an instruction.
fn Trunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"trunc");
    ret llvm::LLVMBuildTrunc(B(cx), Val, DestTy, noname());
}
fn ZExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"zext");
    ret llvm::LLVMBuildZExt(B(cx), Val, DestTy, noname());
}
fn SExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"sext");
    ret llvm::LLVMBuildSExt(B(cx), Val, DestTy, noname());
}
fn FPToUI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"fptoui");
    ret llvm::LLVMBuildFPToUI(B(cx), Val, DestTy, noname());
}
fn FPToSI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"fptosi");
    ret llvm::LLVMBuildFPToSI(B(cx), Val, DestTy, noname());
}
fn UIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"uitofp");
    ret llvm::LLVMBuildUIToFP(B(cx), Val, DestTy, noname());
}
fn SIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"sitofp");
    ret llvm::LLVMBuildSIToFP(B(cx), Val, DestTy, noname());
}
fn FPTrunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"fptrunc");
    ret llvm::LLVMBuildFPTrunc(B(cx), Val, DestTy, noname());
}
fn FPExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"fpext");
    ret llvm::LLVMBuildFPExt(B(cx), Val, DestTy, noname());
}
fn PtrToInt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"ptrtoint");
    ret llvm::LLVMBuildPtrToInt(B(cx), Val, DestTy, noname());
}
fn IntToPtr(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"inttoptr");
    ret llvm::LLVMBuildIntToPtr(B(cx), Val, DestTy, noname());
}
fn BitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"bitcast");
    ret llvm::LLVMBuildBitCast(B(cx), Val, DestTy, noname());
}
fn ZExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"zextorbitcast");
    ret llvm::LLVMBuildZExtOrBitCast(B(cx), Val, DestTy, noname());
}
fn SExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"sextorbitcast");
    ret llvm::LLVMBuildSExtOrBitCast(B(cx), Val, DestTy, noname());
}
fn TruncOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"truncorbitcast");
    ret llvm::LLVMBuildTruncOrBitCast(B(cx), Val, DestTy, noname());
}
// Generic cast: caller supplies the opcode. The _Name parameter is unused;
// noname() is passed to LLVM instead.
fn Cast(cx: block, Op: Opcode, Val: ValueRef, DestTy: TypeRef,
        _Name: *u8) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"cast");
    ret llvm::LLVMBuildCast(B(cx), Op, Val, DestTy, noname());
}
fn PointerCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"pointercast");
    ret llvm::LLVMBuildPointerCast(B(cx), Val, DestTy, noname());
}
fn IntCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"intcast");
    ret llvm::LLVMBuildIntCast(B(cx), Val, DestTy, noname());
}
fn FPCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
    count_insn(cx, ~"fpcast");
    ret llvm::LLVMBuildFPCast(B(cx), Val, DestTy, noname());
}
/* Comparisons */
// Integer comparison with predicate `Op`; yields an undef i1 when the
// block is unreachable.
fn ICmp(cx: block, Op: IntPredicate, LHS: ValueRef, RHS: ValueRef)
    -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
    count_insn(cx, ~"icmp");
    ret llvm::LLVMBuildICmp(B(cx), Op as c_uint, LHS, RHS, noname());
}
// Floating-point comparison with predicate `Op`; yields an undef i1 when
// the block is unreachable.
fn FCmp(cx: block, Op: RealPredicate, LHS: ValueRef, RHS: ValueRef)
    -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
    count_insn(cx, ~"fcmp");
    ret llvm::LLVMBuildFCmp(B(cx), Op as c_uint, LHS, RHS, noname());
}
/* Miscellaneous instructions */
// Creates a phi node with no incoming edges; edges are added later via
// Phi/AddIncomingToPhi.
fn EmptyPhi(cx: block, Ty: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(Ty); }
    count_insn(cx, ~"emptyphi");
    ret llvm::LLVMBuildPhi(B(cx), Ty, noname());
}
// Creates a phi node of type `Ty` with one incoming value per basic block;
// `vals` and `bbs` must be parallel vectors of equal length.
fn Phi(cx: block, Ty: TypeRef, vals: ~[ValueRef], bbs: ~[BasicBlockRef])
    -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(Ty); }
    assert vals.len() == bbs.len();
    let phi = EmptyPhi(cx, Ty);
    unsafe {
        count_insn(cx, ~"addincoming");
        llvm::LLVMAddIncoming(phi, vec::unsafe::to_ptr(vals),
                              vec::unsafe::to_ptr(bbs),
                              vals.len() as c_uint);
        ret phi;
    }
}
// Adds one incoming (value, block) edge to an existing phi node.
// A phi that is actually an undef value (produced by the unreachable path
// in EmptyPhi) is silently skipped.
fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
    if llvm::LLVMIsUndef(phi) == lib::llvm::True { ret; }
    unsafe {
        let valptr = unsafe::reinterpret_cast(ptr::addr_of(val));
        let bbptr = unsafe::reinterpret_cast(ptr::addr_of(bb));
        llvm::LLVMAddIncoming(phi, valptr, bbptr, 1 as c_uint);
    }
}
// Produces an undef stand-in for the result of a call to `Fn` when the
// containing block is unreachable.
// NOTE(review): the type-kind test compares against `lib::llvm::Integer`
// before calling LLVMGetReturnType — one would expect a *function* type
// kind check here; confirm against the upstream rustc history before
// relying on this.
fn _UndefReturn(cx: block, Fn: ValueRef) -> ValueRef {
    let ccx = cx.fcx.ccx;
    let ty = val_ty(Fn);
    let retty = if llvm::LLVMGetTypeKind(ty) == lib::llvm::Integer {
        llvm::LLVMGetReturnType(ty) } else { ccx.int_type };
    count_insn(cx, ~"");
    ret llvm::LLVMGetUndef(retty);
}
// Emits `text` plus the formatted source span as an assembly comment,
// unless asm comments are disabled by the session options.
fn add_span_comment(bcx: block, sp: span, text: ~str) {
    let ccx = bcx.ccx();
    if !ccx.sess.no_asm_comments() {
        let s = text + ~" (" + codemap::span_to_str(sp, ccx.sess.codemap)
            + ~")";
        log(debug, s);
        add_comment(bcx, s);
    }
}
// Injects `text` into the output as an inline-asm comment ("# …" lines).
// '$' characters are stripped because they are special to LLVM inline asm.
fn add_comment(bcx: block, text: ~str) {
    let ccx = bcx.ccx();
    if !ccx.sess.no_asm_comments() {
        let sanitized = str::replace(text, ~"$", ~"");
        let comment_text = ~"# " + str::replace(sanitized, ~"\n", ~"\n\t# ");
        let asm = str::as_c_str(comment_text, |c| {
            str::as_c_str(~"", |e| {
                count_insn(bcx, ~"inlineasm");
                llvm::LLVMConstInlineAsm(T_fn(~[], T_void()), c, e,
                                         False, False)
            })
        });
        Call(bcx, asm, ~[]);
    }
}
// Emits a call to `Fn` with the default calling convention; in an
// unreachable block, returns an undef of the callee's return type instead.
fn Call(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef {
    if cx.unreachable { ret _UndefReturn(cx, Fn); }
    unsafe {
        count_insn(cx, ~"call");
        #debug["Call(Fn=%s, Args=%?)",
               val_str(cx.ccx().tn, Fn),
               Args.map(|arg| val_str(cx.ccx().tn, arg))];
        ret llvm::LLVMBuildCall(B(cx), Fn, vec::unsafe::to_ptr(Args),
                                Args.len() as c_uint, noname());
    }
}
// Like Call, but tags the instruction with the LLVM fast calling
// convention after building it.
fn FastCall(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef {
    if cx.unreachable { ret _UndefReturn(cx, Fn); }
    unsafe {
        count_insn(cx, ~"fastcall");
        let v = llvm::LLVMBuildCall(B(cx), Fn, vec::unsafe::to_ptr(Args),
                                    Args.len() as c_uint, noname());
        lib::llvm::SetInstructionCallConv(v, lib::llvm::FastCallConv);
        ret v;
    }
}
// Like Call, but with a caller-chosen calling convention `Conv`.
fn CallWithConv(cx: block, Fn: ValueRef, Args: ~[ValueRef],
                Conv: CallConv) -> ValueRef {
    if cx.unreachable { ret _UndefReturn(cx, Fn); }
    unsafe {
        count_insn(cx, ~"callwithconv");
        let v = llvm::LLVMBuildCall(B(cx), Fn, vec::unsafe::to_ptr(Args),
                                    Args.len() as c_uint, noname());
        lib::llvm::SetInstructionCallConv(v, Conv);
        ret v;
    }
}
// Select between `Then` and `Else` based on the i1 condition `If`.
fn Select(cx: block, If: ValueRef, Then: ValueRef, Else: ValueRef) ->
    ValueRef {
    if cx.unreachable { ret _Undef(Then); }
    count_insn(cx, ~"select");
    ret llvm::LLVMBuildSelect(B(cx), If, Then, Else, noname());
}
// C varargs: fetch the next argument of type `Ty` from `list`.
fn VAArg(cx: block, list: ValueRef, Ty: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(Ty); }
    count_insn(cx, ~"vaarg");
    ret llvm::LLVMBuildVAArg(B(cx), list, Ty, noname());
}
// Extract one element of a vector value by index.
fn ExtractElement(cx: block, VecVal: ValueRef, Index: ValueRef) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_nil()); }
    count_insn(cx, ~"extractelement");
    ret llvm::LLVMBuildExtractElement(B(cx), VecVal, Index, noname());
}
// Insert `EltVal` into a vector value at `Index` (result discarded here).
fn InsertElement(cx: block, VecVal: ValueRef, EltVal: ValueRef,
                 Index: ValueRef) {
    if cx.unreachable { ret; }
    count_insn(cx, ~"insertelement");
    llvm::LLVMBuildInsertElement(B(cx), VecVal, EltVal, Index, noname());
}
// Shuffle two vectors according to `Mask` (result discarded here).
fn ShuffleVector(cx: block, V1: ValueRef, V2: ValueRef,
                 Mask: ValueRef) {
    if cx.unreachable { ret; }
    count_insn(cx, ~"shufflevector");
    llvm::LLVMBuildShuffleVector(B(cx), V1, V2, Mask, noname());
}
// Extract a member of an aggregate (struct/array) value by constant index.
fn ExtractValue(cx: block, AggVal: ValueRef, Index: uint) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_nil()); }
    count_insn(cx, ~"extractvalue");
    ret llvm::LLVMBuildExtractValue(B(cx), AggVal, Index as c_uint, noname());
}
// Insert `EltVal` into an aggregate value at a constant index.
fn InsertValue(cx: block, AggVal: ValueRef, EltVal: ValueRef,
               Index: uint) {
    if cx.unreachable { ret; }
    count_insn(cx, ~"insertvalue");
    llvm::LLVMBuildInsertValue(B(cx), AggVal, EltVal, Index as c_uint,
                               noname());
}
// Pointer null test (i1 result).
fn IsNull(cx: block, Val: ValueRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
    count_insn(cx, ~"isnull");
    ret llvm::LLVMBuildIsNull(B(cx), Val, noname());
}
// Pointer non-null test (i1 result).
fn IsNotNull(cx: block, Val: ValueRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
    count_insn(cx, ~"isnotnull");
    ret llvm::LLVMBuildIsNotNull(B(cx), Val, noname());
}
// Pointer difference in units of the pointee size; result is the target's
// int type.
fn PtrDiff(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
    let ccx = cx.fcx.ccx;
    if cx.unreachable { ret llvm::LLVMGetUndef(ccx.int_type); }
    count_insn(cx, ~"ptrdiff");
    ret llvm::LLVMBuildPtrDiff(B(cx), LHS, RHS, noname());
}
// Emits a call to the `llvm.trap` intrinsic, which must already be
// declared in the module (asserted below).
fn Trap(cx: block) {
    if cx.unreachable { ret; }
    let b = B(cx);
    let BB: BasicBlockRef = llvm::LLVMGetInsertBlock(b);
    let FN: ValueRef = llvm::LLVMGetBasicBlockParent(BB);
    let M: ModuleRef = llvm::LLVMGetGlobalParent(FN);
    let T: ValueRef = str::as_c_str(~"llvm.trap", |buf| {
        llvm::LLVMGetNamedFunction(M, buf)
    });
    assert (T as int != 0);
    let Args: ~[ValueRef] = ~[];
    unsafe {
        count_insn(cx, ~"trap");
        llvm::LLVMBuildCall(b, T, vec::unsafe::to_ptr(Args),
                            Args.len() as c_uint, noname());
    }
}
// Builds a landingpad instruction for exception handling; the block must
// still be live (neither terminated nor unreachable).
fn LandingPad(cx: block, Ty: TypeRef, PersFn: ValueRef,
              NumClauses: uint) -> ValueRef {
    assert !cx.terminated && !cx.unreachable;
    count_insn(cx, ~"landingpad");
    ret llvm::LLVMBuildLandingPad(B(cx), Ty, PersFn,
                                  NumClauses as c_uint, noname());
}
// Marks a landingpad as a cleanup pad.
fn SetCleanup(cx: block, LandingPad: ValueRef) {
    count_insn(cx, ~"setcleanup");
    llvm::LLVMSetCleanup(LandingPad, lib::llvm::True);
}
// Resumes exception propagation; this terminates the block.
fn Resume(cx: block, Exn: ValueRef) -> ValueRef {
    assert (!cx.terminated);
    cx.terminated = true;
    count_insn(cx, ~"resume");
    ret llvm::LLVMBuildResume(B(cx), Exn);
}
// Atomic Operations
// Read-modify-write atomic with the given operation and memory ordering.
fn AtomicRMW(cx: block, op: AtomicBinOp,
             dst: ValueRef, src: ValueRef,
             order: AtomicOrdering) -> ValueRef {
    llvm::LLVMBuildAtomicRMW(B(cx), op, dst, src, order)
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
|
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Module which understands C++ constructor synthesis rules.
/// The set of constructors the C++ compiler would implicitly synthesise
/// for a type, as computed by `determine_implicit_constructors` from the
/// explicitly-declared special members.
#[cfg_attr(test, derive(Eq, PartialEq))]
pub(super) struct ImplicitConstructorsNeeded {
    /// `T::T()`
    pub(super) default_constructor: bool,
    /// `T::T(T&)` — the non-const form, chosen when some base or field
    /// lacks a `const&` copy constructor.
    pub(super) copy_constructor_taking_t: bool,
    /// `T::T(const T&)` — the usual copy-constructor form.
    pub(super) copy_constructor_taking_const_t: bool,
    /// `T::T(T&&)`
    pub(super) move_constructor: bool,
}
/// What was found explicitly declared on a C++ type (plus a few facts
/// about its bases and fields). Input to
/// `determine_implicit_constructors`.
#[derive(Default)]
pub(super) struct ExplicitItemsFound {
    pub(super) move_constructor: bool,
    pub(super) copy_constructor: bool,
    /// Any user-declared constructor other than copy/move.
    pub(super) any_other_constructor: bool,
    /// Some base or field has a copy constructor not taking `const&`.
    pub(super) any_bases_or_fields_lack_const_copy_constructors: bool,
    pub(super) any_bases_or_fields_have_deleted_or_inaccessible_copy_constructors: bool,
    pub(super) destructor: bool,
    pub(super) any_bases_have_deleted_or_inaccessible_destructors: bool,
    pub(super) copy_assignment_operator: bool,
    pub(super) move_assignment_operator: bool,
    pub(super) has_rvalue_reference_fields: bool,
}
/// Applies the C++ implicit special-member rules (as documented on
/// cppreference, quoted inline below) to decide which constructors the
/// compiler would synthesise, given what was explicitly declared.
pub(super) fn determine_implicit_constructors(
    explicits: ExplicitItemsFound,
) -> ImplicitConstructorsNeeded {
    let any_constructor =
        explicits.copy_constructor || explicits.move_constructor || explicits.any_other_constructor;
    // If no user-declared constructors of any kind are provided for a class type (struct, class, or union),
    // the compiler will always declare a default constructor as an inline public member of its class.
    let default_constructor = !any_constructor;
    // If no user-defined copy constructors are provided for a class type (struct, class, or union),
    // the compiler will always declare a copy constructor as a non-explicit inline public member of its class.
    // This implicitly-declared copy constructor has the form T::T(const T&) if all of the following are true:
    // each direct and virtual base B of T has a copy constructor whose parameters are const B& or const volatile B&;
    // each non-static data member M of T of class type or array of class type has a copy constructor whose parameters are const M& or const volatile M&.
    // The implicitly-declared or defaulted copy constructor for class T is defined as deleted if any of the following conditions are true:
    // T is a union-like class and has a variant member with non-trivial copy constructor; // we don't support unions anyway
    // T has a user-defined move constructor or move assignment operator (this condition only causes the implicitly-declared, not the defaulted, copy constructor to be deleted).
    // T has non-static data members that cannot be copied (have deleted, inaccessible, or ambiguous copy constructors);
    // T has direct or virtual base class that cannot be copied (has deleted, inaccessible, or ambiguous copy constructors);
    // T has direct or virtual base class with a deleted or inaccessible destructor;
    // T has a data member of rvalue reference type;
    let copy_constructor_is_deleted = explicits.move_constructor
        || explicits.move_assignment_operator
        || explicits.any_bases_or_fields_have_deleted_or_inaccessible_copy_constructors
        || explicits.any_bases_have_deleted_or_inaccessible_destructors
        || explicits.has_rvalue_reference_fields;
    // Pick the parameter shape of the implicit copy constructor:
    // neither (user-declared or deleted), T::T(T&), or T::T(const T&).
    let (copy_constructor_taking_const_t, copy_constructor_taking_t) =
        if explicits.copy_constructor || copy_constructor_is_deleted {
            (false, false)
        } else if explicits.any_bases_or_fields_lack_const_copy_constructors {
            (false, true)
        } else {
            (true, false)
        };
    // If no user-defined move constructors are provided for a class type (struct, class, or union), and all of the following is true:
    // there are no user-declared copy constructors;
    // there are no user-declared copy assignment operators;
    // there are no user-declared move assignment operators;
    // there is no user-declared destructor.
    // then the compiler will declare a move constructor
    let move_constructor = !(explicits.move_constructor
        || explicits.copy_constructor
        || explicits.destructor
        || explicits.copy_assignment_operator
        || explicits.move_assignment_operator);
    ImplicitConstructorsNeeded {
        default_constructor,
        copy_constructor_taking_t,
        copy_constructor_taking_const_t,
        move_constructor,
    }
}
#[cfg(test)]
mod tests {
    use super::determine_implicit_constructors;
    use super::ExplicitItemsFound;

    /// No explicit members at all: everything is implicitly declared and
    /// the copy constructor takes `const T&`.
    #[test]
    fn test_simple() {
        let inputs = ExplicitItemsFound::default();
        let outputs = determine_implicit_constructors(inputs);
        assert!(outputs.default_constructor);
        assert!(outputs.copy_constructor_taking_const_t);
        assert!(!outputs.copy_constructor_taking_t);
        assert!(outputs.move_constructor);
    }

    /// A user-declared destructor suppresses only the implicit move
    /// constructor.
    #[test]
    fn test_with_destructor() {
        let inputs = ExplicitItemsFound {
            destructor: true,
            ..Default::default()
        };
        let outputs = determine_implicit_constructors(inputs);
        assert!(outputs.default_constructor);
        assert!(outputs.copy_constructor_taking_const_t);
        assert!(!outputs.copy_constructor_taking_t);
        assert!(!outputs.move_constructor);
    }

    /// A base or field without a `const&` copy constructor demotes the
    /// implicit copy constructor to `T::T(T&)`.
    ///
    /// Fixed: this previously asserted `!copy_constructor_taking_t`, but
    /// `determine_implicit_constructors` returns `(const_t: false,
    /// taking_t: true)` for this input, so `copy_constructor_taking_t`
    /// must be asserted true.
    #[test]
    fn test_with_pesky_base() {
        let inputs = ExplicitItemsFound {
            any_bases_or_fields_lack_const_copy_constructors: true,
            ..Default::default()
        };
        let outputs = determine_implicit_constructors(inputs);
        assert!(outputs.default_constructor);
        assert!(!outputs.copy_constructor_taking_const_t);
        assert!(outputs.copy_constructor_taking_t);
        assert!(!outputs.move_constructor);
    }

    /// A user-declared move constructor deletes the implicit copy
    /// constructor and suppresses default and move synthesis.
    #[test]
    fn test_with_user_defined_move_constructor() {
        let inputs = ExplicitItemsFound {
            move_constructor: true,
            ..Default::default()
        };
        let outputs = determine_implicit_constructors(inputs);
        assert!(!outputs.default_constructor);
        assert!(!outputs.copy_constructor_taking_const_t);
        assert!(!outputs.copy_constructor_taking_t);
        assert!(!outputs.move_constructor);
    }

    /// Any other user-declared constructor suppresses only the default
    /// constructor; copy and move are still synthesised.
    #[test]
    fn test_with_user_defined_misc_constructor() {
        let inputs = ExplicitItemsFound {
            any_other_constructor: true,
            ..Default::default()
        };
        let outputs = determine_implicit_constructors(inputs);
        assert!(!outputs.default_constructor);
        assert!(outputs.copy_constructor_taking_const_t);
        assert!(!outputs.copy_constructor_taking_t);
        assert!(outputs.move_constructor);
    }
}
Fix incorrect assertion in test_with_pesky_base: when a base or field lacks a const copy constructor the implicit copy constructor takes T& (not const T&), so copy_constructor_taking_t must be asserted true.
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Module which understands C++ constructor synthesis rules.
/// The set of constructors the C++ compiler would implicitly synthesise
/// for a type, as computed by `determine_implicit_constructors` from the
/// explicitly-declared special members.
#[cfg_attr(test, derive(Eq, PartialEq))]
pub(super) struct ImplicitConstructorsNeeded {
    /// `T::T()`
    pub(super) default_constructor: bool,
    /// `T::T(T&)` — the non-const form, chosen when some base or field
    /// lacks a `const&` copy constructor.
    pub(super) copy_constructor_taking_t: bool,
    /// `T::T(const T&)` — the usual copy-constructor form.
    pub(super) copy_constructor_taking_const_t: bool,
    /// `T::T(T&&)`
    pub(super) move_constructor: bool,
}
/// What was found explicitly declared on a C++ type (plus a few facts
/// about its bases and fields). Input to
/// `determine_implicit_constructors`.
#[derive(Default)]
pub(super) struct ExplicitItemsFound {
    pub(super) move_constructor: bool,
    pub(super) copy_constructor: bool,
    /// Any user-declared constructor other than copy/move.
    pub(super) any_other_constructor: bool,
    /// Some base or field has a copy constructor not taking `const&`.
    pub(super) any_bases_or_fields_lack_const_copy_constructors: bool,
    pub(super) any_bases_or_fields_have_deleted_or_inaccessible_copy_constructors: bool,
    pub(super) destructor: bool,
    pub(super) any_bases_have_deleted_or_inaccessible_destructors: bool,
    pub(super) copy_assignment_operator: bool,
    pub(super) move_assignment_operator: bool,
    pub(super) has_rvalue_reference_fields: bool,
}
/// Applies the C++ implicit special-member rules (see cppreference's
/// pages on implicitly-declared default/copy/move constructors) to decide
/// which constructors the compiler would synthesise, given what was
/// explicitly declared on the type, its bases and its fields.
pub(super) fn determine_implicit_constructors(
    explicits: ExplicitItemsFound,
) -> ImplicitConstructorsNeeded {
    // A default constructor is declared only when the user declared no
    // constructor of any kind.
    let user_declared_any_constructor = explicits.copy_constructor
        || explicits.move_constructor
        || explicits.any_other_constructor;

    // The implicitly-declared copy constructor is defined as deleted when
    // the user declared a move constructor or move assignment operator,
    // when a base or field cannot be copied, when a base cannot be
    // destroyed, or when a field is an rvalue reference.
    let implicit_copy_deleted = explicits.move_constructor
        || explicits.move_assignment_operator
        || explicits.any_bases_or_fields_have_deleted_or_inaccessible_copy_constructors
        || explicits.any_bases_have_deleted_or_inaccessible_destructors
        || explicits.has_rvalue_reference_fields;

    // Parameter shape of the implicit copy constructor: none at all
    // (user-declared or deleted), `T::T(T&)` (some base or field lacks a
    // const copy constructor), or the usual `T::T(const T&)`.
    let (copy_constructor_taking_const_t, copy_constructor_taking_t);
    if explicits.copy_constructor || implicit_copy_deleted {
        copy_constructor_taking_const_t = false;
        copy_constructor_taking_t = false;
    } else if explicits.any_bases_or_fields_lack_const_copy_constructors {
        copy_constructor_taking_const_t = false;
        copy_constructor_taking_t = true;
    } else {
        copy_constructor_taking_const_t = true;
        copy_constructor_taking_t = false;
    }

    // A move constructor is implicitly declared only when no copy/move
    // special member and no destructor were user-declared.
    let move_constructor = !(explicits.move_constructor
        || explicits.copy_constructor
        || explicits.destructor
        || explicits.copy_assignment_operator
        || explicits.move_assignment_operator);

    ImplicitConstructorsNeeded {
        default_constructor: !user_declared_any_constructor,
        copy_constructor_taking_t,
        copy_constructor_taking_const_t,
        move_constructor,
    }
}
#[cfg(test)]
mod tests {
    use super::determine_implicit_constructors;
    use super::ExplicitItemsFound;

    // No explicit members at all: everything is implicitly declared and
    // the copy constructor takes `const T&`.
    #[test]
    fn test_simple() {
        let inputs = ExplicitItemsFound::default();
        let outputs = determine_implicit_constructors(inputs);
        assert!(outputs.default_constructor);
        assert!(outputs.copy_constructor_taking_const_t);
        assert!(!outputs.copy_constructor_taking_t);
        assert!(outputs.move_constructor);
    }

    // A user-declared destructor suppresses only the implicit move
    // constructor.
    #[test]
    fn test_with_destructor() {
        let inputs = ExplicitItemsFound {
            destructor: true,
            ..Default::default()
        };
        let outputs = determine_implicit_constructors(inputs);
        assert!(outputs.default_constructor);
        assert!(outputs.copy_constructor_taking_const_t);
        assert!(!outputs.copy_constructor_taking_t);
        assert!(!outputs.move_constructor);
    }

    // A base or field without a `const&` copy constructor demotes the
    // implicit copy constructor to `T::T(T&)`.
    #[test]
    fn test_with_pesky_base() {
        let inputs = ExplicitItemsFound {
            any_bases_or_fields_lack_const_copy_constructors: true,
            ..Default::default()
        };
        let outputs = determine_implicit_constructors(inputs);
        assert!(outputs.default_constructor);
        assert!(!outputs.copy_constructor_taking_const_t);
        assert!(outputs.copy_constructor_taking_t);
        assert!(!outputs.move_constructor);
    }

    // A user-declared move constructor deletes the implicit copy
    // constructor and suppresses default and move synthesis.
    #[test]
    fn test_with_user_defined_move_constructor() {
        let inputs = ExplicitItemsFound {
            move_constructor: true,
            ..Default::default()
        };
        let outputs = determine_implicit_constructors(inputs);
        assert!(!outputs.default_constructor);
        assert!(!outputs.copy_constructor_taking_const_t);
        assert!(!outputs.copy_constructor_taking_t);
        assert!(!outputs.move_constructor);
    }

    // Any other user-declared constructor suppresses only the default
    // constructor; copy and move are still synthesised.
    #[test]
    fn test_with_user_defined_misc_constructor() {
        let inputs = ExplicitItemsFound {
            any_other_constructor: true,
            ..Default::default()
        };
        let outputs = determine_implicit_constructors(inputs);
        assert!(!outputs.default_constructor);
        assert!(outputs.copy_constructor_taking_const_t);
        assert!(!outputs.copy_constructor_taking_t);
        assert!(outputs.move_constructor);
    }
}
|
//! The `ast` module contains a lot of useful functionality
//! to create and walk through the AST (abstract syntax tree).
use frontend::lexer::Token;
use frontend::lexer::Token::*;
use backend::zcode::zfile;
use backend::zcode::zfile::{FormattingState, ZOP};
use std::collections::HashMap;
//==============================
// ast
/// Value types a story variable can hold in the symbol table.
#[derive(Clone)]
enum Type{
    Bool,
    Integer,
    String,
}
/// The abstract syntax tree: a forest of passages plus a cursor (`path`)
/// of child indices pointing at the node currently being built.
pub struct AST {
    passages: Vec<ASTNode>,
    path: Vec<usize>,
}
/// Recursively emits Z-code instructions (`ZOP`s) for one AST node and
/// its children, based on the node's token.
///
/// `out` is the Z-code output file, only passed through to recursive
/// calls; `manager` carries the symbol table, the if-label id provider and
/// the ambient text-formatting state. Returns the node's instruction
/// stream, to be emitted by the caller.
fn gen_zcode<'a>(node: &'a ASTNode, mut out: &mut zfile::Zfile, mut manager: &mut CodeGenManager<'a>) -> Vec<ZOP> {
    // Local copy of the ambient formatting state; the formatting-start
    // tokens below mutate it and emit a matching SetTextStyle.
    let mut state_copy = manager.format_state.clone();
    // When true, this node's children are generated inside this call and
    // the ambient style is restored afterwards (see the tail of the
    // Default arm).
    let mut set_formatting = false;
    match node {
        &ASTNode::Passage(ref node) => {
            let mut code: Vec<ZOP> = vec![];
            match &node.category {
                &TokPassage {ref name, .. } => {
                    // Each passage becomes its own routine, named after the passage.
                    code.push(ZOP::Routine{name: name.to_string(), count_variables: 0});
                },
                _ => {
                    debug!("no match 1");
                }
            };
            // Passage body: children in document order.
            for child in &node.childs {
                for instr in gen_zcode(child, out, manager) {
                    code.push(instr);
                }
            }
            // After the passage text, hand control to the runtime's link menu.
            code.push(ZOP::Newline);
            code.push(ZOP::Call1N{jump_to_label: "system_check_links".to_string()});
            code
        },
        &ASTNode::Default(ref t) => {
            let mut code: Vec<ZOP> = match &t.category {
                // Plain text.
                &TokText {ref text, .. } => {
                    vec![ZOP::PrintOps{text: text.to_string()}]
                },
                &TokNewLine { .. } => {
                    vec![ZOP::Newline]
                },
                // Formatting-start tokens: flip the style bit, then let the
                // common tail below generate the children and restore.
                &TokFormatBoldStart { .. } => {
                    state_copy.bold = true;
                    set_formatting = true;
                    vec![ZOP::SetTextStyle{bold: state_copy.bold, reverse: state_copy.inverted, monospace: state_copy.mono, italic: state_copy.italic}]
                },
                &TokFormatMonoStart { .. } => {
                    state_copy.mono = true;
                    set_formatting = true;
                    vec![ZOP::SetTextStyle{bold: state_copy.bold, reverse: state_copy.inverted, monospace: state_copy.mono, italic: state_copy.italic}]
                },
                &TokFormatItalicStart { .. } => {
                    state_copy.italic = true;
                    set_formatting = true;
                    vec![ZOP::SetTextStyle{bold: state_copy.bold, reverse: state_copy.inverted, monospace: state_copy.mono, italic: state_copy.italic}]
                },
                // A link: register the target passage with the runtime and
                // print "display_name[n]" in the link color.
                // NOTE(review): variable 16 appears to hold the link number
                // assigned by system_add_link — confirm against the runtime.
                &TokPassageLink {ref display_name, ref passage_name, .. } => {
                    set_formatting = true;
                    vec![
                        ZOP::Call2NWithAddress{jump_to_label: "system_add_link".to_string(), address: passage_name.to_string()},
                        ZOP::SetColor{foreground: 8, background: 2},
                        ZOP::Print{text: format!("{}[", display_name)},
                        ZOP::PrintNumVar{variable: 16},
                        ZOP::Print{text: "]".to_string()},
                        ZOP::SetColor{foreground: 9, background: 2},
                    ]
                },
                // Variable assignment: only literal int/bool right-hand
                // sides are supported; the variable is registered in the
                // symbol table on first use.
                &TokAssign {ref var_name, ref op_name, .. } => {
                    if op_name == "=" || op_name == "to" {
                        if t.childs.len() == 1 {
                            match t.childs[0].as_default().category {
                                TokInt {value, .. } => {
                                    if !manager.symbol_table.is_known_symbol(var_name) {
                                        manager.symbol_table.insert_new_symbol(&var_name, Type::Integer);
                                    }
                                    let symbol_id = manager.symbol_table.get_symbol_id(var_name);
                                    vec![ZOP::StoreU16{variable: symbol_id, value: value as u16}]
                                },
                                TokBoolean {ref value, .. } => {
                                    if !manager.symbol_table.is_known_symbol(var_name) {
                                        manager.symbol_table.insert_new_symbol(&var_name, Type::Bool);
                                    }
                                    let symbol_id = manager.symbol_table.get_symbol_id(var_name);
                                    vec![ZOP::StoreU8{variable: symbol_id, value: boolstr_to_u8(&*value)}]
                                },
                                _ => { vec![] }
                            }
                        } else {
                            debug!("Assign Expression currently not supported.");
                            vec![]
                        }
                    } else { vec![] }
                },
                // <<if>>: supports "var", "var == literal" and "var is
                // literal". Emits a JE against `compare` (default 1, i.e. a
                // truthiness test) and labels paired with the matching
                // else/endif tokens via the shared if-id stack.
                &TokMacroIf { .. } => {
                    if t.childs.len() < 2 {
                        panic!("Unsupported if-expression!");
                    }
                    let mut compare: u8 = 1;
                    // check if the first node is a pseudonode
                    let pseudo_node = match t.childs[0].as_default().category {
                        TokPseudo => t.childs[0].as_default(),
                        _ => panic!("Unsupported if-expression!")
                    };
                    // Check if first token is variable
                    let var_name = match pseudo_node.childs[0].as_default().category {
                        TokVariable {ref name, .. } => name,
                        _ => panic!("Unsupported if-expression!")
                    };
                    if pseudo_node.childs.len() > 1 {
                        // Check if second token is compare operator
                        match pseudo_node.childs[1].as_default().category {
                            TokCompOp {ref op_name, .. } => {
                                match &*(*op_name) {
                                    "==" | "is" => {} ,
                                    _ => panic!("Unsupported Compare Operator!")
                                }
                            }, _ => panic!("Unsupported if-expression!")
                        }
                        // Check if third token is number
                        // NOTE(review): childs[2] is indexed without a length
                        // check — a malformed <<if>> with an operator but no
                        // right-hand side would panic with an index error here.
                        compare = match pseudo_node.childs[2].as_default().category {
                            TokInt {ref value, .. } => {
                                *value as u8
                            },
                            TokBoolean {ref value, .. } => {
                                boolstr_to_u8(&*value)
                            }, _ => panic!("Unsupported assign value!")
                        };
                    }
                    let symbol_id = manager.symbol_table.get_symbol_id(&*var_name);
                    // Fresh id for this if/else/endif triple; popped again by
                    // the TokMacroEndIf arm.
                    let if_id = manager.ids_if.start_next();
                    let if_label = format!("if_{}", if_id);
                    let after_if_label = format!("after_if_{}", if_id);
                    let after_else_label = format!("after_else_{}", if_id);
                    let mut code: Vec<ZOP> = vec![
                        ZOP::JE{local_var_id: symbol_id, equal_to_const: compare, jump_to_label: if_label.to_string()},
                        ZOP::Jump{jump_to_label: after_if_label.to_string()},
                        ZOP::Label{name: if_label.to_string()}
                    ];
                    // childs[0] was the condition; the rest is the if-body.
                    for i in 1..t.childs.len() {
                        for instr in gen_zcode(&t.childs[i], out, manager) {
                            code.push(instr);
                        }
                    }
                    // Taken branch skips the else part; the else part starts
                    // right after after_if_label.
                    code.push(ZOP::Jump{jump_to_label: after_else_label});
                    code.push(ZOP::Label{name: after_if_label});
                    code
                },
                // <<else>>: just its children; labels are handled by the
                // if/endif arms.
                &TokMacroElse { .. } => {
                    let mut code: Vec<ZOP> = vec![];
                    for child in &t.childs {
                        for instr in gen_zcode(child, out, manager) {
                            code.push(instr);
                        }
                    }
                    code
                },
                // <<endif>>: closes the innermost open if by emitting its
                // after_else label (pairs with start_next above).
                &TokMacroEndIf { .. } => {
                    let after_else_label = format!("after_else_{}", manager.ids_if.pop_id());
                    vec![ZOP::Label{name: after_else_label}]
                },
                // Variable interpolation; strings are not printable yet.
                &TokMacroContentVar {ref var_name, .. } => {
                    let var_id = manager.symbol_table.get_symbol_id(&*var_name);
                    match manager.symbol_table.get_symbol_type(&*var_name) {
                        Type::Integer => {
                            vec![ZOP::PrintNumVar{variable: var_id}]
                        },
                        Type::String => {
                            vec![]
                        },
                        Type::Bool => {
                            vec![ZOP::PrintNumVar{variable: var_id}]
                        }
                    }
                },
                _ => {
                    debug!("no match if");
                    vec![]
                }
            };
            // Common tail for formatting/link tokens: generate the children
            // under the modified style, then reset and restore the ambient
            // style from the manager.
            if set_formatting {
                for child in &t.childs {
                    for instr in gen_zcode(child, out, manager) {
                        code.push(instr);
                    }
                }
                code.push(ZOP::SetTextStyle{bold: false, reverse: false, monospace: false, italic: false});
                let state = manager.format_state;
                code.push(ZOP::SetTextStyle{bold: state.bold, reverse: state.inverted, monospace: state.mono, italic: state.italic});
            }
            code
        }
    }
}
/// Maps the textual boolean `"true"` to 1; every other string (including
/// `"false"` and anything unrecognised) maps to 0.
fn boolstr_to_u8(string: &str) -> u8 {
    if string == "true" { 1 } else { 0 }
}
impl AST {
    /// Creates an empty tree with the cursor at the root.
    pub fn new() -> AST {
        AST {
            passages: Vec::new(),
            path: Vec::new(),
        }
    }
    /// Adds a new top-level passage and points the cursor at it.
    pub fn add_passage(&mut self, token: Token) {
        self.path.clear();
        // Count before pushing: the count is the new node's index.
        let ast_count_passages = self.count_childs(self.path.to_vec());
        let node = ASTNode::Passage(NodePassage { category: token, childs: Vec::new() });
        self.passages.push(node);
        self.path.push(ast_count_passages);
    }
    /// Adds a child at the current cursor position; with an empty path the
    /// node becomes a new top-level (Default) node.
    pub fn add_child(&mut self, token: Token) {
        if let Some(index) = self.path.first() {
            let mut new_path: Vec<usize> = self.path.to_vec();
            new_path.remove(0);
            self.passages[*index].add_child(new_path, token)
        } else {
            self.passages.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() }));
        }
    }
    /// Adds a child and then descends into it (the count taken before the
    /// add is the new child's index).
    pub fn child_down(&mut self, token: Token) {
        let ast_count_childs = self.count_childs(self.path.to_vec());
        self.add_child(token);
        self.path.push(ast_count_childs);
    }
    /// Adds the first child and descends, then adds the second child and
    /// descends again.
    pub fn two_childs_down(&mut self, child1: Token, child2: Token) {
        self.child_down(child1);
        self.child_down(child2);
    }
    /// Moves the cursor one level up.
    pub fn up(&mut self) {
        self.path.pop();
    }
    /// Moves one level up and adds a child there.
    pub fn up_child(&mut self, token: Token) {
        self.up();
        self.add_child(token);
    }
    /// Moves one level up, adds a child and descends into it.
    pub fn up_child_down(&mut self, token: Token) {
        self.up();
        self.child_down(token);
    }
    /// Generates Z-code for the whole tree and emits it to `out`.
    pub fn to_zcode(& self, out: &mut zfile::Zfile) {
        let mut manager = CodeGenManager::new();
        let mut code: Vec<ZOP> = vec![];
        for child in &self.passages {
            for instr in gen_zcode(child, out, &mut manager) {
                code.push(instr);
            }
        }
        out.emit(code);
    }
    /// Logs the tree (for debugging).
    pub fn print(&self) {
        debug!("Abstract Syntax Tree: ");
        for child in &self.passages {
            child.print(0);
        }
        debug!("");
    }
    /// Counts the children of the node addressed by `path`; an empty path
    /// counts the top-level passages.
    pub fn count_childs(&self, path: Vec<usize>) -> usize {
        if let Some(index) = path.first() {
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            self.passages[*index].count_childs(new_path)
        } else {
            self.passages.len()
        }
    }
}
// ================================
// node types
/// A tree node: either a top-level passage or any other (default) token.
enum ASTNode {
    Default (NodeDefault),
    Passage (NodePassage)
}
/// A passage node: its TokPassage token plus the passage body.
struct NodePassage {
    category: Token,
    pub childs: Vec<ASTNode>,
    /*tags: Vec<ASTNode>*/
}
/// Any non-passage node: its token plus nested children.
struct NodeDefault {
    category: Token,
    childs: Vec<ASTNode>
}
/// State threaded through code generation: if-label ids, the variable
/// symbol table and the ambient text-formatting state.
struct CodeGenManager<'a> {
    ids_if: IdentifierProvider,
    symbol_table: SymbolTable<'a>,
    format_state: FormattingState
}
/// Hands out sequential u32 ids and tracks the currently-open ones on a
/// stack (used to pair if/endif labels).
struct IdentifierProvider {
    current_id: u32,
    id_stack: Vec<u32>
}
/// Maps story variable names to a (z-machine variable id, type) pair.
struct SymbolTable<'a> {
    current_id: u8,
    symbol_map: HashMap<&'a str, (u8, Type)>
}
impl <'a> CodeGenManager<'a> {
    /// Creates a manager with fresh id/symbol state and all formatting
    /// flags off.
    pub fn new() -> CodeGenManager<'a> {
        CodeGenManager {
            ids_if: IdentifierProvider::new(),
            symbol_table: SymbolTable::new(),
            format_state: FormattingState {bold: false, italic: false, mono: false, inverted: false}
        }
    }
}
impl IdentifierProvider {
    /// Creates a provider whose ids begin at zero with no open ids.
    pub fn new() -> IdentifierProvider {
        IdentifierProvider {
            current_id: 0,
            id_stack: Vec::new()
        }
    }
    /// Hands out the next fresh id, remembers it on the open-id stack and
    /// returns it.
    pub fn start_next(&mut self) -> u32 {
        let fresh = self.current_id;
        self.current_id = fresh + 1;
        self.id_stack.push(fresh);
        fresh
    }
    /// Closes and returns the most recently started id; panics when no id
    /// is open.
    pub fn pop_id(&mut self) -> u32 {
        let top = self.id_stack.pop();
        top.unwrap()
    }
}
impl <'a> SymbolTable<'a> {
    /// Creates an empty table. Ids start at 25 — NOTE(review): presumably
    /// the lower z-machine variable slots are reserved by the runtime;
    /// confirm against the backend.
    pub fn new() -> SymbolTable<'a> {
        SymbolTable {
            current_id: 25,
            symbol_map: HashMap::<&str, (u8,Type)>::new()
        }
    }
    // Inserts a symbol into the table, assigning it the next free id.
    pub fn insert_new_symbol(&mut self, symbol: &'a str, t: Type) {
        debug!("Assigned id {} to variable {}", self.current_id, symbol);
        self.symbol_map.insert(symbol, (self.current_id,t));
        self.current_id += 1;
    }
    // Checks whether the symbol already exists in the table.
    pub fn is_known_symbol(&self, symbol: &str) -> bool {
        self.symbol_map.contains_key(symbol)
    }
    // Returns the id for a given symbol.
    // (Check is_known_symbol first; panics on an unknown symbol.)
    // Fixed: reads the id field directly instead of cloning the whole
    // (u8, Type) tuple just to drop the Type.
    pub fn get_symbol_id(&self, symbol: &str) -> u8 {
        self.symbol_map.get(symbol).unwrap().0
    }
    // Returns the type for a given symbol (panics on an unknown symbol).
    // Only the Type is cloned, not the whole tuple.
    pub fn get_symbol_type(&self, symbol: &str) -> Type {
        self.symbol_map.get(symbol).unwrap().1.clone()
    }
}
impl ASTNode {
    /// Adds a child at the position described by `path` (one child index
    /// per tree level); an empty path appends directly to this node.
    pub fn add_child(&mut self, path: Vec<usize>, token: Token) {
        if let Some(index) = path.first() {
            // Recurse into the addressed child with the remaining path.
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            match self {
                &mut ASTNode::Default(ref mut node) => node.childs[*index].add_child(new_path, token),
                &mut ASTNode::Passage(ref mut node) => node.childs[*index].add_child(new_path, token),
            }
        } else {
            // End of the path: append here (always as a Default node).
            match self {
                &mut ASTNode::Default(ref mut node) => node.childs.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() } )),
                &mut ASTNode::Passage(ref mut node) => node.childs.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() } )),
            }
        }
    }
    /// Counts the children of the node addressed by `path`; an empty path
    /// counts this node's own children.
    pub fn count_childs(&self, path: Vec<usize>) -> usize {
        if let Some(index) = path.first() {
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            match self {
                &ASTNode::Default(ref node) => node.childs[*index].count_childs(new_path),
                &ASTNode::Passage(ref node) => node.childs[*index].count_childs(new_path),
            }
        } else {
            match self {
                &ASTNode::Default(ref node) => node.childs.len(),
                &ASTNode::Passage(ref node) => node.childs.len(),
            }
        }
    }
    /// Logs this node and, recursively, its children, indented by depth.
    pub fn print(&self, indent: usize) {
        let mut spaces = "".to_string();
        for _ in 0..indent {
            spaces.push_str(" ");
        }
        match self {
            &ASTNode::Passage(ref t) => {
                debug!("{}|- : {:?}", spaces, t.category);
                for child in &t.childs {
                    child.print(indent+2);
                }
            },
            &ASTNode::Default(ref t) => {
                debug!("{}|- : {:?}", spaces, t.category);
                for child in &t.childs {
                    child.print(indent+2);
                }
            }
        }
    }
    /// Unwraps this node as a NodeDefault; panics on a Passage node.
    pub fn as_default(&self) -> &NodeDefault {
        match self {
            &ASTNode::Default(ref def) => def,
            _ => panic!("Node cannot be unwrapped as NodeDefault!")
        }
    }
}
Add AST support for the print macro & random function
- Generate Z-Code for the random function
- Add a temporary variable "int0" to the symbol table
//! The `ast` module contains a lot of useful functionality
//! to create and walk through the ast (abstract syntaxtree)
use frontend::lexer::Token;
use frontend::lexer::Token::*;
use backend::zcode::zfile;
use backend::zcode::zfile::{FormattingState, ZOP};
use std::collections::HashMap;
//==============================
// ast
/// The value types a story variable can hold in the symbol table.
#[derive(Clone)]
enum Type{
    Bool,
    Integer,
    String,
}
/// The abstract syntax tree of a story: a list of passage subtrees plus
/// the cursor path (indices from the root) used while building the tree.
pub struct AST {
    passages: Vec<ASTNode>,
    path: Vec<usize>,
}
/// add zcode based on tokens
fn gen_zcode<'a>(node: &'a ASTNode, mut out: &mut zfile::Zfile, mut manager: &mut CodeGenManager<'a>) -> Vec<ZOP> {
let mut state_copy = manager.format_state.clone();
let mut set_formatting = false;
match node {
&ASTNode::Passage(ref node) => {
let mut code: Vec<ZOP> = vec![];
match &node.category {
&TokPassage {ref name, .. } => {
code.push(ZOP::Routine{name: name.to_string(), count_variables: 0});
},
_ => {
debug!("no match 1");
}
};
for child in &node.childs {
for instr in gen_zcode(child, out, manager) {
code.push(instr);
}
}
code.push(ZOP::Newline);
code.push(ZOP::Call1N{jump_to_label: "system_check_links".to_string()});
code
},
&ASTNode::Default(ref t) => {
let mut code: Vec<ZOP> = match &t.category {
&TokText {ref text, .. } => {
vec![ZOP::PrintOps{text: text.to_string()}]
},
&TokNewLine { .. } => {
vec![ZOP::Newline]
},
&TokFormatBoldStart { .. } => {
state_copy.bold = true;
set_formatting = true;
vec![ZOP::SetTextStyle{bold: state_copy.bold, reverse: state_copy.inverted, monospace: state_copy.mono, italic: state_copy.italic}]
},
&TokFormatMonoStart { .. } => {
state_copy.mono = true;
set_formatting = true;
vec![ZOP::SetTextStyle{bold: state_copy.bold, reverse: state_copy.inverted, monospace: state_copy.mono, italic: state_copy.italic}]
},
&TokFormatItalicStart { .. } => {
state_copy.italic = true;
set_formatting = true;
vec![ZOP::SetTextStyle{bold: state_copy.bold, reverse: state_copy.inverted, monospace: state_copy.mono, italic: state_copy.italic}]
},
&TokPassageLink {ref display_name, ref passage_name, .. } => {
set_formatting = true;
vec![
ZOP::Call2NWithAddress{jump_to_label: "system_add_link".to_string(), address: passage_name.to_string()},
ZOP::SetColor{foreground: 8, background: 2},
ZOP::Print{text: format!("{}[", display_name)},
ZOP::PrintNumVar{variable: 16},
ZOP::Print{text: "]".to_string()},
ZOP::SetColor{foreground: 9, background: 2},
]
},
&TokAssign {ref var_name, ref op_name, .. } => {
if op_name == "=" || op_name == "to" {
if t.childs.len() == 1 {
match t.childs[0].as_default().category {
TokInt {value, .. } => {
if !manager.symbol_table.is_known_symbol(var_name) {
manager.symbol_table.insert_new_symbol(&var_name, Type::Integer);
}
let symbol_id = manager.symbol_table.get_symbol_id(var_name);
vec![ZOP::StoreU16{variable: symbol_id, value: value as u16}]
},
TokBoolean {ref value, .. } => {
if !manager.symbol_table.is_known_symbol(var_name) {
manager.symbol_table.insert_new_symbol(&var_name, Type::Bool);
}
let symbol_id = manager.symbol_table.get_symbol_id(var_name);
vec![ZOP::StoreU8{variable: symbol_id, value: boolstr_to_u8(&*value)}]
},
_ => { vec![] }
}
} else {
debug!("Assign Expression currently not supported.");
vec![]
}
} else { vec![] }
},
&TokMacroIf { .. } => {
if t.childs.len() < 2 {
panic!("Unsupported if-expression!");
}
let mut compare: u8 = 1;
// check if the first node is a pseudonode
let pseudo_node = match t.childs[0].as_default().category {
TokPseudo => t.childs[0].as_default(),
_ => panic!("Unsupported if-expression!")
};
// Check if first token is variable
let var_name = match pseudo_node.childs[0].as_default().category {
TokVariable {ref name, .. } => name,
_ => panic!("Unsupported if-expression!")
};
if pseudo_node.childs.len() > 1 {
// Check if second token is compare operator
match pseudo_node.childs[1].as_default().category {
TokCompOp {ref op_name, .. } => {
match &*(*op_name) {
"==" | "is" => {} ,
_ => panic!("Unsupported Compare Operator!")
}
}, _ => panic!("Unsupported if-expression!")
}
// Check if third token is number
compare = match pseudo_node.childs[2].as_default().category {
TokInt {ref value, .. } => {
*value as u8
},
TokBoolean {ref value, .. } => {
boolstr_to_u8(&*value)
}, _ => panic!("Unsupported assign value!")
};
}
let symbol_id = manager.symbol_table.get_symbol_id(&*var_name);
let if_id = manager.ids_if.start_next();
let if_label = format!("if_{}", if_id);
let after_if_label = format!("after_if_{}", if_id);
let after_else_label = format!("after_else_{}", if_id);
let mut code: Vec<ZOP> = vec![
ZOP::JE{local_var_id: symbol_id, equal_to_const: compare, jump_to_label: if_label.to_string()},
ZOP::Jump{jump_to_label: after_if_label.to_string()},
ZOP::Label{name: if_label.to_string()}
];
for i in 1..t.childs.len() {
for instr in gen_zcode(&t.childs[i], out, manager) {
code.push(instr);
}
}
code.push(ZOP::Jump{jump_to_label: after_else_label});
code.push(ZOP::Label{name: after_if_label});
code
},
&TokMacroElse { .. } => {
let mut code: Vec<ZOP> = vec![];
for child in &t.childs {
for instr in gen_zcode(child, out, manager) {
code.push(instr);
}
}
code
},
&TokMacroEndIf { .. } => {
let after_else_label = format!("after_else_{}", manager.ids_if.pop_id());
vec![ZOP::Label{name: after_else_label}]
},
&TokMacroPrint { .. } => {
if t.childs.len() != 1 {
panic!("Doesn't support print with 0 or more than one argument");
}
let mut code: Vec<ZOP> = vec![];
let child = &t.childs[0];
match child.as_default().category {
TokInt {ref value, ..} => {
code.push(ZOP::Print{text: format!("{}", value)},);
},
TokBoolean {ref value, ..} => {
code.push(ZOP::Print{text: format!("{}", value)},);
},
TokFunction {ref name, ..} => {
if *name == "random" {
let args = &child.as_default().childs;
if args.len() != 2 {
panic!("Function random only supports 2 args");
}
if args[0].as_default().childs.len() != 1 ||
args[0].as_default().childs.len() != 1 {
panic!("Unsupported Expression");
}
let from = args[0].as_default().childs[0].as_default();
let to = args[1].as_default().childs[0].as_default();
let mut from_value;
let mut to_value;
if let TokInt {value, ..} = from.category {
from_value = value as u16;
} else {
panic!("Unsupported Expression");
};
if let TokInt {value, ..} = to.category {
to_value = value as u16;
} else {
panic!("Unsupported Expression");
};
let range = (to_value - from_value + 1) as u8;
let var = manager.symbol_table.get_symbol_id("int0");
code.push(ZOP::Random {range: range, variable: var} );
if from_value <= 0 {
code.push(ZOP::Sub {variable1: var, sub_const: 1, variable2: var} );
} else {
code.push(ZOP::Add {variable1: var, add_const: from_value - 1, variable2: var} );
}
code.push(ZOP::PrintNumVar {variable: var} );
} else {
panic!("Unsupported function '{}'", name);
}
},
_ => {
panic!("Unsupported Expression");
}
};
code
}
&TokMacroContentVar {ref var_name, .. } => {
let var_id = manager.symbol_table.get_symbol_id(&*var_name);
match manager.symbol_table.get_symbol_type(&*var_name) {
Type::Integer => {
vec![ZOP::PrintNumVar{variable: var_id}]
},
Type::String => {
vec![]
},
Type::Bool => {
vec![ZOP::PrintNumVar{variable: var_id}]
}
}
},
_ => {
debug!("no match if");
vec![]
}
};
if set_formatting {
for child in &t.childs {
for instr in gen_zcode(child, out, manager) {
code.push(instr);
}
}
code.push(ZOP::SetTextStyle{bold: false, reverse: false, monospace: false, italic: false});
let state = manager.format_state;
code.push(ZOP::SetTextStyle{bold: state.bold, reverse: state.inverted, monospace: state.mono, italic: state.italic});
}
code
}
}
}
/// Maps a boolean literal to its Z-machine representation:
/// `"true"` becomes 1, anything else becomes 0.
fn boolstr_to_u8(string: &str) -> u8 {
    if string == "true" { 1 } else { 0 }
}
impl AST {
    /// Creates an empty tree with no passages and the cursor at the root.
    pub fn new() -> AST {
        AST {
            passages: Vec::new(),
            path: Vec::new(),
        }
    }

    /// Adds a new top-level passage node and moves the cursor to it.
    pub fn add_passage(&mut self, token: Token) {
        self.path.clear();
        // Index the new passage will get once pushed.
        let ast_count_passages = self.count_childs(self.path.to_vec());
        let node = ASTNode::Passage(NodePassage { category: token, childs: Vec::new() });
        self.passages.push(node);
        self.path.push(ast_count_passages);
    }

    /// Adds a child carrying `token` at the current cursor path.
    pub fn add_child(&mut self, token: Token) {
        if let Some(index) = self.path.first() {
            let mut new_path: Vec<usize> = self.path.to_vec();
            new_path.remove(0);
            self.passages[*index].add_child(new_path, token)
        } else {
            // Empty path: the token becomes a new top-level node.
            self.passages.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() }));
        }
    }

    /// Adds a child and moves the cursor down into it.
    pub fn child_down(&mut self, token: Token) {
        // Index the new child will get once added.
        let ast_count_childs = self.count_childs(self.path.to_vec());
        self.add_child(token);
        self.path.push(ast_count_childs);
    }

    /// Adds `child1`, descends into it, then adds `child2` and descends again.
    pub fn two_childs_down(&mut self, child1: Token, child2: Token) {
        self.child_down(child1);
        self.child_down(child2);
    }

    /// Moves the cursor one level up.
    pub fn up(&mut self) {
        self.path.pop();
    }

    /// Moves the cursor one level up, then adds a sibling child.
    pub fn up_child(&mut self, token: Token) {
        self.up();
        self.add_child(token);
    }

    /// Moves the cursor one level up, adds a child and descends into it.
    pub fn up_child_down(&mut self, token: Token) {
        self.up();
        self.child_down(token);
    }

    /// Converts the whole tree to Z-code and emits it into `out`.
    pub fn to_zcode(& self, out: &mut zfile::Zfile) {
        let mut manager = CodeGenManager::new();
        // Scratch variable for internal calculations (e.g. random()).
        manager.symbol_table.insert_new_symbol("int0", Type::Integer);
        let mut code: Vec<ZOP> = vec![];
        for child in &self.passages {
            for instr in gen_zcode(child, out, &mut manager) {
                code.push(instr);
            }
        }
        out.emit(code);
    }

    /// Debug-prints the whole tree.
    pub fn print(&self) {
        debug!("Abstract Syntax Tree: ");
        for child in &self.passages {
            child.print(0);
        }
        debug!("");
    }

    /// Returns the number of children of the node addressed by `path`;
    /// an empty path counts the top-level passages.
    pub fn count_childs(&self, path: Vec<usize>) -> usize {
        if let Some(index) = path.first() {
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            self.passages[*index].count_childs(new_path)
        } else {
            self.passages.len()
        }
    }
}
// ================================
// node types

/// A node of the AST: either a plain content node or a passage root.
enum ASTNode {
    Default (NodeDefault),
    Passage (NodePassage)
}

/// Top-level passage node; its children form the passage body.
struct NodePassage {
    category: Token,
    pub childs: Vec<ASTNode>,
    /*tags: Vec<ASTNode>*/
}

/// Ordinary AST node holding the token it was created from.
struct NodeDefault {
    category: Token,
    childs: Vec<ASTNode>
}

/// State shared across code generation: if-label ids, the variable
/// symbol table and the currently active text formatting.
struct CodeGenManager<'a> {
    ids_if: IdentifierProvider,
    symbol_table: SymbolTable<'a>,
    format_state: FormattingState
}

/// Hands out unique, stack-scoped ids (used for if/else labels).
struct IdentifierProvider {
    current_id: u32,
    id_stack: Vec<u32>
}

/// Maps variable names to Z-machine variable ids and their type.
struct SymbolTable<'a> {
    current_id: u8,
    symbol_map: HashMap<&'a str, (u8, Type)>
}
impl <'a> CodeGenManager<'a> {
    /// Creates a manager with a fresh if-id provider, an empty symbol
    /// table and all text-formatting flags cleared.
    pub fn new() -> CodeGenManager<'a> {
        let default_format = FormattingState {bold: false, italic: false, mono: false, inverted: false};
        CodeGenManager {
            format_state: default_format,
            ids_if: IdentifierProvider::new(),
            symbol_table: SymbolTable::new(),
        }
    }
}
impl IdentifierProvider {
    /// Creates a provider whose ids start at 0.
    pub fn new() -> IdentifierProvider {
        IdentifierProvider { current_id: 0, id_stack: Vec::new() }
    }

    /// Reserves the next fresh id, remembers it on the stack and returns it.
    pub fn start_next(&mut self) -> u32 {
        let fresh = self.current_id;
        self.current_id = fresh + 1;
        self.id_stack.push(fresh);
        fresh
    }

    /// Removes and returns the most recently started id.
    /// Panics if no id is currently open.
    pub fn pop_id(&mut self) -> u32 {
        self.id_stack.pop().unwrap()
    }
}
impl <'a> SymbolTable<'a> {
    /// Creates an empty table. Ids start at 25; lower variable numbers
    /// are presumably reserved by the runtime -- confirm against zfile.
    pub fn new() -> SymbolTable<'a> {
        SymbolTable { current_id: 25, symbol_map: HashMap::new() }
    }

    /// Registers `symbol` with type `t` under a freshly assigned id.
    pub fn insert_new_symbol(&mut self, symbol: &'a str, t: Type) {
        debug!("Assigned id {} to variable {}", self.current_id, symbol);
        let id = self.current_id;
        self.symbol_map.insert(symbol, (id, t));
        self.current_id = id + 1;
    }

    /// True if `symbol` was registered before.
    pub fn is_known_symbol(&self, symbol: &str) -> bool {
        self.symbol_map.contains_key(symbol)
    }

    /// Id of a known symbol; panics for unknown ones.
    pub fn get_symbol_id(&self, symbol: &str) -> u8 {
        self.symbol_map.get(symbol).unwrap().0
    }

    /// Type of a known symbol; panics for unknown ones.
    pub fn get_symbol_type(&self, symbol: &str) -> Type {
        self.symbol_map.get(symbol).unwrap().1.clone()
    }
}
impl ASTNode {
    /// Appends a new `Default` node (carrying `token`) as a child of the node
    /// addressed by `path`; an empty path attaches it directly under `self`.
    pub fn add_child(&mut self, path: Vec<usize>, token: Token) {
        if let Some(index) = path.first() {
            // Descend one level: strip the leading index and recurse into that child.
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            match self {
                &mut ASTNode::Default(ref mut node) => node.childs[*index].add_child(new_path, token),
                &mut ASTNode::Passage(ref mut node) => node.childs[*index].add_child(new_path, token),
            }
        } else {
            // Path exhausted: attach the token here as a new leaf.
            match self {
                &mut ASTNode::Default(ref mut node) => node.childs.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() } )),
                &mut ASTNode::Passage(ref mut node) => node.childs.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() } )),
            }
        }
    }

    /// Returns the number of children of the node addressed by `path`
    /// (an empty path counts the children of `self`).
    pub fn count_childs(&self, path: Vec<usize>) -> usize {
        if let Some(index) = path.first() {
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            match self {
                &ASTNode::Default(ref node) => node.childs[*index].count_childs(new_path),
                &ASTNode::Passage(ref node) => node.childs[*index].count_childs(new_path),
            }
        } else {
            match self {
                &ASTNode::Default(ref node) => node.childs.len(),
                &ASTNode::Passage(ref node) => node.childs.len(),
            }
        }
    }

    /// Debug-prints this node and, recursively, its subtree;
    /// `indent` is the number of leading spaces for this level.
    pub fn print(&self, indent: usize) {
        let mut spaces = "".to_string();
        for _ in 0..indent {
            spaces.push_str(" ");
        }
        match self {
            &ASTNode::Passage(ref t) => {
                debug!("{}|- : {:?}", spaces, t.category);
                for child in &t.childs {
                    child.print(indent+2);
                }
            },
            &ASTNode::Default(ref t) => {
                debug!("{}|- : {:?}", spaces, t.category);
                for child in &t.childs {
                    child.print(indent+2);
                }
            }
        }
    }

    /// Borrows the inner `NodeDefault`; panics if this is a `Passage` node.
    pub fn as_default(&self) -> &NodeDefault {
        match self {
            &ASTNode::Default(ref def) => def,
            _ => panic!("Node cannot be unwrapped as NodeDefault!")
        }
    }
}
|
//! The `ast` module contains a lot of useful functionality
//! to create and walk through the ast (abstract syntaxtree)
use frontend::lexer::Token;
use backend::zcode::zfile;
use backend::zcode::zfile::{FormattingState};
use std::collections::HashMap;
//==============================
// ast
/// The abstract syntax tree of a story: a list of passage subtrees.
pub struct AST {
    passages: Vec<ASTNode>
}
/// add zcode based on tokens
/// Walks `node` recursively and emits the corresponding Z-code directly
/// into `out`. `state` is the text-formatting state inherited from the
/// parent node; `manager` owns the if-label ids and the symbol table.
fn gen_zcode<'a>(node: &'a ASTNode, state: FormattingState, mut out: &mut zfile::Zfile, mut manager: &mut CodeGenManager<'a>) {
    // Children inherit a copy of the formatting state; changes made by a
    // formatting token must not leak back to siblings.
    let mut state_copy = state.clone();
    let mut set_formatting = false;
    match node {
        &ASTNode::Passage(ref node) => {
            match &node.category {
                &Token::TokPassageName(ref name) => {
                    // Every passage becomes its own routine.
                    out.routine(name, 0);
                },
                _ => {
                    debug!("no match 1");
                }
            };
            for child in &node.childs {
                gen_zcode(child, state_copy, out, manager);
            }
            // After the passage body, show the links collected at runtime.
            out.op_newline();
            out.op_call_1n("system_check_links");
        },
        &ASTNode::Default(ref t) => {
            match &t.category {
                &Token::TokText(ref s) => {
                    out.gen_print_ops(s);
                },
                &Token::TokNewLine => {
                    out.op_newline();
                },
                &Token::TokFormatBoldStart => {
                    state_copy.bold = true;
                    out.op_set_text_style(state_copy.bold, state_copy.inverted, state_copy.mono, state_copy.italic);
                    set_formatting = true;
                },
                &Token::TokFormatItalicStart => {
                    state_copy.italic = true;
                    out.op_set_text_style(state_copy.bold, state_copy.inverted, state_copy.mono, state_copy.italic);
                    set_formatting = true;
                },
                &Token::TokPassageLink (ref name, ref link) => {
                    // Register the link target, then print "name[n]" inverted.
                    out.op_call_2n_with_address("system_add_link", link);
                    out.op_set_text_style(state_copy.bold, true, state_copy.mono, state_copy.italic);
                    let link_text = format!("{}[", name);
                    out.op_print(&link_text);
                    // NOTE(review): variable 16 looks like the runtime's link
                    // counter -- confirm against the zfile runtime code.
                    out.op_print_num_var(16);
                    out.op_print("]");
                    out.op_set_text_style(state_copy.bold, state_copy.inverted, state_copy.mono, state_copy.italic);
                    set_formatting = true;
                },
                &Token::TokAssign(ref var, ref operator) => {
                    // Only plain assignment of a literal is supported.
                    if operator == "=" || operator == "to" {
                        if !manager.symbol_table.is_known_symbol(var) {
                            manager.symbol_table.insert_new_symbol(&var);
                        }
                        let symbol_id = manager.symbol_table.get_symbol_id(var);
                        if t.childs.len() == 1 {
                            match t.childs[0].as_default().category {
                                Token::TokInt(value) => {
                                    out.op_store_u16(symbol_id, value as u16);
                                },
                                Token::TokBoolean(ref bool_val) => {
                                    out.op_store_u8(symbol_id, boolstr_to_u8(&*bool_val));
                                }
                                _ => { }
                            }
                        } else {
                            debug!("Assign Expression currently not supported.");
                        }
                    }
                },
                &Token::TokIf => {
                    // Expected shape: childs[0] is a pseudo node holding
                    // [variable, (comp-op, literal)?]; childs[1..] is the body.
                    if t.childs.len() < 2 {
                        panic!("Unsupported if-expression!");
                    }
                    // Without an explicit comparison, test against 1 (truthy).
                    let mut compare: u8 = 1;
                    // check if the first node is a pseudonode
                    let pseudo_node = match t.childs[0].as_default().category {
                        Token::TokPseudo => t.childs[0].as_default(),
                        _ => panic!("Unsupported if-expression!")
                    };
                    // Check if first token is variable
                    let var_name = match pseudo_node.childs[0].as_default().category {
                        Token::TokVariable(ref var) => var,
                        _ => panic!("Unsupported if-expression!")
                    };
                    if pseudo_node.childs.len() > 1 {
                        // Only equality comparisons are implemented.
                        match pseudo_node.childs[1].as_default().category {
                            Token::TokCompOp(ref op) => {
                                match &*(*op) {
                                    "==" | "is" => {} ,
                                    _ => panic!("Unsupported Compare Operator!")
                                }
                            }, _ => panic!("Unsupported if-expression!")
                        }
                        // Check if third token is number
                        compare = match pseudo_node.childs[2].as_default().category {
                            Token::TokInt(ref value) => {
                                *value as u8
                            },
                            Token::TokBoolean(ref bool_val) => {
                                boolstr_to_u8(&*bool_val)
                            }, _ => panic!("Unsupported assign value!")
                        };
                    }
                    let symbol_id = manager.symbol_table.get_symbol_id(&*var_name);
                    // Label layout:  JE -> if_N | jump after_if_N;
                    // the matching endif emits after_else_N.
                    let if_id = manager.ids_if.start_next();
                    let if_label = &format!("if_{}", if_id);
                    let after_if_label = &format!("after_if_{}", if_id);
                    let after_else_label = &format!("after_else_{}", if_id);
                    out.op_je(symbol_id, compare, if_label);
                    out.op_jump(after_if_label);
                    out.label(if_label);
                    // childs[0] is the condition; the body starts at 1.
                    for i in 1..t.childs.len() {
                        gen_zcode(&t.childs[i], state_copy, out, manager)
                    }
                    out.op_jump(after_else_label);
                    out.label(after_if_label);
                },
                &Token::TokElse => {
                    for child in &t.childs {
                        gen_zcode(child, state_copy, out, manager)
                    }
                },
                &Token::TokEndIf => {
                    // Closes the label opened by the matching TokIf.
                    let after_else_label = &format!("after_else_{}", manager.ids_if.pop_id());
                    out.label(after_else_label);
                },
                _ => {
                    debug!("no match 2");
                }
            };
            if set_formatting {
                // Formatting tokens apply to their children: emit them, then
                // reset and restore the parent's formatting state.
                for child in &t.childs {
                    gen_zcode(child, state_copy, out, manager);
                }
                out.op_set_text_style(false, false, false, false);
                out.op_set_text_style(state.bold, state.inverted, state.mono, state.italic);
            }
        }
    };
}
/// Maps a boolean literal to its Z-machine representation:
/// only `"true"` yields 1, every other string yields 0.
fn boolstr_to_u8(string: &str) -> u8 {
    match string == "true" {
        true => 1,
        false => 0,
    }
}
impl AST {
    /// Converts the whole tree to Z-code, emitting into `out`; formatting
    /// starts with all style flags cleared.
    pub fn to_zcode(& self, out: &mut zfile::Zfile) {
        let mut manager = CodeGenManager::new();
        let state = FormattingState {bold: false, italic: false, mono: false, inverted: false};
        for child in &self.passages {
            gen_zcode(child, state, out, &mut manager);
        }
    }

    /// Creates an empty tree with no passages.
    pub fn new() -> AST {
        AST {
            passages: Vec::new()
        }
    }

    /// Debug-prints the whole tree.
    pub fn print(&self) {
        debug!("Abstract Syntax Tree: ");
        for child in &self.passages {
            child.print(0);
        }
        debug!("");
    }

    /// Adds a new top-level passage node.
    pub fn add_passage(&mut self, token: Token) {
        let node = ASTNode::Passage(NodePassage { category: token, childs: Vec::new() });
        self.passages.push(node);
    }

    /// Adds a child carrying `token` at the node addressed by `path`;
    /// an empty path appends a new top-level node.
    pub fn add_child(&mut self, path: &Vec<usize>, token: Token) {
        if let Some(index) = path.first() {
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            self.passages[*index].add_child(new_path, token)
        } else {
            self.passages.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() }));
        }
    }

    /// Returns the number of children of the node addressed by `path`;
    /// an empty path counts the top-level passages.
    pub fn count_childs(&self, path: Vec<usize>) -> usize {
        if let Some(index) = path.first() {
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            self.passages[*index].count_childs(new_path)
        } else {
            self.passages.len()
        }
    }
}
// ================================
// node types

/// A node of the AST: either a plain content node or a passage root.
enum ASTNode {
    Default (NodeDefault),
    Passage (NodePassage)
}

/// Top-level passage node; its children form the passage body.
struct NodePassage {
    category: Token,
    pub childs: Vec<ASTNode>,
    /*tags: Vec<ASTNode>*/
}

/// Ordinary AST node holding the token it was created from.
struct NodeDefault {
    category: Token,
    childs: Vec<ASTNode>
}

/// State shared across code generation: if-label ids and the symbol table.
struct CodeGenManager<'a> {
    ids_if: IdentifierProvider,
    symbol_table: SymbolTable<'a>
}

/// Hands out unique, stack-scoped ids (used for if/else labels).
struct IdentifierProvider {
    current_id: u32,
    id_stack: Vec<u32>
}

/// Maps variable names to Z-machine variable ids.
struct SymbolTable<'a> {
    current_id: u8,
    symbol_map: HashMap<&'a str, u8>
}
impl <'a> CodeGenManager<'a> {
    /// Creates a manager with a fresh if-id provider and an empty symbol table.
    pub fn new() -> CodeGenManager<'a> {
        let ids = IdentifierProvider::new();
        let table = SymbolTable::new();
        CodeGenManager { ids_if: ids, symbol_table: table }
    }
}
impl IdentifierProvider {
    /// Creates a provider whose ids start at 0.
    pub fn new() -> IdentifierProvider {
        IdentifierProvider { current_id: 0, id_stack: Vec::new() }
    }

    /// Reserves the next fresh id, remembers it on the stack and returns it.
    pub fn start_next(&mut self) -> u32 {
        let fresh = self.current_id;
        self.current_id = fresh + 1;
        self.id_stack.push(fresh);
        fresh
    }

    /// Removes and returns the most recently started id.
    /// Panics if no id is currently open.
    pub fn pop_id(&mut self) -> u32 {
        self.id_stack.pop().unwrap()
    }
}
impl <'a> SymbolTable<'a> {
    /// Creates an empty table. Ids start at 25; lower variable numbers
    /// are presumably reserved by the runtime -- confirm against zfile.
    pub fn new() -> SymbolTable<'a> {
        SymbolTable { current_id: 25, symbol_map: HashMap::new() }
    }

    /// Registers `symbol` under a freshly assigned id.
    pub fn insert_new_symbol(&mut self, symbol: &'a str) {
        debug!("Assigned id {} to variable {}", self.current_id, symbol);
        let id = self.current_id;
        self.symbol_map.insert(symbol, id);
        self.current_id = id + 1;
    }

    /// True if `symbol` was registered before.
    pub fn is_known_symbol(&self, symbol: &str) -> bool {
        self.symbol_map.contains_key(symbol)
    }

    /// Id of a known symbol; panics for unknown ones.
    pub fn get_symbol_id(&self, symbol: &str) -> u8 {
        let id = self.symbol_map.get(symbol).unwrap();
        *id
    }
}
impl ASTNode {
    /// Appends a new `Default` node (carrying `token`) as a child of the node
    /// addressed by `path`; an empty path attaches it directly under `self`.
    pub fn add_child(&mut self, path: Vec<usize>, token: Token) {
        if let Some(index) = path.first() {
            // Descend one level: strip the leading index and recurse into that child.
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            match self {
                &mut ASTNode::Default(ref mut node) => node.childs[*index].add_child(new_path, token),
                &mut ASTNode::Passage(ref mut node) => node.childs[*index].add_child(new_path, token),
            }
        } else {
            // Path exhausted: attach the token here as a new leaf.
            match self {
                &mut ASTNode::Default(ref mut node) => node.childs.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() } )),
                &mut ASTNode::Passage(ref mut node) => node.childs.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() } )),
            }
        }
    }

    /// Returns the number of children of the node addressed by `path`
    /// (an empty path counts the children of `self`).
    pub fn count_childs(&self, path: Vec<usize>) -> usize {
        if let Some(index) = path.first() {
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            match self {
                &ASTNode::Default(ref node) => node.childs[*index].count_childs(new_path),
                &ASTNode::Passage(ref node) => node.childs[*index].count_childs(new_path),
            }
        } else {
            match self {
                &ASTNode::Default(ref node) => node.childs.len(),
                &ASTNode::Passage(ref node) => node.childs.len(),
            }
        }
    }

    /// Debug-prints this node and, recursively, its subtree;
    /// `indent` is the number of leading spaces for this level.
    pub fn print(&self, indent: usize) {
        let mut spaces = "".to_string();
        for _ in 0..indent {
            spaces.push_str(" ");
        }
        match self {
            &ASTNode::Passage(ref t) => {
                debug!("{}|- : {:?}", spaces, t.category);
                for child in &t.childs {
                    child.print(indent+2);
                }
            },
            &ASTNode::Default(ref t) => {
                debug!("{}|- : {:?}", spaces, t.category);
                for child in &t.childs {
                    child.print(indent+2);
                }
            }
        }
    }

    /// Borrows the inner `NodeDefault`; panics if this is a `Passage` node.
    pub fn as_default(&self) -> &NodeDefault {
        match self {
            &ASTNode::Default(ref def) => def,
            _ => panic!("Node cannot be unwrapped as NodeDefault!")
        }
    }
}
Moved formatting state into CodeGenManager
//! The `ast` module contains a lot of useful functionality
//! to create and walk through the ast (abstract syntaxtree)
use frontend::lexer::Token;
use backend::zcode::zfile;
use backend::zcode::zfile::{FormattingState};
use std::collections::HashMap;
//==============================
// ast
/// The abstract syntax tree of a story: a list of passage subtrees.
pub struct AST {
    passages: Vec<ASTNode>
}
/// add zcode based on tokens
/// Walks `node` recursively and emits the corresponding Z-code directly
/// into `out`. `manager` owns the if-label ids, the symbol table and the
/// currently active text-formatting state.
fn gen_zcode<'a>(node: &'a ASTNode, mut out: &mut zfile::Zfile, mut manager: &mut CodeGenManager<'a>) {
    // Children inherit a copy of the formatting state; changes made by a
    // formatting token must not leak back to siblings.
    let mut state_copy = manager.format_state.clone();
    let mut set_formatting = false;
    match node {
        &ASTNode::Passage(ref node) => {
            match &node.category {
                &Token::TokPassageName(ref name) => {
                    // Every passage becomes its own routine.
                    out.routine(name, 0);
                },
                _ => {
                    debug!("no match 1");
                }
            };
            for child in &node.childs {
                gen_zcode(child, out, manager)
            }
            // After the passage body, show the links collected at runtime.
            out.op_newline();
            out.op_call_1n("system_check_links");
        },
        &ASTNode::Default(ref t) => {
            match &t.category {
                &Token::TokText(ref s) => {
                    out.gen_print_ops(s);
                },
                &Token::TokNewLine => {
                    out.op_newline();
                },
                &Token::TokFormatBoldStart => {
                    state_copy.bold = true;
                    out.op_set_text_style(state_copy.bold, state_copy.inverted, state_copy.mono, state_copy.italic);
                    set_formatting = true;
                },
                &Token::TokFormatItalicStart => {
                    state_copy.italic = true;
                    out.op_set_text_style(state_copy.bold, state_copy.inverted, state_copy.mono, state_copy.italic);
                    set_formatting = true;
                },
                &Token::TokPassageLink (ref name, ref link) => {
                    // Register the link target, then print "name[n]" inverted.
                    out.op_call_2n_with_address("system_add_link", link);
                    out.op_set_text_style(state_copy.bold, true, state_copy.mono, state_copy.italic);
                    let link_text = format!("{}[", name);
                    out.op_print(&link_text);
                    // NOTE(review): variable 16 looks like the runtime's link
                    // counter -- confirm against the zfile runtime code.
                    out.op_print_num_var(16);
                    out.op_print("]");
                    out.op_set_text_style(state_copy.bold, state_copy.inverted, state_copy.mono, state_copy.italic);
                    set_formatting = true;
                },
                &Token::TokAssign(ref var, ref operator) => {
                    // Only plain assignment of a literal is supported.
                    if operator == "=" || operator == "to" {
                        if !manager.symbol_table.is_known_symbol(var) {
                            manager.symbol_table.insert_new_symbol(&var);
                        }
                        let symbol_id = manager.symbol_table.get_symbol_id(var);
                        if t.childs.len() == 1 {
                            match t.childs[0].as_default().category {
                                Token::TokInt(value) => {
                                    out.op_store_u16(symbol_id, value as u16);
                                },
                                Token::TokBoolean(ref bool_val) => {
                                    out.op_store_u8(symbol_id, boolstr_to_u8(&*bool_val));
                                }
                                _ => { }
                            }
                        } else {
                            debug!("Assign Expression currently not supported.");
                        }
                    }
                },
                &Token::TokIf => {
                    // Expected shape: childs[0] is a pseudo node holding
                    // [variable, (comp-op, literal)?]; childs[1..] is the body.
                    if t.childs.len() < 2 {
                        panic!("Unsupported if-expression!");
                    }
                    // Without an explicit comparison, test against 1 (truthy).
                    let mut compare: u8 = 1;
                    // check if the first node is a pseudonode
                    let pseudo_node = match t.childs[0].as_default().category {
                        Token::TokPseudo => t.childs[0].as_default(),
                        _ => panic!("Unsupported if-expression!")
                    };
                    // Check if first token is variable
                    let var_name = match pseudo_node.childs[0].as_default().category {
                        Token::TokVariable(ref var) => var,
                        _ => panic!("Unsupported if-expression!")
                    };
                    if pseudo_node.childs.len() > 1 {
                        // Only equality comparisons are implemented.
                        match pseudo_node.childs[1].as_default().category {
                            Token::TokCompOp(ref op) => {
                                match &*(*op) {
                                    "==" | "is" => {} ,
                                    _ => panic!("Unsupported Compare Operator!")
                                }
                            }, _ => panic!("Unsupported if-expression!")
                        }
                        // Check if third token is number
                        compare = match pseudo_node.childs[2].as_default().category {
                            Token::TokInt(ref value) => {
                                *value as u8
                            },
                            Token::TokBoolean(ref bool_val) => {
                                boolstr_to_u8(&*bool_val)
                            }, _ => panic!("Unsupported assign value!")
                        };
                    }
                    let symbol_id = manager.symbol_table.get_symbol_id(&*var_name);
                    // Label layout:  JE -> if_N | jump after_if_N;
                    // the matching endif emits after_else_N.
                    let if_id = manager.ids_if.start_next();
                    let if_label = &format!("if_{}", if_id);
                    let after_if_label = &format!("after_if_{}", if_id);
                    let after_else_label = &format!("after_else_{}", if_id);
                    out.op_je(symbol_id, compare, if_label);
                    out.op_jump(after_if_label);
                    out.label(if_label);
                    // childs[0] is the condition; the body starts at 1.
                    for i in 1..t.childs.len() {
                        gen_zcode(&t.childs[i], out, manager)
                    }
                    out.op_jump(after_else_label);
                    out.label(after_if_label);
                },
                &Token::TokElse => {
                    for child in &t.childs {
                        gen_zcode(child, out, manager)
                    }
                },
                &Token::TokEndIf => {
                    // Closes the label opened by the matching TokIf.
                    let after_else_label = &format!("after_else_{}", manager.ids_if.pop_id());
                    out.label(after_else_label);
                },
                _ => {
                    debug!("no match 2");
                }
            };
            if set_formatting {
                // Formatting tokens apply to their children: emit them, then
                // reset and restore the parent's formatting state.
                for child in &t.childs {
                    gen_zcode(child, out, manager)
                }
                out.op_set_text_style(false, false, false, false);
                let state = manager.format_state;
                out.op_set_text_style(state.bold, state.inverted, state.mono, state.italic);
            }
        }
    };
}
/// Maps a boolean literal to its Z-machine representation:
/// `"true"` becomes 1, anything else becomes 0.
fn boolstr_to_u8(string: &str) -> u8 {
    let truthy = string == "true";
    if truthy { 1 } else { 0 }
}
impl AST {
    /// Converts the whole tree to Z-code, emitting into `out`.
    pub fn to_zcode(& self, out: &mut zfile::Zfile) {
        let mut manager = CodeGenManager::new();
        for child in &self.passages {
            gen_zcode(child, out, &mut manager);
        }
    }

    /// Creates an empty tree with no passages.
    pub fn new() -> AST {
        AST {
            passages: Vec::new()
        }
    }

    /// Debug-prints the whole tree.
    pub fn print(&self) {
        debug!("Abstract Syntax Tree: ");
        for child in &self.passages {
            child.print(0);
        }
        debug!("");
    }

    /// Adds a new top-level passage node.
    pub fn add_passage(&mut self, token: Token) {
        let node = ASTNode::Passage(NodePassage { category: token, childs: Vec::new() });
        self.passages.push(node);
    }

    /// Adds a child carrying `token` at the node addressed by `path`;
    /// an empty path appends a new top-level node.
    pub fn add_child(&mut self, path: &Vec<usize>, token: Token) {
        if let Some(index) = path.first() {
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            self.passages[*index].add_child(new_path, token)
        } else {
            self.passages.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() }));
        }
    }

    /// Returns the number of children of the node addressed by `path`;
    /// an empty path counts the top-level passages.
    pub fn count_childs(&self, path: Vec<usize>) -> usize {
        if let Some(index) = path.first() {
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            self.passages[*index].count_childs(new_path)
        } else {
            self.passages.len()
        }
    }
}
// ================================
// node types

/// A node of the AST: either a plain content node or a passage root.
enum ASTNode {
    Default (NodeDefault),
    Passage (NodePassage)
}

/// Top-level passage node; its children form the passage body.
struct NodePassage {
    category: Token,
    pub childs: Vec<ASTNode>,
    /*tags: Vec<ASTNode>*/
}

/// Ordinary AST node holding the token it was created from.
struct NodeDefault {
    category: Token,
    childs: Vec<ASTNode>
}

/// State shared across code generation: if-label ids, the variable
/// symbol table and the currently active text formatting.
struct CodeGenManager<'a> {
    ids_if: IdentifierProvider,
    symbol_table: SymbolTable<'a>,
    format_state: FormattingState
}

/// Hands out unique, stack-scoped ids (used for if/else labels).
struct IdentifierProvider {
    current_id: u32,
    id_stack: Vec<u32>
}

/// Maps variable names to Z-machine variable ids.
struct SymbolTable<'a> {
    current_id: u8,
    symbol_map: HashMap<&'a str, u8>
}
impl <'a> CodeGenManager<'a> {
    /// Creates a manager with a fresh if-id provider, an empty symbol
    /// table and all text-formatting flags cleared.
    pub fn new() -> CodeGenManager<'a> {
        let default_format = FormattingState {bold: false, italic: false, mono: false, inverted: false};
        CodeGenManager {
            format_state: default_format,
            ids_if: IdentifierProvider::new(),
            symbol_table: SymbolTable::new(),
        }
    }
}
impl IdentifierProvider {
    /// Creates a provider whose ids start at 0.
    pub fn new() -> IdentifierProvider {
        IdentifierProvider { current_id: 0, id_stack: Vec::new() }
    }

    /// Reserves the next fresh id, remembers it on the stack and returns it.
    pub fn start_next(&mut self) -> u32 {
        let fresh = self.current_id;
        self.current_id = fresh + 1;
        self.id_stack.push(fresh);
        fresh
    }

    /// Removes and returns the most recently started id.
    /// Panics if no id is currently open.
    pub fn pop_id(&mut self) -> u32 {
        self.id_stack.pop().unwrap()
    }
}
impl <'a> SymbolTable<'a> {
    /// Creates an empty table. Ids start at 25; lower variable numbers
    /// are presumably reserved by the runtime -- confirm against zfile.
    pub fn new() -> SymbolTable<'a> {
        SymbolTable { current_id: 25, symbol_map: HashMap::new() }
    }

    /// Registers `symbol` under a freshly assigned id.
    pub fn insert_new_symbol(&mut self, symbol: &'a str) {
        debug!("Assigned id {} to variable {}", self.current_id, symbol);
        let id = self.current_id;
        self.symbol_map.insert(symbol, id);
        self.current_id = id + 1;
    }

    /// True if `symbol` was registered before.
    pub fn is_known_symbol(&self, symbol: &str) -> bool {
        self.symbol_map.contains_key(symbol)
    }

    /// Id of a known symbol; panics for unknown ones.
    pub fn get_symbol_id(&self, symbol: &str) -> u8 {
        let id = self.symbol_map.get(symbol).unwrap();
        *id
    }
}
impl ASTNode {
    /// Appends a new `Default` node (carrying `token`) as a child of the node
    /// addressed by `path`; an empty path attaches it directly under `self`.
    pub fn add_child(&mut self, path: Vec<usize>, token: Token) {
        if let Some(index) = path.first() {
            // Descend one level: strip the leading index and recurse into that child.
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            match self {
                &mut ASTNode::Default(ref mut node) => node.childs[*index].add_child(new_path, token),
                &mut ASTNode::Passage(ref mut node) => node.childs[*index].add_child(new_path, token),
            }
        } else {
            // Path exhausted: attach the token here as a new leaf.
            match self {
                &mut ASTNode::Default(ref mut node) => node.childs.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() } )),
                &mut ASTNode::Passage(ref mut node) => node.childs.push(ASTNode::Default(NodeDefault { category: token, childs: Vec::new() } )),
            }
        }
    }

    /// Returns the number of children of the node addressed by `path`
    /// (an empty path counts the children of `self`).
    pub fn count_childs(&self, path: Vec<usize>) -> usize {
        if let Some(index) = path.first() {
            let mut new_path: Vec<usize> = path.to_vec();
            new_path.remove(0);
            match self {
                &ASTNode::Default(ref node) => node.childs[*index].count_childs(new_path),
                &ASTNode::Passage(ref node) => node.childs[*index].count_childs(new_path),
            }
        } else {
            match self {
                &ASTNode::Default(ref node) => node.childs.len(),
                &ASTNode::Passage(ref node) => node.childs.len(),
            }
        }
    }

    /// Debug-prints this node and, recursively, its subtree;
    /// `indent` is the number of leading spaces for this level.
    pub fn print(&self, indent: usize) {
        let mut spaces = "".to_string();
        for _ in 0..indent {
            spaces.push_str(" ");
        }
        match self {
            &ASTNode::Passage(ref t) => {
                debug!("{}|- : {:?}", spaces, t.category);
                for child in &t.childs {
                    child.print(indent+2);
                }
            },
            &ASTNode::Default(ref t) => {
                debug!("{}|- : {:?}", spaces, t.category);
                for child in &t.childs {
                    child.print(indent+2);
                }
            }
        }
    }

    /// Borrows the inner `NodeDefault`; panics if this is a `Passage` node.
    pub fn as_default(&self) -> &NodeDefault {
        match self {
            &ASTNode::Default(ref def) => def,
            _ => panic!("Node cannot be unwrapped as NodeDefault!")
        }
    }
}
// NOTE(review): stray "|" — file-concatenation artifact, not Rust; commented out.
/*!
A full TimeSteward implementation that has decent (amortized) asymptotic performance for all common operations.
This is intended to be the simplest possible implementation that meets those conditions. As such, it's not especially optimized. Here are some of its specific weaknesses:
*no support for multithreading
*when a field changes in the past, this TimeSteward immediately erases all more-recent versions of that field. This can take time proportional to the amount of times that field has changed since the past change. (It doesn't affect the amortized time because the recording of each thing amortizes its eventual deletion, but it can cause a hiccup.)
*This erasing happens even if the field was overwritten at some point without being examined. In that case, we could theoretically optimize by leaving the future of the field untouched.
*There can also be hiccups at arbitrary times when the hash table resizes.
*We haven't optimized for the "most changes happen in the present" case, which means we pay a bunch of log n factors when we could be paying O(1).
*If you keep around old snapshots of times when no fields are actually being modified anymore, they will eventually have all their data copied into them unnecessarily. This could be avoided if we had a good two-dimensional tree type so that the snapshots could be queried by (SnapshotIdx X BaseTime) rectangles.
*We also do not optimize for the case where a field is changed in the past but then the field is changed BACK before it affects anything (either by another change in the fiat events or by a regular prediction). The same applies to the case where an event is invalidated, then rerun, but makes the same changes as it made the first time. This allows dependency chains to propagate much faster than they should.
*There might be more small dependency optimizations we could do, like distinguishing between accessing just a field's data and accessing just its last change time, or even the difference between those and just checking whether the field exists or not, or other conditions (although we would need an API change to track some of those things). However, I suspect that the additional runtime cost of recording these different dependencies wouldn't be worth it. (This would only have a small effect at best, because it wouldn't slow down dependency chain propagation unless there are fields that haven't implemented guaranteed_equal__unsafe().)
*/
use super::{DeterministicRandomId, SiphashIdGenerator, RowId, ColumnId, FieldId, PredictorId,
Column, ExtendedTime, EventRng, Basics, TimeSteward, FiatEventOperationResult,
ValidSince, TimeStewardLifetimedMethods, TimeStewardStaticMethods};
use std::collections::{HashMap, BTreeMap, HashSet};
use std::collections::hash_map::Entry;
use std::hash::Hash;
// use std::collections::Bound::{Included, Excluded, Unbounded};
use std::any::Any;
use std::borrow::Borrow;
use std::rc::Rc;
use std::cell::{Cell, RefCell};
use std::ops::Drop;
use rand::Rng;
use insert_only;
type SnapshotIdx = u64;
// FIXME(review): "This is an error" — apparently a deliberate compile-stopper left in this unfinished module; remove once the code below compiles.
/*
An ExtendedTime may be in one of several states
– empty and unused
– a fiat event scheduled but nothing executed
– a fiat event scheduled and executed consistently to that, and still valid
– a fiat event scheduled and executed consistently to that, but its accessed fields have changed
– a fiat event executed, but not scheduled (we could disallow this by doing it immediately)
– a fiat event executed but then rescheduled differently (ditto)
– a predicted event scheduled but nothing executed
– a predicted event scheduled and executed consistently to that, and still valid
– a predicted event scheduled and executed consistently to that, but its accessed fields have changed
– a predicted event executed, but not scheduled (we could disallow this in STABLE states but it is guaranteed to occur at least temporarily during a function)
– a predicted event executed but then rescheduled differently (ditto)
There are enough parallels between fiat and predicted that we should probably combine them:
0. Unused
1. Scheduled only
2. Executed consistently, still valid
3. Executed consistently, fields changed
4. Executed but not scheduled
5. Executed but scheduled differently
possible movements:
(1, 4)->0
0->1
(1, 3, 5)->2
2->3
(2, 3, 5)->4
4->5
The ExtendedTime needs attention in (1, 3, 4, 5) but not (2, 0).
Thus, the changes that affect "needs attention" are:
(1, 4)->0
0->1
(1, 3, 5)->2
2->3
2->4
Which can be split up into the ones CAUSED by giving the time attention:
4->0
(1, 3, 5)->2
And the ones caused from a distance:
0->1 (scheduling)
1->0 and 2->4 (un-scheduling)
2->3 (invalidating)
that's assuming that you're not allowed to reschedule without un-scheduling first (which, in my current model, is true for both fiat events and predicted events)
The only things distinguishing 3 and 5 are how they are created. Let's combine them:
0. Unused
1. Scheduled only
2. Executed consistently, still valid
3. Executed consistently, fields OR schedule different than when it was executed
4. Executed but not scheduled
possible movements:
(1, 4)->0
0->1
(1, 3)->2
(2, 4)->3
(2, 3)->4
The ExtendedTime needs attention in (1, 3, 4) but not (2, 0).
Thus, the changes that affect "needs attention" are:
(1, 4)->0
0->1
(1, 3)->2
2->3
2->4
Which can be split up into the ones CAUSED by giving the time attention:
4->0
(1, 3)->2
And the ones caused from a distance:
0->1 (scheduling)
1->0 and 2->4 (un-scheduling)
2->3 (invalidating)
notice that the (3, 4) trap can only be escaped by giving the time attention.
The only way to REMOVE "needs attention" from a distance is 1->0.
Thus,
*/
/// Validity of an executed event: either it must be re-run, or it is
/// still valid and we remember which fields it read (its dependencies).
enum EventValidity {
Invalid,
ValidWithDependencies (HashSet <FieldId>),
}
/// Bookkeeping for an event that has been executed: the fields it wrote
/// and whether that execution is still consistent with current state.
struct EventExecutionState {
fields_changed: HashSet <FieldId>,
validity: EventValidity,
}
/// Combined scheduling/execution state for one `ExtendedTime`; see the
/// long state-machine comment above (states 0-4).
struct EventState <B: Basics> {
schedule: Option <Event <B>>,
execution_state: Option <EventExecutionState>,
}
// NOTE(review): this impl is work in progress and does not compile as
// written; individual problems are flagged inline below.
impl <B: Basics> StewardOwned <B> {
/// Registers `event` to run at `time` and marks that time as needing
/// attention. Panics if an event is already scheduled there.
fn schedule_event (&mut self, time: ExtendedTime <B>, event: Event <B>) {
match self.event_states.entry (time.clone()) {
Entry::Vacant (entry) => {
entry.insert (EventState {schedule: Some (event), execution_state: None});
self.events_needing_attention.insert (time);
}
Entry::Occupied (mut entry) => {
let state = entry.get_mut();
// NOTE(review): `EventState` has no `scheduled` field — should be `state.schedule`.
assert!(state.scheduled.is_none(), "scheduling an event where there is already one scheduled");
state.schedule = Some (event);
if state.execution_state.is_none() {
self.events_needing_attention.insert (time);
}
}
}
}
/// Removes the schedule entry at `time`; if the event already ran, its
/// execution is invalidated instead of the entry being dropped.
fn unschedule_event (&mut self, time: ExtendedTime <B>) {
// NOTE(review): this is a method of StewardOwned itself, so there is no
// `self.owned` — should be `self.event_states`.
match self.owned.event_states.entry (time.clone()) {
Entry::Vacant (_) => {
panic!("You can't unschedule an event that wasn't scheduled")
}
Entry::Occupied (mut entry) => {
let state = entry.get_mut();
// NOTE(review): `scheduled` again — the field is named `schedule`.
assert!(state.scheduled.is_some(), "You can't unschedule an event that wasn't scheduled");
state.schedule = None;
if let Some (ref mut execution_state) = state.execution_state {
// NOTE(review): `invalidate_execution` takes `&mut` arguments, and these
// field accesses alias the live `entry` borrow above — needs restructuring.
Self::invalidate_execution (time, execution_state, self.events_needing_attention, self.dependencies);
} else {
// NOTE(review): `remove` takes a reference (`&time`).
self.events_needing_attention.remove (time);
entry.remove();
}
}
}
}
/// Marks an executed event invalid, queues its time for re-attention,
/// and strips its reverse-dependency records.
// NOTE(review): `BTreeSet` is not imported, `DependenciesMap` is missing
// its `<B>` parameter, the pattern needs `EventValidity::` qualification
// (and moves out of a borrow), and the body says `execution_state` where
// the parameter is named `execution`.
fn invalidate_execution (time: & ExtendedTime <B>, execution: &mut EventExecutionState, events_needing_attention: &mut BTreeSet<ExtendedTime<B>>, steward_dependencies: &mut DependenciesMap) {
if let ValidWithDependencies (dependencies) = execution.validity {
execution_state.validity = Invalid;
events_needing_attention.insert (time);
for dependency in dependencies {
match steward_dependencies.entry (dependency) {
Entry::Vacant (_) => panic!("dependency records are inconsistent"),
Entry::Occupied (mut entry) => {
entry.get_mut().remove (time);
if entry.get().is_empty() {entry.remove();}
}
}
}
}
}
/// Invalidates everything that accessed field `id` strictly after `time`.
// NOTE(review): unfinished — the `PredictionAccess` arm has no body
// (syntax error), `range (Excluded …)` needs the commented-out
// `std::collections::Bound` import, and `discarded` is not in scope here.
fn invalidate_dependencies (&mut self, id: FieldId, time: ExtendedTime <B>) {
if let Some (my_dependencies) = self.dependencies.get (id) {
for (access_time, access_info) in my_dependencies.range (Excluded (time), Unbounded) {
match access_info {
EventAccess => Self::invalidate_execution (access_time, self.event_states.get(discarded.last_change).expect ("event that accessed this field was missing").execution_state.expect ("event that accessed this field not marked executed"), self.events_needing_attention, self.dependencies),
PredictionAccess (row_id, predictor_id) =>
}
}
}
}
/// Drops all of `history`'s changes from `index` on, invalidating the
/// events that produced them and anything that read them.
fn discard_changes (&mut self, id: FieldId, history: &mut FieldHistory <B>, index: usize, during_processing_of_event_responsible_for_first_discarded: bool, snapshots: &SnapshotsData <B>) {
history.update_snapshots (snapshots);
self.invalidate_dependencies (id, history.changes [index].last_change);
let mut discard_iter = history.changes.split_off (index).into_iter();
// The event currently being processed made the first discarded change,
// so skip it: an event must not invalidate itself.
if during_processing_of_event_responsible_for_first_discarded {discard_iter.next();}
for discarded in discard_iter {
Self::invalidate_execution (discarded.last_change, self.event_states.get(discarded.last_change).expect ("event that created this change was missing").execution_state.expect ("event that created this change not marked executed"), self.events_needing_attention, self.dependencies);
}
}
/// Appends `change` to `history` (which must remain chronological)
/// after updating snapshots and invalidating later readers.
fn add_change (&mut self, id: FieldId, history: &mut FieldHistory <B>, change: Field <B>, snapshots: &mut SnapshotsData <B>) {
history.changes.last().map (| last_change | assert!(last_change.last_change <change.last_change));
history.update_snapshots (snapshots);
self.invalidate_dependencies (id, change.last_change);
history.changes.push (change);
}
}
// NOTE(review): work in progress; does not compile — see inline notes.
impl <B: Basics> Steward <B> {
/// Records the results of running the event at `time` for the first
/// time, installing its field changes into the global field state.
// NOTE(review): `field_states` and `entry` are not in scope (presumably
// `fields.field_states` and the entry/history for `id`), and the struct
// literal uses `first_snapshot_not_updated = …` where Rust needs `:`.
fn create_execution (&mut self, time: & ExtendedTime <B>, new_results: MutatorResults <B>) {
let fields = self.shared.fields.borrow_mut();
let new_fields_changed = HashSet::new();
let new_dependencies = HashSet::with_capacity(new_results.fields);
for (id, field) in new_results.fields {
new_dependencies.insert (id);
if field.last_change == time {
new_fields_changed.insert (id);
let mut history = field_states.entry(id).or_insert (FieldHistory {changes: Vec:: new(), first_snapshot_not_updated = self.owned.next_snapshot});
match history.changes.binary_search_by_key (time, | change | change.last_change) {
Ok (index) => panic!("there shouldn't be a change at this time is no event has been executed then"),
Err (index) => {
self.owned.discard_changes (id, entry.get_mut(), index, false, &fields.changed_since_snapshots);
self.owned.add_change (id, entry.get_mut(), field, &fields.changed_since_snapshots);
}
}
}
}
}
/// Rolls back the field changes made by a previously executed event.
// NOTE(review): `field_states` is not in scope and `entity` is a typo
// for `entry`.
fn remove_execution (&mut self, time: & ExtendedTime <B>, execution: EventExecutionState) {
let fields = self.shared.fields.borrow_mut();
for id in execution.fields_changed {
if let Entry::Occupied (mut entry) = field_states.entry(id) {
//some of these could have ALREADY been deleted –
//in fact, perhaps that's how the event was invalidated in the first place
if let Ok (index) = entry.get().changes.binary_search_by_key (time, | change | change.last_change) {
self.owned.discard_changes (id, entity.get_mut(), index, true, &fields.changed_since_snapshots);
if entry.get().changes.is_empty() {entry.remove();}
}
}
}
}
/// Re-runs an event in place: installs its new field changes, then
/// removes changes from the previous execution it no longer makes.
// NOTE(review): same scope problems as the two methods above; also
// `new_fields_changed.get(id)` needs `&id`, and `entity` is again a
// typo for `entry`.
fn replace_execution (&mut self, time: & ExtendedTime <B>, execution: &mut EventExecutionState, new_results: MutatorResults <B>) {
let fields = self.shared.fields.borrow_mut();
let new_fields_changed = HashSet::new();
let new_dependencies = HashSet::with_capacity(new_results.fields);
for (id, field) in new_results.fields {
new_dependencies.insert (id);
if field.last_change == time {
new_fields_changed.insert (id);
let mut history = field_states.entry(id).or_insert (FieldHistory {changes: Vec:: new(), first_snapshot_not_updated = self.owned.next_snapshot});
match history.changes.binary_search_by_key (time, | change | change.last_change) {
Ok (index) => {
self.owned.discard_changes (id, entry.get_mut(), index, true, &fields.changed_since_snapshots);
self.owned.add_change (id, entry.get_mut(), field, &fields.changed_since_snapshots);
}
Err (index) => {
self.owned.discard_changes (id, entry.get_mut(), index, false, &fields.changed_since_snapshots);
self.owned.add_change (id, entry.get_mut(), field, &fields.changed_since_snapshots);
}
}
}
}
for id in execution.fields_changed {
if new_fields_changed.get(id).is_none() {
if let Entry::Occupied (mut entry) = field_states.entry(id) {
//some of these could have ALREADY been deleted –
//in fact, perhaps that's how the event was invalidated in the first place
if let Ok (index) = entry.get().changes.binary_search_by_key (time, | change | change.last_change) {
self.owned.discard_changes (id, entity.get_mut(), index, true, &fields.changed_since_snapshots);
if entry.get().changes.is_empty() {entry.remove();}
}
}
}
}
}
/// TODO: execute (or re-execute) the event scheduled at `time` —
/// currently an empty stub.
fn do_event (&mut self, time: ExtendedTime <B>) {
}
/// Processes the chronologically next outstanding piece of work: an
/// event needing attention or a missing prediction, whichever is first.
// NOTE(review): `make_prediction` is not defined anywhere in the
// visible code.
fn do_next (&mut self) {
match (self.owned.events_needing_attention.iter().next(), self.owned.predictions_missing_by_time.iter().next()) {
(None, None) =>(),
(Some (ref event_time), None) => self.do_event (event_time.clone()),
(None, Some ((ref prediction_time, (row_id, prediction_id)))) => self.make_prediction (prediction_time.clone(), row_id, prediction_id),
(Some (ref event_time), Some ((ref prediction_time, (row_id, prediction_id)))) => if event_time <= prediction_time {self.do_event (event_time.clone())} else {self.make_prediction (prediction_time.clone(), row_id, prediction_id)},
}
}
}
impl <B: Basics> FieldHistory <B> {
/// Copies this field's value into every snapshot that hasn't captured
/// it yet (lazy copy-on-write for snapshots).
// NOTE(review): unfinished — the `get_default` call is missing a `;`,
// `field_id` and `time` are not in scope, the `SnapshotField` literal's
// `data` is an `Option<&Field<B>>` where `Option<Field<B>>` is needed,
// and `range(Included …)` needs the commented-out `Bound` import.
fn update_snapshots (&mut self, snapshots: &SnapshotsData <B>) {
for (index, snapshot_map) in snapshots.range(Included (self.first_snapshot_not_updated), Unbounded) {
snapshot_map.1.get_default(field_id, || SnapshotField {
data: self.changes.get (match self.changes.binary_search_by_key (time, | change | change.last_change) { Ok (index) => index - 1, Err (index) => index - 1,})
})
self.first_snapshot_not_updated = index + 1;
}
}
}
/// One version of a field's value: the (extended) time it was set and
/// the type-erased data; `None` means the field was deleted then.
#[derive (Clone)]
struct Field<B: Basics> {
last_change: ExtendedTime<B>,
data: Option <Rc<Any>>,
}
/// A field's value as captured for one snapshot (`None` = the field
/// did not exist at snapshot time).
// NOTE(review): accessor code below also constructs this with a
// `touched_by_steward: Cell<bool>` field that is not declared here.
struct SnapshotField<B: Basics> {
data: Option<Field<B>>,
}
/// Who read a field: an event execution, or a predictor run for a
/// specific (row, predictor) pair.
enum AccessInfo {
EventAccess,
PredictionAccess (RowId, PredictorId),
}
/// All versions of one field over time (`changes` is chronological),
/// plus the first snapshot index that hasn't captured it yet.
struct FieldHistory <B: Basics> {
changes: Vec<Field <B>>,
first_snapshot_not_updated: SnapshotIdx,
}
/// Per-snapshot, lazily filled copies of fields, keyed by snapshot index.
type SnapshotsData <B> = BTreeMap<SnapshotIdx,
Rc<insert_only::HashMap<FieldId, SnapshotField<B>>>>;
/// Live field state plus the per-snapshot copy-on-write maps.
// NOTE(review): `field_states` maps to `FieldHistory` here, but the
// `impl Fields` below inserts plain `Field` values — out of sync.
struct Fields<B: Basics> {
field_states: HashMap<FieldId, FieldHistory <B>>,
changed_since_snapshots: SnapshotsData <B>,
}
/// Reverse dependencies: for each field, who accessed it and when.
type DependenciesMap <B> =HashMap<FieldId, BTreeMap<ExtendedTime <B>, AccessInfo>>;
/// The output of one predictor run: which fields it read and the event
/// (if any) it predicted, together with that event's time.
#[derive (Clone)]
struct Prediction<B: Basics> {
predictor_id: PredictorId,
prediction_is_about_row_id: RowId,
predictor_accessed: Vec<FieldId>,
what_will_happen: Option<(ExtendedTime<B>, Event<B>)>,
}
/// Data shared (via `Rc`) between the steward and its snapshots:
/// predictor tables, constants, and the interior-mutable field state.
struct StewardShared<B: Basics> {
predictors_by_column: HashMap<ColumnId, Vec<Predictor<B>>>,
predictors_by_id: HashMap<PredictorId, Predictor<B>>,
constants: B::Constants,
fields: RefCell<Fields<B>>,
}
/// State owned exclusively by the steward proper.
// NOTE(review): several impls in this file still use the old fields
// (`fiat_events`, `last_event`, `predictions_by_time`,
// `prediction_dependencies`) — struct and impls are out of sync.
// `BTreeSet` is also missing from the imports at the top.
#[derive (Clone)]
struct StewardOwned<B: Basics> {
event_states: HashMap <ExtendedTime<B>, EventState <B>>,
events_needing_attention: BTreeSet<ExtendedTime<B>>,
next_snapshot: SnapshotIdx,
predictions_by_id: HashMap<(RowId, PredictorId), Rc<Prediction<B>>>,
predictions_missing_by_time: BTreeMap<ExtendedTime <B>, (RowId, PredictorId)>,
dependencies: DependenciesMap <B>,
}
/// The TimeSteward implementation: exclusively-owned state plus data
/// shared with outstanding snapshots.
#[derive (Clone)]
pub struct Steward<B: Basics> {
owned: StewardOwned<B>,
shared: Rc<StewardShared<B>>,
}
/// A queryable view of the world as of `now`; field values are faulted
/// into `field_states` lazily as the live state moves on.
pub struct Snapshot<B: Basics> {
now: B::Time,
index: SnapshotIdx,
field_states: Rc<insert_only::HashMap<FieldId, SnapshotField<B>>>,
shared: Rc<StewardShared<B>>,
}
/// Everything an event wrote (and read) while executing in a `Mutator`.
struct MutatorResults <B: Basics> {
fields: HashMap<FieldId, Field <B>>,
dependencies: Vec<FieldId>,
}
/// The accessor handed to events: lets them read fields, stage writes
/// (buffered in `results`), and draw deterministic randomness.
pub struct Mutator<'a, B: Basics> {
now: ExtendedTime<B>,
steward: &'a mut StewardOwned<B>,
shared: &'a StewardShared<B>,
fields: &'a Fields<B>,
generator: EventRng,
results: RefCell<MutatorResults <B>>,
}
/// Accumulated output of a predictor run: the soonest prediction made,
/// the fields read, and a hasher folding in their change ids.
struct PredictorAccessorResults<B: Basics> {
soonest_prediction: Option<(B::Time, Event<B>)>,
dependencies: Vec<FieldId>,
dependencies_hasher: SiphashIdGenerator,
}
/// The accessor handed to predictors; every read is recorded as a
/// dependency so the prediction can be invalidated later.
pub struct PredictorAccessor<'a, B: Basics> {
predictor_id: PredictorId,
about_row_id: RowId,
internal_now: ExtendedTime<B>,
steward: RefCell<&'a mut StewardOwned<B>>,
shared: &'a StewardShared<B>,
fields: &'a Fields<B>,
results: RefCell<PredictorAccessorResults<B>>,
}
/// An event: a closure executed inside a `Mutator`.
pub type EventFn<B> = for<'d, 'e> Fn(&'d mut Mutator<'e, B>);
pub type Event<B> = Rc<EventFn<B>>;
/// A predictor: a closure run inside a `PredictorAccessor` for one row.
pub type Predictor<B> = super::Predictor<PredictorFn<B>>;
pub type PredictorFn<B> = for<'b, 'c> Fn(&'b mut PredictorAccessor<'c, B>, RowId);
impl<B: Basics> Drop for Snapshot<B> {
    /// When a snapshot is dropped, discard its copy-on-write field map
    /// so the steward stops maintaining data for it.
    fn drop(&mut self) {
        let mut fields = self.shared.fields.borrow_mut();
        fields.changed_since_snapshots.remove(&self.index);
    }
}
impl<B: Basics> super::Accessor<B> for Snapshot<B> {
/// Reads column `C` of row `id` as of the snapshot time, faulting the
/// value into the snapshot's private map on first access.
// NOTE(review): does not compile — the `get_default` call is missing a
// binding/`;`, `extract_field_info::<B, C>( .data` is a syntax error,
// `SnapshotField` has no `touched_by_steward` field, and no
// `get_for_snapshot` method exists on `Fields` in the visible code.
fn data_and_last_change<C: Column>(&self, id: RowId) -> Option<(&C::FieldType, &B::Time)> {
let field_id = FieldId::new (id, C::column_id());
let field = self.field_states.get_default(field_id, || {
SnapshotField {
data: self.shared
.fields
.borrow()
.get_for_snapshot (&field_id, self.now)
.cloned(),
touched_by_steward: Cell::new(false),
}
})
extract_field_info::<B, C>( .data
.as_ref())
.map(|p| (p.0, &p.1.base))
}
/// The simulation's global constants.
fn constants(&self) -> &B::Constants {
&self.shared.constants
}
}
impl<'a, B: Basics> super::Accessor<B> for Mutator<'a, B> {
/// Reads column `C` of row `id` at the current event time, caching the
/// read in `results` so it is recorded as a dependency.
// NOTE(review): does not compile — `fields` should be `self.fields`,
// `Fields::get` (below) takes no time argument, `extract_field_info`
// expects a `&Field<B>`, and the body returns a bare tuple where the
// signature promises an `Option`.
fn data_and_last_change<C: Column>(&self, id: RowId) -> Option<(&C::FieldType, &B::Time)> {
let field = extract_field_info::<B, C> (self.results.borrow_mut().fields.entry (id).or_insert_with (| | fields.get::<C>(id, self.now)));
(field.0, & field.1.base)
}
/// The simulation's global constants.
fn constants(&self) -> &B::Constants {
&self.shared.constants
}
}
impl<'a, B: Basics> super::Accessor<B> for PredictorAccessor<'a, B> {
/// Reads column `C` of row `id`, recording the access both in the
/// steward's reverse-dependency map and in this run's results.
// NOTE(review): `StewardOwned` (as declared above) has `dependencies`,
// not `prediction_dependencies`; `Fields::get` takes no time argument;
// and `self.now` is not defined for this type in the visible code
// (presumably `self.internal_now`).
fn data_and_last_change<C: Column>(&self, id: RowId) -> Option<(&C::FieldType, &B::Time)> {
let field_id = FieldId::new (id, C::column_id());
let mut results = self.results.borrow_mut();
self.steward
.borrow_mut()
.prediction_dependencies
.entry(field_id)
.or_insert(HashSet::new())
.insert((self.about_row_id, self.predictor_id));
results.dependencies.push(field_id);
self.fields.get::<C>(id, self.now).map(|p| {
p.1.id.hash(&mut results.dependencies_hasher);
(p.0, &p.1.base)
})
}
/// The simulation's global constants.
fn constants(&self) -> &B::Constants {
&self.shared.constants
}
}
impl<B: Basics> super::MomentaryAccessor<B> for Snapshot<B> {
/// The snapshot's fixed observation time.
fn now(&self) -> &B::Time {
&self.now
}
}
impl<'a, B: Basics> super::MomentaryAccessor<B> for Mutator<'a, B> {
/// The base time of the event currently being executed.
fn now(&self) -> &B::Time {
&self.now.base
}
}
impl<'a, B: Basics> super::PredictorAccessor<B, EventFn<B>> for PredictorAccessor<'a, B> {
    /// Schedules `event` for "now" — the time this predictor run
    /// represents.
    fn predict_immediately(&mut self, event: Event<B>) {
        let now = self.internal_now.base.clone();
        self.predict_at_time(&now, event);
    }

    /// Records `event` as a prediction candidate at `time`. Times
    /// before `internal_now` are ignored, and only the soonest
    /// candidate seen so far is kept.
    fn predict_at_time(&mut self, time: &B::Time, event: Event<B>) {
        if time < &self.internal_now.base {
            return;
        }
        let mut results = self.results.borrow_mut();
        let supersedes = match results.soonest_prediction {
            Some((ref old_time, _)) => old_time > time,
            None => true,
        };
        if supersedes {
            results.soonest_prediction = Some((time.clone(), event));
        }
    }
}
/// Marker impl: `Snapshot` satisfies the TimeSteward snapshot trait.
impl<B: Basics> super::Snapshot<B> for Snapshot<B> {}
impl<'a, B: Basics> super::Mutator<B> for Mutator<'a, B> {
fn set<C: Column>(&mut self, id: RowId, data: Option<C::FieldType>) {
let field_id = FieldId::new (id, C::column_id());
self.results.borrow_mut().fields.insert (Field {last_change: self.now, data: data.map (| whatever | Rc::new (whatever))});
}
fn rng(&mut self) -> &mut EventRng {
&mut self.generator
}
fn random_id(&mut self) -> RowId {
RowId { data: [self.generator.gen::<u64>(), self.generator.gen::<u64>()] }
}
}
// https://github.com/rust-lang/rfcs/issues/1485
/// Back-port of `Option::filter`: keep the value only when the
/// predicate holds.
trait Filter<T> {
fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self;
}
impl<T> Filter<T> for Option<T> {
    /// Returns `self` unchanged when it is `Some(x)` and
    /// `predicate(&x)` is true; otherwise `None`.
    fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self {
        match self {
            Some(x) if predicate(&x) => Some(x),
            _ => None,
        }
    }
}
/// Downcasts a type-erased field to column `C`'s concrete type,
/// returning the value together with its last-change time.
// NOTE(review): `field.data` is `Option<Rc<Any>>`; it must be unwrapped
// (e.g. `.as_ref().expect(…)`) before `downcast_ref` — as written this
// does not compile.
fn extract_field_info<B: Basics, C: Column>(field: &Field<B>)
-> (&C::FieldType, &ExtendedTime<B>) {
(field.data
.downcast_ref::<C::FieldType>()
.expect("a field had the wrong type for its column")
.borrow(),
& field.last_change)
}
/// Looks up the value stored under (row `id`, column `C`) in `map`,
/// if any.
fn get<C: Column, Value>(map: &HashMap<FieldId, Value>, id: RowId) -> Option<&Value> {
    let key = FieldId {
        row_id: id,
        column_id: C::column_id(),
    };
    map.get(&key)
}
impl<B: Basics> Fields<B> {
/// Reads the current value of column `C` for row `id`.
// NOTE(review): does not compile — `get::<C, Field<B>>` returns
// `Option<&Field<B>>` but `extract_field_info` takes a `&Field<B>` and
// returns a bare tuple, not the promised `Option`. Also, per the struct
// declared above, `field_states` now holds `FieldHistory`, not `Field`,
// so this whole impl is out of sync with the new data model.
fn get<C: Column>(&self, id: RowId) -> Option<(&C::FieldType, &ExtendedTime<B>)> {
let field_states = &self.field_states;
extract_field_info::<B, C>(get::<C, Field<B>>(field_states, id))
}
// returns true if the field changed from existing to nonexistent or vice versa
/// Writes `value` into column `C` of row `id` at `time`.
fn set<C: Column>(&mut self, id: RowId, value: C::FieldType, time: &ExtendedTime<B>) -> bool {
let field = Field {
data: Rc::new(value),
last_change: time.clone(),
};
match self.field_states
.entry(FieldId {
row_id: id,
column_id: C::column_id(),
}) {
Entry::Occupied(mut entry) => {
entry.insert(field);
false
}
Entry::Vacant(entry) => {
entry.insert(field);
true
}
}
}
// returns true if the field changed from existing to nonexistent or vice versa
/// Deletes column `C` of row `id`.
fn remove<C: Column>(&mut self, id: RowId) -> bool {
self.field_states
.remove(&FieldId {
row_id: id,
column_id: C::column_id(),
})
.is_some()
}
// returns true if the field changed from existing to nonexistent or vice versa
/// Sets or deletes depending on whether `value_opt` is `Some`.
fn set_opt<C: Column>(&mut self,
id: RowId,
value_opt: Option<C::FieldType>,
time: &ExtendedTime<B>)
-> bool {
if let Some(value) = value_opt {
self.set::<C>(id, value, time)
} else {
self.remove::<C>(id)
}
}
}
// NOTE(review): several methods here still reference fields of the OLD
// steward layout (`fiat_events`, `predictions_by_time`, `last_event`,
// `prediction_dependencies`), which the `StewardOwned` struct declared
// above no longer has — this impl is mid-rewrite.
impl<B: Basics> Steward<B> {
/// Returns the earliest upcoming event: the first fiat event or the
/// first predicted event, whichever comes sooner.
fn next_event(&self) -> Option<(ExtendedTime<B>, Event<B>)> {
let first_fiat_event_iter = self.owned
.fiat_events
.iter()
.map(|ev| (ev.0.clone(), ev.1.clone()))
.take(1);
let first_predicted_event_iter = self.owned
.predictions_by_time
.iter()
.map(|pair| {
(pair.0.clone(),
pair.1
.what_will_happen
.as_ref()
.expect("a prediction that predicted nothing was \
stored in predictions")
.1
.clone())
})
.take(1);
/* let predicted_events_iter = self.settings
* .predictors
* .iter()
* .flat_map(|predictor|
* TODO change field_states
* to separate by field type, for efficiency,
* like the haskell does?
* self.state.field_states.keys().filter_map(move |field_id|
* if field_id.column_id != predictor.column_id {
* None
* } else {
* let mut pa = PredictorAccessor{
* steward: self,
* soonest_prediction: None,
* dependencies_hasher: SiphashIdGenerator::new(),
* };
* (predictor.function)(&mut pa, field_id.row_id);
* let dependencies_hash = pa.dependencies_hasher.generate();
* pa.soonest_prediction.and_then(|(event_base_time, event)|
* super::next_extended_time_of_predicted_event(
* predictor.predictor_id,
* field_id.row_id,
* dependencies_hash,
* event_base_time,
* &self.state.last_event.as_ref().expect ("how can we be calling a predictor when there are no fields yet?")
* ).map(|event_time| (event_time, event)))})); */
let events_iter = first_fiat_event_iter.chain(first_predicted_event_iter);
events_iter.min_by_key(|ev| ev.0.clone())
}
/// Looks up a predictor by id; the id must be known.
fn get_predictor(&self, predictor_id: PredictorId) -> &Predictor<B> {
self.shared
.predictors_by_id
.get(&predictor_id)
.expect("somehow a PredictorId appeared with no associated predictor")
}
/// Forgets the stored prediction for (row, predictor): its
/// reverse-dependency entries and, if scheduled, its queued time.
fn clear_prediction(&mut self, row_id: RowId, predictor_id: PredictorId) {
if let Some(prediction) = self.owned.predictions_by_id.get(&(row_id, predictor_id)) {
for field_id in prediction.predictor_accessed.iter() {
if let Entry::Occupied(mut entry) = self.owned
.prediction_dependencies
.entry(field_id.clone()) {
entry.get_mut().remove(&(row_id, predictor_id));
if entry.get().is_empty() {
entry.remove();
}
}
}
if let Some((ref when, _)) = prediction.what_will_happen {
self.owned.predictions_by_time.remove(when).expect("prediction records were inconsistent");
}
}
}
/// Runs `event` at `event_time` through a `Mutator`, then re-runs
/// every predictor whose inputs the event touched.
// NOTE(review): unfinished — the `Mutator` literal sets a
// `predictions_needed` field the struct doesn't declare, and the
// section after `last_event` below is pasted from an old
// `Mutator::set`-style body (`field_id`, `C`, `data`, `self.now`,
// `self.fields`, `self.predictions_needed`, `self.steward` are not in
// scope here).
fn execute_event(&mut self, event_time: ExtendedTime<B>, event: Event<B>) {
let predictions_needed;
{
let field_ref = &mut *self.shared.fields.borrow_mut();
let mut mutator = Mutator {
now: event_time.clone(),
steward: &mut self.owned,
shared: &self.shared,
fields: field_ref,
generator: super::generator_for_event(event_time.id),
predictions_needed: HashSet::new(),
};
event(&mut mutator);
predictions_needed = mutator.predictions_needed;
}
// if it was a fiat event, clean it up:
self.owned.fiat_events.remove(&event_time);
self.owned.last_event = Some(event_time);
let old_value = self.fields.field_states.get(&field_id).cloned();
let existence_changed = self.fields.set_opt::<C>(id, data, &self.now);
for snapshot_map in self.fields.changed_since_snapshots.iter().rev() {
let info = snapshot_map.1.get_default(field_id, || {
SnapshotField {
data: old_value.clone(),
touched_by_steward: Cell::new(false),
}
});
if info.touched_by_steward.get() {
break;
}
info.touched_by_steward.set(true);
if existence_changed {
self.shared.predictors_by_column.get(&C::column_id()).map(|predictors| {
for predictor in predictors {
self.predictions_needed.insert((id, predictor.predictor_id));
}
});
}
if let Entry::Occupied(entry) = self.steward.prediction_dependencies.entry(field_id) {
for prediction in entry.get() {
self.predictions_needed.insert(prediction.clone());
}
entry.remove();
}
for (row_id, predictor_id) in predictions_needed {
self.clear_prediction(row_id, predictor_id);
let now = self.owned
.last_event
.clone()
.expect("how can we be calling a predictor when there are no fields yet?");
let function = self.get_predictor(predictor_id).function.clone();
let results;
{
let field_ref = &*self.shared.fields.borrow();
let mut pa = PredictorAccessor {
predictor_id: predictor_id,
about_row_id: row_id,
internal_now: now,
steward: RefCell::new(&mut self.owned),
shared: &self.shared,
fields: field_ref,
results: RefCell::new(PredictorAccessorResults {
soonest_prediction: None,
dependencies: Vec::new(),
dependencies_hasher: SiphashIdGenerator::new(),
}),
};
(function)(&mut pa, row_id);
results = pa.results.into_inner();
}
let dependencies_hash = results.dependencies_hasher.generate();
let prediction = Rc::new(Prediction {
predictor_id: predictor_id,
prediction_is_about_row_id: row_id,
predictor_accessed: results.dependencies,
what_will_happen: results.soonest_prediction.and_then(|(event_base_time, event)| {
super::next_extended_time_of_predicted_event(predictor_id,
row_id,
dependencies_hash,
event_base_time,
&self.owned
.last_event
.as_ref()
.expect("how can we be calling a \
predictor when there are no \
fields yet?"))
.map(|event_time| (event_time, event))
}),
});
self.owned.predictions_by_id.insert((row_id, predictor_id), prediction.clone());
if let Some((ref time, _)) = prediction.what_will_happen {
self.owned.predictions_by_time.insert(time.clone(), prediction.clone());
}
}
}
/// Advances the simulation until just before `target_time`.
fn update_until_beginning_of(&mut self, target_time: &B::Time) {
while let Some(ev) = self.next_event().filter(|ev| ev.0.base < *target_time) {
let (event_time, event) = ev;
self.execute_event(event_time, event);
}
}
}
/// Ties the lifetime-parameterized accessor types to this steward.
impl<'a, B: Basics> TimeStewardLifetimedMethods<'a, B> for Steward<B> {
type Mutator = Mutator <'a, B>;
type PredictorAccessor = PredictorAccessor <'a, B>;
}
/// Static portion of the TimeSteward API.
// NOTE(review): `new_empty` initializes a `StewardOwned` with
// `last_event`, `fiat_events`, `predictions_by_time` and
// `prediction_dependencies` — fields the `StewardOwned` struct declared
// above does not have (and it omits the fields it does have); struct and
// impl are out of sync mid-rewrite.
impl<B: Basics> TimeStewardStaticMethods<B> for Steward<B> {
type EventFn = EventFn <B>;
type PredictorFn = PredictorFn <B>;
type Snapshot = Snapshot<B>;
/// Times at or before the last executed event can no longer be queried
/// or modified.
fn valid_since(&self) -> ValidSince<B::Time> {
match self.owned.last_event {
None => ValidSince::TheBeginning,
Some(ref time) => ValidSince::After(time.base.clone()),
}
}
/// Creates a steward with no history, indexing `predictors` both by id
/// and by the column they watch.
fn new_empty(constants: B::Constants,
predictors: Vec<super::Predictor<Self::PredictorFn>>)
-> Self {
let mut predictors_by_id = HashMap::new();
let mut predictors_by_column = HashMap::new();
for predictor in predictors {
predictors_by_id.insert(predictor.predictor_id, predictor.clone());
predictors_by_column.entry(predictor.column_id).or_insert(Vec::new()).push(predictor);
}
Steward {
owned: StewardOwned {
last_event: None,
fiat_events: BTreeMap::new(),
next_snapshot: 0,
predictions_by_time: BTreeMap::new(),
predictions_by_id: HashMap::new(),
prediction_dependencies: HashMap::new(),
},
shared: Rc::new(StewardShared {
predictors_by_id: predictors_by_id,
predictors_by_column: predictors_by_column,
constants: constants,
fields: RefCell::new(Fields {
field_states: HashMap::new(),
changed_since_snapshots: BTreeMap::new(),
}),
}),
}
}
/// Adds a fiat event at `time` with user-chosen `id`; rejects times at
/// or before the last executed event, and duplicate (time, id) pairs.
fn insert_fiat_event(&mut self,
time: B::Time,
id: DeterministicRandomId,
event: Event<B>)
-> FiatEventOperationResult {
if let Some(ref change) = self.owned.last_event {
if change.base >= time {
return FiatEventOperationResult::InvalidTime;
}
}
match self.owned.fiat_events.insert(super::extended_time_of_fiat_event(time, id), event) {
None => FiatEventOperationResult::Success,
Some(_) => FiatEventOperationResult::InvalidInput,
}
}
/// Removes a fiat event previously inserted at `time` with `id`;
/// the same validity restriction as `insert_fiat_event` applies.
fn erase_fiat_event(&mut self,
time: &B::Time,
id: DeterministicRandomId)
-> FiatEventOperationResult {
if let Some(ref change) = self.owned.last_event {
if change.base >= *time {
return FiatEventOperationResult::InvalidTime;
}
}
match self.owned.fiat_events.remove(&super::extended_time_of_fiat_event(time.clone(), id)) {
None => FiatEventOperationResult::InvalidInput,
Some(_) => FiatEventOperationResult::Success,
}
}
/// Advances to `time`, then captures a lazily-populated snapshot of
/// the world as of just before `time`. Returns `None` when `time` is
/// no longer valid (at or before the last executed event).
fn snapshot_before<'b>(&'b mut self, time: &'b B::Time) -> Option<Self::Snapshot> {
if let Some(ref change) = self.owned.last_event {
if change.base >= *time {
return None;
}
}
self.update_until_beginning_of(time);
let result = Some(Snapshot {
now: time.clone(),
index: self.owned.next_snapshot,
field_states: self.shared
.fields
.borrow_mut()
.changed_since_snapshots
.entry(self.owned.next_snapshot)
.or_insert(Rc::new(insert_only::HashMap::new()))
.clone(),
shared: self.shared.clone(),
});
self.owned.next_snapshot += 1;
result
}
}
/// Marker impl: `Steward` satisfies the full TimeSteward trait.
impl<B: Basics> TimeSteward<B> for Steward<B> {}
// FIXME(review): stray line "More work on the new TimeSteward" — commit-message text accidentally pasted into the source; commented out.
/*!
A full TimeSteward implementation that has decent (amortized) asymptotic performance for all common operations.
This is intended to be the simplest possible implementation that meets those conditions. As such, it's not especially optimized. Here are some of its specific weaknesses:
*no support for multithreading
*when a field changes in the past, this TimeSteward immediately erases all more-recent versions of that field. This can take time proportional to the amount of times that field has changed since the past change. (It doesn't affect the amortized time because the recording of each thing amortizes its eventual deletion, but it can cause a hiccup.)
*This erasing happens even if the field was overwritten at some point without being examined. In that case, we could theoretically optimize by leaving the future of the field untouched.
*There can also be hiccups at arbitrary times when the hash table resizes.
*We haven't optimized for the "most changes happen in the present" case, which means we pay a bunch of log n factors when we could be paying O(1).
*If you keep around old snapshots of times when no fields are actually being modified anymore, they will eventually have all their data copied into them unnecessarily. This could be avoided if we had a good two-dimensional tree type so that the snapshots could be queried by (SnapshotIdx X BaseTime) rectangles.
*We also do not optimize for the case where a field is changed in the past but then the field is changed BACK before it affects anything (either by another change in the fiat events or by a regular prediction). The same applies to the case where an event is invalidated, then rerun, but makes the same changes as it made the first time. This allows dependency chains to propagate much faster than they should.
*There might be more small dependency optimizations we could do, like distinguishing between accessing just a field's data and accessing just its last change time, or even the difference between those and just checking whether the field exists or not, or other conditions (although we would need an API change to track some of those things). However, I suspect that the additional runtime cost of recording these different dependencies wouldn't be worth it. (This would only have a small effect at best, because it wouldn't slow down dependency chain propagation unless there are fields that haven't implemented guaranteed_equal__unsafe().)
*/
use super::{DeterministicRandomId, SiphashIdGenerator, RowId, ColumnId, FieldId, PredictorId,
Column, ExtendedTime, EventRng, Basics, TimeSteward, FiatEventOperationResult,
ValidSince, TimeStewardLifetimedMethods, TimeStewardStaticMethods};
use std::collections::{HashMap, BTreeMap, HashSet};
use std::collections::hash_map::Entry;
use std::hash::Hash;
// use std::collections::Bound::{Included, Excluded, Unbounded};
use std::any::Any;
use std::borrow::Borrow;
use std::rc::Rc;
use std::cell::{Cell, RefCell};
use std::ops::Drop;
use rand::Rng;
use insert_only;
// Index of a snapshot in the order snapshots were taken; monotonically increasing.
type SnapshotIdx = u64;
// NOTE(review): deliberate compile-stopper left by the author to mark that the
// module below is unfinished; remove once the code compiles end to end.
This is an error
/*
An ExtendedTime may be in one of several states
– empty and unused
– a fiat event scheduled but nothing executed
– a fiat event scheduled and executed consistently to that, and still valid
– a fiat event scheduled and executed consistently to that, but its accessed fields have changed
– a fiat event executed, but not scheduled (we could disallow this by doing it immediately)
– a fiat event executed but then rescheduled differently (ditto)
– a predicted event scheduled but nothing executed
– a predicted event scheduled and executed consistently to that, and still valid
– a predicted event scheduled and executed consistently to that, but its accessed fields have changed
– a predicted event executed, but not scheduled (we could disallow this in STABLE states but it is guaranteed to occur at least temporarily during a function)
– a predicted event executed but then rescheduled differently (ditto)
There are enough parallels between fiat and predicted that we should probably combine them:
0. Unused
1. Scheduled only
2. Executed consistently, still valid
3. Executed consistently, fields changed
4. Executed but not scheduled
5. Executed but scheduled differently
possible movements:
(1, 4)->0
0->1
(1, 3, 5)->2
2->3
(2, 3, 5)->4
4->5
The ExtendedTime needs attention in (1, 3, 4, 5) but not (2, 0).
Thus, the changes that affect "needs attention" are:
(1, 4)->0
0->1
(1, 3, 5)->2
2->3
2->4
Which can be split up into the ones CAUSED by giving the time attention:
4->0
(1, 3, 5)->2
And the ones caused from a distance:
0->1 (scheduling)
1->0 and 2->4 (un-scheduling)
2->3 (invalidating)
that's assuming that you're not allowed to reschedule without un-scheduling first (which, in my current model, is true for both fiat events and predicted events)
The only things distinguishing 3 and 5 are how they are created. Let's combine them:
0. Unused
1. Scheduled only
2. Executed consistently, still valid
3. Executed consistently, fields OR schedule different than when it was executed
4. Executed but not scheduled
possible movements:
(1, 4)->0
0->1
(1, 3)->2
(2, 4)->3
(2, 3)->4
The ExtendedTime needs attention in (1, 3, 4) but not (2, 0).
Thus, the changes that affect "needs attention" are:
(1, 4)->0
0->1
(1, 3)->2
2->3
2->4
Which can be split up into the ones CAUSED by giving the time attention:
4->0
(1, 3)->2
And the ones caused from a distance:
0->1 (scheduling)
1->0 and 2->4 (un-scheduling)
2->3 (invalidating)
notice that the (3, 4) trap can only be escaped by giving the time attention.
The only way to REMOVE "needs attention" from a distance is 1->0.
Thus, un-scheduling (1->0) is the only remote operation that can shrink the attention queue; every other transition into or out of "needs attention" is driven either by scheduling/invalidating from a distance or by processing the time itself.
*/
// Whether a recorded event execution is still consistent with current state.
enum EventValidity {
// The execution's effects are stale and must be redone or undone.
Invalid,
// Still valid; carries the set of fields the execution read, so a change to
// any of them can invalidate it.
ValidWithDependencies (HashSet <FieldId>),
}
// Bookkeeping for an event that has actually been executed.
struct EventExecutionState {
// Fields this execution wrote (needed to undo it if it is discarded).
fields_changed: HashSet <FieldId>,
validity: EventValidity,
}
// Per-time event record: what is scheduled and what (if anything) has been
// executed. Together, `schedule`/`execution_state` encode states 0-4 of the
// state diagram in the long comment above.
struct EventState <B: Basics> {
schedule: Option <Event <B>>,
execution_state: Option <EventExecutionState>,
}
// Mutation helpers for the steward's owned bookkeeping (event states,
// attention queue, dependency map).
//
// NOTE(review): this impl is mid-refactor and does not compile as written;
// problems are flagged inline instead of fixed because the surrounding design
// is still in flux (see the deliberate "This is an error" marker above).
impl <B: Basics> StewardOwned <B> {
// Record `event` at `time` and mark that time as needing attention.
fn schedule_event (&mut self, time: ExtendedTime <B>, event: Event <B>) {
match self.event_states.entry (time.clone()) {
Entry::Vacant (entry) => {
entry.insert (EventState {schedule: Some (event), execution_state: None});
self.events_needing_attention.insert (time);
}
Entry::Occupied (mut entry) => {
let state = entry.get_mut();
// NOTE(review): the field is named `schedule`, not `scheduled` — typo,
// will not compile.
assert!(state.scheduled.is_none(), "scheduling an event where there is already one scheduled");
state.schedule = Some (event);
// A time with a stale execution is already in the attention queue.
if state.execution_state.is_none() {
self.events_needing_attention.insert (time);
}
}
}
}
// Remove the scheduled event at `time`; if it was already executed, the
// execution becomes invalid and must be undone later.
fn unschedule_event (&mut self, time: ExtendedTime <B>) {
match self.event_states.entry (time.clone()) {
Entry::Vacant (_) => {
panic!("You can't unschedule an event that wasn't scheduled")
}
Entry::Occupied (mut entry) => {
let state = entry.get_mut();
assert!(state.schedule.is_some(), "You can't unschedule an event that wasn't scheduled");
state.schedule = None;
if let Some (ref mut execution_state) = state.execution_state {
// NOTE(review): these arguments must be borrowed (`&time`,
// `&mut self.events_needing_attention`, `&mut self.dependencies`)
// to match invalidate_execution's signature.
Self::invalidate_execution (time, execution_state, self.events_needing_attention, self.dependencies);
} else {
// Nothing executed and nothing scheduled: drop the record entirely.
// NOTE(review): `remove` takes a reference (`&time`).
self.events_needing_attention.remove (time);
entry.remove();
}
}
}
}
// Mark an execution invalid, queue its time for attention, and unlink it
// from the dependency-map entry of every field it accessed.
// NOTE(review): will not compile as written — `BTreeSet` is not imported,
// `DependenciesMap` needs its `<B>` parameter, `execution_state` below
// should be `execution`, the `if let` pattern must borrow
// (`ref dependencies`), the enum paths need the `EventValidity::` prefix,
// and `insert (time)` needs a clone of the borrowed time.
fn invalidate_execution (time: & ExtendedTime <B>, execution: &mut EventExecutionState, events_needing_attention: &mut BTreeSet<ExtendedTime<B>>, steward_dependencies: &mut DependenciesMap) {
if let ValidWithDependencies (dependencies) = execution.validity {
execution_state.validity = Invalid;
events_needing_attention.insert (time);
for dependency in dependencies {
match steward_dependencies.entry (dependency) {
Entry::Vacant (_) => panic!("dependency records are inconsistent"),
Entry::Occupied (mut entry) => {
entry.get_mut().remove (time);
if entry.get().is_empty() {entry.remove();}
}
}
}
}
}
// Invalidate everything that accessed field `id` strictly after `time`.
// NOTE(review): unfinished — the `PredictionAccess` arm has no body,
// `discarded` is not in scope here (presumably `access_time` was meant),
// and `Excluded`/`Unbounded` are not imported (see the commented-out
// `use std::collections::Bound` above).
fn invalidate_dependencies (&mut self, id: FieldId, time: ExtendedTime <B>) {
if let Some (my_dependencies) = self.dependencies.get (id) {
for (access_time, access_info) in my_dependencies.range (Excluded (time), Unbounded) {
match access_info {
EventAccess => Self::invalidate_execution (access_time, self.event_states.get(discarded.last_change).expect ("event that accessed this field was missing").execution_state.expect ("event that accessed this field not marked executed"), self.events_needing_attention, self.dependencies),
PredictionAccess (row_id, predictor_id) =>
}
}
}
}
// Drop all changes of a field history from `index` onward, invalidating the
// executions that produced them (optionally sparing the first one when it
// belongs to the event currently being reprocessed).
fn discard_changes (&mut self, id: FieldId, history: &mut FieldHistory <B>, index: usize, during_processing_of_event_responsible_for_first_discarded: bool, snapshots: &SnapshotsData <B>) {
// Snapshots must capture the about-to-be-discarded values first.
history.update_snapshots (snapshots);
self.invalidate_dependencies (id, history.changes [index].last_change);
let mut discard_iter = history.changes.split_off (index).into_iter();
if during_processing_of_event_responsible_for_first_discarded {discard_iter.next();}
for discarded in discard_iter {
Self::invalidate_execution (discarded.last_change, self.event_states.get(discarded.last_change).expect ("event that created this change was missing").execution_state.expect ("event that created this change not marked executed"), self.events_needing_attention, self.dependencies);
}
}
// Append a new change to a field history and invalidate later accessors.
// NOTE(review): `change.last_change` is moved here while `change` is pushed
// afterwards — needs a `.clone()` to compile.
fn add_change (&mut self, id: FieldId, history: &mut FieldHistory <B>, change: Field <B>, snapshots: &mut SnapshotsData <B>) {
// Changes must arrive in strictly increasing time order.
history.changes.last().map (| last_change | assert!(last_change.last_change <change.last_change));
history.update_snapshots (snapshots);
self.invalidate_dependencies (id, change.last_change);
history.changes.push (change);
}
}
// Creation, removal, and replacement of event executions, plus the
// attention-queue driver (`do_next`).
//
// NOTE(review): this impl is unfinished and does not compile; specific
// problems are flagged inline. It should be reconciled with the second
// `impl Steward` further down, which belongs to an older revision.
impl <B: Basics> Steward <B> {
// Build the execution record for an event that has just run, writing its
// staged field changes into the global histories.
// NOTE(review): `field_states` is unbound (presumably
// `fields.field_states`), `entry` should be `history`, the struct literal
// needs `:` not `=`, `with_capacity` needs a length (`.len()`), the panic
// message is garbled, and the promised `EventExecutionState` is never
// constructed or returned.
fn create_execution (&mut self, time: & ExtendedTime <B>, new_results: MutatorResults <B>)->EventExecutionState {
let fields = self.shared.fields.borrow_mut();
let new_fields_changed = HashSet::new();
let new_dependencies = HashSet::with_capacity(new_results.fields);
for (id, field) in new_results.fields {
// Everything the mutator touched (read or wrote) is a dependency.
new_dependencies.insert (id);
// Only fields actually written at `time` count as changes.
if field.last_change == time {
new_fields_changed.insert (id);
let mut history = field_states.entry(id).or_insert (FieldHistory {changes: Vec:: new(), first_snapshot_not_updated = self.owned.next_snapshot});
match history.changes.binary_search_by_key (time, | change | change.last_change) {
Ok (index) => panic!("there shouldn't be a change at this time is no event has been executed then"),
Err (index) => {
self.owned.discard_changes (id, entry.get_mut(), index, false, &fields.changed_since_snapshots);
self.owned.add_change (id, entry.get_mut(), field, &fields.changed_since_snapshots);
}
}
}
}
}
// Undo every field change a discarded execution made.
// NOTE(review): `field_states` is unbound and `entity` is a typo for
// `entry`.
fn remove_execution (&mut self, time: & ExtendedTime <B>, execution: EventExecutionState) {
let fields = self.shared.fields.borrow_mut();
for id in execution.fields_changed {
if let Entry::Occupied (mut entry) = field_states.entry(id) {
//some of these could have ALREADY been deleted –
//in fact, perhaps that's how the event was invalidated in the first place
if let Ok (index) = entry.get().changes.binary_search_by_key (time, | change | change.last_change) {
self.owned.discard_changes (id, entity.get_mut(), index, true, &fields.changed_since_snapshots);
if entry.get().changes.is_empty() {entry.remove();}
}
}
}
}
// Re-run an event: apply its new field changes, then undo any old changes
// the new run no longer makes.
// NOTE(review): same unbound `field_states`/`entry`/`entity` problems as
// the two methods above.
fn replace_execution (&mut self, time: & ExtendedTime <B>, execution: &mut EventExecutionState, new_results: MutatorResults <B>) {
let fields = self.shared.fields.borrow_mut();
let new_fields_changed = HashSet::new();
let new_dependencies = HashSet::with_capacity(new_results.fields);
for (id, field) in new_results.fields {
new_dependencies.insert (id);
if field.last_change == time {
new_fields_changed.insert (id);
let mut history = field_states.entry(id).or_insert (FieldHistory {changes: Vec:: new(), first_snapshot_not_updated = self.owned.next_snapshot});
match history.changes.binary_search_by_key (time, | change | change.last_change) {
// A change already exists at this time: it came from the previous run
// of this same event, so spare it when discarding (flag = true).
Ok (index) => {
self.owned.discard_changes (id, entry.get_mut(), index, true, &fields.changed_since_snapshots);
self.owned.add_change (id, entry.get_mut(), field, &fields.changed_since_snapshots);
}
Err (index) => {
self.owned.discard_changes (id, entry.get_mut(), index, false, &fields.changed_since_snapshots);
self.owned.add_change (id, entry.get_mut(), field, &fields.changed_since_snapshots);
}
}
}
}
// Roll back changes the old execution made that the new run did not.
for id in execution.fields_changed {
if new_fields_changed.get(id).is_none() {
if let Entry::Occupied (mut entry) = field_states.entry(id) {
//some of these could have ALREADY been deleted –
//in fact, perhaps that's how the event was invalidated in the first place
if let Ok (index) = entry.get().changes.binary_search_by_key (time, | change | change.last_change) {
self.owned.discard_changes (id, entity.get_mut(), index, true, &fields.changed_since_snapshots);
if entry.get().changes.is_empty() {entry.remove();}
}
}
}
}
execution.fields_changed = new_fields_changed;
execution.validity = ValidWithDependencies (new_dependencies);
}
// Give the event at `time` attention: run it, re-run it, or tear down its
// stale execution, then store the updated state back.
// NOTE(review): `entry` is unbound (should be `state`), `state` must be
// `mut` to be mutated below, `event_time` should be `time`, and both the
// `RefCell::new (...)` call and the `remove_execution (...)` call are
// missing a closing parenthesis.
fn do_event (&mut self, time: ExtendedTime <B>) {
self.owned.events_needing_attention.remove(& time);
let state = self.owned.event_states.remove (& time).expect ("You can't do an event that wasn't scheduled");
if let Some (event) = entry.get().schedule {
let results;
{
let field_ref = &*self.shared.fields.borrow();
let mut mutator = Mutator {
now: time,
steward: &mut self.owned,
shared: &self.shared,
fields: field_ref,
generator: super::generator_for_event(event_time.id),
results: RefCell::new (MutatorResults {fields: HashMap::new()},
};
event(&mut mutator);
results = mutator. results;
}
if let Some (ref mut execution) = state.execution_state {
self.replace_execution (& time, &mut execution, results);
}
else {
state.execution_state = Some (self.create_execution (& time, results));
}
self.owned.event_states.insert (time, state);
} else {
self.remove_execution (& time, state.execution_state.expect("a null event state was left lying around");
}
}
// NOTE(review): stub — predictions_missing_by_time entries are drained by
// do_next but never actually processed yet.
fn make_prediction (&mut self, time: ExtendedTime <B>, row_id: RowId, predictor_id: PredictorId) {
}
// Process the chronologically first piece of pending work: either an event
// needing attention or a missing prediction, whichever is earlier.
fn do_next (&mut self) {
match (self.owned.events_needing_attention.iter().next(), self.owned.predictions_missing_by_time.iter().next()) {
(None, None) =>(),
(Some (ref event_time), None) => self.do_event (event_time.clone()),
(None, Some ((ref prediction_time, (row_id, predictor_id)))) => self.make_prediction (prediction_time.clone(), row_id, predictor_id),
(Some (ref event_time), Some ((ref prediction_time, (row_id, predictor_id)))) => if event_time <= prediction_time {self.do_event (event_time.clone())} else {self.make_prediction (prediction_time.clone(), row_id, predictor_id)},
}
}
}
impl <B: Basics> FieldHistory <B> {
// Copy this field's current value into every snapshot that has not yet
// recorded it, before the value is changed or discarded.
// NOTE(review): does not compile — `field_id` and `time` are not in scope,
// the `get_default` statement is missing a semicolon, `Included`/`Unbounded`
// are not imported, and `Ok (index) => index - 1` looks wrong (an exact hit
// should presumably use `index` itself; `index - 1` also underflows at 0).
fn update_snapshots (&mut self, snapshots: &SnapshotsData <B>) {
for (index, snapshot_map) in snapshots.range(Included (self.first_snapshot_not_updated), Unbounded) {
snapshot_map.1.get_default(field_id, || SnapshotField {
data: self.changes.get (match self.changes.binary_search_by_key (time, | change | change.last_change) { Ok (index) => index - 1, Err (index) => index - 1,})
})
self.first_snapshot_not_updated = index + 1;
}
}
}
// One version of a field's data: the payload (None = field deleted) and the
// extended time of the event that wrote it.
#[derive (Clone)]
struct Field<B: Basics> {
last_change: ExtendedTime<B>,
data: Option <Rc<Any>>,
}
// The value a field had when a particular snapshot was taken (None = the
// field did not exist then).
// NOTE(review): the Snapshot accessor above also constructs a
// `touched_by_steward` member that is not declared here.
struct SnapshotField<B: Basics> {
data: Option<Field<B>>,
}
// Who accessed a field: an event (identified by its time) or a predictor run.
enum AccessInfo {
EventAccess,
PredictionAccess (RowId, PredictorId),
}
// All recorded versions of one field in increasing time order, plus the first
// snapshot index whose copy of this field is not yet up to date.
struct FieldHistory <B: Basics> {
changes: Vec<Field <B>>,
first_snapshot_not_updated: SnapshotIdx,
}
// Per-snapshot, lazily-filled copies of fields changed since the snapshot.
type SnapshotsData <B> = BTreeMap<SnapshotIdx,
Rc<insert_only::HashMap<FieldId, SnapshotField<B>>>>;
// The steward's field storage plus copy-on-write data for live snapshots.
struct Fields<B: Basics> {
field_states: HashMap<FieldId, FieldHistory <B>>,
changed_since_snapshots: SnapshotsData <B>,
}
// For each field: who accessed it and when, used to invalidate executions and
// predictions when the field's history changes.
type DependenciesMap <B> =HashMap<FieldId, BTreeMap<ExtendedTime <B>, AccessInfo>>;
// A predictor run's output: what it read, and what event (if any) it predicts.
#[derive (Clone)]
struct Prediction<B: Basics> {
predictor_id: PredictorId,
prediction_is_about_row_id: RowId,
predictor_accessed: Vec<FieldId>,
what_will_happen: Option<(ExtendedTime<B>, Event<B>)>,
}
// State shared (via Rc) between the steward, its snapshots, and accessors.
struct StewardShared<B: Basics> {
predictors_by_column: HashMap<ColumnId, Vec<Predictor<B>>>,
predictors_by_id: HashMap<PredictorId, Predictor<B>>,
constants: B::Constants,
fields: RefCell<Fields<B>>,
}
// State owned exclusively by the steward.
// NOTE(review): other impls in this file expect `fiat_events`, `last_event`,
// `predictions_by_time`, and `prediction_dependencies` members here — two
// revisions of this module appear to have been merged; reconcile before
// building.
#[derive (Clone)]
struct StewardOwned<B: Basics> {
event_states: HashMap <ExtendedTime<B>, EventState <B>>,
// Times whose EventState is in a "needs attention" state (see diagram above).
events_needing_attention: BTreeSet<ExtendedTime<B>>,
next_snapshot: SnapshotIdx,
predictions_by_id: HashMap<(RowId, PredictorId), Vec<Prediction<B>>>,
predictions_missing_by_time: BTreeMap<ExtendedTime <B>, (RowId, PredictorId)>,
dependencies: DependenciesMap <B>,
}
#[derive (Clone)]
pub struct Steward<B: Basics> {
owned: StewardOwned<B>,
shared: Rc<StewardShared<B>>,
}
// A consistent view of all fields as of `now`; filled lazily, copy-on-write.
pub struct Snapshot<B: Basics> {
now: B::Time,
index: SnapshotIdx,
field_states: Rc<insert_only::HashMap<FieldId, SnapshotField<B>>>,
shared: Rc<StewardShared<B>>,
}
// Field writes (and memoized reads) staged by an event while it runs.
struct MutatorResults <B: Basics> {
fields: HashMap<FieldId, Field <B>>,
}
// The handle an event function uses to read and write fields.
pub struct Mutator<'a, B: Basics> {
now: ExtendedTime<B>,
steward: &'a mut StewardOwned<B>,
shared: &'a StewardShared<B>,
fields: &'a Fields<B>,
generator: EventRng,
results: RefCell<MutatorResults <B>>,
}
struct PredictorAccessorResults<B: Basics> {
soonest_prediction: Option<(B::Time, Event<B>)>,
// Fields read, in access order, plus a rolling hash of their change ids.
dependencies: Vec<FieldId>,
dependencies_hasher: SiphashIdGenerator,
}
// The handle a predictor function uses to read fields and emit predictions.
pub struct PredictorAccessor<'a, B: Basics> {
predictor_id: PredictorId,
about_row_id: RowId,
internal_now: ExtendedTime<B>,
steward: RefCell<&'a mut StewardOwned<B>>,
shared: &'a StewardShared<B>,
fields: &'a Fields<B>,
results: RefCell<PredictorAccessorResults<B>>,
}
pub type EventFn<B> = for<'d, 'e> Fn(&'d mut Mutator<'e, B>);
pub type Event<B> = Rc<EventFn<B>>;
pub type Predictor<B> = super::Predictor<PredictorFn<B>>;
pub type PredictorFn<B> = for<'b, 'c> Fn(&'b mut PredictorAccessor<'c, B>, RowId);
impl<B: Basics> Drop for Snapshot<B> {
  fn drop(&mut self) {
    // Once the snapshot is gone the steward no longer needs to preserve
    // pre-change values for it; discard its per-snapshot field map.
    let mut fields = self.shared.fields.borrow_mut();
    fields.changed_since_snapshots.remove(&self.index);
  }
}
impl<B: Basics> super::Accessor<B> for Snapshot<B> {
// Read a field as of the snapshot's time, lazily copying it out of shared
// state the first time it is requested.
// NOTE(review): does not compile — the `extract_field_info` call is garbled
// (` .data` has no receiver), `SnapshotField` as declared has no
// `touched_by_steward` member, and `Fields` has no `get_for_snapshot`
// method in this file.
fn data_and_last_change<C: Column>(&self, id: RowId) -> Option<(&C::FieldType, &B::Time)> {
let field_id = FieldId::new (id, C::column_id());
let field = self.field_states.get_default(field_id, || {
SnapshotField {
data: self.shared
.fields
.borrow()
.get_for_snapshot (&field_id, self.now)
.cloned(),
touched_by_steward: Cell::new(false),
}
})
extract_field_info::<B, C>( .data
.as_ref())
.map(|p| (p.0, &p.1.base))
}
fn constants(&self) -> &B::Constants {
&self.shared.constants
}
}
impl<'a, B: Basics> super::Accessor<B> for Mutator<'a, B> {
// Read a field through the mutator, memoizing it in `results` so the event
// sees a consistent view and its reads are recorded as dependencies.
// NOTE(review): does not compile — `fields` should be `self.fields`, the
// getter's arity does not match `Fields::get`, `extract_field_info` takes a
// reference, and the tuple needs wrapping in `Some` to match the declared
// Option return type.
fn data_and_last_change<C: Column>(&self, id: RowId) -> Option<(&C::FieldType, &B::Time)> {
let field = extract_field_info::<B, C> (self.results.borrow_mut().fields.entry (id).or_insert_with (| | fields.get::<C>(id, self.now)));
(field.0, & field.1.base)
}
fn constants(&self) -> &B::Constants {
&self.shared.constants
}
}
impl<'a, B: Basics> super::Accessor<B> for PredictorAccessor<'a, B> {
// Read a field for a predictor, recording the access both in the steward's
// dependency tracking and in this accessor's own results (for hashing).
// NOTE(review): `StewardOwned` as declared in this file has `dependencies`,
// not `prediction_dependencies`, and `Fields::get` here takes only `id` —
// confirm which revision of the API is intended.
fn data_and_last_change<C: Column>(&self, id: RowId) -> Option<(&C::FieldType, &B::Time)> {
let field_id = FieldId::new (id, C::column_id());
let mut results = self.results.borrow_mut();
self.steward
.borrow_mut()
.prediction_dependencies
.entry(field_id)
.or_insert(HashSet::new())
.insert((self.about_row_id, self.predictor_id));
results.dependencies.push(field_id);
// Fold the accessed field's change id into the prediction's dependency hash.
self.fields.get::<C>(id, self.now).map(|p| {
p.1.id.hash(&mut results.dependencies_hasher);
(p.0, &p.1.base)
})
}
fn constants(&self) -> &B::Constants {
&self.shared.constants
}
}
impl<B: Basics> super::MomentaryAccessor<B> for Snapshot<B> {
// A snapshot's "now" is the base time it was taken at.
fn now(&self) -> &B::Time {
&self.now
}
}
impl<'a, B: Basics> super::MomentaryAccessor<B> for Mutator<'a, B> {
// A mutator's "now" is the base component of its event's extended time.
fn now(&self) -> &B::Time {
&self.now.base
}
}
impl<'a, B: Basics> super::PredictorAccessor<B, EventFn<B>> for PredictorAccessor<'a, B> {
  /// Predict `event` at the accessor's own time — the earliest allowed time.
  fn predict_immediately(&mut self, event: Event<B>) {
    let now = self.internal_now.base.clone();
    self.predict_at_time(&now, event);
  }

  /// Offer `event` as a prediction candidate at `time`. Only the soonest
  /// candidate is kept; times before the accessor's present are ignored.
  fn predict_at_time(&mut self, time: &B::Time, event: Event<B>) {
    if time < &self.internal_now.base {
      return;
    }
    let mut results = self.results.borrow_mut();
    let is_sooner = match results.soonest_prediction {
      Some((ref existing, _)) => time < existing,
      None => true,
    };
    if is_sooner {
      results.soonest_prediction = Some((time.clone(), event));
    }
  }
}
impl<B: Basics> super::Snapshot<B> for Snapshot<B> {}
impl<'a, B: Basics> super::Mutator<B> for Mutator<'a, B> {
  /// Stage a write of field (id, C): `Some` sets the data, `None` deletes it.
  /// The write is buffered in `results`; the steward applies it after the
  /// event function returns.
  fn set<C: Column>(&mut self, id: RowId, data: Option<C::FieldType>) {
    let field_id = FieldId::new(id, C::column_id());
    // Bug fix: `HashMap::insert` takes (key, value) — the `field_id` key was
    // missing — and `self.now` must be cloned rather than moved out of self.
    self.results.borrow_mut().fields.insert(field_id,
                                            Field {
                                              last_change: self.now.clone(),
                                              data: data.map(|whatever| Rc::new(whatever)),
                                            });
  }

  /// The deterministic RNG seeded from this event's id.
  fn rng(&mut self) -> &mut EventRng {
    &mut self.generator
  }

  /// Draw a fresh deterministic RowId from the event's RNG.
  fn random_id(&mut self) -> RowId {
    RowId { data: [self.generator.gen::<u64>(), self.generator.gen::<u64>()] }
  }
}
// https://github.com/rust-lang/rfcs/issues/1485
/// Polyfill for `Option::filter` (tracked in rust-lang/rfcs#1485): keep the
/// value only when `predicate` accepts it.
trait Filter<T> {
  fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self;
}
impl<T> Filter<T> for Option<T> {
  fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self {
    match self {
      Some(value) if predicate(&value) => Some(value),
      _ => None,
    }
  }
}
/// Borrow a field's typed data and last-change time.
///
/// # Panics
/// If the field currently has no data, or its data is not column C's type.
fn extract_field_info<B: Basics, C: Column>(field: &Field<B>)
                                            -> (&C::FieldType, &ExtendedTime<B>) {
  // Bug fix: `field.data` is an `Option<Rc<Any>>` and has no `downcast_ref`;
  // unwrap the Option first, then downcast the `Any` payload. The trailing
  // `.borrow()` in the original was redundant once the reference is typed.
  let data = field.data
                  .as_ref()
                  .expect("extracting info from a field with no data");
  (data.downcast_ref::<C::FieldType>()
       .expect("a field had the wrong type for its column"),
   &field.last_change)
}
// Generic keyed lookup: the value stored for (row `id`, column `C`), if any.
fn get<C: Column, Value>(map: &HashMap<FieldId, Value>, id: RowId) -> Option<&Value> {
  let key = FieldId {
    row_id: id,
    column_id: C::column_id(),
  };
  map.get(&key)
}
impl<B: Basics> Fields<B> {
  /// Current data and last-change time of field (id, C); None if nonexistent.
  fn get<C: Column>(&self, id: RowId) -> Option<(&C::FieldType, &ExtendedTime<B>)> {
    // Bug fix: `get` returns `Option<&Field>`, but `extract_field_info`
    // takes `&Field` — the original passed the Option straight through.
    get::<C, Field<B>>(&self.field_states, id)
      .map(|field| extract_field_info::<B, C>(field))
  }

  // returns true if the field changed from existing to nonexistent or vice versa
  fn set<C: Column>(&mut self, id: RowId, value: C::FieldType, time: &ExtendedTime<B>) -> bool {
    // Bug fix: `Field::data` is declared `Option<Rc<Any>>`, so the payload
    // must be wrapped in `Some`.
    let field = Field {
      data: Some(Rc::new(value)),
      last_change: time.clone(),
    };
    match self.field_states
              .entry(FieldId {
                row_id: id,
                column_id: C::column_id(),
              }) {
      // Overwriting an existing field: existence unchanged.
      Entry::Occupied(mut entry) => {
        entry.insert(field);
        false
      }
      // Creating the field: existence changed.
      Entry::Vacant(entry) => {
        entry.insert(field);
        true
      }
    }
  }
  // returns true if the field changed from existing to nonexistent or vice versa
  fn remove<C: Column>(&mut self, id: RowId) -> bool {
    self.field_states
        .remove(&FieldId {
          row_id: id,
          column_id: C::column_id(),
        })
        .is_some()
  }
  // returns true if the field changed from existing to nonexistent or vice versa
  fn set_opt<C: Column>(&mut self,
                        id: RowId,
                        value_opt: Option<C::FieldType>,
                        time: &ExtendedTime<B>)
                        -> bool {
    if let Some(value) = value_opt {
      self.set::<C>(id, value, time)
    } else {
      self.remove::<C>(id)
    }
  }
}
// Event-execution machinery from an earlier design of this steward, driven by
// a simple `fiat_events`/`predictions_by_time` schedule rather than the
// attention-queue model implemented above.
// NOTE(review): the fields referenced here (`fiat_events`, `last_event`,
// `predictions_by_time`, `prediction_dependencies`) do not exist on the
// `StewardOwned` declared in this file — two revisions appear to have been
// merged; reconcile before building.
impl<B: Basics> Steward<B> {
// The earliest upcoming event: the sooner of the first fiat event and the
// first predicted event.
fn next_event(&self) -> Option<(ExtendedTime<B>, Event<B>)> {
let first_fiat_event_iter = self.owned
.fiat_events
.iter()
.map(|ev| (ev.0.clone(), ev.1.clone()))
.take(1);
let first_predicted_event_iter = self.owned
.predictions_by_time
.iter()
.map(|pair| {
(pair.0.clone(),
pair.1
.what_will_happen
.as_ref()
.expect("a prediction that predicted nothing was \
stored in predictions")
.1
.clone())
})
.take(1);
/* let predicted_events_iter = self.settings
* .predictors
* .iter()
* .flat_map(|predictor|
* TODO change field_states
* to separate by field type, for efficiency,
* like the haskell does?
* self.state.field_states.keys().filter_map(move |field_id|
* if field_id.column_id != predictor.column_id {
* None
* } else {
* let mut pa = PredictorAccessor{
* steward: self,
* soonest_prediction: None,
* dependencies_hasher: SiphashIdGenerator::new(),
* };
* (predictor.function)(&mut pa, field_id.row_id);
* let dependencies_hash = pa.dependencies_hasher.generate();
* pa.soonest_prediction.and_then(|(event_base_time, event)|
* super::next_extended_time_of_predicted_event(
* predictor.predictor_id,
* field_id.row_id,
* dependencies_hash,
* event_base_time,
* &self.state.last_event.as_ref().expect ("how can we be calling a predictor when there are no fields yet?")
* ).map(|event_time| (event_time, event)))})); */
let events_iter = first_fiat_event_iter.chain(first_predicted_event_iter);
events_iter.min_by_key(|ev| ev.0.clone())
}
// Look up a predictor by id; ids are validated at construction time.
fn get_predictor(&self, predictor_id: PredictorId) -> &Predictor<B> {
self.shared
.predictors_by_id
.get(&predictor_id)
.expect("somehow a PredictorId appeared with no associated predictor")
}
// Remove a prediction and every dependency-map link pointing at it.
fn clear_prediction(&mut self, row_id: RowId, predictor_id: PredictorId) {
if let Some(prediction) = self.owned.predictions_by_id.get(&(row_id, predictor_id)) {
for field_id in prediction.predictor_accessed.iter() {
if let Entry::Occupied(mut entry) = self.owned
.prediction_dependencies
.entry(field_id.clone()) {
entry.get_mut().remove(&(row_id, predictor_id));
if entry.get().is_empty() {
entry.remove();
}
}
}
if let Some((ref when, _)) = prediction.what_will_happen {
self.owned.predictions_by_time.remove(when).expect("prediction records were inconsistent");
}
}
}
// Run `event` at `event_time`, then re-run every predictor whose inputs the
// event touched and record the resulting predictions.
// NOTE(review): the stretch between `last_event = ...` and the
// `predictions_needed` loop (old_value / existence_changed / snapshot
// bookkeeping) references names from Mutator::set (`field_id`, `C`, `data`,
// `self.fields`, `self.predictions_needed`) and cannot compile here — it
// looks pasted from the mutator and belongs there.
fn execute_event(&mut self, event_time: ExtendedTime<B>, event: Event<B>) {
let predictions_needed;
{
let field_ref = &mut *self.shared.fields.borrow_mut();
let mut mutator = Mutator {
now: event_time.clone(),
steward: &mut self.owned,
shared: &self.shared,
fields: field_ref,
generator: super::generator_for_event(event_time.id),
predictions_needed: HashSet::new(),
};
event(&mut mutator);
predictions_needed = mutator.predictions_needed;
}
// if it was a fiat event, clean it up:
self.owned.fiat_events.remove(&event_time);
self.owned.last_event = Some(event_time);
let old_value = self.fields.field_states.get(&field_id).cloned();
let existence_changed = self.fields.set_opt::<C>(id, data, &self.now);
for snapshot_map in self.fields.changed_since_snapshots.iter().rev() {
let info = snapshot_map.1.get_default(field_id, || {
SnapshotField {
data: old_value.clone(),
touched_by_steward: Cell::new(false),
}
});
if info.touched_by_steward.get() {
break;
}
info.touched_by_steward.set(true);
if existence_changed {
self.shared.predictors_by_column.get(&C::column_id()).map(|predictors| {
for predictor in predictors {
self.predictions_needed.insert((id, predictor.predictor_id));
}
});
}
if let Entry::Occupied(entry) = self.steward.prediction_dependencies.entry(field_id) {
for prediction in entry.get() {
self.predictions_needed.insert(prediction.clone());
}
entry.remove();
}
// Re-run each affected predictor and store its new prediction.
for (row_id, predictor_id) in predictions_needed {
self.clear_prediction(row_id, predictor_id);
let now = self.owned
.last_event
.clone()
.expect("how can we be calling a predictor when there are no fields yet?");
let function = self.get_predictor(predictor_id).function.clone();
let results;
{
let field_ref = &*self.shared.fields.borrow();
let mut pa = PredictorAccessor {
predictor_id: predictor_id,
about_row_id: row_id,
internal_now: now,
steward: RefCell::new(&mut self.owned),
shared: &self.shared,
fields: field_ref,
results: RefCell::new(PredictorAccessorResults {
soonest_prediction: None,
dependencies: Vec::new(),
dependencies_hasher: SiphashIdGenerator::new(),
}),
};
(function)(&mut pa, row_id);
results = pa.results.into_inner();
}
let dependencies_hash = results.dependencies_hasher.generate();
let prediction = Rc::new(Prediction {
predictor_id: predictor_id,
prediction_is_about_row_id: row_id,
predictor_accessed: results.dependencies,
what_will_happen: results.soonest_prediction.and_then(|(event_base_time, event)| {
super::next_extended_time_of_predicted_event(predictor_id,
row_id,
dependencies_hash,
event_base_time,
&self.owned
.last_event
.as_ref()
.expect("how can we be calling a \
predictor when there are no \
fields yet?"))
.map(|event_time| (event_time, event))
}),
});
self.owned.predictions_by_id.insert((row_id, predictor_id), prediction.clone());
if let Some((ref time, _)) = prediction.what_will_happen {
self.owned.predictions_by_time.insert(time.clone(), prediction.clone());
}
}
}
// Run events in chronological order until just before `target_time`.
fn update_until_beginning_of(&mut self, target_time: &B::Time) {
while let Some(ev) = self.next_event().filter(|ev| ev.0.base < *target_time) {
let (event_time, event) = ev;
self.execute_event(event_time, event);
}
}
}
// Ties the lifetime-parameterized accessor types to this steward.
impl<'a, B: Basics> TimeStewardLifetimedMethods<'a, B> for Steward<B> {
type Mutator = Mutator <'a, B>;
type PredictorAccessor = PredictorAccessor <'a, B>;
}
impl<B: Basics> TimeStewardStaticMethods<B> for Steward<B> {
type EventFn = EventFn <B>;
type PredictorFn = PredictorFn <B>;
type Snapshot = Snapshot<B>;
// Queries are valid only strictly after the last executed event.
fn valid_since(&self) -> ValidSince<B::Time> {
match self.owned.last_event {
None => ValidSince::TheBeginning,
Some(ref time) => ValidSince::After(time.base.clone()),
}
}
// Construct a steward with no events, indexing predictors by id and column.
// NOTE(review): the `StewardOwned` literal initializes fields (`last_event`,
// `fiat_events`, `predictions_by_time`, `prediction_dependencies`) that the
// struct declared above does not have, and omits the declared ones
// (`event_states`, `events_needing_attention`,
// `predictions_missing_by_time`, `dependencies`) — merged-revision skew.
fn new_empty(constants: B::Constants,
predictors: Vec<super::Predictor<Self::PredictorFn>>)
-> Self {
let mut predictors_by_id = HashMap::new();
let mut predictors_by_column = HashMap::new();
for predictor in predictors {
predictors_by_id.insert(predictor.predictor_id, predictor.clone());
predictors_by_column.entry(predictor.column_id).or_insert(Vec::new()).push(predictor);
}
Steward {
owned: StewardOwned {
last_event: None,
fiat_events: BTreeMap::new(),
next_snapshot: 0,
predictions_by_time: BTreeMap::new(),
predictions_by_id: HashMap::new(),
prediction_dependencies: HashMap::new(),
},
shared: Rc::new(StewardShared {
predictors_by_id: predictors_by_id,
predictors_by_column: predictors_by_column,
constants: constants,
fields: RefCell::new(Fields {
field_states: HashMap::new(),
changed_since_snapshots: BTreeMap::new(),
}),
}),
}
}
// Insert a fiat event; rejected if `time` is not strictly after the last
// executed event, or if an event with the same (time, id) already exists.
fn insert_fiat_event(&mut self,
time: B::Time,
id: DeterministicRandomId,
event: Event<B>)
-> FiatEventOperationResult {
if let Some(ref change) = self.owned.last_event {
if change.base >= time {
return FiatEventOperationResult::InvalidTime;
}
}
match self.owned.fiat_events.insert(super::extended_time_of_fiat_event(time, id), event) {
None => FiatEventOperationResult::Success,
Some(_) => FiatEventOperationResult::InvalidInput,
}
}
// Remove a previously inserted (and not yet executed) fiat event.
fn erase_fiat_event(&mut self,
time: &B::Time,
id: DeterministicRandomId)
-> FiatEventOperationResult {
if let Some(ref change) = self.owned.last_event {
if change.base >= *time {
return FiatEventOperationResult::InvalidTime;
}
}
match self.owned.fiat_events.remove(&super::extended_time_of_fiat_event(time.clone(), id)) {
None => FiatEventOperationResult::InvalidInput,
Some(_) => FiatEventOperationResult::Success,
}
}
// Execute everything before `time`, then register a lazily-filled snapshot
// at that moment. Returns None if `time` is already in the past.
fn snapshot_before<'b>(&'b mut self, time: &'b B::Time) -> Option<Self::Snapshot> {
if let Some(ref change) = self.owned.last_event {
if change.base >= *time {
return None;
}
}
self.update_until_beginning_of(time);
let result = Some(Snapshot {
now: time.clone(),
index: self.owned.next_snapshot,
field_states: self.shared
.fields
.borrow_mut()
.changed_since_snapshots
.entry(self.owned.next_snapshot)
.or_insert(Rc::new(insert_only::HashMap::new()))
.clone(),
shared: self.shared.clone(),
});
self.owned.next_snapshot += 1;
result
}
}
impl<B: Basics> TimeSteward<B> for Steward<B> {}
|
// Copyright 2017-2019 int08h LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// for value_t_or_exit!()
#[macro_use]
extern crate clap;
use ring::rand;
use ring::rand::SecureRandom;
use byteorder::{LittleEndian, ReadBytesExt};
use chrono::offset::Utc;
use chrono::TimeZone;
use std::collections::HashMap;
use std::fs::File;
use std::io::Write;
use std::iter::Iterator;
use std::net::{SocketAddr, ToSocketAddrs, UdpSocket};
use clap::{App, Arg};
use roughenough::merkle::root_from_paths;
use roughenough::sign::Verifier;
use roughenough::{
roughenough_version, RtMessage, Tag, CERTIFICATE_CONTEXT, SIGNED_RESPONSE_CONTEXT,
};
/// Generate a fresh 64-byte random nonce for a Roughtime request.
fn create_nonce() -> [u8; 64] {
    let mut nonce = [0u8; 64];
    rand::SystemRandom::new().fill(&mut nonce).unwrap();
    nonce
}
/// Encode a client request carrying `nonce`, padded to the protocol's
/// one-kilobyte minimum size.
fn make_request(nonce: &[u8]) -> Vec<u8> {
    let mut request = RtMessage::new(1);
    request.add_field(Tag::NONC, nonce).unwrap();
    request.pad_to_kilobyte();
    request.encode().unwrap()
}
/// Block until a datagram arrives on `sock` and parse it as an RtMessage.
fn receive_response(sock: &mut UdpSocket) -> RtMessage {
    let mut buf = [0u8; 744];
    let (resp_len, _src) = sock.recv_from(&mut buf).unwrap();
    RtMessage::from_bytes(&buf[..resp_len]).unwrap()
}
/// Flood `addr` with identical requests as fast as possible; never returns.
/// Refuses to run against anything but a loopback address.
fn stress_test_forever(addr: &SocketAddr) -> ! {
    assert!(
        addr.ip().is_loopback(),
        "Cannot use non-loopback address {} for stress testing",
        addr.ip()
    );

    println!("Stress testing!");

    let socket = UdpSocket::bind("0.0.0.0:0").expect("Couldn't open UDP socket");
    let request = make_request(&create_nonce());

    loop {
        socket.send_to(&request, addr).unwrap();
    }
}
// Validates a Roughtime response and extracts the time from it.
struct ResponseHandler {
// Server long-term public key; None disables all signature validation.
pub_key: Option<Vec<u8>>,
// Top-level tag map of the response.
msg: HashMap<Tag, Vec<u8>>,
// Parsed SREP (signed response) tag map.
srep: HashMap<Tag, Vec<u8>>,
// Parsed CERT (certificate) tag map.
cert: HashMap<Tag, Vec<u8>>,
// Parsed DELE (delegation) tag map.
dele: HashMap<Tag, Vec<u8>>,
// The nonce we sent; needed for the merkle-inclusion check.
nonce: [u8; 64],
}
// Result of parsing one response.
struct ParsedResponse {
// True only when a public key was supplied and all checks passed.
verified: bool,
midpoint: u64,
radius: u32,
}
impl ResponseHandler {
    /// Parse a server response into its nested tag maps.
    ///
    /// `pub_key` is the server's long-term public key; when `None`, no
    /// signature validation is performed. `nonce` must be the nonce sent in
    /// the request so the merkle proof can be checked against it.
    pub fn new(pub_key: Option<Vec<u8>>, response: RtMessage, nonce: [u8; 64]) -> ResponseHandler {
        let msg = response.into_hash_map();
        let srep = RtMessage::from_bytes(&msg[&Tag::SREP])
            .unwrap()
            .into_hash_map();
        let cert = RtMessage::from_bytes(&msg[&Tag::CERT])
            .unwrap()
            .into_hash_map();
        let dele = RtMessage::from_bytes(&cert[&Tag::DELE])
            .unwrap()
            .into_hash_map();

        ResponseHandler {
            pub_key,
            msg,
            srep,
            cert,
            dele,
            nonce,
        }
    }

    /// Extract midpoint and radius from the response, validating signatures,
    /// the merkle proof, and the delegation span when a public key was given.
    pub fn extract_time(&self) -> ParsedResponse {
        let midpoint = self.srep[&Tag::MIDP]
            .as_slice()
            .read_u64::<LittleEndian>()
            .unwrap();
        let radius = self.srep[&Tag::RADI]
            .as_slice()
            .read_u32::<LittleEndian>()
            .unwrap();

        // Each validate_* asserts (panics) on failure, so reaching the end of
        // this branch means the response verified.
        let verified = if self.pub_key.is_some() {
            self.validate_dele();
            self.validate_srep();
            self.validate_merkle();
            self.validate_midpoint(midpoint);
            true
        } else {
            false
        };

        ParsedResponse {
            verified,
            midpoint,
            radius,
        }
    }

    // Check the long-term key's signature over the delegation certificate.
    fn validate_dele(&self) {
        let mut full_cert = Vec::from(CERTIFICATE_CONTEXT.as_bytes());
        full_cert.extend(&self.cert[&Tag::DELE]);

        assert!(
            self.validate_sig(
                self.pub_key.as_ref().unwrap(),
                &self.cert[&Tag::SIG],
                &full_cert
            ),
            "Invalid signature on DELE tag, response may not be authentic"
        );
    }

    // Check the delegated key's signature over the signed response.
    fn validate_srep(&self) {
        let mut full_srep = Vec::from(SIGNED_RESPONSE_CONTEXT.as_bytes());
        full_srep.extend(&self.msg[&Tag::SREP]);

        assert!(
            self.validate_sig(&self.dele[&Tag::PUBK], &self.msg[&Tag::SIG], &full_srep),
            "Invalid signature on SREP tag, response may not be authentic"
        );
    }

    // Verify our request nonce is included in the response's merkle tree.
    fn validate_merkle(&self) {
        // Consistency/perf fix: use the SREP map already parsed in `new()`
        // instead of re-parsing the same raw SREP bytes a second time.
        let index = self.msg[&Tag::INDX]
            .as_slice()
            .read_u32::<LittleEndian>()
            .unwrap();
        let paths = &self.msg[&Tag::PATH];

        let hash = root_from_paths(index as usize, &self.nonce, paths);

        assert_eq!(
            hash, self.srep[&Tag::ROOT],
            "Nonce is not present in the response's merkle tree"
        );
    }

    // Verify the midpoint lies within the delegation's validity span.
    fn validate_midpoint(&self, midpoint: u64) {
        let mint = self.dele[&Tag::MINT]
            .as_slice()
            .read_u64::<LittleEndian>()
            .unwrap();
        let maxt = self.dele[&Tag::MAXT]
            .as_slice()
            .read_u64::<LittleEndian>()
            .unwrap();

        assert!(
            midpoint >= mint,
            "Response midpoint {} lies *before* delegation span ({}, {})",
            midpoint, mint, maxt
        );
        assert!(
            midpoint <= maxt,
            "Response midpoint {} lies *after* delegation span ({}, {})",
            midpoint, mint, maxt
        );
    }

    // Ed25519-verify `sig` over `data` with `public_key`.
    fn validate_sig(&self, public_key: &[u8], sig: &[u8], data: &[u8]) -> bool {
        let mut verifier = Verifier::new(public_key);
        verifier.update(data);
        verifier.verify(sig)
    }
}
/// CLI entry point: parse flags, send one or more Roughtime requests,
/// then print the (optionally validated) results.
fn main() {
    let matches = App::new("roughenough client")
        .version(roughenough_version().as_ref())
        .arg(Arg::with_name("host")
            .required(true)
            .help("The Roughtime server to connect to")
            .takes_value(true))
        .arg(Arg::with_name("port")
            .required(true)
            .help("The Roughtime server port to connect to")
            .takes_value(true))
        .arg(Arg::with_name("verbose")
            .short("v")
            .long("verbose")
            .help("Print more output"))
        .arg(Arg::with_name("json")
            .short("j")
            .long("json")
            .help("Print output in JSON"))
        .arg(Arg::with_name("public-key")
            .short("p")
            .long("public-key")
            .takes_value(true)
            .help("The server public key used to validate responses. If unset, no validation will be performed"))
        .arg(Arg::with_name("time-format")
            .short("f")
            .long("time-format")
            .takes_value(true)
            .help("The strftime format string used to print the time received from the server")
            .default_value("%b %d %Y %H:%M:%S")
        )
        .arg(Arg::with_name("num-requests")
            .short("n")
            .long("num-requests")
            .takes_value(true)
            .help("The number of requests to make to the server (each from a different source port). This is mainly useful for testing batch response handling")
            .default_value("1")
        )
        .arg(Arg::with_name("stress")
            .short("s")
            .long("stress")
            .help("Stress-tests the server by sending the same request as fast as possible. Please only use this on your own server")
        )
        .arg(Arg::with_name("output")
            .short("o")
            .long("output")
            .takes_value(true)
            .help("Writes all requests to the specified file, in addition to sending them to the server. Useful for generating fuzzer inputs")
        )
        .get_matches();
    let host = matches.value_of("host").unwrap();
    let port = value_t_or_exit!(matches.value_of("port"), u16);
    let verbose = matches.is_present("verbose");
    let json = matches.is_present("json");
    let num_requests = value_t_or_exit!(matches.value_of("num-requests"), u16) as usize;
    let time_format = matches.value_of("time-format").unwrap();
    let stress = matches.is_present("stress");
    let pub_key = matches
        .value_of("public-key")
        .map(|pkey| hex::decode(pkey).expect("Error parsing public key!"));
    let out = matches.value_of("output");
    if verbose {
        // Diagnostics go to stderr so stdout can be redirected cleanly.
        eprintln!("Requesting time from: {:?}:{:?}", host, port);
    }
    let addr = (host, port).to_socket_addrs().unwrap().next().unwrap();
    if stress {
        // Never returns.
        stress_test_forever(&addr)
    }
    // Build every request (one source socket each) before sending any,
    // so the server can batch its responses.
    let mut requests = Vec::with_capacity(num_requests);
    let mut file = out.map(|o| File::create(o).expect("Failed to create file!"));
    for _ in 0..num_requests {
        let nonce = create_nonce();
        let socket = UdpSocket::bind("0.0.0.0:0").expect("Couldn't open UDP socket");
        let request = make_request(&nonce);
        if let Some(f) = file.as_mut() {
            f.write_all(&request).expect("Failed to write to file!")
        }
        requests.push((nonce, request, socket));
    }
    for &mut (_, ref request, ref mut socket) in &mut requests {
        socket.send_to(request, addr).unwrap();
    }
    for (nonce, _, mut socket) in requests {
        let resp = receive_response(&mut socket);
        let ParsedResponse {
            verified,
            midpoint,
            radius,
        } = ResponseHandler::new(pub_key.clone(), resp.clone(), nonce).extract_time();
        let map = resp.into_hash_map();
        let index = map[&Tag::INDX]
            .as_slice()
            .read_u32::<LittleEndian>()
            .unwrap();
        // The arithmetic treats midpoint as microseconds since the epoch:
        // seconds = midpoint / 1e6, nanoseconds = remainder * 1e3.
        let seconds = midpoint / 10_u64.pow(6);
        let nsecs = (midpoint - (seconds * 10_u64.pow(6))) * 10_u64.pow(3);
        let spec = Utc.timestamp(seconds as i64, nsecs as u32);
        let out = spec.format(time_format).to_string();
        let verify_str = if verified { "Yes" } else { "No" };
        if verbose {
            // Non-data output belongs on stderr (see commit note below).
            eprintln!(
                "Received time from server: midpoint={:?}, radius={:?}, verified={} (merkle_index={})",
                out, radius, verify_str, index
            );
        }
        if json {
            println!(
                r#"{{ "midpoint": {:?}, "radius": {:?}, "verified": {}, "merkle_index": {} }}"#,
                out, radius, verified, index
            );
        } else {
            println!("{}", out);
        }
    }
}
Print Non-Data Output to Standard Error
This allows you to redirect standard out to a file or a pipe without
including the verbose debugging/help messages.
// Copyright 2017-2019 int08h LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// for value_t_or_exit!()
#[macro_use]
extern crate clap;
use ring::rand;
use ring::rand::SecureRandom;
use byteorder::{LittleEndian, ReadBytesExt};
use chrono::offset::Utc;
use chrono::TimeZone;
use std::collections::HashMap;
use std::fs::File;
use std::io::Write;
use std::iter::Iterator;
use std::net::{SocketAddr, ToSocketAddrs, UdpSocket};
use clap::{App, Arg};
use roughenough::merkle::root_from_paths;
use roughenough::sign::Verifier;
use roughenough::{
roughenough_version, RtMessage, Tag, CERTIFICATE_CONTEXT, SIGNED_RESPONSE_CONTEXT,
};
/// Generates a fresh 64-byte random nonce for a request.
fn create_nonce() -> [u8; 64] {
    let system_rng = rand::SystemRandom::new();
    let mut bytes = [0u8; 64];
    system_rng.fill(&mut bytes).unwrap();
    bytes
}
/// Builds a one-field request message containing `nonce` under the NONC
/// tag, padded to one kilobyte, and returns its encoded bytes.
fn make_request(nonce: &[u8]) -> Vec<u8> {
    let mut msg = RtMessage::new(1);
    msg.add_field(Tag::NONC, nonce).unwrap();
    msg.pad_to_kilobyte();
    msg.encode().unwrap()
}
/// Blocks until a datagram arrives on `sock` and decodes it as an RtMessage.
fn receive_response(sock: &mut UdpSocket) -> RtMessage {
    // 744 bytes — presumably the maximum expected response size; a larger
    // datagram would be truncated by recv_from. TODO confirm against the
    // server's response layout.
    let mut buf = [0; 744];
    let resp_len = sock.recv_from(&mut buf).unwrap().0;
    RtMessage::from_bytes(&buf[0..resp_len]).unwrap()
}
/// Floods `addr` with copies of a single request as fast as possible.
/// Never returns; refuses to run against non-loopback addresses so it
/// cannot be pointed at someone else's server.
fn stress_test_forever(addr: &SocketAddr) -> ! {
    if !addr.ip().is_loopback() {
        panic!("Cannot use non-loopback address {} for stress testing", addr.ip());
    }
    println!("Stress testing!");
    let nonce = create_nonce();
    let socket = UdpSocket::bind("0.0.0.0:0").expect("Couldn't open UDP socket");
    let request = make_request(&nonce);
    loop {
        socket.send_to(&request, addr).unwrap();
    }
}
/// Holds a decoded Roughtime response, split into its nested tag maps,
/// plus the material needed to validate it.
struct ResponseHandler {
    pub_key: Option<Vec<u8>>,   // server's long-term public key; None = skip validation
    msg: HashMap<Tag, Vec<u8>>, // top-level response tags
    srep: HashMap<Tag, Vec<u8>>, // decoded signed-response (SREP) tags
    cert: HashMap<Tag, Vec<u8>>, // decoded certificate (CERT) tags
    dele: HashMap<Tag, Vec<u8>>, // decoded delegation (DELE) tags
    nonce: [u8; 64],            // nonce that was sent in the request
}
/// Result of `ResponseHandler::extract_time`.
struct ParsedResponse {
    verified: bool, // true only when a public key was supplied and every check passed
    midpoint: u64,  // raw MIDP value from the response
    radius: u32,    // raw RADI (uncertainty) value from the response
}
impl ResponseHandler {
    /// Decodes `response` into its MSG, SREP, CERT, and DELE tag maps.
    ///
    /// `pub_key` is the server's long-term public key; when `None`, no
    /// validation is performed. `nonce` must be the nonce sent in the
    /// request so merkle-tree inclusion can be checked later.
    pub fn new(pub_key: Option<Vec<u8>>, response: RtMessage, nonce: [u8; 64]) -> ResponseHandler {
        let msg = response.into_hash_map();
        let srep = RtMessage::from_bytes(&msg[&Tag::SREP])
            .unwrap()
            .into_hash_map();
        let cert = RtMessage::from_bytes(&msg[&Tag::CERT])
            .unwrap()
            .into_hash_map();
        let dele = RtMessage::from_bytes(&cert[&Tag::DELE])
            .unwrap()
            .into_hash_map();
        ResponseHandler {
            pub_key,
            msg,
            srep,
            cert,
            dele,
            nonce,
        }
    }

    /// Extracts midpoint and radius, validating the full response chain
    /// when a public key was supplied. Validation panics on failure, so
    /// reaching `verified = true` means every check passed.
    pub fn extract_time(&self) -> ParsedResponse {
        let midpoint = self.srep[&Tag::MIDP]
            .as_slice()
            .read_u64::<LittleEndian>()
            .unwrap();
        let radius = self.srep[&Tag::RADI]
            .as_slice()
            .read_u32::<LittleEndian>()
            .unwrap();
        let verified = if self.pub_key.is_some() {
            self.validate_dele();
            self.validate_srep();
            self.validate_merkle();
            self.validate_midpoint(midpoint);
            true
        } else {
            false
        };
        ParsedResponse {
            verified,
            midpoint,
            radius,
        }
    }

    /// Checks the long-term key's signature over the delegation (DELE) blob.
    fn validate_dele(&self) {
        let mut full_cert = Vec::from(CERTIFICATE_CONTEXT.as_bytes());
        full_cert.extend(&self.cert[&Tag::DELE]);
        assert!(
            self.validate_sig(
                self.pub_key.as_ref().unwrap(),
                &self.cert[&Tag::SIG],
                &full_cert
            ),
            "Invalid signature on DELE tag, response may not be authentic"
        );
    }

    /// Checks the delegated key's signature over the signed response (SREP).
    fn validate_srep(&self) {
        let mut full_srep = Vec::from(SIGNED_RESPONSE_CONTEXT.as_bytes());
        full_srep.extend(&self.msg[&Tag::SREP]);
        assert!(
            self.validate_sig(&self.dele[&Tag::PUBK], &self.msg[&Tag::SIG], &full_srep),
            "Invalid signature on SREP tag, response may not be authentic"
        );
    }

    /// Confirms the request nonce is included in the response's merkle tree.
    fn validate_merkle(&self) {
        // `self.srep` already holds the decoded SREP map built in `new()`;
        // re-parsing `msg[SREP]` here (as the original did) is redundant.
        let index = self.msg[&Tag::INDX]
            .as_slice()
            .read_u32::<LittleEndian>()
            .unwrap();
        let paths = &self.msg[&Tag::PATH];
        let hash = root_from_paths(index as usize, &self.nonce, paths);
        assert_eq!(
            hash, self.srep[&Tag::ROOT],
            "Nonce is not present in the response's merkle tree"
        );
    }

    /// Asserts the reported midpoint falls within the delegation span
    /// [MINT, MAXT].
    fn validate_midpoint(&self, midpoint: u64) {
        let mint = self.dele[&Tag::MINT]
            .as_slice()
            .read_u64::<LittleEndian>()
            .unwrap();
        let maxt = self.dele[&Tag::MAXT]
            .as_slice()
            .read_u64::<LittleEndian>()
            .unwrap();
        assert!(
            midpoint >= mint,
            "Response midpoint {} lies *before* delegation span ({}, {})",
            midpoint, mint, maxt
        );
        assert!(
            midpoint <= maxt,
            "Response midpoint {} lies *after* delegation span ({}, {})",
            midpoint, mint, maxt
        );
    }

    /// Verifies `sig` over `data` under `public_key` using the crate's
    /// `Verifier` (signature scheme per `roughenough::sign`).
    fn validate_sig(&self, public_key: &[u8], sig: &[u8], data: &[u8]) -> bool {
        let mut verifier = Verifier::new(public_key);
        verifier.update(data);
        verifier.verify(sig)
    }
}
/// CLI entry point: parse flags, send one or more Roughtime requests,
/// then print the (optionally validated) results. Data output goes to
/// stdout; diagnostics go to stderr.
fn main() {
    let matches = App::new("roughenough client")
        .version(roughenough_version().as_ref())
        .arg(Arg::with_name("host")
            .required(true)
            .help("The Roughtime server to connect to")
            .takes_value(true))
        .arg(Arg::with_name("port")
            .required(true)
            .help("The Roughtime server port to connect to")
            .takes_value(true))
        .arg(Arg::with_name("verbose")
            .short("v")
            .long("verbose")
            .help("Print more output"))
        .arg(Arg::with_name("json")
            .short("j")
            .long("json")
            .help("Print output in JSON"))
        .arg(Arg::with_name("public-key")
            .short("p")
            .long("public-key")
            .takes_value(true)
            .help("The server public key used to validate responses. If unset, no validation will be performed"))
        .arg(Arg::with_name("time-format")
            .short("f")
            .long("time-format")
            .takes_value(true)
            .help("The strftime format string used to print the time received from the server")
            .default_value("%b %d %Y %H:%M:%S")
        )
        .arg(Arg::with_name("num-requests")
            .short("n")
            .long("num-requests")
            .takes_value(true)
            .help("The number of requests to make to the server (each from a different source port). This is mainly useful for testing batch response handling")
            .default_value("1")
        )
        .arg(Arg::with_name("stress")
            .short("s")
            .long("stress")
            .help("Stress-tests the server by sending the same request as fast as possible. Please only use this on your own server")
        )
        .arg(Arg::with_name("output")
            .short("o")
            .long("output")
            .takes_value(true)
            .help("Writes all requests to the specified file, in addition to sending them to the server. Useful for generating fuzzer inputs")
        )
        .get_matches();
    let host = matches.value_of("host").unwrap();
    let port = value_t_or_exit!(matches.value_of("port"), u16);
    let verbose = matches.is_present("verbose");
    let json = matches.is_present("json");
    let num_requests = value_t_or_exit!(matches.value_of("num-requests"), u16) as usize;
    let time_format = matches.value_of("time-format").unwrap();
    let stress = matches.is_present("stress");
    let pub_key = matches
        .value_of("public-key")
        .map(|pkey| hex::decode(pkey).expect("Error parsing public key!"));
    let out = matches.value_of("output");
    if verbose {
        // Diagnostics go to stderr so stdout can be redirected cleanly.
        eprintln!("Requesting time from: {:?}:{:?}", host, port);
    }
    let addr = (host, port).to_socket_addrs().unwrap().next().unwrap();
    if stress {
        // Never returns.
        stress_test_forever(&addr)
    }
    // Build every request (one source socket each) before sending any,
    // so the server can batch its responses.
    let mut requests = Vec::with_capacity(num_requests);
    let mut file = out.map(|o| File::create(o).expect("Failed to create file!"));
    for _ in 0..num_requests {
        let nonce = create_nonce();
        let socket = UdpSocket::bind("0.0.0.0:0").expect("Couldn't open UDP socket");
        let request = make_request(&nonce);
        if let Some(f) = file.as_mut() {
            f.write_all(&request).expect("Failed to write to file!")
        }
        requests.push((nonce, request, socket));
    }
    for &mut (_, ref request, ref mut socket) in &mut requests {
        socket.send_to(request, addr).unwrap();
    }
    for (nonce, _, mut socket) in requests {
        let resp = receive_response(&mut socket);
        let ParsedResponse {
            verified,
            midpoint,
            radius,
        } = ResponseHandler::new(pub_key.clone(), resp.clone(), nonce).extract_time();
        let map = resp.into_hash_map();
        let index = map[&Tag::INDX]
            .as_slice()
            .read_u32::<LittleEndian>()
            .unwrap();
        // The arithmetic treats midpoint as microseconds since the epoch:
        // seconds = midpoint / 1e6, nanoseconds = remainder * 1e3.
        let seconds = midpoint / 10_u64.pow(6);
        let nsecs = (midpoint - (seconds * 10_u64.pow(6))) * 10_u64.pow(3);
        let spec = Utc.timestamp(seconds as i64, nsecs as u32);
        let out = spec.format(time_format).to_string();
        let verify_str = if verified { "Yes" } else { "No" };
        if verbose {
            eprintln!(
                "Received time from server: midpoint={:?}, radius={:?}, verified={} (merkle_index={})",
                out, radius, verify_str, index
            );
        }
        if json {
            println!(
                r#"{{ "midpoint": {:?}, "radius": {:?}, "verified": {}, "merkle_index": {} }}"#,
                out, radius, verified, index
            );
        } else {
            println!("{}", out);
        }
    }
}
|
// Type decoding
import std::str;
import std::vec;
import std::uint;
import std::option;
import std::option::none;
import std::option::some;
import front::ast;
import middle::ty;
import util::common;
import util::common::respan;
import util::common::a_ty;
import util::common::a_bang;
export parse_def_id;
export parse_ty_data;
// Compact string representation for ty::t values. API ty_str &
// parse_from_str. Extra parameters are for converting to/from def_ids in the
// data buffer. Whatever format you choose should not contain pipe characters.
// Callback to translate defs to strs or back:
type str_def = fn(str) -> ast::def_id ;
type pstate =
rec(vec[u8] data, int crate, mutable uint pos, uint len, ty::ctxt tcx);
type ty_or_bang = util::common::ty_or_bang[ty::t];
// Returns the byte at the cursor without advancing.
fn peek(@pstate st) -> u8 { ret st.data.(st.pos); }
// Returns the byte at the cursor and advances past it.
fn next(@pstate st) -> u8 {
auto ch = st.data.(st.pos);
st.pos = st.pos + 1u;
ret ch;
}
// Reads an identifier terminated by the single character `last`.
fn parse_ident(@pstate st, str_def sd, char last) -> ast::ident {
fn is_last(char b, char c) -> bool {
ret c == b;
}
ret parse_ident_(st, sd, bind is_last(last, _));
}
// Accumulates characters into an identifier until `is_last` says stop;
// the terminator itself is left unconsumed.
fn parse_ident_(@pstate st, str_def sd, fn(char) -> bool is_last)
-> ast::ident {
auto rslt = "";
while (! is_last(peek(st) as char)) {
rslt += str::unsafe_from_byte(next(st));
}
ret rslt;
}
// Public entry point: decodes one ty::t from `data` starting at `pos`.
fn parse_ty_data(vec[u8] data, int crate_num, uint pos, uint len, str_def sd,
ty::ctxt tcx) -> ty::t {
auto st =
@rec(data=data, crate=crate_num, mutable pos=pos, len=len, tcx=tcx);
auto result = parse_ty(st, sd);
ret result;
}
// '!' encodes the diverging (bang) pseudo-type; anything else is a type.
fn parse_ty_or_bang(@pstate st, str_def sd) -> ty_or_bang {
alt (peek(st) as char) {
case ('!') { next(st); ret a_bang[ty::t]; }
case (_) { ret a_ty[ty::t](parse_ty(st, sd)); }
}
}
// Parses a ';'-separated constraint list introduced by ':'; empty when
// the list is absent.
fn parse_constrs(@pstate st, str_def sd) -> vec[@ty::constr_def] {
let vec[@ty::constr_def] rslt = [];
alt (peek(st) as char) {
case (':') {
do {
next(st);
vec::push(rslt, parse_constr(st, sd));
} while (peek(st) as char == ';')
}
case (_) { }
}
ret rslt;
}
// Parses a '::'-separated path; '(' terminates it (start of the
// constraint argument list).
fn parse_path(@pstate st, str_def sd) -> ast::path {
let vec[ast::ident] idents = [];
fn is_last(char c) -> bool {
ret (c == '(' || c == ':');
}
idents += [parse_ident_(st, sd, is_last)];
while (true) {
alt (peek(st) as char) {
case (':') { next(st); next(st); }
case (?c) {
if (c == '(') {
// Spans are not preserved in metadata; use a dummy 0,0 span.
ret respan(rec(lo=0u, hi=0u),
rec(idents=idents, types=[]));
}
else {
idents += [parse_ident_(st, sd, is_last)];
}
}
}
}
fail "parse_path: ill-formed path";
}
// Parses one constraint: path '(' args ')' where each arg is '*'
// (the base value) or a single-digit argument index.
fn parse_constr(@pstate st, str_def sd) -> @ty::constr_def {
let vec[@ast::constr_arg] args = [];
auto sp = rec(lo=0u,hi=0u); // FIXME: use a real span
let ast::path pth = parse_path(st, sd);
let char ignore = next(st) as char;
assert(ignore as char == '(');
auto def = parse_def(st, sd);
do {
alt (peek(st) as char) {
case ('*') {
st.pos += 1u;
args += [@respan(sp, ast::carg_base)];
}
case (?c) {
/* how will we disambiguate between
an arg index and a lit argument? */
if (c >= '0' && c <= '9') {
// FIXME
// '0'..'9' -> index 0..9 (48 is ASCII '0').
args += [@respan(sp, ast::carg_ident((c as uint) - 48u))];
ignore = next(st) as char;
}
else {
log_err("Lit args are unimplemented");
fail; // FIXME
}
/*
else {
auto lit = parse_lit(st, sd, ',');
args += [respan(st.span, ast::carg_lit(lit))];
}
*/
}
}
ignore = next(st) as char;
} while (ignore == ';');
assert(ignore == ')');
ret @respan(sp, rec(path=pth, args=args, id=def));
}
// Core decoder: one leading letter selects the type constructor, with
// bracketed payloads for compound types (tag, tup, rec, obj, fn, res...).
fn parse_ty(@pstate st, str_def sd) -> ty::t {
alt (next(st) as char) {
case ('n') { ret ty::mk_nil(st.tcx); }
case ('z') { ret ty::mk_bot(st.tcx); }
case ('b') { ret ty::mk_bool(st.tcx); }
case ('i') { ret ty::mk_int(st.tcx); }
case ('u') { ret ty::mk_uint(st.tcx); }
case ('l') { ret ty::mk_float(st.tcx); }
case ('M') {
// Machine (fixed-width) numeric types; second letter picks width.
alt (next(st) as char) {
case ('b') { ret ty::mk_mach(st.tcx, common::ty_u8); }
case ('w') { ret ty::mk_mach(st.tcx, common::ty_u16); }
case ('l') { ret ty::mk_mach(st.tcx, common::ty_u32); }
case ('d') { ret ty::mk_mach(st.tcx, common::ty_u64); }
case ('B') { ret ty::mk_mach(st.tcx, common::ty_i8); }
case ('W') { ret ty::mk_mach(st.tcx, common::ty_i16); }
case ('L') { ret ty::mk_mach(st.tcx, common::ty_i32); }
case ('D') { ret ty::mk_mach(st.tcx, common::ty_i64); }
case ('f') { ret ty::mk_mach(st.tcx, common::ty_f32); }
case ('F') { ret ty::mk_mach(st.tcx, common::ty_f64); }
}
}
case ('c') { ret ty::mk_char(st.tcx); }
case ('s') { ret ty::mk_str(st.tcx); }
case ('S') { ret ty::mk_istr(st.tcx); }
case ('t') {
// Tag (enum) type: 't[' def type* ']'.
assert (next(st) as char == '[');
auto def = parse_def(st, sd);
let vec[ty::t] params = [];
while (peek(st) as char != ']') { params += [parse_ty(st, sd)]; }
st.pos = st.pos + 1u;
ret ty::mk_tag(st.tcx, def, params);
}
case ('p') { ret ty::mk_param(st.tcx, parse_int(st) as uint); }
case ('@') { ret ty::mk_box(st.tcx, parse_mt(st, sd)); }
case ('*') { ret ty::mk_ptr(st.tcx, parse_mt(st, sd)); }
case ('V') { ret ty::mk_vec(st.tcx, parse_mt(st, sd)); }
case ('I') { ret ty::mk_ivec(st.tcx, parse_mt(st, sd)); }
case ('a') { ret ty::mk_task(st.tcx); }
case ('P') { ret ty::mk_port(st.tcx, parse_ty(st, sd)); }
case ('C') { ret ty::mk_chan(st.tcx, parse_ty(st, sd)); }
case ('T') {
assert (next(st) as char == '[');
let ty::mt[] params = ~[];
while (peek(st) as char != ']') { params += ~[parse_mt(st, sd)]; }
st.pos = st.pos + 1u;
ret ty::mk_tup(st.tcx, params);
}
case ('R') {
// Record: 'R[' (name '=' mt)* ']'.
assert (next(st) as char == '[');
let ty::field[] fields = ~[];
while (peek(st) as char != ']') {
auto name = "";
while (peek(st) as char != '=') {
name += str::unsafe_from_byte(next(st));
}
st.pos = st.pos + 1u;
fields += ~[rec(ident=name, mt=parse_mt(st, sd))];
}
st.pos = st.pos + 1u;
ret ty::mk_rec(st.tcx, fields);
}
case ('F') {
auto func = parse_ty_fn(st, sd);
ret ty::mk_fn(st.tcx, ast::proto_fn, func._0, func._1, func._2,
func._3);
}
case ('W') {
auto func = parse_ty_fn(st, sd);
ret ty::mk_fn(st.tcx, ast::proto_iter, func._0, func._1, func._2,
func._3);
}
case ('N') {
auto abi;
alt (next(st) as char) {
case ('r') { abi = ast::native_abi_rust; }
case ('i') { abi = ast::native_abi_rust_intrinsic; }
case ('c') { abi = ast::native_abi_cdecl; }
case ('l') { abi = ast::native_abi_llvm; }
}
auto func = parse_ty_fn(st, sd);
ret ty::mk_native_fn(st.tcx, abi, func._0, func._1);
}
case ('O') {
// Object type: a bracketed list of methods, each with a proto
// letter, a name terminated by '[', and a function signature.
assert (next(st) as char == '[');
let vec[ty::method] methods = [];
while (peek(st) as char != ']') {
auto proto;
alt (next(st) as char) {
case ('W') { proto = ast::proto_iter; }
case ('F') { proto = ast::proto_fn; }
}
auto name = "";
while (peek(st) as char != '[') {
name += str::unsafe_from_byte(next(st));
}
auto func = parse_ty_fn(st, sd);
methods +=
[rec(proto=proto,
ident=name,
inputs=func._0,
output=func._1,
cf=func._2,
constrs=func._3)];
}
st.pos += 1u;
ret ty::mk_obj(st.tcx, methods);
}
case ('r') {
assert (next(st) as char == '[');
auto def = parse_def(st, sd);
auto inner = parse_ty(st, sd);
let vec[ty::t] params = [];
while (peek(st) as char != ']') { params += [parse_ty(st, sd)]; }
st.pos = st.pos + 1u;
ret ty::mk_res(st.tcx, def, inner, params);
}
case ('X') { ret ty::mk_var(st.tcx, parse_int(st)); }
case ('E') {
auto def = parse_def(st, sd);
ret ty::mk_native(st.tcx, def);
}
case ('Y') { ret ty::mk_type(st.tcx); }
case ('#') {
// Back-reference: '#' hex-pos ':' hex-len '#' names a span of the
// buffer; decoded types are memoized in tcx.rcache.
auto pos = parse_hex(st);
assert (next(st) as char == ':');
auto len = parse_hex(st);
assert (next(st) as char == '#');
alt (st.tcx.rcache.find(tup(st.crate, pos, len))) {
case (some(?tt)) { ret tt; }
case (none) {
auto ps = @rec(pos=pos, len=len with *st);
auto tt = parse_ty(ps, sd);
st.tcx.rcache.insert(tup(st.crate, pos, len), tt);
ret tt;
}
}
}
case (?c) {
log_err "unexpected char in type string: ";
log_err c;
fail;
}
}
}
// Parses an optional mutability prefix ('m' = mut, '?' = maybe-mut,
// none = immutable) followed by a type.
fn parse_mt(@pstate st, str_def sd) -> ty::mt {
auto mut;
alt (peek(st) as char) {
case ('m') { next(st); mut = ast::mut; }
case ('?') { next(st); mut = ast::maybe_mut; }
case (_) { mut = ast::imm; }
}
ret rec(ty=parse_ty(st, sd), mut=mut);
}
// Reads a '|'-terminated def string and translates it via the callback.
fn parse_def(@pstate st, str_def sd) -> ast::def_id {
auto def = "";
while (peek(st) as char != '|') {
def += str::unsafe_from_byte(next(st));
}
st.pos = st.pos + 1u;
ret sd(def);
}
// Parses a decimal integer; stops at the first non-digit.
fn parse_int(@pstate st) -> int {
auto n = 0;
while (true) {
auto cur = peek(st) as char;
if (cur < '0' || cur > '9') { break; }
st.pos = st.pos + 1u;
n *= 10;
n += (cur as int) - ('0' as int);
}
ret n;
}
// Parses a lowercase hexadecimal unsigned integer; stops at the first
// non-hex character.
fn parse_hex(@pstate st) -> uint {
auto n = 0u;
while (true) {
auto cur = peek(st) as char;
if ((cur < '0' || cur > '9') && (cur < 'a' || cur > 'f')) { break; }
st.pos = st.pos + 1u;
n *= 16u;
if ('0' <= cur && cur <= '9') {
n += (cur as uint) - ('0' as uint);
} else { n += 10u + (cur as uint) - ('a' as uint); }
}
ret n;
}
// Parses a function signature: '[' args ']' constrs (ty | '!').
// Each arg may carry '&' (by-alias) and 'm' (mutable alias) prefixes.
fn parse_ty_fn(@pstate st, str_def sd) ->
tup(ty::arg[], ty::t, ast::controlflow, vec[@ty::constr_def]) {
assert (next(st) as char == '[');
let ty::arg[] inputs = ~[];
while (peek(st) as char != ']') {
auto mode = ty::mo_val;
if (peek(st) as char == '&') {
mode = ty::mo_alias(false);
st.pos += 1u;
if (peek(st) as char == 'm') {
mode = ty::mo_alias(true);
st.pos += 1u;
}
}
inputs += ~[rec(mode=mode, ty=parse_ty(st, sd))];
}
st.pos += 1u; // eat the ']'
auto cs = parse_constrs(st, sd);
alt (parse_ty_or_bang(st, sd)) {
case (a_bang) {
// '!' return: diverging function, bottom return type.
ret tup(inputs, ty::mk_bot(st.tcx), ast::noreturn, cs);
}
case (a_ty(?t)) { ret tup(inputs, t, ast::return, cs); }
}
}
// Rust metadata parsing
// Parses a "crate:def" pair of decimal numbers into a def_id tuple;
// fails loudly when the ':' separator is missing.
fn parse_def_id(vec[u8] buf) -> ast::def_id {
auto colon_idx = 0u;
auto len = vec::len[u8](buf);
while (colon_idx < len && buf.(colon_idx) != ':' as u8) {
colon_idx += 1u;
}
if (colon_idx == len) {
log_err "didn't find ':' when parsing def id";
fail;
}
auto crate_part = vec::slice[u8](buf, 0u, colon_idx);
auto def_part = vec::slice[u8](buf, colon_idx + 1u, len);
auto crate_num = uint::parse_buf(crate_part, 10u) as int;
auto def_id = uint::parse_buf(def_part, 10u) as int;
ret tup(crate_num, def_id);
}
rustc: Remove obsolete "The second has to be authed pure" comment
// Type decoding
import std::str;
import std::vec;
import std::uint;
import std::option;
import std::option::none;
import std::option::some;
import front::ast;
import middle::ty;
import util::common;
import util::common::respan;
import util::common::a_ty;
import util::common::a_bang;
export parse_def_id;
export parse_ty_data;
// Compact string representation for ty::t values. API ty_str &
// parse_from_str. Extra parameters are for converting to/from def_ids in the
// data buffer. Whatever format you choose should not contain pipe characters.
// Callback to translate defs to strs or back:
type str_def = fn(str) -> ast::def_id ;
type pstate =
rec(vec[u8] data, int crate, mutable uint pos, uint len, ty::ctxt tcx);
type ty_or_bang = util::common::ty_or_bang[ty::t];
// Returns the byte at the cursor without advancing.
fn peek(@pstate st) -> u8 { ret st.data.(st.pos); }
// Returns the byte at the cursor and advances past it.
fn next(@pstate st) -> u8 {
auto ch = st.data.(st.pos);
st.pos = st.pos + 1u;
ret ch;
}
// Reads an identifier terminated by the single character `last`.
fn parse_ident(@pstate st, str_def sd, char last) -> ast::ident {
fn is_last(char b, char c) -> bool {
ret c == b;
}
ret parse_ident_(st, sd, bind is_last(last, _));
}
// Accumulates characters into an identifier until `is_last` says stop;
// the terminator itself is left unconsumed.
fn parse_ident_(@pstate st, str_def sd, fn(char) -> bool is_last)
-> ast::ident {
auto rslt = "";
while (! is_last(peek(st) as char)) {
rslt += str::unsafe_from_byte(next(st));
}
ret rslt;
}
// Public entry point: decodes one ty::t from `data` starting at `pos`.
fn parse_ty_data(vec[u8] data, int crate_num, uint pos, uint len, str_def sd,
ty::ctxt tcx) -> ty::t {
auto st =
@rec(data=data, crate=crate_num, mutable pos=pos, len=len, tcx=tcx);
auto result = parse_ty(st, sd);
ret result;
}
// '!' encodes the diverging (bang) pseudo-type; anything else is a type.
fn parse_ty_or_bang(@pstate st, str_def sd) -> ty_or_bang {
alt (peek(st) as char) {
case ('!') { next(st); ret a_bang[ty::t]; }
case (_) { ret a_ty[ty::t](parse_ty(st, sd)); }
}
}
// Parses a ';'-separated constraint list introduced by ':'; empty when
// the list is absent.
fn parse_constrs(@pstate st, str_def sd) -> vec[@ty::constr_def] {
let vec[@ty::constr_def] rslt = [];
alt (peek(st) as char) {
case (':') {
do {
next(st);
vec::push(rslt, parse_constr(st, sd));
} while (peek(st) as char == ';')
}
case (_) { }
}
ret rslt;
}
// Parses a '::'-separated path; '(' terminates it (start of the
// constraint argument list).
fn parse_path(@pstate st, str_def sd) -> ast::path {
let vec[ast::ident] idents = [];
fn is_last(char c) -> bool {
ret (c == '(' || c == ':');
}
idents += [parse_ident_(st, sd, is_last)];
while (true) {
alt (peek(st) as char) {
case (':') { next(st); next(st); }
case (?c) {
if (c == '(') {
// Spans are not preserved in metadata; use a dummy 0,0 span.
ret respan(rec(lo=0u, hi=0u),
rec(idents=idents, types=[]));
}
else {
idents += [parse_ident_(st, sd, is_last)];
}
}
}
}
fail "parse_path: ill-formed path";
}
// Parses one constraint: path '(' args ')' where each arg is '*'
// (the base value) or a single-digit argument index.
fn parse_constr(@pstate st, str_def sd) -> @ty::constr_def {
let vec[@ast::constr_arg] args = [];
auto sp = rec(lo=0u,hi=0u); // FIXME: use a real span
let ast::path pth = parse_path(st, sd);
let char ignore = next(st) as char;
assert(ignore as char == '(');
auto def = parse_def(st, sd);
do {
alt (peek(st) as char) {
case ('*') {
st.pos += 1u;
args += [@respan(sp, ast::carg_base)];
}
case (?c) {
/* how will we disambiguate between
an arg index and a lit argument? */
if (c >= '0' && c <= '9') {
// FIXME
// '0'..'9' -> index 0..9 (48 is ASCII '0').
args += [@respan(sp, ast::carg_ident((c as uint) - 48u))];
ignore = next(st) as char;
}
else {
log_err("Lit args are unimplemented");
fail; // FIXME
}
/*
else {
auto lit = parse_lit(st, sd, ',');
args += [respan(st.span, ast::carg_lit(lit))];
}
*/
}
}
ignore = next(st) as char;
} while (ignore == ';');
assert(ignore == ')');
ret @respan(sp, rec(path=pth, args=args, id=def));
}
// Core decoder: one leading letter selects the type constructor, with
// bracketed payloads for compound types (tag, tup, rec, obj, fn, res...).
fn parse_ty(@pstate st, str_def sd) -> ty::t {
alt (next(st) as char) {
case ('n') { ret ty::mk_nil(st.tcx); }
case ('z') { ret ty::mk_bot(st.tcx); }
case ('b') { ret ty::mk_bool(st.tcx); }
case ('i') { ret ty::mk_int(st.tcx); }
case ('u') { ret ty::mk_uint(st.tcx); }
case ('l') { ret ty::mk_float(st.tcx); }
case ('M') {
// Machine (fixed-width) numeric types; second letter picks width.
alt (next(st) as char) {
case ('b') { ret ty::mk_mach(st.tcx, common::ty_u8); }
case ('w') { ret ty::mk_mach(st.tcx, common::ty_u16); }
case ('l') { ret ty::mk_mach(st.tcx, common::ty_u32); }
case ('d') { ret ty::mk_mach(st.tcx, common::ty_u64); }
case ('B') { ret ty::mk_mach(st.tcx, common::ty_i8); }
case ('W') { ret ty::mk_mach(st.tcx, common::ty_i16); }
case ('L') { ret ty::mk_mach(st.tcx, common::ty_i32); }
case ('D') { ret ty::mk_mach(st.tcx, common::ty_i64); }
case ('f') { ret ty::mk_mach(st.tcx, common::ty_f32); }
case ('F') { ret ty::mk_mach(st.tcx, common::ty_f64); }
}
}
case ('c') { ret ty::mk_char(st.tcx); }
case ('s') { ret ty::mk_str(st.tcx); }
case ('S') { ret ty::mk_istr(st.tcx); }
case ('t') {
// Tag (enum) type: 't[' def type* ']'.
assert (next(st) as char == '[');
auto def = parse_def(st, sd);
let vec[ty::t] params = [];
while (peek(st) as char != ']') { params += [parse_ty(st, sd)]; }
st.pos = st.pos + 1u;
ret ty::mk_tag(st.tcx, def, params);
}
case ('p') { ret ty::mk_param(st.tcx, parse_int(st) as uint); }
case ('@') { ret ty::mk_box(st.tcx, parse_mt(st, sd)); }
case ('*') { ret ty::mk_ptr(st.tcx, parse_mt(st, sd)); }
case ('V') { ret ty::mk_vec(st.tcx, parse_mt(st, sd)); }
case ('I') { ret ty::mk_ivec(st.tcx, parse_mt(st, sd)); }
case ('a') { ret ty::mk_task(st.tcx); }
case ('P') { ret ty::mk_port(st.tcx, parse_ty(st, sd)); }
case ('C') { ret ty::mk_chan(st.tcx, parse_ty(st, sd)); }
case ('T') {
assert (next(st) as char == '[');
let ty::mt[] params = ~[];
while (peek(st) as char != ']') { params += ~[parse_mt(st, sd)]; }
st.pos = st.pos + 1u;
ret ty::mk_tup(st.tcx, params);
}
case ('R') {
// Record: 'R[' (name '=' mt)* ']'.
assert (next(st) as char == '[');
let ty::field[] fields = ~[];
while (peek(st) as char != ']') {
auto name = "";
while (peek(st) as char != '=') {
name += str::unsafe_from_byte(next(st));
}
st.pos = st.pos + 1u;
fields += ~[rec(ident=name, mt=parse_mt(st, sd))];
}
st.pos = st.pos + 1u;
ret ty::mk_rec(st.tcx, fields);
}
case ('F') {
auto func = parse_ty_fn(st, sd);
ret ty::mk_fn(st.tcx, ast::proto_fn, func._0, func._1, func._2,
func._3);
}
case ('W') {
auto func = parse_ty_fn(st, sd);
ret ty::mk_fn(st.tcx, ast::proto_iter, func._0, func._1, func._2,
func._3);
}
case ('N') {
auto abi;
alt (next(st) as char) {
case ('r') { abi = ast::native_abi_rust; }
case ('i') { abi = ast::native_abi_rust_intrinsic; }
case ('c') { abi = ast::native_abi_cdecl; }
case ('l') { abi = ast::native_abi_llvm; }
}
auto func = parse_ty_fn(st, sd);
ret ty::mk_native_fn(st.tcx, abi, func._0, func._1);
}
case ('O') {
// Object type: a bracketed list of methods, each with a proto
// letter, a name terminated by '[', and a function signature.
assert (next(st) as char == '[');
let vec[ty::method] methods = [];
while (peek(st) as char != ']') {
auto proto;
alt (next(st) as char) {
case ('W') { proto = ast::proto_iter; }
case ('F') { proto = ast::proto_fn; }
}
auto name = "";
while (peek(st) as char != '[') {
name += str::unsafe_from_byte(next(st));
}
auto func = parse_ty_fn(st, sd);
methods +=
[rec(proto=proto,
ident=name,
inputs=func._0,
output=func._1,
cf=func._2,
constrs=func._3)];
}
st.pos += 1u;
ret ty::mk_obj(st.tcx, methods);
}
case ('r') {
assert (next(st) as char == '[');
auto def = parse_def(st, sd);
auto inner = parse_ty(st, sd);
let vec[ty::t] params = [];
while (peek(st) as char != ']') { params += [parse_ty(st, sd)]; }
st.pos = st.pos + 1u;
ret ty::mk_res(st.tcx, def, inner, params);
}
case ('X') { ret ty::mk_var(st.tcx, parse_int(st)); }
case ('E') {
auto def = parse_def(st, sd);
ret ty::mk_native(st.tcx, def);
}
case ('Y') { ret ty::mk_type(st.tcx); }
case ('#') {
// Back-reference: '#' hex-pos ':' hex-len '#' names a span of the
// buffer; decoded types are memoized in tcx.rcache.
auto pos = parse_hex(st);
assert (next(st) as char == ':');
auto len = parse_hex(st);
assert (next(st) as char == '#');
alt (st.tcx.rcache.find(tup(st.crate, pos, len))) {
case (some(?tt)) { ret tt; }
case (none) {
auto ps = @rec(pos=pos, len=len with *st);
auto tt = parse_ty(ps, sd);
st.tcx.rcache.insert(tup(st.crate, pos, len), tt);
ret tt;
}
}
}
case (?c) {
log_err "unexpected char in type string: ";
log_err c;
fail;
}
}
}
// Parses an optional mutability prefix ('m' = mut, '?' = maybe-mut,
// none = immutable) followed by a type.
fn parse_mt(@pstate st, str_def sd) -> ty::mt {
auto mut;
alt (peek(st) as char) {
case ('m') { next(st); mut = ast::mut; }
case ('?') { next(st); mut = ast::maybe_mut; }
case (_) { mut = ast::imm; }
}
ret rec(ty=parse_ty(st, sd), mut=mut);
}
// Reads a '|'-terminated def string and translates it via the callback.
fn parse_def(@pstate st, str_def sd) -> ast::def_id {
auto def = "";
while (peek(st) as char != '|') {
def += str::unsafe_from_byte(next(st));
}
st.pos = st.pos + 1u;
ret sd(def);
}
// Parses a decimal integer; stops at the first non-digit.
fn parse_int(@pstate st) -> int {
auto n = 0;
while (true) {
auto cur = peek(st) as char;
if (cur < '0' || cur > '9') { break; }
st.pos = st.pos + 1u;
n *= 10;
n += (cur as int) - ('0' as int);
}
ret n;
}
// Parses a lowercase hexadecimal unsigned integer; stops at the first
// non-hex character.
fn parse_hex(@pstate st) -> uint {
auto n = 0u;
while (true) {
auto cur = peek(st) as char;
if ((cur < '0' || cur > '9') && (cur < 'a' || cur > 'f')) { break; }
st.pos = st.pos + 1u;
n *= 16u;
if ('0' <= cur && cur <= '9') {
n += (cur as uint) - ('0' as uint);
} else { n += 10u + (cur as uint) - ('a' as uint); }
}
ret n;
}
// Parses a function signature: '[' args ']' constrs (ty | '!').
// Each arg may carry '&' (by-alias) and 'm' (mutable alias) prefixes.
fn parse_ty_fn(@pstate st, str_def sd) ->
tup(ty::arg[], ty::t, ast::controlflow, vec[@ty::constr_def]) {
assert (next(st) as char == '[');
let ty::arg[] inputs = ~[];
while (peek(st) as char != ']') {
auto mode = ty::mo_val;
if (peek(st) as char == '&') {
mode = ty::mo_alias(false);
st.pos += 1u;
if (peek(st) as char == 'm') {
mode = ty::mo_alias(true);
st.pos += 1u;
}
}
inputs += ~[rec(mode=mode, ty=parse_ty(st, sd))];
}
st.pos += 1u; // eat the ']'
auto cs = parse_constrs(st, sd);
alt (parse_ty_or_bang(st, sd)) {
case (a_bang) {
// '!' return: diverging function, bottom return type.
ret tup(inputs, ty::mk_bot(st.tcx), ast::noreturn, cs);
}
case (a_ty(?t)) { ret tup(inputs, t, ast::return, cs); }
}
}
// Rust metadata parsing
// Parses a "crate:def" pair of decimal numbers into a def_id tuple;
// fails loudly when the ':' separator is missing.
fn parse_def_id(vec[u8] buf) -> ast::def_id {
auto colon_idx = 0u;
auto len = vec::len[u8](buf);
while (colon_idx < len && buf.(colon_idx) != ':' as u8) {
colon_idx += 1u;
}
if (colon_idx == len) {
log_err "didn't find ':' when parsing def id";
fail;
}
auto crate_part = vec::slice[u8](buf, 0u, colon_idx);
auto def_part = vec::slice[u8](buf, colon_idx + 1u, len);
auto crate_num = uint::parse_buf(crate_part, 10u) as int;
auto def_id = uint::parse_buf(def_part, 10u) as int;
ret tup(crate_num, def_id);
}
|
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{prelude::*, LintContext};
use guppy::{
graph::{PackageGraph, PackageMetadata},
PackageId,
};
use std::path::Path;
/// Represents a linter that runs once per package.
pub trait PackageLinter: Linter {
/// Runs this lint against the package described by `ctx`, writing any
/// findings to `out` and reporting whether the lint executed.
fn run<'l>(
&self,
ctx: &PackageContext<'l>,
out: &mut LintFormatter<'l, '_>,
) -> Result<RunStatus<'l>>;
}
/// Lint context for an individual package.
#[derive(Copy, Clone, Debug)]
pub struct PackageContext<'l> {
// Project-wide context this package context was derived from.
project_ctx: ProjectContext<'l>,
// Path of this package relative to the workspace root.
workspace_path: &'l Path,
// Package metadata borrowed from the package graph.
metadata: &'l PackageMetadata,
}
impl<'l> PackageContext<'l> {
    /// Creates a new package context from the given project context and
    /// the package-graph entry for `id`.
    ///
    /// # Panics
    /// Panics if `id` is not a member of `package_graph`.
    pub fn new(
        project_ctx: ProjectContext<'l>,
        package_graph: &'l PackageGraph,
        workspace_path: &'l Path,
        id: &PackageId,
    ) -> Self {
        Self {
            project_ctx,
            workspace_path,
            metadata: package_graph.metadata(id).expect("package id is valid"),
        }
    }
    /// Returns the project context.
    // Previously the project_ctx field was stored but unreachable by
    // linters; expose it so package linters can query project-wide state.
    pub fn project_ctx(&self) -> &ProjectContext<'l> {
        &self.project_ctx
    }
    /// Returns the relative path for this package in the workspace.
    pub fn workspace_path(&self) -> &'l Path {
        self.workspace_path
    }
    /// Returns the metadata for this package.
    pub fn metadata(&self) -> &'l PackageMetadata {
        self.metadata
    }
}
impl<'l> LintContext<'l> for PackageContext<'l> {
// Identifies this context as a package-level lint target, carrying the
// package name and its workspace-relative path.
fn kind(&self) -> LintKind<'l> {
LintKind::Package {
name: self.metadata.name(),
workspace_path: self.workspace_path,
}
}
}
[x-lint] Add project context accessor
Closes: #3151
Approved by: sunshowers
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{prelude::*, LintContext};
use guppy::{
graph::{PackageGraph, PackageMetadata},
PackageId,
};
use std::path::Path;
/// Represents a linter that runs once per package.
pub trait PackageLinter: Linter {
/// Runs this lint against the package described by `ctx`, writing any
/// findings to `out` and reporting whether the lint executed.
fn run<'l>(
&self,
ctx: &PackageContext<'l>,
out: &mut LintFormatter<'l, '_>,
) -> Result<RunStatus<'l>>;
}
/// Lint context for an individual package.
#[derive(Copy, Clone, Debug)]
pub struct PackageContext<'l> {
// Project-wide context this package context was derived from.
project_ctx: ProjectContext<'l>,
// Path of this package relative to the workspace root.
workspace_path: &'l Path,
// Package metadata borrowed from the package graph.
metadata: &'l PackageMetadata,
}
impl<'l> PackageContext<'l> {
/// Creates a new package context from the given project context and the
/// package-graph entry for `id`.
///
/// # Panics
/// Panics if `id` is not a member of `package_graph`.
pub fn new(
project_ctx: ProjectContext<'l>,
package_graph: &'l PackageGraph,
workspace_path: &'l Path,
id: &PackageId,
) -> Self {
Self {
project_ctx,
workspace_path,
metadata: package_graph.metadata(id).expect("package id is valid"),
}
}
/// Returns the project context.
pub fn project_ctx(&self) -> &ProjectContext<'l> {
&self.project_ctx
}
/// Returns the relative path for this package in the workspace.
pub fn workspace_path(&self) -> &'l Path {
self.workspace_path
}
/// Returns the metadata for this package.
pub fn metadata(&self) -> &'l PackageMetadata {
self.metadata
}
}
impl<'l> LintContext<'l> for PackageContext<'l> {
// Identifies this context as a package-level lint target, carrying the
// package name and its workspace-relative path.
fn kind(&self) -> LintKind<'l> {
LintKind::Package {
name: self.metadata.name(),
workspace_path: self.workspace_path,
}
}
}
|
extern crate assert_cli;
#[cfg(test)]
mod integration {
    // End-to-end tests that invoke the compiled `mhost` binary and check
    // its CLI output. NOTE(review): the lookup tests appear to require
    // network access — confirm before running in restricted CI.
    use assert_cli;
    /// Running with no arguments must fail with a usage error.
    #[test]
    fn mhost_wo_args() {
        mhost()
            .fails()
            .and()
            .stderr().contains("error: The following required arguments were not provided:")
            .unwrap()
    }
    /// A forward lookup prints both the A and AAAA records.
    #[test]
    fn mhost_simple_lookup() {
        mhost()
            .with_args(&["www.example.com"])
            .succeeds()
            .and()
            .stdout().contains("* IPv4: 93.184.216.34")
            .and()
            .stdout().contains("* IPv6: 2606:2800:220:1:248:1893:25c8:1946")
            .unwrap()
    }
    /// An IPv4 address argument triggers a PTR (reverse) lookup.
    #[test]
    fn mhost_simple_ipv4_reverse_lookup() {
        mhost()
            .with_args(&["8.8.8.8"])
            .succeeds()
            .and()
            .stdout().contains("* PTR: google-public-dns-a.google.com.")
            .unwrap()
    }
    /// An IPv6 address argument triggers a PTR (reverse) lookup.
    #[test]
    fn mhost_simple_ipv6_reverse_lookup() {
        mhost()
            .with_args(&["2001:4860:4860::8888"])
            .succeeds()
            .and()
            .stdout().contains("* PTR: google-public-dns-a.google.com.")
            .unwrap()
    }
    // Local development uses `cargo test` while CI builds with
    // `cargo test --release`, so the binary under test lives in a
    // profile-dependent directory. The hard-coded debug path broke the
    // release build; select the right path via `#[cfg(debug_assertions)]`.
    #[cfg(debug_assertions)]
    fn mhost() -> assert_cli::Assert {
        ::std::env::set_var("RUST_BACKTRACE", "1");
        assert_cli::Assert::command(&["./target/debug/mhost"])
    }
    #[cfg(not(debug_assertions))]
    fn mhost() -> assert_cli::Assert {
        ::std::env::set_var("RUST_BACKTRACE", "1");
        assert_cli::Assert::command(&["./target/release/mhost"])
    }
}
Distinguish cargo release type for integration tests
extern crate assert_cli;
#[cfg(test)]
mod integration {
// End-to-end tests that invoke the compiled `mhost` binary and check
// its CLI output. NOTE(review): the lookup tests appear to require
// network access — confirm before running in restricted CI.
use assert_cli;
// Running with no arguments must fail with a usage error.
#[test]
fn mhost_wo_args() {
mhost()
.fails()
.and()
.stderr().contains("error: The following required arguments were not provided:")
.unwrap()
}
// A forward lookup prints both the A and AAAA records.
#[test]
fn mhost_simple_lookup() {
mhost()
.with_args(&["www.example.com"])
.succeeds()
.and()
.stdout().contains("* IPv4: 93.184.216.34")
.and()
.stdout().contains("* IPv6: 2606:2800:220:1:248:1893:25c8:1946")
.unwrap()
}
// An IPv4 address argument triggers a PTR (reverse) lookup.
#[test]
fn mhost_simple_ipv4_reverse_lookup() {
mhost()
.with_args(&["8.8.8.8"])
.succeeds()
.and()
.stdout().contains("* PTR: google-public-dns-a.google.com.")
.unwrap()
}
// An IPv6 address argument triggers a PTR (reverse) lookup.
#[test]
fn mhost_simple_ipv6_reverse_lookup() {
mhost()
.with_args(&["2001:4860:4860::8888"])
.succeeds()
.and()
.stdout().contains("* PTR: google-public-dns-a.google.com.")
.unwrap()
}
// Since local development uses `cargo test` and Travis CI uses `cargo test --release` we need to distinguish,
// which binary to call -- `#[cfg(debug_assertions)]` to the rescue.
#[cfg(debug_assertions)]
fn mhost() -> assert_cli::Assert {
::std::env::set_var("RUST_BACKTRACE", "1");
assert_cli::Assert::command(&["./target/debug/mhost"])
}
#[cfg(not(debug_assertions))]
fn mhost() -> assert_cli::Assert {
::std::env::set_var("RUST_BACKTRACE", "1");
assert_cli::Assert::command(&["./target/release/mhost"])
}
}
|
//! An interpreter for the rust-installer package format. Responsible
//! for installing from a directory or tarball to an installation
//! prefix, represented by a `Components` instance.
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::io::{self, ErrorKind as IOErrorKind, Read};
use std::iter::FromIterator;
use std::mem;
use std::path::{Path, PathBuf};
use tar::EntryType;
use crate::diskio::{get_executor, Executor, Item, Kind};
use crate::dist::component::components::*;
use crate::dist::component::transaction::*;
use crate::dist::temp;
use crate::errors::*;
use crate::process;
use crate::utils::notifications::Notification;
use crate::utils::utils;
/// The current metadata revision used by rust-installer
pub const INSTALLER_VERSION: &str = "3";
pub const VERSION_FILE: &str = "rust-installer-version";
/// Interface implemented by every installable package format:
/// component membership tests, installation into a transaction, and
/// component enumeration.
pub trait Package: fmt::Debug {
/// Returns true if the package provides `component` (or, failing
/// that, its `short_name` when one is given).
fn contains(&self, component: &str, short_name: Option<&str>) -> bool;
/// Installs `component` into `target`, recording every filesystem
/// change in the returned transaction so it can be rolled back.
fn install<'a>(
&self,
target: &Components,
component: &str,
short_name: Option<&str>,
tx: Transaction<'a>,
) -> Result<Transaction<'a>>;
/// Lists the names of all components in the package.
fn components(&self) -> Vec<String>;
}
/// A package rooted at an unpacked directory on disk.
#[derive(Debug)]
pub struct DirectoryPackage {
// Root directory of the unpacked package.
path: PathBuf,
// Component names read from the package's `components` file.
components: HashSet<String>,
// When true, files are copied into place; when false they are moved.
copy: bool,
}
impl DirectoryPackage {
/// Opens the package rooted at `path`, validating its installer
/// version file and reading the component list (one name per line).
pub fn new(path: PathBuf, copy: bool) -> Result<Self> {
validate_installer_version(&path)?;
let content = utils::read_file("package components", &path.join("components"))?;
let components = content
.lines()
.map(std::borrow::ToOwned::to_owned)
.collect();
Ok(Self {
path,
components,
copy,
})
}
}
/// Reads the package's version file and checks that it matches the
/// installer revision this code understands (`INSTALLER_VERSION`).
/// Any other value is rejected with `BadInstallerVersion`.
fn validate_installer_version(path: &Path) -> Result<()> {
    let contents = utils::read_file("installer version", &path.join(VERSION_FILE))?;
    let version = contents.trim();
    match version {
        INSTALLER_VERSION => Ok(()),
        other => Err(ErrorKind::BadInstallerVersion(other.to_owned()).into()),
    }
}
impl Package for DirectoryPackage {
// Membership: the exact component name, or the short name as a
// fallback when one is supplied.
fn contains(&self, component: &str, short_name: Option<&str>) -> bool {
self.components.contains(component)
|| if let Some(n) = short_name {
self.components.contains(n)
} else {
false
}
}
// Installs one component by replaying its `manifest.in`: each line
// names a "file" or "dir" to copy or move (per `self.copy`) into the
// target, with every operation recorded on the transaction builder.
fn install<'a>(
&self,
target: &Components,
name: &str,
short_name: Option<&str>,
tx: Transaction<'a>,
) -> Result<Transaction<'a>> {
// Prefer the full name when the package knows it; otherwise fall
// back to the short name if given.
let actual_name = if self.components.contains(name) {
name
} else if let Some(n) = short_name {
n
} else {
name
};
let root = self.path.join(actual_name);
let manifest = utils::read_file("package manifest", &root.join("manifest.in"))?;
let mut builder = target.add(name, tx);
for l in manifest.lines() {
// Each manifest line decodes to a (kind, relative path) pair;
// undecodable lines mean the component is corrupt.
let part = ComponentPart::decode(l)
.ok_or_else(|| ErrorKind::CorruptComponent(name.to_owned()))?;
let path = part.1;
let src_path = root.join(&path);
match &*part.0 {
"file" => {
if self.copy {
builder.copy_file(path.clone(), &src_path)?
} else {
builder.move_file(path.clone(), &src_path)?
}
}
"dir" => {
if self.copy {
builder.copy_dir(path.clone(), &src_path)?
} else {
builder.move_dir(path.clone(), &src_path)?
}
}
_ => return Err(ErrorKind::CorruptComponent(name.to_owned()).into()),
}
}
let tx = builder.finish()?;
Ok(tx)
}
// Component list as owned strings (unordered: backed by a HashSet).
fn components(&self) -> Vec<String> {
self.components.iter().cloned().collect()
}
}
/// A package sourced from a tar stream, unpacked into a temp directory.
/// The temp dir handle is kept alongside the inner DirectoryPackage so
/// the unpacked files live as long as the package.
#[derive(Debug)]
pub struct TarPackage<'a>(DirectoryPackage, temp::Dir<'a>);
impl<'a> TarPackage<'a> {
/// Unpacks `stream` into a fresh temp directory and wraps the result
/// as a DirectoryPackage with move semantics (copy = false).
pub fn new<R: Read>(
stream: R,
temp_cfg: &'a temp::Cfg,
notify_handler: Option<&'a dyn Fn(Notification<'_>)>,
) -> Result<Self> {
let temp_dir = temp_cfg.new_directory()?;
let mut archive = tar::Archive::new(stream);
// The rust-installer packages unpack to a directory called
// $pkgname-$version-$target. Skip that directory when
// unpacking.
unpack_without_first_dir(&mut archive, &*temp_dir, notify_handler)?;
Ok(TarPackage(
DirectoryPackage::new(temp_dir.to_owned(), false)?,
temp_dir,
))
}
}
/// Tracks how much RAM may be used to buffer file contents while
/// unpacking, letting tar extraction apply backpressure to disk IO.
struct MemoryBudget {
    // Maximum number of bytes that may be claimed at once.
    limit: usize,
    // Bytes currently claimed by in-flight items (claim/reclaim keep
    // this <= limit, so available() cannot underflow).
    used: usize,
}
// Probably this should live in diskio but ¯\_(ツ)_/¯
impl MemoryBudget {
    /// Creates a budget sized from `RUSTUP_UNPACK_RAM` if set, else from
    /// the machine's effective RAM (minus a fixed allowance for rustup
    /// itself), capped at a default maximum.
    ///
    /// # Panics
    /// Panics if the resulting budget is smaller than `max_file_size`,
    /// since a single file could then never fit in memory.
    fn new(
        max_file_size: usize,
        effective_max_ram: Option<usize>,
        notify_handler: Option<&dyn Fn(Notification<'_>)>,
    ) -> Self {
        const DEFAULT_UNPACK_RAM_MAX: usize = 500 * 1024 * 1024;
        const RAM_ALLOWANCE_FOR_RUSTUP_AND_BUFFERS: usize = 100 * 1024 * 1024;
        let default_max_unpack_ram = if let Some(effective_max_ram) = effective_max_ram {
            // saturating_sub: on machines reporting less RAM than the
            // rustup allowance, the plain subtraction underflowed
            // (panicking in debug builds); clamp to zero instead and let
            // the max_file_size check below produce a clear error.
            let ram_for_unpacking =
                effective_max_ram.saturating_sub(RAM_ALLOWANCE_FOR_RUSTUP_AND_BUFFERS);
            std::cmp::min(DEFAULT_UNPACK_RAM_MAX, ram_for_unpacking)
        } else {
            // Rustup does not know how much RAM the machine has: use the
            // minimum known to work reliably.
            DEFAULT_UNPACK_RAM_MAX
        };
        let unpack_ram = match process()
            .var("RUSTUP_UNPACK_RAM")
            .ok()
            .and_then(|budget_str| budget_str.parse::<usize>().ok())
        {
            // Note: In future we may want to add a warning or even an override if a user
            // supplied budget is larger than effective_max_ram.
            Some(budget) => budget,
            None => {
                if let Some(h) = notify_handler {
                    h(Notification::SetDefaultBufferSize(default_max_unpack_ram))
                }
                default_max_unpack_ram
            }
        };
        if max_file_size > unpack_ram {
            panic!("RUSTUP_UNPACK_RAM must be larger than {}", max_file_size);
        }
        Self {
            limit: unpack_ram,
            used: 0,
        }
    }
    /// Releases the bytes held by a completed item (directories hold none).
    fn reclaim(&mut self, op: &Item) {
        match &op.kind {
            Kind::Directory => {}
            Kind::File(content) => self.used -= content.len(),
        };
    }
    /// Accounts for the bytes an item will hold while queued/in flight.
    fn claim(&mut self, op: &Item) {
        match &op.kind {
            Kind::Directory => {}
            Kind::File(content) => self.used += content.len(),
        };
    }
    /// Bytes still available before the budget is exhausted.
    fn available(&self) -> usize {
        self.limit - self.used
    }
}
/// Takes the result out of `op` (leaving `Ok(())` in its place) and
/// decides whether it represents a real failure. An `AlreadyExists`
/// error on a directory-creation item is tolerated; everything else is
/// propagated to the caller.
fn filter_result(op: &mut Item) -> io::Result<()> {
    match mem::replace(&mut op.result, Ok(())) {
        Ok(_) => Ok(()),
        Err(e) if e.kind() == IOErrorKind::AlreadyExists => {
            // mkdir of e.g. ~/.rustup already existing is just fine;
            // for others it would be better to know whether it is
            // expected to exist or not -so put a flag in the state.
            match op.kind {
                Kind::Directory => Ok(()),
                _ => Err(e),
            }
        }
        Err(e) => Err(e),
    }
}
/// Dequeue the children of directories queued up waiting for the directory to
/// be created.
///
/// Currently the volume of queued items does not count as backpressure against
/// the main tar extraction process.
///
/// Returns the number of items unblocked, directly or recursively, by
/// `item` completing.
fn trigger_children(
io_executor: &mut dyn Executor,
directories: &mut HashMap<PathBuf, DirStatus>,
budget: &mut MemoryBudget,
item: Item,
) -> Result<usize> {
let mut result = 0;
if let Kind::Directory = item.kind {
let mut pending = Vec::new();
// The just-created directory must already have a Pending entry in
// the map; flip it to Exists and take the children parked on it.
directories
.entry(item.full_path)
.and_modify(|status| match status {
DirStatus::Exists => unreachable!(),
DirStatus::Pending(pending_inner) => {
pending.append(pending_inner);
*status = DirStatus::Exists;
}
})
.or_insert_with(|| unreachable!());
result += pending.len();
// Submit the released children; any op completed by those submissions
// may in turn release grandchildren, hence the recursion.
for pending_item in pending.into_iter() {
for mut item in Vec::from_iter(io_executor.execute(pending_item)) {
// TODO capture metrics
budget.reclaim(&item);
filter_result(&mut item).chain_err(|| ErrorKind::ExtractingPackage)?;
result += trigger_children(io_executor, directories, budget, item)?;
}
}
};
Ok(result)
}
/// What is the status of this directory ?
enum DirStatus {
// The directory exists on disk; children may be submitted immediately.
Exists,
// The mkdir has been queued; these child items wait for it to finish.
Pending(Vec<Item>),
}
/// Extracts `archive` into `path`, dropping the first path component of
/// every entry (rust-installer tarballs nest their content under a
/// single `$pkgname-$version-$target/` directory). Writes are handed to
/// an IO executor while a MemoryBudget throttles how many file bodies
/// are buffered in RAM. Only directories and regular files are
/// accepted; entries with non-normal path components are rejected.
fn unpack_without_first_dir<'a, R: Read>(
archive: &mut tar::Archive<R>,
path: &Path,
notify_handler: Option<&'a dyn Fn(Notification<'_>)>,
) -> Result<()> {
let mut io_executor: Box<dyn Executor> = get_executor(notify_handler)?;
let entries = archive
.entries()
.chain_err(|| ErrorKind::ExtractingPackage)?;
const MAX_FILE_SIZE: u64 = 220_000_000;
// Probe the machine's memory limit; on failure, report and fall back
// to the budget's built-in default.
let effective_max_ram = match effective_limits::memory_limit() {
Ok(ram) => Some(ram as usize),
Err(e) => {
if let Some(h) = notify_handler {
h(Notification::Error(e.to_string()))
}
None
}
};
let mut budget = MemoryBudget::new(MAX_FILE_SIZE as usize, effective_max_ram, notify_handler);
let mut directories: HashMap<PathBuf, DirStatus> = HashMap::new();
// Path is presumed to exist. Call it a precondition.
directories.insert(path.to_owned(), DirStatus::Exists);
'entries: for entry in entries {
// drain completed results to keep memory pressure low and respond
// rapidly to completed events even if we couldn't submit work (because
// our unpacked item is pending dequeue)
for mut item in Vec::from_iter(io_executor.completed()) {
// TODO capture metrics
budget.reclaim(&item);
filter_result(&mut item).chain_err(|| ErrorKind::ExtractingPackage)?;
trigger_children(&mut *io_executor, &mut directories, &mut budget, item)?;
}
let mut entry = entry.chain_err(|| ErrorKind::ExtractingPackage)?;
let relpath = {
let path = entry.path();
let path = path.chain_err(|| ErrorKind::ExtractingPackage)?;
path.into_owned()
};
// Reject path components that are not normal (.|..|/| etc)
for part in relpath.components() {
match part {
std::path::Component::Normal(_) => {}
_ => return Err(ErrorKind::BadPath(relpath).into()),
}
}
let mut components = relpath.components();
// Throw away the first path component: our root was supplied.
components.next();
let full_path = path.join(&components.as_path());
if full_path == path {
// The tmp dir code makes the root dir for us.
continue;
}
// A single file larger than MAX_FILE_SIZE can never fit in the
// budget, so reject it outright.
let size = entry.header().size()?;
if size > MAX_FILE_SIZE {
return Err(format!("File too big {} {}", relpath.display(), size).into());
}
// Wait for in-flight IO to finish until the budget can cover this
// file's contents.
while size > budget.available() as u64 {
for mut item in Vec::from_iter(io_executor.completed()) {
// TODO capture metrics
budget.reclaim(&item);
filter_result(&mut item).chain_err(|| ErrorKind::ExtractingPackage)?;
trigger_children(&mut *io_executor, &mut directories, &mut budget, item)?;
}
}
// Bail out if we get hard links, device nodes or any other unusual content
// - it is most likely an attack, as rusts cross-platform nature precludes
// such artifacts
let kind = entry.header().entry_type();
// https://github.com/rust-lang/rustup/issues/1140 and before that
// https://github.com/rust-lang/rust/issues/25479
// tl;dr: code got convoluted and we *may* have damaged tarballs out
// there.
// However the mandate we have is very simple: unpack as the current
// user with modes matching the tar contents. No documented tars with
// bad modes are in the bug tracker : the previous permission splatting
// code was inherited from interactions with sudo that are best
// addressed outside of rustup (run with an appropriate effective uid).
// THAT SAID: If regressions turn up immediately post release this code -
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=a8549057f0827bf3a068d8917256765a
// is a translation of the prior helper function into an in-iterator
// application.
let tar_mode = entry.header().mode().ok().unwrap();
// That said, the tarballs that are shipped way back have single-user
// permissions:
// -rwx------ rustbuild/rustbuild ..... release/test-release.sh
// so we should normalise the mode to match the previous behaviour users
// may be expecting where the above file would end up with mode 0o755
let u_mode = tar_mode & 0o700;
let g_mode = (u_mode & 0o0500) >> 3;
let o_mode = g_mode >> 3;
let mode = u_mode | g_mode | o_mode;
let mut item = match kind {
EntryType::Directory => {
directories.insert(full_path.to_owned(), DirStatus::Pending(Vec::new()));
Item::make_dir(full_path, mode)
}
EntryType::Regular => {
let mut v = Vec::with_capacity(size as usize);
entry.read_to_end(&mut v)?;
Item::write_file(full_path, v, mode)
}
_ => return Err(ErrorKind::UnsupportedKind(format!("{:?}", kind)).into()),
};
budget.claim(&item);
let item = loop {
// Create the full path to the entry if it does not exist already
if let Some(parent) = item.full_path.to_owned().parent() {
match directories.get_mut(parent) {
None => {
// Tar has item before containing directory
// Complain about this so we can see if these exist.
writeln!(
process().stderr(),
"Unexpected: missing parent '{}' for '{}'",
parent.display(),
entry.path()?.display()
)?;
directories.insert(parent.to_owned(), DirStatus::Pending(vec![item]));
item = Item::make_dir(parent.to_owned(), 0o755);
// Check the parent's parent
continue;
}
Some(DirStatus::Exists) => {
break item;
}
Some(DirStatus::Pending(pending)) => {
// Parent dir is being made, take next item from tar
pending.push(item);
continue 'entries;
}
}
} else {
// We should never see a path with no parent.
panic!();
}
};
for mut item in Vec::from_iter(io_executor.execute(item)) {
// TODO capture metrics
budget.reclaim(&item);
filter_result(&mut item).chain_err(|| ErrorKind::ExtractingPackage)?;
trigger_children(&mut *io_executor, &mut directories, &mut budget, item)?;
}
}
// Drain the executor until a full join releases no further work.
loop {
let mut triggered = 0;
for mut item in Vec::from_iter(io_executor.join()) {
// handle final IOs
// TODO capture metrics
budget.reclaim(&item);
filter_result(&mut item).chain_err(|| ErrorKind::ExtractingPackage)?;
triggered += trigger_children(&mut *io_executor, &mut directories, &mut budget, item)?;
}
if triggered == 0 {
// None of the IO submitted before the prior join triggered any new
// submissions
break;
}
}
Ok(())
}
impl<'a> Package for TarPackage<'a> {
// All Package operations delegate to the DirectoryPackage created
// from the unpacked temp dir.
fn contains(&self, component: &str, short_name: Option<&str>) -> bool {
self.0.contains(component, short_name)
}
fn install<'b>(
&self,
target: &Components,
component: &str,
short_name: Option<&str>,
tx: Transaction<'b>,
) -> Result<Transaction<'b>> {
self.0.install(target, component, short_name, tx)
}
fn components(&self) -> Vec<String> {
self.0.components()
}
}
/// A gzip-compressed tar package: decompresses the stream and defers
/// to TarPackage for unpacking.
#[derive(Debug)]
pub struct TarGzPackage<'a>(TarPackage<'a>);
impl<'a> TarGzPackage<'a> {
/// Wraps `stream` in a gzip decoder and unpacks it as a TarPackage.
pub fn new<R: Read>(
stream: R,
temp_cfg: &'a temp::Cfg,
notify_handler: Option<&'a dyn Fn(Notification<'_>)>,
) -> Result<Self> {
let stream = flate2::read::GzDecoder::new(stream);
Ok(TarGzPackage(TarPackage::new(
stream,
temp_cfg,
notify_handler,
)?))
}
}
impl<'a> Package for TarGzPackage<'a> {
// All Package operations delegate to the inner TarPackage.
fn contains(&self, component: &str, short_name: Option<&str>) -> bool {
self.0.contains(component, short_name)
}
fn install<'b>(
&self,
target: &Components,
component: &str,
short_name: Option<&str>,
tx: Transaction<'b>,
) -> Result<Transaction<'b>> {
self.0.install(target, component, short_name, tx)
}
fn components(&self) -> Vec<String> {
self.0.components()
}
}
/// An xz-compressed tar package: decompresses the stream and defers to
/// TarPackage for unpacking.
#[derive(Debug)]
pub struct TarXzPackage<'a>(TarPackage<'a>);
impl<'a> TarXzPackage<'a> {
/// Wraps `stream` in an xz decoder and unpacks it as a TarPackage.
pub fn new<R: Read>(
stream: R,
temp_cfg: &'a temp::Cfg,
notify_handler: Option<&'a dyn Fn(Notification<'_>)>,
) -> Result<Self> {
let stream = xz2::read::XzDecoder::new(stream);
Ok(TarXzPackage(TarPackage::new(
stream,
temp_cfg,
notify_handler,
)?))
}
}
impl<'a> Package for TarXzPackage<'a> {
// All Package operations delegate to the inner TarPackage.
fn contains(&self, component: &str, short_name: Option<&str>) -> bool {
self.0.contains(component, short_name)
}
fn install<'b>(
&self,
target: &Components,
component: &str,
short_name: Option<&str>,
tx: Transaction<'b>,
) -> Result<Transaction<'b>> {
self.0.install(target, component, short_name, tx)
}
fn components(&self) -> Vec<String> {
self.0.components()
}
}
dist: Make use of effective-limits conditional on non-BSD targets
Signed-off-by: Daniel Silverstone <3cc17a76d780e0cd3d87bad586a1c53967be5625@digital-scurf.org>
//! An interpreter for the rust-installer package format. Responsible
//! for installing from a directory or tarball to an installation
//! prefix, represented by a `Components` instance.
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::io::{self, ErrorKind as IOErrorKind, Read};
use std::iter::FromIterator;
use std::mem;
use std::path::{Path, PathBuf};
use tar::EntryType;
use crate::diskio::{get_executor, Executor, Item, Kind};
use crate::dist::component::components::*;
use crate::dist::component::transaction::*;
use crate::dist::temp;
use crate::errors::*;
use crate::process;
use crate::utils::notifications::Notification;
use crate::utils::utils;
/// The current metadata revision used by rust-installer
pub const INSTALLER_VERSION: &str = "3";
pub const VERSION_FILE: &str = "rust-installer-version";
/// Interface implemented by every installable package format:
/// component membership tests, installation into a transaction, and
/// component enumeration.
pub trait Package: fmt::Debug {
/// Returns true if the package provides `component` (or, failing
/// that, its `short_name` when one is given).
fn contains(&self, component: &str, short_name: Option<&str>) -> bool;
/// Installs `component` into `target`, recording every filesystem
/// change in the returned transaction so it can be rolled back.
fn install<'a>(
&self,
target: &Components,
component: &str,
short_name: Option<&str>,
tx: Transaction<'a>,
) -> Result<Transaction<'a>>;
/// Lists the names of all components in the package.
fn components(&self) -> Vec<String>;
}
/// A package rooted at an unpacked directory on disk.
#[derive(Debug)]
pub struct DirectoryPackage {
// Root directory of the unpacked package.
path: PathBuf,
// Component names read from the package's `components` file.
components: HashSet<String>,
// When true, files are copied into place; when false they are moved.
copy: bool,
}
impl DirectoryPackage {
/// Opens the package rooted at `path`, validating its installer
/// version file and reading the component list (one name per line).
pub fn new(path: PathBuf, copy: bool) -> Result<Self> {
validate_installer_version(&path)?;
let content = utils::read_file("package components", &path.join("components"))?;
let components = content
.lines()
.map(std::borrow::ToOwned::to_owned)
.collect();
Ok(Self {
path,
components,
copy,
})
}
}
/// Reads the package's version file and checks that it matches the
/// installer revision this code understands (`INSTALLER_VERSION`);
/// any other value is rejected with `BadInstallerVersion`.
fn validate_installer_version(path: &Path) -> Result<()> {
    let raw = utils::read_file("installer version", &path.join(VERSION_FILE))?;
    let found = raw.trim();
    if found != INSTALLER_VERSION {
        return Err(ErrorKind::BadInstallerVersion(found.to_owned()).into());
    }
    Ok(())
}
impl Package for DirectoryPackage {
// Membership: the exact component name, or the short name as a
// fallback when one is supplied.
fn contains(&self, component: &str, short_name: Option<&str>) -> bool {
self.components.contains(component)
|| if let Some(n) = short_name {
self.components.contains(n)
} else {
false
}
}
// Installs one component by replaying its `manifest.in`: each line
// names a "file" or "dir" to copy or move (per `self.copy`) into the
// target, with every operation recorded on the transaction builder.
fn install<'a>(
&self,
target: &Components,
name: &str,
short_name: Option<&str>,
tx: Transaction<'a>,
) -> Result<Transaction<'a>> {
// Prefer the full name when the package knows it; otherwise fall
// back to the short name if given.
let actual_name = if self.components.contains(name) {
name
} else if let Some(n) = short_name {
n
} else {
name
};
let root = self.path.join(actual_name);
let manifest = utils::read_file("package manifest", &root.join("manifest.in"))?;
let mut builder = target.add(name, tx);
for l in manifest.lines() {
// Each manifest line decodes to a (kind, relative path) pair;
// undecodable lines mean the component is corrupt.
let part = ComponentPart::decode(l)
.ok_or_else(|| ErrorKind::CorruptComponent(name.to_owned()))?;
let path = part.1;
let src_path = root.join(&path);
match &*part.0 {
"file" => {
if self.copy {
builder.copy_file(path.clone(), &src_path)?
} else {
builder.move_file(path.clone(), &src_path)?
}
}
"dir" => {
if self.copy {
builder.copy_dir(path.clone(), &src_path)?
} else {
builder.move_dir(path.clone(), &src_path)?
}
}
_ => return Err(ErrorKind::CorruptComponent(name.to_owned()).into()),
}
}
let tx = builder.finish()?;
Ok(tx)
}
// Component list as owned strings (unordered: backed by a HashSet).
fn components(&self) -> Vec<String> {
self.components.iter().cloned().collect()
}
}
/// A package sourced from a tar stream, unpacked into a temp directory.
/// The temp dir handle is kept alongside the inner DirectoryPackage so
/// the unpacked files live as long as the package.
#[derive(Debug)]
pub struct TarPackage<'a>(DirectoryPackage, temp::Dir<'a>);
impl<'a> TarPackage<'a> {
/// Unpacks `stream` into a fresh temp directory and wraps the result
/// as a DirectoryPackage with move semantics (copy = false).
pub fn new<R: Read>(
stream: R,
temp_cfg: &'a temp::Cfg,
notify_handler: Option<&'a dyn Fn(Notification<'_>)>,
) -> Result<Self> {
let temp_dir = temp_cfg.new_directory()?;
let mut archive = tar::Archive::new(stream);
// The rust-installer packages unpack to a directory called
// $pkgname-$version-$target. Skip that directory when
// unpacking.
unpack_without_first_dir(&mut archive, &*temp_dir, notify_handler)?;
Ok(TarPackage(
DirectoryPackage::new(temp_dir.to_owned(), false)?,
temp_dir,
))
}
}
/// Tracks how much RAM may be used to buffer file contents while
/// unpacking, letting tar extraction apply backpressure to disk IO.
struct MemoryBudget {
    // Maximum number of bytes that may be claimed at once.
    limit: usize,
    // Bytes currently claimed by in-flight items (claim/reclaim keep
    // this <= limit, so available() cannot underflow).
    used: usize,
}
// Probably this should live in diskio but ¯\_(ツ)_/¯
impl MemoryBudget {
    /// Creates a budget sized from `RUSTUP_UNPACK_RAM` if set, else from
    /// the machine's effective RAM (minus a fixed allowance for rustup
    /// itself), capped at a default maximum.
    ///
    /// # Panics
    /// Panics if the resulting budget is smaller than `max_file_size`,
    /// since a single file could then never fit in memory.
    fn new(
        max_file_size: usize,
        effective_max_ram: Option<usize>,
        notify_handler: Option<&dyn Fn(Notification<'_>)>,
    ) -> Self {
        const DEFAULT_UNPACK_RAM_MAX: usize = 500 * 1024 * 1024;
        const RAM_ALLOWANCE_FOR_RUSTUP_AND_BUFFERS: usize = 100 * 1024 * 1024;
        let default_max_unpack_ram = if let Some(effective_max_ram) = effective_max_ram {
            // saturating_sub: on machines reporting less RAM than the
            // rustup allowance, the plain subtraction underflowed
            // (panicking in debug builds); clamp to zero instead and let
            // the max_file_size check below produce a clear error.
            let ram_for_unpacking =
                effective_max_ram.saturating_sub(RAM_ALLOWANCE_FOR_RUSTUP_AND_BUFFERS);
            std::cmp::min(DEFAULT_UNPACK_RAM_MAX, ram_for_unpacking)
        } else {
            // Rustup does not know how much RAM the machine has: use the
            // minimum known to work reliably.
            DEFAULT_UNPACK_RAM_MAX
        };
        let unpack_ram = match process()
            .var("RUSTUP_UNPACK_RAM")
            .ok()
            .and_then(|budget_str| budget_str.parse::<usize>().ok())
        {
            // Note: In future we may want to add a warning or even an override if a user
            // supplied budget is larger than effective_max_ram.
            Some(budget) => budget,
            None => {
                if let Some(h) = notify_handler {
                    h(Notification::SetDefaultBufferSize(default_max_unpack_ram))
                }
                default_max_unpack_ram
            }
        };
        if max_file_size > unpack_ram {
            panic!("RUSTUP_UNPACK_RAM must be larger than {}", max_file_size);
        }
        Self {
            limit: unpack_ram,
            used: 0,
        }
    }
    /// Releases the bytes held by a completed item (directories hold none).
    fn reclaim(&mut self, op: &Item) {
        match &op.kind {
            Kind::Directory => {}
            Kind::File(content) => self.used -= content.len(),
        };
    }
    /// Accounts for the bytes an item will hold while queued/in flight.
    fn claim(&mut self, op: &Item) {
        match &op.kind {
            Kind::Directory => {}
            Kind::File(content) => self.used += content.len(),
        };
    }
    /// Bytes still available before the budget is exhausted.
    fn available(&self) -> usize {
        self.limit - self.used
    }
}
/// Takes the result out of `op` (leaving `Ok(())` in its place) and
/// decides whether it represents a real failure. An `AlreadyExists`
/// error on a directory-creation item is tolerated; everything else is
/// propagated to the caller.
fn filter_result(op: &mut Item) -> io::Result<()> {
    match mem::replace(&mut op.result, Ok(())) {
        Ok(_) => Ok(()),
        Err(e) if e.kind() == IOErrorKind::AlreadyExists => {
            // mkdir of e.g. ~/.rustup already existing is just fine;
            // for others it would be better to know whether it is
            // expected to exist or not -so put a flag in the state.
            match op.kind {
                Kind::Directory => Ok(()),
                _ => Err(e),
            }
        }
        Err(e) => Err(e),
    }
}
/// Dequeue the children of directories queued up waiting for the directory to
/// be created.
///
/// Currently the volume of queued items does not count as backpressure against
/// the main tar extraction process.
///
/// Returns the number of items unblocked, directly or recursively, by
/// `item` completing.
fn trigger_children(
io_executor: &mut dyn Executor,
directories: &mut HashMap<PathBuf, DirStatus>,
budget: &mut MemoryBudget,
item: Item,
) -> Result<usize> {
let mut result = 0;
if let Kind::Directory = item.kind {
let mut pending = Vec::new();
// The just-created directory must already have a Pending entry in
// the map; flip it to Exists and take the children parked on it.
directories
.entry(item.full_path)
.and_modify(|status| match status {
DirStatus::Exists => unreachable!(),
DirStatus::Pending(pending_inner) => {
pending.append(pending_inner);
*status = DirStatus::Exists;
}
})
.or_insert_with(|| unreachable!());
result += pending.len();
// Submit the released children; any op completed by those submissions
// may in turn release grandchildren, hence the recursion.
for pending_item in pending.into_iter() {
for mut item in Vec::from_iter(io_executor.execute(pending_item)) {
// TODO capture metrics
budget.reclaim(&item);
filter_result(&mut item).chain_err(|| ErrorKind::ExtractingPackage)?;
result += trigger_children(io_executor, directories, budget, item)?;
}
}
};
Ok(result)
}
/// What is the status of this directory ?
enum DirStatus {
// The directory exists on disk; children may be submitted immediately.
Exists,
// The mkdir has been queued; these child items wait for it to finish.
Pending(Vec<Item>),
}
/// Extracts `archive` into `path`, dropping the first path component of
/// every entry (rust-installer tarballs nest their content under a
/// single `$pkgname-$version-$target/` directory). Writes are handed to
/// an IO executor while a MemoryBudget throttles how many file bodies
/// are buffered in RAM. Only directories and regular files are
/// accepted; entries with non-normal path components are rejected.
fn unpack_without_first_dir<'a, R: Read>(
archive: &mut tar::Archive<R>,
path: &Path,
notify_handler: Option<&'a dyn Fn(Notification<'_>)>,
) -> Result<()> {
let mut io_executor: Box<dyn Executor> = get_executor(notify_handler)?;
let entries = archive
.entries()
.chain_err(|| ErrorKind::ExtractingPackage)?;
const MAX_FILE_SIZE: u64 = 220_000_000;
// RAM detection via effective_limits is only attempted on non-BSD
// targets (see the cfg below); on freebsd/netbsd it is skipped and the
// budget falls back to its built-in default.
let effective_max_ram = {
cfg_if::cfg_if! {
if #[cfg(not(any(target_os="freebsd", target_os="netbsd")))] {
match effective_limits::memory_limit() {
Ok(ram) => Some(ram as usize),
Err(e) => {
if let Some(h) = notify_handler {
h(Notification::Error(e.to_string()))
}
None
}
}
} else {
None
}
}
};
let mut budget = MemoryBudget::new(MAX_FILE_SIZE as usize, effective_max_ram, notify_handler);
let mut directories: HashMap<PathBuf, DirStatus> = HashMap::new();
// Path is presumed to exist. Call it a precondition.
directories.insert(path.to_owned(), DirStatus::Exists);
'entries: for entry in entries {
// drain completed results to keep memory pressure low and respond
// rapidly to completed events even if we couldn't submit work (because
// our unpacked item is pending dequeue)
for mut item in Vec::from_iter(io_executor.completed()) {
// TODO capture metrics
budget.reclaim(&item);
filter_result(&mut item).chain_err(|| ErrorKind::ExtractingPackage)?;
trigger_children(&mut *io_executor, &mut directories, &mut budget, item)?;
}
let mut entry = entry.chain_err(|| ErrorKind::ExtractingPackage)?;
let relpath = {
let path = entry.path();
let path = path.chain_err(|| ErrorKind::ExtractingPackage)?;
path.into_owned()
};
// Reject path components that are not normal (.|..|/| etc)
for part in relpath.components() {
match part {
std::path::Component::Normal(_) => {}
_ => return Err(ErrorKind::BadPath(relpath).into()),
}
}
let mut components = relpath.components();
// Throw away the first path component: our root was supplied.
components.next();
let full_path = path.join(&components.as_path());
if full_path == path {
// The tmp dir code makes the root dir for us.
continue;
}
// A single file larger than MAX_FILE_SIZE can never fit in the
// budget, so reject it outright.
let size = entry.header().size()?;
if size > MAX_FILE_SIZE {
return Err(format!("File too big {} {}", relpath.display(), size).into());
}
// Wait for in-flight IO to finish until the budget can cover this
// file's contents.
while size > budget.available() as u64 {
for mut item in Vec::from_iter(io_executor.completed()) {
// TODO capture metrics
budget.reclaim(&item);
filter_result(&mut item).chain_err(|| ErrorKind::ExtractingPackage)?;
trigger_children(&mut *io_executor, &mut directories, &mut budget, item)?;
}
}
// Bail out if we get hard links, device nodes or any other unusual content
// - it is most likely an attack, as rusts cross-platform nature precludes
// such artifacts
let kind = entry.header().entry_type();
// https://github.com/rust-lang/rustup/issues/1140 and before that
// https://github.com/rust-lang/rust/issues/25479
// tl;dr: code got convoluted and we *may* have damaged tarballs out
// there.
// However the mandate we have is very simple: unpack as the current
// user with modes matching the tar contents. No documented tars with
// bad modes are in the bug tracker : the previous permission splatting
// code was inherited from interactions with sudo that are best
// addressed outside of rustup (run with an appropriate effective uid).
// THAT SAID: If regressions turn up immediately post release this code -
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=a8549057f0827bf3a068d8917256765a
// is a translation of the prior helper function into an in-iterator
// application.
let tar_mode = entry.header().mode().ok().unwrap();
// That said, the tarballs that are shipped way back have single-user
// permissions:
// -rwx------ rustbuild/rustbuild ..... release/test-release.sh
// so we should normalise the mode to match the previous behaviour users
// may be expecting where the above file would end up with mode 0o755
let u_mode = tar_mode & 0o700;
let g_mode = (u_mode & 0o0500) >> 3;
let o_mode = g_mode >> 3;
let mode = u_mode | g_mode | o_mode;
let mut item = match kind {
EntryType::Directory => {
directories.insert(full_path.to_owned(), DirStatus::Pending(Vec::new()));
Item::make_dir(full_path, mode)
}
EntryType::Regular => {
let mut v = Vec::with_capacity(size as usize);
entry.read_to_end(&mut v)?;
Item::write_file(full_path, v, mode)
}
_ => return Err(ErrorKind::UnsupportedKind(format!("{:?}", kind)).into()),
};
budget.claim(&item);
let item = loop {
// Create the full path to the entry if it does not exist already
if let Some(parent) = item.full_path.to_owned().parent() {
match directories.get_mut(parent) {
None => {
// Tar has item before containing directory
// Complain about this so we can see if these exist.
writeln!(
process().stderr(),
"Unexpected: missing parent '{}' for '{}'",
parent.display(),
entry.path()?.display()
)?;
directories.insert(parent.to_owned(), DirStatus::Pending(vec![item]));
item = Item::make_dir(parent.to_owned(), 0o755);
// Check the parent's parent
continue;
}
Some(DirStatus::Exists) => {
break item;
}
Some(DirStatus::Pending(pending)) => {
// Parent dir is being made, take next item from tar
pending.push(item);
continue 'entries;
}
}
} else {
// We should never see a path with no parent.
panic!();
}
};
for mut item in Vec::from_iter(io_executor.execute(item)) {
// TODO capture metrics
budget.reclaim(&item);
filter_result(&mut item).chain_err(|| ErrorKind::ExtractingPackage)?;
trigger_children(&mut *io_executor, &mut directories, &mut budget, item)?;
}
}
// Drain the executor until a full join releases no further work.
loop {
let mut triggered = 0;
for mut item in Vec::from_iter(io_executor.join()) {
// handle final IOs
// TODO capture metrics
budget.reclaim(&item);
filter_result(&mut item).chain_err(|| ErrorKind::ExtractingPackage)?;
triggered += trigger_children(&mut *io_executor, &mut directories, &mut budget, item)?;
}
if triggered == 0 {
// None of the IO submitted before the prior join triggered any new
// submissions
break;
}
}
Ok(())
}
// `TarPackage` wraps an inner package value (tuple field `0`, declared
// above this chunk); the `Package` trait is implemented by delegating
// every method unchanged to that inner value.
impl<'a> Package for TarPackage<'a> {
    /// True if the unpacked archive provides `component`
    /// (optionally matched via its `short_name`).
    fn contains(&self, component: &str, short_name: Option<&str>) -> bool {
        self.0.contains(component, short_name)
    }
    /// Installs `component` into `target`, threading the transaction
    /// through the inner package and returning the updated transaction.
    fn install<'b>(
        &self,
        target: &Components,
        component: &str,
        short_name: Option<&str>,
        tx: Transaction<'b>,
    ) -> Result<Transaction<'b>> {
        self.0.install(target, component, short_name, tx)
    }
    /// Names of all components present in the package.
    fn components(&self) -> Vec<String> {
        self.0.components()
    }
}
/// A gzip-compressed tarball treated as a [`Package`]: the raw stream is
/// decompressed with `flate2` and the resulting tar stream is handled by
/// [`TarPackage`].
#[derive(Debug)]
pub struct TarGzPackage<'a>(TarPackage<'a>);
impl<'a> TarGzPackage<'a> {
    /// Wraps `stream` in a `GzDecoder` and forwards the decompressed
    /// stream, `temp_cfg` and `notify_handler` to `TarPackage::new`.
    ///
    /// # Errors
    /// Propagates any error from `TarPackage::new` (e.g. extraction
    /// failures while reading the archive).
    pub fn new<R: Read>(
        stream: R,
        temp_cfg: &'a temp::Cfg,
        notify_handler: Option<&'a dyn Fn(Notification<'_>)>,
    ) -> Result<Self> {
        let stream = flate2::read::GzDecoder::new(stream);
        Ok(TarGzPackage(TarPackage::new(
            stream,
            temp_cfg,
            notify_handler,
        )?))
    }
}
// `Package` for `TarGzPackage` is pure delegation to the wrapped
// `TarPackage` (tuple field `0`); see that impl for the semantics.
impl<'a> Package for TarGzPackage<'a> {
    /// Delegates to `TarPackage::contains`.
    fn contains(&self, component: &str, short_name: Option<&str>) -> bool {
        self.0.contains(component, short_name)
    }
    /// Delegates to `TarPackage::install`.
    fn install<'b>(
        &self,
        target: &Components,
        component: &str,
        short_name: Option<&str>,
        tx: Transaction<'b>,
    ) -> Result<Transaction<'b>> {
        self.0.install(target, component, short_name, tx)
    }
    /// Delegates to `TarPackage::components`.
    fn components(&self) -> Vec<String> {
        self.0.components()
    }
}
/// An xz-compressed tarball treated as a [`Package`]: the raw stream is
/// decompressed with `xz2` and the resulting tar stream is handled by
/// [`TarPackage`].
#[derive(Debug)]
pub struct TarXzPackage<'a>(TarPackage<'a>);
impl<'a> TarXzPackage<'a> {
    /// Wraps `stream` in an `XzDecoder` and forwards the decompressed
    /// stream, `temp_cfg` and `notify_handler` to `TarPackage::new`.
    ///
    /// # Errors
    /// Propagates any error from `TarPackage::new`.
    pub fn new<R: Read>(
        stream: R,
        temp_cfg: &'a temp::Cfg,
        notify_handler: Option<&'a dyn Fn(Notification<'_>)>,
    ) -> Result<Self> {
        let stream = xz2::read::XzDecoder::new(stream);
        Ok(TarXzPackage(TarPackage::new(
            stream,
            temp_cfg,
            notify_handler,
        )?))
    }
}
// `Package` for `TarXzPackage` is pure delegation to the wrapped
// `TarPackage` (tuple field `0`); see that impl for the semantics.
impl<'a> Package for TarXzPackage<'a> {
    /// Delegates to `TarPackage::contains`.
    fn contains(&self, component: &str, short_name: Option<&str>) -> bool {
        self.0.contains(component, short_name)
    }
    /// Delegates to `TarPackage::install`.
    fn install<'b>(
        &self,
        target: &Components,
        component: &str,
        short_name: Option<&str>,
        tx: Transaction<'b>,
    ) -> Result<Transaction<'b>> {
        self.0.install(target, component, short_name, tx)
    }
    /// Delegates to `TarPackage::components`.
    fn components(&self) -> Vec<String> {
        self.0.components()
    }
}
// ---- (unrelated file follows: fd CLI integration tests) ----
// Copyright (c) 2017 fd developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Integration tests for the CLI interface of fd.
extern crate regex;
mod testenv;
use testenv::TestEnv;
use regex::escape;
use std::fs;
use std::io::Write;
// Directories created in every default test environment.
static DEFAULT_DIRS: &'static [&'static str] = &["one/two/three", "one/two/three/directory_foo"];
// Files created in every default test environment. `fdignored.foo`,
// `gitignored.foo` and the dotfile `.hidden.foo` never show up in
// default output (cf. `test_simple`) — presumably they are matched by
// ignore files that `testenv` generates; verify against `mod testenv`.
static DEFAULT_FILES: &'static [&'static str] = &[
    "a.foo",
    "one/b.foo",
    "one/two/c.foo",
    "one/two/C.Foo2",
    "one/two/three/d.foo",
    "fdignored.foo",
    "gitignored.foo",
    ".hidden.foo",
    "e1 e2",
];
/// Returns the canonical, absolute path of the test environment root.
///
/// # Panics
/// Panics if the root cannot be canonicalized or is not valid UTF-8;
/// acceptable here because this is test-only code.
fn get_absolute_root_path(env: &TestEnv) -> String {
    let path = env.test_root()
        .canonicalize()
        .expect("absolute path")
        .to_str()
        .expect("string")
        .to_string();
    // On Windows, `canonicalize` yields a verbatim path (`\\?\C:\...`);
    // strip that prefix so expected outputs can be written portably.
    // `trim_start_matches` replaces the deprecated `trim_left_matches`
    // (identical behaviour; renamed in Rust 1.30/1.33).
    #[cfg(windows)]
    let path = path.trim_start_matches(r"\\?\").to_string();
    path
}
/// Builds a fresh test environment from `dirs`/`files` and returns it
/// together with the canonicalized absolute path of its root directory.
#[cfg(test)]
fn get_test_env_with_abs_path(dirs: &[&'static str], files: &[&'static str]) -> (TestEnv, String) {
    let environment = TestEnv::new(dirs, files);
    let absolute_root = get_absolute_root_path(&environment);
    (environment, absolute_root)
}
/// Plain substring/regex pattern searches against the default tree.
/// An empty pattern lists every non-hidden, non-ignored entry —
/// including the `symlink` entry the test env provides.
#[test]
fn test_simple() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(&["a.foo"], "a.foo");
    te.assert_output(&["b.foo"], "one/b.foo");
    te.assert_output(&["d.foo"], "one/two/three/d.foo");
    te.assert_output(
        &["foo"],
        "a.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
    );
    te.assert_output(
        &[],
        "a.foo
e1 e2
one
one/b.foo
one/two
one/two/c.foo
one/two/C.Foo2
one/two/three
one/two/three/d.foo
one/two/three/directory_foo
symlink",
    );
}
/// Searching several root directories in one invocation: each root
/// contributes its own matches, and roots without a match add nothing.
#[test]
fn test_multi_file() {
    let dirs = &["test1", "test2"];
    let files = &["test1/a.foo", "test1/b.foo", "test2/a.foo"];
    let te = TestEnv::new(dirs, files);
    te.assert_output(
        &["a.foo", "test1", "test2"],
        "test1/a.foo
test2/a.foo",
    );
    // Empty pattern: everything under both roots.
    te.assert_output(
        &["", "test1", "test2"],
        "test1/a.foo
test2/a.foo
test1/b.foo",
    );
    te.assert_output(&["a.foo", "test1"], "test1/a.foo");
    te.assert_output(&["b.foo", "test1", "test2"], "test1/b.foo");
}
/// An explicit root path argument restricts the search to that subtree,
/// and relative roots (`../..`) are reflected verbatim in the output.
#[test]
fn test_explicit_root_path() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["foo", "one"],
        "one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
    );
    te.assert_output(
        &["foo", "one/two/three"],
        "one/two/three/d.foo
one/two/three/directory_foo",
    );
    // Run from a subdirectory with a relative root above the cwd.
    te.assert_output_subdirectory(
        "one/two",
        &["foo", "../../"],
        "../../a.foo
../../one/b.foo
../../one/two/c.foo
../../one/two/C.Foo2
../../one/two/three/d.foo
../../one/two/three/directory_foo",
    );
    te.assert_output_subdirectory(
        "one/two/three",
        &["", ".."],
        "../c.foo
../C.Foo2
../three
../three/d.foo
../three/directory_foo",
    );
}
/// Regex patterns: an all-lowercase class matches case-insensitively by
/// default (smart case), while `--case-sensitive` restricts it.
#[test]
fn test_regex_searches() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["[a-c].foo"],
        "a.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2",
    );
    te.assert_output(
        &["--case-sensitive", "[a-c].foo"],
        "a.foo
one/b.foo
one/two/c.foo",
    );
}
/// Smart case: a lowercase pattern matches case-insensitively; a
/// pattern containing a literal uppercase char becomes case-sensitive.
#[test]
fn test_smart_case() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["c.foo"],
        "one/two/c.foo
one/two/C.Foo2",
    );
    te.assert_output(&["C.Foo"], "one/two/C.Foo2");
    te.assert_output(&["Foo"], "one/two/C.Foo2");
    // Only literal uppercase chars should trigger case sensitivity —
    // the uppercase `A` in the `\A` anchor must NOT count.
    te.assert_output(
        &["\\Ac"],
        "one/two/c.foo
one/two/C.Foo2",
    );
    te.assert_output(&["\\AC"], "one/two/C.Foo2");
}
/// `--case-sensitive` forces exact-case matching; when combined with
/// `--ignore-case`, the last flag given wins.
#[test]
fn test_case_sensitive() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(&["--case-sensitive", "c.foo"], "one/two/c.foo");
    te.assert_output(&["--case-sensitive", "C.Foo"], "one/two/C.Foo2");
    // Later --case-sensitive overrides earlier --ignore-case.
    te.assert_output(
        &["--ignore-case", "--case-sensitive", "C.Foo"],
        "one/two/C.Foo2",
    );
}
/// `--ignore-case` matches regardless of case, even for uppercase
/// patterns; when combined with `--case-sensitive`, the last flag wins.
#[test]
fn test_case_insensitive() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--ignore-case", "C.Foo"],
        "one/two/c.foo
one/two/C.Foo2",
    );
    // Later --ignore-case overrides earlier --case-sensitive.
    te.assert_output(
        &["--case-sensitive", "--ignore-case", "C.Foo"],
        "one/two/c.foo
one/two/C.Foo2",
    );
}
/// `--full-path` matches the pattern against the whole path, so it can
/// anchor on the (escaped) filesystem root prefix.
#[test]
fn test_full_path() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    let root = te.system_root();
    let prefix = escape(&root.to_string_lossy());
    te.assert_output(
        &[
            "--full-path",
            &format!("^{prefix}.*three.*foo$", prefix = prefix),
        ],
        "one/two/three/d.foo
one/two/three/directory_foo",
    );
}
/// `--hidden` includes dotfiles (`.hidden.foo`) that default output omits.
#[test]
fn test_hidden() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--hidden", "foo"],
        ".hidden.foo
a.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
    );
}
/// `--no-ignore` surfaces entries matched by ignore files
/// (`fdignored.foo`, `gitignored.foo`) but still hides dotfiles unless
/// `--hidden` is also given.
#[test]
fn test_no_ignore() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--no-ignore", "foo"],
        "a.foo
fdignored.foo
gitignored.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
    );
    te.assert_output(
        &["--hidden", "--no-ignore", "foo"],
        ".hidden.foo
a.foo
fdignored.foo
gitignored.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
    );
}
/// `.fdignore` and `.gitignore` both apply by default;
/// `--no-ignore-vcs` disables only `.gitignore`, `--no-ignore` disables
/// both.
#[test]
fn test_custom_ignore() {
    let files = &[
        "ignored-by-nothing",
        "ignored-by-fdignore",
        "ignored-by-gitignore",
        "ignored-by-both",
    ];
    let te = TestEnv::new(&[], files);
    // Write the two ignore files by hand for this scenario.
    fs::File::create(te.test_root().join(".fdignore"))
        .unwrap()
        .write_all(b"ignored-by-fdignore\nignored-by-both")
        .unwrap();
    fs::File::create(te.test_root().join(".gitignore"))
        .unwrap()
        .write_all(b"ignored-by-gitignore\nignored-by-both")
        .unwrap();
    te.assert_output(&["ignored"], "ignored-by-nothing");
    te.assert_output(
        &["--no-ignore-vcs", "ignored"],
        "ignored-by-nothing
ignored-by-gitignore",
    );
    te.assert_output(
        &["--no-ignore", "ignored"],
        "ignored-by-nothing
ignored-by-fdignore
ignored-by-gitignore
ignored-by-both",
    );
}
/// A whitelist rule (`!foo`) in `.fdignore` overrides an ignore rule
/// in a nested `.gitignore`, so `inner/foo` is found in every mode.
#[test]
fn test_custom_ignore_precedence() {
    let dirs = &["inner"];
    let files = &["inner/foo"];
    let te = TestEnv::new(dirs, files);
    // Ignore 'foo' via .gitignore
    fs::File::create(te.test_root().join("inner/.gitignore"))
        .unwrap()
        .write_all(b"foo")
        .unwrap();
    // Whitelist 'foo' via .fdignore
    fs::File::create(te.test_root().join(".fdignore"))
        .unwrap()
        .write_all(b"!foo")
        .unwrap();
    te.assert_output(&["foo"], "inner/foo");
    te.assert_output(&["--no-ignore-vcs", "foo"], "inner/foo");
    te.assert_output(&["--no-ignore", "foo"], "inner/foo");
}
/// `--no-ignore-vcs` shows `gitignored.foo` but keeps honouring the
/// fd-specific ignore file (`fdignored.foo` stays hidden).
#[test]
fn test_no_ignore_vcs() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--no-ignore-vcs", "foo"],
        "a.foo
gitignored.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
    );
}
/// ripgrep-style aliases: `-u` behaves like `--no-ignore` (same output
/// as `test_no_ignore`), and `-uu` additionally includes hidden files.
#[test]
fn test_no_ignore_aliases() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["-u", "foo"],
        "a.foo
fdignored.foo
gitignored.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
    );
    te.assert_output(
        &["-uu", "foo"],
        ".hidden.foo
a.foo
fdignored.foo
gitignored.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
    );
}
/// `--follow` descends into the `symlink` entry, so matches appear both
/// under their real path and under the symlinked path.
#[test]
fn test_follow() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--follow", "c.foo"],
        "one/two/c.foo
one/two/C.Foo2
symlink/c.foo
symlink/C.Foo2",
    );
}
/// `--print0` separates results with NUL bytes; the expected string
/// spells each NUL as the literal `NULL` — presumably normalized by the
/// test harness (verify against `mod testenv`).
#[test]
fn test_print0() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--print0", "foo"],
        "a.fooNULL
one/b.fooNULL
one/two/C.Foo2NULL
one/two/c.fooNULL
one/two/three/d.fooNULL
one/two/three/directory_fooNULL",
    );
}
/// `--max-depth N` limits traversal: depth 3 still shows `one/two/three`
/// but not its contents; depth 1 shows only the top-level entries.
#[test]
fn test_max_depth() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--max-depth", "3"],
        "a.foo
e1 e2
one
one/b.foo
one/two
one/two/c.foo
one/two/C.Foo2
one/two/three
symlink",
    );
    te.assert_output(
        &["--max-depth", "2"],
        "a.foo
e1 e2
one
one/b.foo
one/two
symlink",
    );
    te.assert_output(
        &["--max-depth", "1"],
        "a.foo
e1 e2
one
symlink",
    );
}
/// `--absolute-path` prefixes every result with the canonical root;
/// passing an absolute root path as argument has the same effect.
#[test]
fn test_absolute_path() {
    let (te, abs_path) = get_test_env_with_abs_path(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--absolute-path"],
        &format!(
            "{abs_path}/a.foo
{abs_path}/e1 e2
{abs_path}/one
{abs_path}/one/b.foo
{abs_path}/one/two
{abs_path}/one/two/c.foo
{abs_path}/one/two/C.Foo2
{abs_path}/one/two/three
{abs_path}/one/two/three/d.foo
{abs_path}/one/two/three/directory_foo
{abs_path}/symlink",
            abs_path = &abs_path
        ),
    );
    te.assert_output(
        &["--absolute-path", "foo"],
        &format!(
            "{abs_path}/a.foo
{abs_path}/one/b.foo
{abs_path}/one/two/c.foo
{abs_path}/one/two/C.Foo2
{abs_path}/one/two/three/d.foo
{abs_path}/one/two/three/directory_foo",
            abs_path = &abs_path
        ),
    );
    // An absolute search root implies absolute output paths.
    te.assert_output(
        &["foo", &abs_path],
        &format!(
            "{abs_path}/a.foo
{abs_path}/one/b.foo
{abs_path}/one/two/c.foo
{abs_path}/one/two/C.Foo2
{abs_path}/one/two/three/d.foo
{abs_path}/one/two/three/directory_foo",
            abs_path = &abs_path
        ),
    );
}
/// `--type f|d|l` filters by file kind; repeating `--type` ORs the
/// kinds together.
#[test]
fn test_type() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--type", "f"],
        "a.foo
e1 e2
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo",
    );
    te.assert_output(&["--type", "f", "e1"], "e1 e2");
    te.assert_output(
        &["--type", "d"],
        "one
one/two
one/two/three
one/two/three/directory_foo",
    );
    te.assert_output(
        &["--type", "d", "--type", "l"],
        "one
one/two
one/two/three
one/two/three/directory_foo
symlink",
    );
    te.assert_output(&["--type", "l"], "symlink");
}
/// `--extension` matches with or without a leading dot, may be repeated
/// (OR), supports multi-part (`.bar.baz`) and non-ASCII extensions, and
/// treats a leading-dot filename (`.hidden`) as having no extension.
#[test]
fn test_extension() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--extension", "foo"],
        "a.foo
one/b.foo
one/two/c.foo
one/two/three/d.foo",
    );
    te.assert_output(
        &["--extension", ".foo"],
        "a.foo
one/b.foo
one/two/c.foo
one/two/three/d.foo",
    );
    te.assert_output(
        &["--extension", ".foo", "--extension", "foo2"],
        "a.foo
one/b.foo
one/two/c.foo
one/two/three/d.foo
one/two/C.Foo2",
    );
    te.assert_output(&["--extension", ".foo", "a"], "a.foo");
    te.assert_output(&["--extension", "foo2"], "one/two/C.Foo2");
    // Multi-part extensions.
    let te2 = TestEnv::new(&[], &["spam.bar.baz", "egg.bar.baz", "yolk.bar.baz.sig"]);
    te2.assert_output(
        &["--extension", ".bar.baz"],
        "spam.bar.baz
egg.bar.baz",
    );
    te2.assert_output(&["--extension", "sig"], "yolk.bar.baz.sig");
    te2.assert_output(&["--extension", "bar.baz.sig"], "yolk.bar.baz.sig");
    // Non-ASCII extensions (combining accent, symbol).
    let te3 = TestEnv::new(&[], &["latin1.e\u{301}xt", "smiley.☻"]);
    te3.assert_output(&["--extension", "☻"], "smiley.☻");
    te3.assert_output(&["--extension", ".e\u{301}xt"], "latin1.e\u{301}xt");
    // `.hidden` has no extension; `test.hidden` does.
    let te4 = TestEnv::new(&[], &[".hidden", "test.hidden"]);
    te4.assert_output(&["--hidden", "--extension", ".hidden"], "test.hidden");
}
/// Symlink handling when the working directory or the search root is
/// (or lies behind) a symlink, on Unix vs. Windows.
#[test]
fn test_symlink() {
    let (te, abs_path) = get_test_env_with_abs_path(DEFAULT_DIRS, DEFAULT_FILES);
    // From: http://pubs.opengroup.org/onlinepubs/9699919799/functions/getcwd.html
    // The getcwd() function shall place an absolute pathname of the current working directory in
    // the array pointed to by buf, and return buf. The pathname shall contain no components that
    // are dot or dot-dot, or are symbolic links.
    //
    // Key points:
    // 1. The path of the current working directory of a Unix process cannot contain symlinks.
    // 2. The path of the current working directory of a Windows process can contain symlinks.
    //
    // More:
    // 1. On Windows, symlinks are resolved after the ".." component.
    // 2. On Unix, symlinks are resolved immediately as encountered.
    let parent_parent = if cfg!(windows) { ".." } else { "../.." };
    te.assert_output_subdirectory(
        "symlink",
        &["", parent_parent],
        &format!(
            "{dir}/a.foo
{dir}/e1 e2
{dir}/one
{dir}/one/b.foo
{dir}/one/two
{dir}/one/two/c.foo
{dir}/one/two/C.Foo2
{dir}/one/two/three
{dir}/one/two/three/d.foo
{dir}/one/two/three/directory_foo
{dir}/symlink",
            dir = &parent_parent
        ),
    );
    // With --absolute-path, Unix reports the resolved target ("one/two")
    // while Windows keeps the symlink component in the path.
    te.assert_output_subdirectory(
        "symlink",
        &["--absolute-path"],
        &format!(
            "{abs_path}/{dir}/c.foo
{abs_path}/{dir}/C.Foo2
{abs_path}/{dir}/three
{abs_path}/{dir}/three/d.foo
{abs_path}/{dir}/three/directory_foo",
            dir = if cfg!(windows) { "symlink" } else { "one/two" },
            abs_path = &abs_path
        ),
    );
    // An explicit absolute root through the symlink is echoed verbatim.
    te.assert_output(
        &["", &format!("{abs_path}/symlink", abs_path = abs_path)],
        &format!(
            "{abs_path}/symlink/c.foo
{abs_path}/symlink/C.Foo2
{abs_path}/symlink/three
{abs_path}/symlink/three/d.foo
{abs_path}/symlink/three/directory_foo",
            abs_path = &abs_path
        ),
    );
    let root = te.system_root();
    let prefix = escape(&root.to_string_lossy());
    te.assert_output_subdirectory(
        "symlink",
        &[
            "--absolute-path",
            "--full-path",
            &format!("^{prefix}.*three", prefix = prefix),
        ],
        &format!(
            "{abs_path}/{dir}/three
{abs_path}/{dir}/three/d.foo
{abs_path}/{dir}/three/directory_foo",
            dir = if cfg!(windows) { "symlink" } else { "one/two" },
            abs_path = &abs_path
        ),
    );
    te.assert_output(
        &[
            "--full-path",
            &format!("^{prefix}.*symlink.*three", prefix = prefix),
            &format!("{abs_path}/symlink", abs_path = abs_path),
        ],
        &format!(
            "{abs_path}/symlink/three
{abs_path}/symlink/three/d.foo
{abs_path}/symlink/three/directory_foo",
            abs_path = &abs_path
        ),
    );
}
/// `--exclude` takes glob patterns (repeatable, AND-ed), matches path
/// components (`one/two`) and supports globstar (`one/**/*.foo`).
#[test]
fn test_excludes() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    te.assert_output(
        &["--exclude", "*.foo"],
        "one
one/two
one/two/C.Foo2
one/two/three
one/two/three/directory_foo
e1 e2
symlink",
    );
    te.assert_output(
        &["--exclude", "*.foo", "--exclude", "*.Foo2"],
        "one
one/two
one/two/three
one/two/three/directory_foo
e1 e2
symlink",
    );
    te.assert_output(
        &["--exclude", "*.foo", "--exclude", "*.Foo2", "foo"],
        "one/two/three/directory_foo",
    );
    // Excluding a directory prunes its whole subtree.
    te.assert_output(
        &["--exclude", "one/two", "foo"],
        "a.foo
one/b.foo",
    );
    te.assert_output(
        &["--exclude", "one/**/*.foo"],
        "a.foo
e1 e2
one
one/two
one/two/C.Foo2
one/two/three
one/two/three/directory_foo
symlink",
    );
}
/// Command execution via the long flag `--exec`.
#[test]
fn test_exec() {
    assert_exec_output("--exec");
}
/// Command execution via the find-style single-dash spelling `-exec`.
#[test]
fn test_exec_substitution() {
    assert_exec_output("-exec");
}
/// Command execution via the short flag `-x`.
#[test]
fn test_exec_short_arg() {
    assert_exec_output("-x");
}
/// Shared body for the exec-flag tests: runs `echo` through the given
/// exec flag spelling and checks each placeholder expansion (judging by
/// the expected outputs: `{}` = full path, `{.}` = path without
/// extension, `{/}` = basename, `{/.}` = basename without extension,
/// `{//}` = parent directory). Unix-only for now.
#[cfg(test)]
fn assert_exec_output(exec_style: &str) {
    let (te, abs_path) = get_test_env_with_abs_path(DEFAULT_DIRS, DEFAULT_FILES);
    // TODO Windows tests: D:file.txt \file.txt \\server\share\file.txt ...
    if !cfg!(windows) {
        // No placeholder: the path is appended as the last argument.
        te.assert_output(
            &["--absolute-path", "foo", exec_style, "echo"],
            &format!(
                "{abs_path}/a.foo
{abs_path}/one/b.foo
{abs_path}/one/two/C.Foo2
{abs_path}/one/two/c.foo
{abs_path}/one/two/three/d.foo
{abs_path}/one/two/three/directory_foo",
                abs_path = &abs_path
            ),
        );
        te.assert_output(
            &["foo", exec_style, "echo", "{}"],
            "a.foo
one/b.foo
one/two/C.Foo2
one/two/c.foo
one/two/three/d.foo
one/two/three/directory_foo",
        );
        te.assert_output(
            &["foo", exec_style, "echo", "{.}"],
            "a
one/b
one/two/C
one/two/c
one/two/three/d
one/two/three/directory_foo",
        );
        te.assert_output(
            &["foo", exec_style, "echo", "{/}"],
            "a.foo
b.foo
C.Foo2
c.foo
d.foo
directory_foo",
        );
        te.assert_output(
            &["foo", exec_style, "echo", "{/.}"],
            "a
b
C
c
d
directory_foo",
        );
        te.assert_output(
            &["foo", exec_style, "echo", "{//}"],
            ".
one
one/two
one/two
one/two/three
one/two/three",
        );
        // A filename containing a space is passed as one argument.
        te.assert_output(&["e1", exec_style, "printf", "%s.%s\n"], "e1 e2.");
    }
}
/// `--fixed-strings` disables regex interpretation: `.` and `(`/`)` are
/// matched literally instead of as metacharacters.
#[test]
fn test_fixed_strings() {
    let dirs = &["test1", "test2"];
    let files = &["test1/a.foo", "test1/a_foo", "test2/Download (1).tar.gz"];
    let te = TestEnv::new(dirs, files);
    // Regex search, dot is treated as "any character"
    te.assert_output(
        &["a.foo"],
        "test1/a.foo
test1/a_foo",
    );
    // Literal search, dot is treated as character
    te.assert_output(&["--fixed-strings", "a.foo"], "test1/a.foo");
    // Regex search, parens are treated as group
    te.assert_output(&["download (1)"], "");
    // Literal search, parens are treated as characters
    te.assert_output(
        &["--fixed-strings", "download (1)"],
        "test2/Download (1).tar.gz",
    );
    // Combine with --case-sensitive
    te.assert_output(&["--fixed-strings", "--case-sensitive", "download (1)"], "");
}
/// Filenames with invalid UTF-8 sequences
#[cfg(target_os = "linux")]
#[test]
fn test_invalid_utf8() {
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
let dirs = &["test1"];
let files = &[];
let te = TestEnv::new(dirs, files);
fs::File::create(
te.test_root()
.join(OsStr::from_bytes(b"test1/test_\xFEinvalid.txt")),
).unwrap();
te.assert_output(&["", "test1/"], "test1/test_�invalid.txt");
te.assert_output(&["invalid", "test1/"], "test1/test_�invalid.txt");
// Should not be found under a different extension
te.assert_output(&["-e", "zip", "", "test1/"], "");
}
// Unit test for `--type x` (executable) — see `test_type_executable` below.
// Copyright (c) 2017 fd developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Integration tests for the CLI interface of fd.
extern crate regex;
mod testenv;
use testenv::TestEnv;
use regex::escape;
use std::fs;
use std::io::Write;
// Directories created in every default test environment.
static DEFAULT_DIRS: &'static [&'static str] = &["one/two/three", "one/two/three/directory_foo"];
// Files created in every default test environment. `fdignored.foo`,
// `gitignored.foo` and the dotfile `.hidden.foo` never show up in
// default output (cf. `test_simple`) — presumably they are matched by
// ignore files that `testenv` generates; verify against `mod testenv`.
static DEFAULT_FILES: &'static [&'static str] = &[
    "a.foo",
    "one/b.foo",
    "one/two/c.foo",
    "one/two/C.Foo2",
    "one/two/three/d.foo",
    "fdignored.foo",
    "gitignored.foo",
    ".hidden.foo",
    "e1 e2",
];
/// Returns the canonical, absolute path of the test environment root.
///
/// # Panics
/// Panics if the root cannot be canonicalized or is not valid UTF-8;
/// acceptable here because this is test-only code.
fn get_absolute_root_path(env: &TestEnv) -> String {
    let path = env.test_root()
        .canonicalize()
        .expect("absolute path")
        .to_str()
        .expect("string")
        .to_string();
    // On Windows, `canonicalize` yields a verbatim path (`\\?\C:\...`);
    // strip that prefix so expected outputs can be written portably.
    // `trim_start_matches` replaces the deprecated `trim_left_matches`
    // (identical behaviour; renamed in Rust 1.30/1.33).
    #[cfg(windows)]
    let path = path.trim_start_matches(r"\\?\").to_string();
    path
}
/// Builds a fresh test environment from `dirs`/`files` and returns it
/// together with the canonicalized absolute path of its root directory.
#[cfg(test)]
fn get_test_env_with_abs_path(dirs: &[&'static str], files: &[&'static str]) -> (TestEnv, String) {
    let environment = TestEnv::new(dirs, files);
    let absolute_root = get_absolute_root_path(&environment);
    (environment, absolute_root)
}
/// Simple tests
#[test]
fn test_simple() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(&["a.foo"], "a.foo");
te.assert_output(&["b.foo"], "one/b.foo");
te.assert_output(&["d.foo"], "one/two/three/d.foo");
te.assert_output(
&["foo"],
"a.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
);
te.assert_output(
&[],
"a.foo
e1 e2
one
one/b.foo
one/two
one/two/c.foo
one/two/C.Foo2
one/two/three
one/two/three/d.foo
one/two/three/directory_foo
symlink",
);
}
/// Test multiple directory searches
#[test]
fn test_multi_file() {
let dirs = &["test1", "test2"];
let files = &["test1/a.foo", "test1/b.foo", "test2/a.foo"];
let te = TestEnv::new(dirs, files);
te.assert_output(
&["a.foo", "test1", "test2"],
"test1/a.foo
test2/a.foo",
);
te.assert_output(
&["", "test1", "test2"],
"test1/a.foo
test2/a.foo
test1/b.foo",
);
te.assert_output(&["a.foo", "test1"], "test1/a.foo");
te.assert_output(&["b.foo", "test1", "test2"], "test1/b.foo");
}
/// Explicit root path
#[test]
fn test_explicit_root_path() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["foo", "one"],
"one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
);
te.assert_output(
&["foo", "one/two/three"],
"one/two/three/d.foo
one/two/three/directory_foo",
);
te.assert_output_subdirectory(
"one/two",
&["foo", "../../"],
"../../a.foo
../../one/b.foo
../../one/two/c.foo
../../one/two/C.Foo2
../../one/two/three/d.foo
../../one/two/three/directory_foo",
);
te.assert_output_subdirectory(
"one/two/three",
&["", ".."],
"../c.foo
../C.Foo2
../three
../three/d.foo
../three/directory_foo",
);
}
/// Regex searches
#[test]
fn test_regex_searches() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["[a-c].foo"],
"a.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2",
);
te.assert_output(
&["--case-sensitive", "[a-c].foo"],
"a.foo
one/b.foo
one/two/c.foo",
);
}
/// Smart case
#[test]
fn test_smart_case() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["c.foo"],
"one/two/c.foo
one/two/C.Foo2",
);
te.assert_output(&["C.Foo"], "one/two/C.Foo2");
te.assert_output(&["Foo"], "one/two/C.Foo2");
// Only literal uppercase chars should trigger case sensitivity.
te.assert_output(
&["\\Ac"],
"one/two/c.foo
one/two/C.Foo2",
);
te.assert_output(&["\\AC"], "one/two/C.Foo2");
}
/// Case sensitivity (--case-sensitive)
#[test]
fn test_case_sensitive() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(&["--case-sensitive", "c.foo"], "one/two/c.foo");
te.assert_output(&["--case-sensitive", "C.Foo"], "one/two/C.Foo2");
te.assert_output(
&["--ignore-case", "--case-sensitive", "C.Foo"],
"one/two/C.Foo2",
);
}
/// Case insensitivity (--ignore-case)
#[test]
fn test_case_insensitive() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["--ignore-case", "C.Foo"],
"one/two/c.foo
one/two/C.Foo2",
);
te.assert_output(
&["--case-sensitive", "--ignore-case", "C.Foo"],
"one/two/c.foo
one/two/C.Foo2",
);
}
/// Full path search (--full-path)
#[test]
fn test_full_path() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
let root = te.system_root();
let prefix = escape(&root.to_string_lossy());
te.assert_output(
&[
"--full-path",
&format!("^{prefix}.*three.*foo$", prefix = prefix),
],
"one/two/three/d.foo
one/two/three/directory_foo",
);
}
/// Hidden files (--hidden)
#[test]
fn test_hidden() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["--hidden", "foo"],
".hidden.foo
a.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
);
}
/// Ignored files (--no-ignore)
#[test]
fn test_no_ignore() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["--no-ignore", "foo"],
"a.foo
fdignored.foo
gitignored.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
);
te.assert_output(
&["--hidden", "--no-ignore", "foo"],
".hidden.foo
a.foo
fdignored.foo
gitignored.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
);
}
/// Custom ignore files
#[test]
fn test_custom_ignore() {
let files = &[
"ignored-by-nothing",
"ignored-by-fdignore",
"ignored-by-gitignore",
"ignored-by-both",
];
let te = TestEnv::new(&[], files);
fs::File::create(te.test_root().join(".fdignore"))
.unwrap()
.write_all(b"ignored-by-fdignore\nignored-by-both")
.unwrap();
fs::File::create(te.test_root().join(".gitignore"))
.unwrap()
.write_all(b"ignored-by-gitignore\nignored-by-both")
.unwrap();
te.assert_output(&["ignored"], "ignored-by-nothing");
te.assert_output(
&["--no-ignore-vcs", "ignored"],
"ignored-by-nothing
ignored-by-gitignore",
);
te.assert_output(
&["--no-ignore", "ignored"],
"ignored-by-nothing
ignored-by-fdignore
ignored-by-gitignore
ignored-by-both",
);
}
/// Precedence of custom ignore files
#[test]
fn test_custom_ignore_precedence() {
let dirs = &["inner"];
let files = &["inner/foo"];
let te = TestEnv::new(dirs, files);
// Ignore 'foo' via .gitignore
fs::File::create(te.test_root().join("inner/.gitignore"))
.unwrap()
.write_all(b"foo")
.unwrap();
// Whitelist 'foo' via .fdignore
fs::File::create(te.test_root().join(".fdignore"))
.unwrap()
.write_all(b"!foo")
.unwrap();
te.assert_output(&["foo"], "inner/foo");
te.assert_output(&["--no-ignore-vcs", "foo"], "inner/foo");
te.assert_output(&["--no-ignore", "foo"], "inner/foo");
}
/// VCS ignored files (--no-ignore-vcs)
#[test]
fn test_no_ignore_vcs() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["--no-ignore-vcs", "foo"],
"a.foo
gitignored.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
);
}
/// Ignored files with ripgrep aliases (-u / -uu)
#[test]
fn test_no_ignore_aliases() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["-u", "foo"],
"a.foo
fdignored.foo
gitignored.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
);
te.assert_output(
&["-uu", "foo"],
".hidden.foo
a.foo
fdignored.foo
gitignored.foo
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo
one/two/three/directory_foo",
);
}
/// Symlinks (--follow)
#[test]
fn test_follow() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["--follow", "c.foo"],
"one/two/c.foo
one/two/C.Foo2
symlink/c.foo
symlink/C.Foo2",
);
}
/// Null separator (--print0)
#[test]
fn test_print0() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["--print0", "foo"],
"a.fooNULL
one/b.fooNULL
one/two/C.Foo2NULL
one/two/c.fooNULL
one/two/three/d.fooNULL
one/two/three/directory_fooNULL",
);
}
/// Maximum depth (--max-depth)
#[test]
fn test_max_depth() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["--max-depth", "3"],
"a.foo
e1 e2
one
one/b.foo
one/two
one/two/c.foo
one/two/C.Foo2
one/two/three
symlink",
);
te.assert_output(
&["--max-depth", "2"],
"a.foo
e1 e2
one
one/b.foo
one/two
symlink",
);
te.assert_output(
&["--max-depth", "1"],
"a.foo
e1 e2
one
symlink",
);
}
/// Absolute paths (--absolute-path)
#[test]
fn test_absolute_path() {
let (te, abs_path) = get_test_env_with_abs_path(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["--absolute-path"],
&format!(
"{abs_path}/a.foo
{abs_path}/e1 e2
{abs_path}/one
{abs_path}/one/b.foo
{abs_path}/one/two
{abs_path}/one/two/c.foo
{abs_path}/one/two/C.Foo2
{abs_path}/one/two/three
{abs_path}/one/two/three/d.foo
{abs_path}/one/two/three/directory_foo
{abs_path}/symlink",
abs_path = &abs_path
),
);
te.assert_output(
&["--absolute-path", "foo"],
&format!(
"{abs_path}/a.foo
{abs_path}/one/b.foo
{abs_path}/one/two/c.foo
{abs_path}/one/two/C.Foo2
{abs_path}/one/two/three/d.foo
{abs_path}/one/two/three/directory_foo",
abs_path = &abs_path
),
);
te.assert_output(
&["foo", &abs_path],
&format!(
"{abs_path}/a.foo
{abs_path}/one/b.foo
{abs_path}/one/two/c.foo
{abs_path}/one/two/C.Foo2
{abs_path}/one/two/three/d.foo
{abs_path}/one/two/three/directory_foo",
abs_path = &abs_path
),
);
}
/// File type filter (--type)
#[test]
fn test_type() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["--type", "f"],
"a.foo
e1 e2
one/b.foo
one/two/c.foo
one/two/C.Foo2
one/two/three/d.foo",
);
te.assert_output(&["--type", "f", "e1"], "e1 e2");
te.assert_output(
&["--type", "d"],
"one
one/two
one/two/three
one/two/three/directory_foo",
);
te.assert_output(
&["--type", "d", "--type", "l"],
"one
one/two
one/two/three
one/two/three/directory_foo
symlink",
);
te.assert_output(&["--type", "l"], "symlink");
}
/// `--type executable`: matches a file created with mode `0o777`; can
/// be combined with another `--type` (OR semantics). Unix-only because
/// it relies on `OpenOptionsExt::mode`.
#[cfg(unix)]
#[test]
fn test_type_executable() {
    use std::os::unix::fs::OpenOptionsExt;
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    // Create a file with all execute bits set.
    fs::OpenOptions::new()
        .create(true)
        .write(true)
        .mode(0o777)
        .open(te.test_root().join("executable-file.sh"))
        .unwrap();
    te.assert_output(&["--type", "executable"], "executable-file.sh");
    te.assert_output(
        &["--type", "executable", "--type", "directory"],
        "executable-file.sh
one
one/two
one/two/three
one/two/three/directory_foo",
    );
}
/// File extension (--extension)
#[test]
fn test_extension() {
let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
te.assert_output(
&["--extension", "foo"],
"a.foo
one/b.foo
one/two/c.foo
one/two/three/d.foo",
);
te.assert_output(
&["--extension", ".foo"],
"a.foo
one/b.foo
one/two/c.foo
one/two/three/d.foo",
);
te.assert_output(
&["--extension", ".foo", "--extension", "foo2"],
"a.foo
one/b.foo
one/two/c.foo
one/two/three/d.foo
one/two/C.Foo2",
);
te.assert_output(&["--extension", ".foo", "a"], "a.foo");
te.assert_output(&["--extension", "foo2"], "one/two/C.Foo2");
let te2 = TestEnv::new(&[], &["spam.bar.baz", "egg.bar.baz", "yolk.bar.baz.sig"]);
te2.assert_output(
&["--extension", ".bar.baz"],
"spam.bar.baz
egg.bar.baz",
);
te2.assert_output(&["--extension", "sig"], "yolk.bar.baz.sig");
te2.assert_output(&["--extension", "bar.baz.sig"], "yolk.bar.baz.sig");
let te3 = TestEnv::new(&[], &["latin1.e\u{301}xt", "smiley.☻"]);
te3.assert_output(&["--extension", "☻"], "smiley.☻");
te3.assert_output(&["--extension", ".e\u{301}xt"], "latin1.e\u{301}xt");
let te4 = TestEnv::new(&[], &[".hidden", "test.hidden"]);
te4.assert_output(&["--hidden", "--extension", ".hidden"], "test.hidden");
}
/// Symlinks misc
#[test]
fn test_symlink() {
    let (te, abs_path) = get_test_env_with_abs_path(DEFAULT_DIRS, DEFAULT_FILES);
    // From: http://pubs.opengroup.org/onlinepubs/9699919799/functions/getcwd.html
    // The getcwd() function shall place an absolute pathname of the current working directory in
    // the array pointed to by buf, and return buf. The pathname shall contain no components that
    // are dot or dot-dot, or are symbolic links.
    //
    // Key points:
    // 1. The path of the current working directory of a Unix process cannot contain symlinks.
    // 2. The path of the current working directory of a Windows process can contain symlinks.
    //
    // More:
    // 1. On Windows, symlinks are resolved after the ".." component.
    // 2. On Unix, symlinks are resolved immediately as encountered.
    let parent_parent = if cfg!(windows) { ".." } else { "../.." };
    // Relative search from inside the symlinked directory; the expected
    // listing is relative to `parent_parent`.
    te.assert_output_subdirectory(
        "symlink",
        &["", parent_parent],
        &format!(
            "{dir}/a.foo
{dir}/e1 e2
{dir}/one
{dir}/one/b.foo
{dir}/one/two
{dir}/one/two/c.foo
{dir}/one/two/C.Foo2
{dir}/one/two/three
{dir}/one/two/three/d.foo
{dir}/one/two/three/directory_foo
{dir}/symlink",
            dir = &parent_parent
        ),
    );
    // --absolute-path from inside the symlink: on Unix the cwd resolves to
    // the symlink target ("one/two"); on Windows it keeps "symlink".
    te.assert_output_subdirectory(
        "symlink",
        &["--absolute-path"],
        &format!(
            "{abs_path}/{dir}/c.foo
{abs_path}/{dir}/C.Foo2
{abs_path}/{dir}/three
{abs_path}/{dir}/three/d.foo
{abs_path}/{dir}/three/directory_foo",
            dir = if cfg!(windows) { "symlink" } else { "one/two" },
            abs_path = &abs_path
        ),
    );
    // Passing the symlink as an explicit absolute search root keeps the
    // symlink name in the reported paths.
    te.assert_output(
        &["", &format!("{abs_path}/symlink", abs_path = abs_path)],
        &format!(
            "{abs_path}/symlink/c.foo
{abs_path}/symlink/C.Foo2
{abs_path}/symlink/three
{abs_path}/symlink/three/d.foo
{abs_path}/symlink/three/directory_foo",
            abs_path = &abs_path
        ),
    );
    // --full-path patterns are anchored at the (escaped) filesystem root.
    let root = te.system_root();
    let prefix = escape(&root.to_string_lossy());
    te.assert_output_subdirectory(
        "symlink",
        &[
            "--absolute-path",
            "--full-path",
            &format!("^{prefix}.*three", prefix = prefix),
        ],
        &format!(
            "{abs_path}/{dir}/three
{abs_path}/{dir}/three/d.foo
{abs_path}/{dir}/three/directory_foo",
            dir = if cfg!(windows) { "symlink" } else { "one/two" },
            abs_path = &abs_path
        ),
    );
    te.assert_output(
        &[
            "--full-path",
            &format!("^{prefix}.*symlink.*three", prefix = prefix),
            &format!("{abs_path}/symlink", abs_path = abs_path),
        ],
        &format!(
            "{abs_path}/symlink/three
{abs_path}/symlink/three/d.foo
{abs_path}/symlink/three/directory_foo",
            abs_path = &abs_path
        ),
    );
}
/// Exclude patterns (--exclude)
#[test]
fn test_excludes() {
    let te = TestEnv::new(DEFAULT_DIRS, DEFAULT_FILES);
    // A single glob exclude removes matching entries from the listing.
    te.assert_output(
        &["--exclude", "*.foo"],
        "one
one/two
one/two/C.Foo2
one/two/three
one/two/three/directory_foo
e1 e2
symlink",
    );
    // Multiple excludes are combined.
    te.assert_output(
        &["--exclude", "*.foo", "--exclude", "*.Foo2"],
        "one
one/two
one/two/three
one/two/three/directory_foo
e1 e2
symlink",
    );
    // Excludes apply on top of a search pattern.
    te.assert_output(
        &["--exclude", "*.foo", "--exclude", "*.Foo2", "foo"],
        "one/two/three/directory_foo",
    );
    // Excluding a directory prunes everything underneath it.
    te.assert_output(
        &["--exclude", "one/two", "foo"],
        "a.foo
one/b.foo",
    );
    // Recursive glob components ("**") are supported in exclude patterns.
    te.assert_output(
        &["--exclude", "one/**/*.foo"],
        "a.foo
e1 e2
one
one/two
one/two/C.Foo2
one/two/three
one/two/three/directory_foo
symlink",
    );
}
/// Shell script execution (--exec)
#[test]
fn test_exec() {
    assert_exec_output("--exec");
}
/// Shell script execution using the `-exec` spelling of the flag.
#[test]
fn test_exec_substitution() {
    assert_exec_output("-exec");
}
/// Shell script execution using the short flag `-x`.
#[test]
fn test_exec_short_arg() {
    assert_exec_output("-x");
}
/// Shared driver for the --exec/-exec/-x tests: runs the same set of
/// assertions with `exec_style` as the flag spelling, exercising each of
/// the placeholder tokens understood by the exec feature.
#[cfg(test)]
fn assert_exec_output(exec_style: &str) {
    let (te, abs_path) = get_test_env_with_abs_path(DEFAULT_DIRS, DEFAULT_FILES);
    // TODO Windows tests: D:file.txt \file.txt \\server\share\file.txt ...
    if !cfg!(windows) {
        // No placeholder: the full path is appended to the command.
        te.assert_output(
            &["--absolute-path", "foo", exec_style, "echo"],
            &format!(
                "{abs_path}/a.foo
{abs_path}/one/b.foo
{abs_path}/one/two/C.Foo2
{abs_path}/one/two/c.foo
{abs_path}/one/two/three/d.foo
{abs_path}/one/two/three/directory_foo",
                abs_path = &abs_path
            ),
        );
        // "{}" — the path as found.
        te.assert_output(
            &["foo", exec_style, "echo", "{}"],
            "a.foo
one/b.foo
one/two/C.Foo2
one/two/c.foo
one/two/three/d.foo
one/two/three/directory_foo",
        );
        // "{.}" — the path with its extension removed.
        te.assert_output(
            &["foo", exec_style, "echo", "{.}"],
            "a
one/b
one/two/C
one/two/c
one/two/three/d
one/two/three/directory_foo",
        );
        // "{/}" — the basename only.
        te.assert_output(
            &["foo", exec_style, "echo", "{/}"],
            "a.foo
b.foo
C.Foo2
c.foo
d.foo
directory_foo",
        );
        // "{/.}" — the basename with its extension removed.
        te.assert_output(
            &["foo", exec_style, "echo", "{/.}"],
            "a
b
C
c
d
directory_foo",
        );
        // "{//}" — the parent directory.
        te.assert_output(
            &["foo", exec_style, "echo", "{//}"],
            ".
one
one/two
one/two
one/two/three
one/two/three",
        );
        // Filename containing a space is passed as a single argument.
        te.assert_output(&["e1", exec_style, "printf", "%s.%s\n"], "e1 e2.");
    }
}
/// Literal search (--fixed-strings)
#[test]
fn test_fixed_strings() {
    let dirs = &["test1", "test2"];
    let files = &["test1/a.foo", "test1/a_foo", "test2/Download (1).tar.gz"];
    let te = TestEnv::new(dirs, files);
    // Regex search, dot is treated as "any character"
    te.assert_output(
        &["a.foo"],
        "test1/a.foo
test1/a_foo",
    );
    // Literal search, dot is treated as character
    te.assert_output(&["--fixed-strings", "a.foo"], "test1/a.foo");
    // Regex search, parens are treated as group
    te.assert_output(&["download (1)"], "");
    // Literal search, parens are treated as characters
    te.assert_output(
        &["--fixed-strings", "download (1)"],
        "test2/Download (1).tar.gz",
    );
    // Combine with --case-sensitive
    te.assert_output(&["--fixed-strings", "--case-sensitive", "download (1)"], "");
}
/// Filenames with invalid UTF-8 sequences
#[cfg(target_os = "linux")]
#[test]
fn test_invalid_utf8() {
    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;
    let dirs = &["test1"];
    let files = &[];
    let te = TestEnv::new(dirs, files);
    // Create a filename containing the invalid UTF-8 byte 0xFE directly via
    // OsStr bytes (this can't be expressed as a &str literal).
    fs::File::create(
        te.test_root()
            .join(OsStr::from_bytes(b"test1/test_\xFEinvalid.txt")),
    ).unwrap();
    // The invalid byte is rendered as U+FFFD (�) in the output.
    te.assert_output(&["", "test1/"], "test1/test_�invalid.txt");
    te.assert_output(&["invalid", "test1/"], "test1/test_�invalid.txt");
    // Should not be found under a different extension
    te.assert_output(&["-e", "zip", "", "test1/"], "");
}
|
// We add this `extern crate` here to ensure that bindgen is up-to-date and
// rebuilt, even though we aren't using any of its types or functions here, only
// indirectly calling the executable.
#[allow(dead_code)]
extern crate bindgen;
use std::env;
use std::fs;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::process;
const TEST_BATCH_DEFAULT_SIZE: usize = 16;
/// Spawns the `run-bindgen.py` driver script for a single test `header`,
/// passing it the path to the `bindgen` binary and the derived expectation
/// file. Returns the child process with stdout/stderr piped so the caller
/// can report failures.
fn spawn_run_bindgen<P, Q, R>(run_bindgen: P, bindgen: Q, header: R) -> process::Child
    where P: AsRef<Path>,
          Q: AsRef<Path>,
          R: AsRef<Path>
{
    let run_bindgen = run_bindgen.as_ref();
    let bindgen = bindgen.as_ref();
    let header = header.as_ref();
    // Convert from "tests/headers/foo.hpp" to "tests/expectations/foo.rs" by
    // saving the filename, popping off "headers/foo.hpp", pushing
    // "expectations", pushing the saved filename, and finally modifying the
    // extension.
    let mut expected = PathBuf::from(header);
    let file_name = expected.file_name()
        .expect("Should have filename")
        .to_os_string();
    expected.pop();
    expected.pop();
    expected.push("expectations");
    expected.push(file_name);
    expected.set_extension("rs");
    let mut cmd = process::Command::new(run_bindgen);
    cmd.stdout(process::Stdio::piped())
        .stderr(process::Stdio::piped())
        .arg(bindgen)
        .arg(header)
        .arg(expected);
    // Forward the llvm_stable feature flag to the driver when this crate
    // was built with it.
    if cfg!(feature = "llvm_stable") {
        cmd.arg("--feature")
            .arg("llvm_stable");
    }
    cmd.spawn()
        .expect("Should be able to spawn run-bindgen.py child process")
}
/// Runs `run-bindgen.py` over every `.h`/`.hpp` header in `tests/headers`,
/// spawning children in batches, and panics with a dump of each failing
/// child's stdout/stderr if any of them fail.
#[test]
fn run_bindgen_tests() {
    let crate_root = env::var("CARGO_MANIFEST_DIR")
        .expect("should have CARGO_MANIFEST_DIR environment variable");
    let mut run_bindgen = PathBuf::from(&crate_root);
    run_bindgen.push("tests");
    run_bindgen.push("tools");
    run_bindgen.push("run-bindgen.py");
    let mut bindgen = PathBuf::from(&crate_root);
    bindgen.push("target");
    bindgen.push("debug");
    bindgen.push("bindgen");
    if !bindgen.is_file() {
        panic!("{} is not a file! Build bindgen before running tests.",
               bindgen.display());
    }
    let mut headers_dir = PathBuf::from(&crate_root);
    headers_dir.push("tests");
    headers_dir.push("headers");
    let entries = fs::read_dir(&headers_dir)
        .expect("Should read directory")
        .map(|result| result.expect("Should read directory entry"));
    // Only C/C++ headers are test inputs; skip everything else.
    let tests = entries.filter(|entry| {
            match entry.path().extension().map(|s| s.to_str()) {
                Some(Some("h")) |
                Some(Some("hpp")) => true,
                _ => false,
            }
        })
        .collect::<Vec<_>>();
    let batch_size = env::var("BINDGEN_TEST_BATCH_SIZE")
        .ok()
        .and_then(|x| x.parse::<usize>().ok())
        .unwrap_or(TEST_BATCH_DEFAULT_SIZE);
    // Spawn batch_size child to run in parallel
    // and wait on all of them before processing the next batch
    let children = tests.chunks(batch_size).map(|x| {
        x.iter().map(|entry| {
            let child = spawn_run_bindgen(run_bindgen.clone(), bindgen.clone(), entry.path());
            (entry.path(), child)
        }).collect::<Vec<_>>()
    });
    let failures: Vec<_> = children.flat_map(|x| {
            x.into_iter().filter_map(|(path, mut child)| {
                let passed = child.wait()
                    .expect("Should wait on child process")
                    .success();
                if passed { None } else { Some((path, child)) }
            })
        })
        .collect();
    let num_failures = failures.len();
    for (path, child) in failures {
        println!("FAIL: {}", path.display());
        let mut buf = String::new();
        child.stdout
            .expect("should have stdout piped")
            .read_to_string(&mut buf)
            .expect("should read child's stdout");
        for line in buf.lines() {
            println!("child stdout> {}", line);
        }
        // BUGFIX: `read_to_string` *appends* to the buffer; without clearing
        // it here the stderr dump would repeat the stdout contents.
        buf.clear();
        child.stderr
            .expect("should have stderr piped")
            .read_to_string(&mut buf)
            .expect("should read child's stderr");
        for line in buf.lines() {
            println!("child stderr> {}", line);
        }
    }
    if num_failures > 0 {
        panic!("{} test failures!", num_failures);
    }
}
Auto merge of #174 - fitzgen:small-typos, r=emilio
Fix a small typo and expand batching comment
r? @emilio
// We add this `extern crate` here to ensure that bindgen is up-to-date and
// rebuilt, even though we aren't using any of its types or functions here, only
// indirectly calling the executable.
#[allow(dead_code)]
extern crate bindgen;
use std::env;
use std::fs;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::process;
const TEST_BATCH_DEFAULT_SIZE: usize = 16;
/// Spawns the `run-bindgen.py` driver script for a single test `header`,
/// passing it the path to the `bindgen` binary and the derived expectation
/// file. Returns the child process with stdout/stderr piped so the caller
/// can report failures.
fn spawn_run_bindgen<P, Q, R>(run_bindgen: P, bindgen: Q, header: R) -> process::Child
    where P: AsRef<Path>,
          Q: AsRef<Path>,
          R: AsRef<Path>
{
    let run_bindgen = run_bindgen.as_ref();
    let bindgen = bindgen.as_ref();
    let header = header.as_ref();
    // Convert from "tests/headers/foo.hpp" to "tests/expectations/foo.rs" by
    // saving the filename, popping off "headers/foo.hpp", pushing
    // "expectations", pushing the saved filename, and finally modifying the
    // extension.
    let mut expected = PathBuf::from(header);
    let file_name = expected.file_name()
        .expect("Should have filename")
        .to_os_string();
    expected.pop();
    expected.pop();
    expected.push("expectations");
    expected.push(file_name);
    expected.set_extension("rs");
    let mut cmd = process::Command::new(run_bindgen);
    cmd.stdout(process::Stdio::piped())
        .stderr(process::Stdio::piped())
        .arg(bindgen)
        .arg(header)
        .arg(expected);
    // Forward the llvm_stable feature flag to the driver when this crate
    // was built with it.
    if cfg!(feature = "llvm_stable") {
        cmd.arg("--feature")
            .arg("llvm_stable");
    }
    cmd.spawn()
        .expect("Should be able to spawn run-bindgen.py child process")
}
/// Runs `run-bindgen.py` over every `.h`/`.hpp` header in `tests/headers`,
/// spawning children in batches, and panics with a dump of each failing
/// child's stdout/stderr if any of them fail.
#[test]
fn run_bindgen_tests() {
    let crate_root = env::var("CARGO_MANIFEST_DIR")
        .expect("should have CARGO_MANIFEST_DIR environment variable");
    let mut run_bindgen = PathBuf::from(&crate_root);
    run_bindgen.push("tests");
    run_bindgen.push("tools");
    run_bindgen.push("run-bindgen.py");
    let mut bindgen = PathBuf::from(&crate_root);
    bindgen.push("target");
    bindgen.push("debug");
    bindgen.push("bindgen");
    if !bindgen.is_file() {
        panic!("{} is not a file! Build bindgen before running tests.",
               bindgen.display());
    }
    let mut headers_dir = PathBuf::from(&crate_root);
    headers_dir.push("tests");
    headers_dir.push("headers");
    let entries = fs::read_dir(&headers_dir)
        .expect("Should read directory")
        .map(|result| result.expect("Should read directory entry"));
    // Only C/C++ headers are test inputs; skip everything else.
    let tests = entries.filter(|entry| {
            match entry.path().extension().map(|s| s.to_str()) {
                Some(Some("h")) |
                Some(Some("hpp")) => true,
                _ => false,
            }
        })
        .collect::<Vec<_>>();
    let batch_size = env::var("BINDGEN_TEST_BATCH_SIZE")
        .ok()
        .and_then(|x| x.parse::<usize>().ok())
        .unwrap_or(TEST_BATCH_DEFAULT_SIZE);
    // Spawn `batch_size` children to run in parallel and wait on all of them
    // before processing the next batch. This puts a limit on the resources
    // consumed when testing, so that we don't overload the system.
    let children = tests.chunks(batch_size).map(|x| {
        x.iter().map(|entry| {
            let child = spawn_run_bindgen(run_bindgen.clone(), bindgen.clone(), entry.path());
            (entry.path(), child)
        }).collect::<Vec<_>>()
    });
    let failures: Vec<_> = children.flat_map(|x| {
            x.into_iter().filter_map(|(path, mut child)| {
                let passed = child.wait()
                    .expect("Should wait on child process")
                    .success();
                if passed { None } else { Some((path, child)) }
            })
        })
        .collect();
    let num_failures = failures.len();
    for (path, child) in failures {
        println!("FAIL: {}", path.display());
        let mut buf = String::new();
        child.stdout
            .expect("should have stdout piped")
            .read_to_string(&mut buf)
            .expect("should read child's stdout");
        for line in buf.lines() {
            println!("child stdout> {}", line);
        }
        // BUGFIX: `read_to_string` *appends* to the buffer; without clearing
        // it here the stderr dump would repeat the stdout contents.
        buf.clear();
        child.stderr
            .expect("should have stderr piped")
            .read_to_string(&mut buf)
            .expect("should read child's stderr");
        for line in buf.lines() {
            println!("child stderr> {}", line);
        }
    }
    if num_failures > 0 {
        panic!("{} test failures!", num_failures);
    }
}
|
extern crate arrayvec;
#[macro_use] extern crate matches;
use arrayvec::ArrayVec;
use arrayvec::ArrayString;
use std::mem;
use arrayvec::CapacityError;
use std::collections::HashMap;
/// Push non-Copy elements (`Vec`s) into an `ArrayVec`, iterate by
/// reference, then consume it by value and sum the element lengths.
#[test]
fn test_simple() {
    let mut buf: ArrayVec<[Vec<i32>; 3]> = ArrayVec::new();
    buf.push(vec![1, 2, 3, 4]);
    buf.push(vec![10]);
    buf.push(vec![-1, 13, -2]);
    // Every inner vector sums to 10.
    for inner in &buf {
        let total: i32 = inner.iter().sum();
        assert_eq!(total, 10);
    }
    // Consuming iteration: total element count across all inner vectors.
    let combined_len: usize = buf.into_iter().map(|inner| inner.len()).sum();
    assert_eq!(combined_len, 8);
}
/// `remaining_capacity` decreases by one for every push, from the full
/// capacity down to zero.
#[test]
fn test_capacity_left() {
    let mut buf: ArrayVec<[usize; 4]> = ArrayVec::new();
    assert_eq!(buf.remaining_capacity(), 4);
    // Push 1, 2, 3, 4 and check the remaining room after each push.
    for i in 0..4 {
        buf.push(i + 1);
        assert_eq!(buf.remaining_capacity(), 4 - (i + 1));
    }
}
/// `try_extend_from_slice` appends all elements when they fit.
#[test]
fn test_extend_from_slice() {
    let mut vec: ArrayVec<[usize; 10]> = ArrayVec::new();
    // BUGFIX: the Result was silently ignored; unwrap so a capacity error
    // fails the test (matches the later revision of this test).
    vec.try_extend_from_slice(&[1, 2, 3]).unwrap();
    assert_eq!(vec.len(), 3);
    assert_eq!(&vec[..], &[1, 2, 3]);
    assert_eq!(vec.pop(), Some(3));
    assert_eq!(&vec[..], &[1, 2]);
}
/// Filling a large (4096-element) `ArrayVec` works, and one more push
/// past capacity fails via `try_push`.
#[test]
fn test_u16_index() {
    const N: usize = 4096;
    let mut buf: ArrayVec<[_; N]> = ArrayVec::new();
    let mut remaining = N;
    while remaining > 0 {
        assert!(buf.try_push(1u8).is_ok());
        remaining -= 1;
    }
    // Capacity is exhausted now.
    assert!(buf.try_push(0).is_err());
    assert_eq!(buf.len(), N);
}
/// `into_iter` is double-ended and keeps `size_hint` consistent while
/// consuming from both ends.
#[test]
fn test_iter() {
    let mut iter = ArrayVec::from([1, 2, 3]).into_iter();
    assert_eq!(iter.size_hint(), (3, Some(3)));
    assert_eq!(iter.next_back(), Some(3));
    assert_eq!(iter.next(), Some(1));
    assert_eq!(iter.next_back(), Some(2));
    assert_eq!(iter.size_hint(), (0, Some(0)));
    assert_eq!(iter.next_back(), None);
}
/// Drop bookkeeping: every element's destructor runs exactly once, across
/// plain drop, failed `try_push`, `pop`, `into_inner`, and cloned
/// `into_iter`s. `Bump` increments the shared counter on each drop.
#[test]
fn test_drop() {
    use std::cell::Cell;
    let flag = &Cell::new(0);
    #[derive(Clone)]
    struct Bump<'a>(&'a Cell<i32>);
    impl<'a> Drop for Bump<'a> {
        fn drop(&mut self) {
            let n = self.0.get();
            self.0.set(n + 1);
        }
    }
    // Dropping the ArrayVec drops both elements.
    {
        let mut array = ArrayVec::<[Bump; 128]>::new();
        array.push(Bump(flag));
        array.push(Bump(flag));
    }
    assert_eq!(flag.get(), 2);
    // test something with the nullable pointer optimization
    flag.set(0);
    {
        let mut array = ArrayVec::<[_; 3]>::new();
        array.push(vec![Bump(flag)]);
        array.push(vec![Bump(flag), Bump(flag)]);
        array.push(vec![]);
        // try_push on a full vec hands the element back in the Err value...
        let push4 = array.try_push(vec![Bump(flag)]);
        assert_eq!(flag.get(), 0);
        // ...so dropping the Err drops that rejected element (count 1).
        drop(push4);
        assert_eq!(flag.get(), 1);
        // pop returns the empty vec — nothing more dropped.
        drop(array.pop());
        assert_eq!(flag.get(), 1);
        // pop returns the two-element vec (count 1 + 2 = 3).
        drop(array.pop());
        assert_eq!(flag.get(), 3);
    }
    // The remaining one-element vec drops with the array (count 4).
    assert_eq!(flag.get(), 4);
    // test into_inner
    flag.set(0);
    {
        let mut array = ArrayVec::<[_; 3]>::new();
        array.push(Bump(flag));
        array.push(Bump(flag));
        array.push(Bump(flag));
        // into_inner on a full vec moves the elements out without dropping.
        let inner = array.into_inner();
        assert!(inner.is_ok());
        assert_eq!(flag.get(), 0);
        drop(inner);
        assert_eq!(flag.get(), 3);
    }
    // test cloning into_iter
    flag.set(0);
    {
        let mut array = ArrayVec::<[_; 3]>::new();
        array.push(Bump(flag));
        array.push(Bump(flag));
        array.push(Bump(flag));
        let mut iter = array.into_iter();
        assert_eq!(flag.get(), 0);
        // Consuming one element drops it (count 1).
        iter.next();
        assert_eq!(flag.get(), 1);
        // Cloning the iterator clones only the *remaining* two elements.
        let clone = iter.clone();
        assert_eq!(flag.get(), 1);
        drop(clone);
        assert_eq!(flag.get(), 3);
        drop(iter);
        assert_eq!(flag.get(), 5);
    }
}
/// All elements must still be dropped even when the *first* element's
/// destructor panics, both for whole-vec drop and for `truncate`.
#[test]
fn test_drop_panics() {
    use std::cell::Cell;
    use std::panic::catch_unwind;
    use std::panic::AssertUnwindSafe;
    let flag = &Cell::new(0);
    struct Bump<'a>(&'a Cell<i32>);
    // Panic in the first drop
    impl<'a> Drop for Bump<'a> {
        fn drop(&mut self) {
            let n = self.0.get();
            self.0.set(n + 1);
            if n == 0 {
                panic!("Panic in Bump's drop");
            }
        }
    }
    // check if rust is new enough
    flag.set(0);
    {
        let array = vec![Bump(flag), Bump(flag)];
        let res = catch_unwind(AssertUnwindSafe(|| {
            drop(array);
        }));
        assert!(res.is_err());
    }
    if flag.get() != 2 {
        println!("test_drop_panics: skip, this version of Rust doesn't continue in drop_in_place");
        return;
    }
    flag.set(0);
    {
        let mut array = ArrayVec::<[Bump; 128]>::new();
        array.push(Bump(flag));
        array.push(Bump(flag));
        array.push(Bump(flag));
        let res = catch_unwind(AssertUnwindSafe(|| {
            drop(array);
        }));
        assert!(res.is_err());
    }
    // Check that all the elements drop, even if the first drop panics.
    assert_eq!(flag.get(), 3);
    flag.set(0);
    {
        let mut array = ArrayVec::<[Bump; 16]>::new();
        array.push(Bump(flag));
        array.push(Bump(flag));
        array.push(Bump(flag));
        array.push(Bump(flag));
        array.push(Bump(flag));
        let i = 2;
        let tail_len = array.len() - i;
        let res = catch_unwind(AssertUnwindSafe(|| {
            array.truncate(i);
        }));
        assert!(res.is_err());
        // Check that all the tail elements drop, even if the first drop panics.
        assert_eq!(flag.get(), tail_len as i32);
    }
}
/// `Extend`/`FromIterator`: collecting/extending stops at capacity and
/// consumes exactly the elements that fit, leaving the rest in the source
/// iterator.
#[test]
fn test_extend() {
    let mut range = 0..10;
    // Collect fills to capacity (5) and leaves the iterator at 5.
    let mut array: ArrayVec<[_; 5]> = range.by_ref().collect();
    assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
    assert_eq!(range.next(), Some(5));
    // Extending a full vec consumes one element from the iterator.
    array.extend(range.by_ref());
    assert_eq!(range.next(), Some(6));
    let mut array: ArrayVec<[_; 10]> = (0..3).collect();
    assert_eq!(&array[..], &[0, 1, 2]);
    array.extend(3..5);
    assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
}
/// Compile-time check: `ArrayVec` of a Send+Sync element type coerces to
/// `&Send` and `&Sync` trait objects (i.e. it is Send and Sync itself).
#[test]
fn test_is_send_sync() {
    let data = ArrayVec::<[Vec<i32>; 5]>::new();
    &data as &Send;
    &data as &Sync;
}
/// Upper bounds on the in-memory size of `ArrayVec` for a few element
/// types/capacities (the comments break down the expected layout).
#[test]
fn test_compact_size() {
    // Future rust will kill these drop flags!
    // 4 elements size + 1 len + 1 enum tag + [1 drop flag]
    type ByteArray = ArrayVec<[u8; 4]>;
    println!("{}", mem::size_of::<ByteArray>());
    assert!(mem::size_of::<ByteArray>() <= 8);
    // 1 enum tag + 1 drop flag
    type EmptyArray = ArrayVec<[u8; 0]>;
    println!("{}", mem::size_of::<EmptyArray>());
    assert!(mem::size_of::<EmptyArray>() <= 2);
    // 12 element size + 1 enum tag + 3 padding + 1 len + 1 drop flag + 2 padding
    type QuadArray = ArrayVec<[u32; 3]>;
    println!("{}", mem::size_of::<QuadArray>());
    assert!(mem::size_of::<QuadArray>() <= 24);
}
/// `drain`: removing ranges, draining in reverse into another vec, and
/// draining everything with `..`.
#[test]
fn test_drain() {
    let mut v = ArrayVec::from([0; 8]);
    v.pop();
    v.drain(0..7);
    assert_eq!(&v[..], &[]);
    // Refill from an unbounded range (stops at capacity).
    v.extend(0..);
    v.drain(1..4);
    assert_eq!(&v[..], &[0, 4, 5, 6, 7]);
    // Drain a middle range in reverse order into a second ArrayVec.
    let u: ArrayVec<[_; 3]> = v.drain(1..4).rev().collect();
    assert_eq!(&u[..], &[6, 5, 4]);
    assert_eq!(&v[..], &[0, 7]);
    v.drain(..);
    assert_eq!(&v[..], &[]);
}
/// `retain`: keep-all, a mutating predicate, and drop-all.
#[test]
fn test_retain() {
    let mut v = ArrayVec::from([0; 8]);
    for (i, elt) in v.iter_mut().enumerate() {
        *elt = i;
    }
    v.retain(|_| true);
    assert_eq!(&v[..], &[0, 1, 2, 3, 4, 5, 6, 7]);
    // The predicate may mutate elements before deciding to keep them.
    v.retain(|elt| {
        *elt /= 2;
        *elt % 2 == 0
    });
    assert_eq!(&v[..], &[0, 0, 2, 2]);
    v.retain(|_| false);
    assert_eq!(&v[..], &[]);
}
/// Draining a range past the current length (7 after the pop) panics.
#[test]
#[should_panic]
fn test_drain_oob() {
    let mut v = ArrayVec::from([0; 8]);
    v.pop();
    v.drain(0..8);
}
/// A panicking element destructor propagates out of the ArrayVec's drop.
#[test]
#[should_panic]
fn test_drop_panic() {
    struct DropPanic;
    impl Drop for DropPanic {
        fn drop(&mut self) {
            panic!("drop");
        }
    }
    let mut array = ArrayVec::<[DropPanic; 1]>::new();
    array.push(DropPanic);
}
/// A panicking element destructor also propagates when the remaining
/// elements are dropped via the consuming iterator.
#[test]
#[should_panic]
fn test_drop_panic_into_iter() {
    struct DropPanic;
    impl Drop for DropPanic {
        fn drop(&mut self) {
            panic!("drop");
        }
    }
    let mut array = ArrayVec::<[DropPanic; 1]>::new();
    array.push(DropPanic);
    array.into_iter();
}
/// `insert`/`try_insert`: in-order inserts succeed until the vec is full;
/// inserting into a full vec (or a zero-capacity vec) returns an error.
#[test]
fn test_insert() {
    // Zero-capacity vec: any push fails.
    let mut v = ArrayVec::from([]);
    assert_matches!(v.try_push(1), Err(_));
    let mut v = ArrayVec::<[_; 3]>::new();
    v.insert(0, 0);
    v.insert(1, 1);
    //let ret1 = v.try_insert(3, 3);
    //assert_matches!(ret1, Err(InsertError::OutOfBounds(_)));
    assert_eq!(&v[..], &[0, 1]);
    v.insert(2, 2);
    assert_eq!(&v[..], &[0, 1, 2]);
    // Full vec: try_insert fails and leaves the contents untouched.
    let ret2 = v.try_insert(1, 9);
    assert_eq!(&v[..], &[0, 1, 2]);
    assert_matches!(ret2, Err(_));
    // Capacity-1 vec that is already full.
    let mut v = ArrayVec::from([2]);
    assert_matches!(v.try_insert(0, 1), Err(CapacityError { .. }));
    assert_matches!(v.try_insert(1, 1), Err(CapacityError { .. }));
    //assert_matches!(v.try_insert(2, 1), Err(CapacityError { .. }));
}
/// `into_inner` on a partially-filled vec returns the vec itself in Err.
#[test]
fn test_into_inner_1() {
    let mut partial = ArrayVec::from([1, 2]);
    partial.pop();
    let snapshot = partial.clone();
    assert_eq!(partial.into_inner(), Err(snapshot));
}
/// `into_inner` on a full vec of owned `String`s yields the inner array.
#[test]
fn test_into_inner_2() {
    let mut v = ArrayVec::<[String; 4]>::new();
    for name in &["a", "b", "c", "d"] {
        v.push(String::from(*name));
    }
    assert_eq!(v.into_inner().unwrap(), ["a", "b", "c", "d"]);
}
/// `into_inner` after filling to capacity via `extend` on an unbounded
/// range yields the inner array.
#[test]
fn test_into_inner_3_() {
    let mut v = ArrayVec::<[i32; 4]>::new();
    v.extend(1..);
    assert_eq!(v.into_inner().unwrap(), [1, 2, 3, 4]);
}
/// `io::Write` impl: writes append bytes; a write larger than the
/// remaining capacity is a short write (returns the count written).
#[test]
fn test_write() {
    use std::io::Write;
    let mut v = ArrayVec::<[_; 8]>::new();
    write!(&mut v, "\x01\x02\x03").unwrap();
    assert_eq!(&v[..], &[1, 2, 3]);
    // Only 5 slots remain, so only 5 of the 16 bytes are written.
    let r = v.write(&[9; 16]).unwrap();
    assert_eq!(r, 5);
    assert_eq!(&v[..], &[1, 2, 3, 9, 9, 9, 9, 9]);
}
/// `clone_from` produces the same contents whether the destination is
/// empty, longer than the source, or cleared.
#[test]
fn array_clone_from() {
    let mut v = ArrayVec::<[_; 4]>::new();
    v.push(vec![1, 2]);
    v.push(vec![3, 4, 5]);
    v.push(vec![6]);
    let reference = v.to_vec();
    // Destination initially empty.
    let mut u = ArrayVec::<[_; 4]>::new();
    u.clone_from(&v);
    assert_eq!(&u, &reference[..]);
    // Destination longer than the source.
    let mut t = ArrayVec::<[_; 4]>::new();
    t.push(vec![97]);
    t.push(vec![]);
    t.push(vec![5, 6, 2]);
    t.push(vec![2]);
    t.clone_from(&v);
    assert_eq!(&t, &reference[..]);
    // Destination cleared before cloning.
    t.clear();
    t.clone_from(&v);
    assert_eq!(&t, &reference[..]);
}
/// `ArrayString` basics: push_str, Eq/Hash/Borrow interop with `HashMap`,
/// DerefMut to `&mut str`, and `CapacityError` working with `try!`.
#[test]
fn test_string() {
    use std::error::Error;
    let text = "hello world";
    let mut s = ArrayString::<[_; 16]>::new();
    s.try_push_str(text).unwrap();
    assert_eq!(&s, text);
    assert_eq!(text, &s);
    // Make sure Hash / Eq / Borrow match up so we can use HashMap
    let mut map = HashMap::new();
    map.insert(s, 1);
    assert_eq!(map[text], 1);
    // A push that doesn't fit fails and leaves the string empty.
    let mut t = ArrayString::<[_; 2]>::new();
    assert!(t.try_push_str(text).is_err());
    assert_eq!(&t, "");
    t.push_str("ab");
    // DerefMut
    let tmut: &mut str = &mut t;
    assert_eq!(tmut, "ab");
    // Test Error trait / try
    // (pre-`?`-operator code: `try!` requires the error to convert into
    // Box<Error>, which exercises CapacityError's Error impl)
    let t = || -> Result<(), Box<Error>> {
        let mut t = ArrayString::<[_; 2]>::new();
        try!(t.try_push_str(text));
        Ok(())
    }();
    assert!(t.is_err());
}
/// `ArrayString::from(&str)` when the text exactly fills the capacity.
#[test]
fn test_string_from() {
    let text = "hello world";
    // Test `from` constructor
    let u = ArrayString::<[_; 11]>::from(text).unwrap();
    assert_eq!(&u, text);
    assert_eq!(u.len(), text.len());
}
/// `ArrayString::from_byte_string` builds from a fixed-size byte array.
#[test]
fn test_string_from_bytes() {
    let text = "hello world";
    let u = ArrayString::from_byte_string(b"hello world").unwrap();
    assert_eq!(&u, text);
    assert_eq!(u.len(), text.len());
}
/// `clone_from` replaces a longer `ArrayString`'s contents with a shorter
/// source's.
#[test]
fn test_string_clone() {
    let text = "hi";
    let mut s = ArrayString::<[_; 4]>::new();
    s.push_str("abcd");
    let t = ArrayString::<[_; 4]>::from(text).unwrap();
    s.clone_from(&t);
    assert_eq!(&t, &s);
}
/// `try_push`/`push` of chars respects UTF-8 encoded width: the 8-byte
/// string holds "abc" (3 bytes) plus two 2-byte Greek letters, then a
/// 1-byte char still fits but nothing more.
#[test]
fn test_string_push() {
    let text = "abcαβγ";
    let mut s = ArrayString::<[_; 8]>::new();
    for c in text.chars() {
        if let Err(_) = s.try_push(c) {
            break;
        }
    }
    // γ (2 bytes) did not fit in the remaining 1 byte.
    assert_eq!("abcαβ", &s[..]);
    s.push('x');
    assert_eq!("abcαβx", &s[..]);
    assert!(s.try_push('x').is_err());
}
/// `try_insert` at an index equal to the current length appends.
#[test]
fn test_insert_at_length() {
    let mut buf = ArrayVec::<[_; 8]>::new();
    let first = buf.try_insert(0, "a");
    let second = buf.try_insert(1, "b");
    assert!(first.is_ok());
    assert!(second.is_ok());
    assert_eq!(&buf[..], &["a", "b"]);
}
/// `try_insert` past the current length (index 1 into an empty vec)
/// panics rather than returning an error.
#[should_panic]
#[test]
fn test_insert_out_of_bounds() {
    let mut v = ArrayVec::<[_; 8]>::new();
    let _ = v.try_insert(1, "test");
}
/*
* insert that pushes out the last
let mut u = ArrayVec::from([1, 2, 3, 4]);
let ret = u.try_insert(3, 99);
assert_eq!(&u[..], &[1, 2, 3, 99]);
assert_matches!(ret, Err(_));
let ret = u.try_insert(4, 77);
assert_eq!(&u[..], &[1, 2, 3, 99]);
assert_matches!(ret, Err(_));
*/
/// A failed `try_insert` into a full vec must not drop any stored
/// element; only the rejected element drops, and it drops exactly once.
#[test]
fn test_drop_in_insert() {
    use std::cell::Cell;
    let flag = &Cell::new(0);
    struct Bump<'a>(&'a Cell<i32>);
    impl<'a> Drop for Bump<'a> {
        fn drop(&mut self) {
            let n = self.0.get();
            self.0.set(n + 1);
        }
    }
    flag.set(0);
    {
        let mut array = ArrayVec::<[_; 2]>::new();
        array.push(Bump(flag));
        array.insert(0, Bump(flag));
        assert_eq!(flag.get(), 0);
        // Full vec: the new element comes back inside the Err.
        let ret = array.try_insert(1, Bump(flag));
        assert_eq!(flag.get(), 0);
        assert_matches!(ret, Err(_));
        // Dropping the Err drops the rejected element (count 1).
        drop(ret);
        assert_eq!(flag.get(), 1);
    }
    // The two stored elements drop with the array (count 3).
    assert_eq!(flag.get(), 3);
}
/// `pop_at`: removes by index (shifting later elements down), returns
/// `None` for out-of-bounds indices.
#[test]
fn test_pop_at() {
    let mut v = ArrayVec::<[String; 4]>::new();
    let s = String::from;
    v.push(s("a"));
    v.push(s("b"));
    v.push(s("c"));
    v.push(s("d"));
    // Index 4 is past the end.
    assert_eq!(v.pop_at(4), None);
    assert_eq!(v.pop_at(1), Some(s("b")));
    // After removing "b", index 1 is now "c".
    assert_eq!(v.pop_at(1), Some(s("c")));
    assert_eq!(v.pop_at(2), None);
    assert_eq!(&v[..], &["a", "d"]);
}
/// A 64 KiB-capacity ArrayVec works (length can't be stored in a u8/u16
/// without widening).
#[test]
fn test_sizes() {
    let v = ArrayVec::from([0u8; 1 << 16]);
    assert_eq!(vec![0u8; v.len()], &v[..]);
}
/// `Default` yields empty containers even when the element type itself
/// has no `Default` implementation.
#[test]
fn test_default() {
    use std::net;
    let s: ArrayString<[u8; 4]> = Default::default();
    // Something without `Default` implementation.
    let v: ArrayVec<[net::TcpStream; 4]> = Default::default();
    assert_eq!(s.len(), 0);
    assert_eq!(v.len(), 0);
}
/// Array sizes in the 33..=128 range require the corresponding cargo
/// feature for the `From` impls to exist.
#[cfg(feature="array-sizes-33-128")]
#[test]
fn test_sizes_33_128() {
    ArrayVec::from([0u8; 52]);
    ArrayVec::from([0u8; 127]);
}
/// Array sizes in the 129..=255 range require the corresponding cargo
/// feature for the `From` impls to exist.
#[cfg(feature="array-sizes-129-255")]
#[test]
fn test_sizes_129_255() {
    ArrayVec::from([0u8; 237]);
    ArrayVec::from([0u8; 255]);
}
TEST: Add test for try_extend_from_slice's error
extern crate arrayvec;
#[macro_use] extern crate matches;
use arrayvec::ArrayVec;
use arrayvec::ArrayString;
use std::mem;
use arrayvec::CapacityError;
use std::collections::HashMap;
#[test]
fn test_simple() {
use std::ops::Add;
let mut vec: ArrayVec<[Vec<i32>; 3]> = ArrayVec::new();
vec.push(vec![1, 2, 3, 4]);
vec.push(vec![10]);
vec.push(vec![-1, 13, -2]);
for elt in &vec {
assert_eq!(elt.iter().fold(0, Add::add), 10);
}
let sum_len = vec.into_iter().map(|x| x.len()).fold(0, Add::add);
assert_eq!(sum_len, 8);
}
#[test]
fn test_capacity_left() {
let mut vec: ArrayVec<[usize; 4]> = ArrayVec::new();
assert_eq!(vec.remaining_capacity(), 4);
vec.push(1);
assert_eq!(vec.remaining_capacity(), 3);
vec.push(2);
assert_eq!(vec.remaining_capacity(), 2);
vec.push(3);
assert_eq!(vec.remaining_capacity(), 1);
vec.push(4);
assert_eq!(vec.remaining_capacity(), 0);
}
/// `try_extend_from_slice` appends all elements when they fit.
#[test]
fn test_extend_from_slice() {
    let mut vec: ArrayVec<[usize; 10]> = ArrayVec::new();
    vec.try_extend_from_slice(&[1, 2, 3]).unwrap();
    assert_eq!(vec.len(), 3);
    assert_eq!(&vec[..], &[1, 2, 3]);
    assert_eq!(vec.pop(), Some(3));
    assert_eq!(&vec[..], &[1, 2]);
}
/// `try_extend_from_slice` errors when the slice doesn't fit: both when
/// partially filled and for a zero-capacity vec.
#[test]
fn test_extend_from_slice_error() {
    let mut vec: ArrayVec<[usize; 10]> = ArrayVec::new();
    vec.try_extend_from_slice(&[1, 2, 3]).unwrap();
    // 8 more elements exceed the 7 remaining slots.
    let res = vec.try_extend_from_slice(&[0; 8]);
    assert_matches!(res, Err(_));
    let mut vec: ArrayVec<[usize; 0]> = ArrayVec::new();
    let res = vec.try_extend_from_slice(&[0; 1]);
    assert_matches!(res, Err(_));
}
#[test]
fn test_u16_index() {
const N: usize = 4096;
let mut vec: ArrayVec<[_; N]> = ArrayVec::new();
for _ in 0..N {
assert!(vec.try_push(1u8).is_ok());
}
assert!(vec.try_push(0).is_err());
assert_eq!(vec.len(), N);
}
#[test]
fn test_iter() {
let mut iter = ArrayVec::from([1, 2, 3]).into_iter();
assert_eq!(iter.size_hint(), (3, Some(3)));
assert_eq!(iter.next_back(), Some(3));
assert_eq!(iter.next(), Some(1));
assert_eq!(iter.next_back(), Some(2));
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next_back(), None);
}
#[test]
fn test_drop() {
use std::cell::Cell;
let flag = &Cell::new(0);
#[derive(Clone)]
struct Bump<'a>(&'a Cell<i32>);
impl<'a> Drop for Bump<'a> {
fn drop(&mut self) {
let n = self.0.get();
self.0.set(n + 1);
}
}
{
let mut array = ArrayVec::<[Bump; 128]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
}
assert_eq!(flag.get(), 2);
// test something with the nullable pointer optimization
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(vec![Bump(flag)]);
array.push(vec![Bump(flag), Bump(flag)]);
array.push(vec![]);
let push4 = array.try_push(vec![Bump(flag)]);
assert_eq!(flag.get(), 0);
drop(push4);
assert_eq!(flag.get(), 1);
drop(array.pop());
assert_eq!(flag.get(), 1);
drop(array.pop());
assert_eq!(flag.get(), 3);
}
assert_eq!(flag.get(), 4);
// test into_inner
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let inner = array.into_inner();
assert!(inner.is_ok());
assert_eq!(flag.get(), 0);
drop(inner);
assert_eq!(flag.get(), 3);
}
// test cloning into_iter
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let mut iter = array.into_iter();
assert_eq!(flag.get(), 0);
iter.next();
assert_eq!(flag.get(), 1);
let clone = iter.clone();
assert_eq!(flag.get(), 1);
drop(clone);
assert_eq!(flag.get(), 3);
drop(iter);
assert_eq!(flag.get(), 5);
}
}
#[test]
fn test_drop_panics() {
use std::cell::Cell;
use std::panic::catch_unwind;
use std::panic::AssertUnwindSafe;
let flag = &Cell::new(0);
struct Bump<'a>(&'a Cell<i32>);
// Panic in the first drop
impl<'a> Drop for Bump<'a> {
fn drop(&mut self) {
let n = self.0.get();
self.0.set(n + 1);
if n == 0 {
panic!("Panic in Bump's drop");
}
}
}
// check if rust is new enough
flag.set(0);
{
let array = vec![Bump(flag), Bump(flag)];
let res = catch_unwind(AssertUnwindSafe(|| {
drop(array);
}));
assert!(res.is_err());
}
if flag.get() != 2 {
println!("test_drop_panics: skip, this version of Rust doesn't continue in drop_in_place");
return;
}
flag.set(0);
{
let mut array = ArrayVec::<[Bump; 128]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let res = catch_unwind(AssertUnwindSafe(|| {
drop(array);
}));
assert!(res.is_err());
}
// Check that all the elements drop, even if the first drop panics.
assert_eq!(flag.get(), 3);
flag.set(0);
{
let mut array = ArrayVec::<[Bump; 16]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let i = 2;
let tail_len = array.len() - i;
let res = catch_unwind(AssertUnwindSafe(|| {
array.truncate(i);
}));
assert!(res.is_err());
// Check that all the tail elements drop, even if the first drop panics.
assert_eq!(flag.get(), tail_len as i32);
}
}
#[test]
fn test_extend() {
let mut range = 0..10;
let mut array: ArrayVec<[_; 5]> = range.by_ref().collect();
assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
assert_eq!(range.next(), Some(5));
array.extend(range.by_ref());
assert_eq!(range.next(), Some(6));
let mut array: ArrayVec<[_; 10]> = (0..3).collect();
assert_eq!(&array[..], &[0, 1, 2]);
array.extend(3..5);
assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
}
#[test]
fn test_is_send_sync() {
let data = ArrayVec::<[Vec<i32>; 5]>::new();
&data as &Send;
&data as &Sync;
}
#[test]
fn test_compact_size() {
// Future rust will kill these drop flags!
// 4 elements size + 1 len + 1 enum tag + [1 drop flag]
type ByteArray = ArrayVec<[u8; 4]>;
println!("{}", mem::size_of::<ByteArray>());
assert!(mem::size_of::<ByteArray>() <= 8);
// 1 enum tag + 1 drop flag
type EmptyArray = ArrayVec<[u8; 0]>;
println!("{}", mem::size_of::<EmptyArray>());
assert!(mem::size_of::<EmptyArray>() <= 2);
// 12 element size + 1 enum tag + 3 padding + 1 len + 1 drop flag + 2 padding
type QuadArray = ArrayVec<[u32; 3]>;
println!("{}", mem::size_of::<QuadArray>());
assert!(mem::size_of::<QuadArray>() <= 24);
}
#[test]
fn test_drain() {
let mut v = ArrayVec::from([0; 8]);
v.pop();
v.drain(0..7);
assert_eq!(&v[..], &[]);
v.extend(0..);
v.drain(1..4);
assert_eq!(&v[..], &[0, 4, 5, 6, 7]);
let u: ArrayVec<[_; 3]> = v.drain(1..4).rev().collect();
assert_eq!(&u[..], &[6, 5, 4]);
assert_eq!(&v[..], &[0, 7]);
v.drain(..);
assert_eq!(&v[..], &[]);
}
#[test]
fn test_retain() {
let mut v = ArrayVec::from([0; 8]);
for (i, elt) in v.iter_mut().enumerate() {
*elt = i;
}
v.retain(|_| true);
assert_eq!(&v[..], &[0, 1, 2, 3, 4, 5, 6, 7]);
v.retain(|elt| {
*elt /= 2;
*elt % 2 == 0
});
assert_eq!(&v[..], &[0, 0, 2, 2]);
v.retain(|_| false);
assert_eq!(&v[..], &[]);
}
#[test]
#[should_panic]
fn test_drain_oob() {
let mut v = ArrayVec::from([0; 8]);
v.pop();
v.drain(0..8);
}
/// An element whose Drop impl panics: the panic must propagate when the
/// ArrayVec (and therefore its element) is dropped at end of scope.
#[test]
#[should_panic]
fn test_drop_panic() {
    struct DropPanic;
    impl Drop for DropPanic {
        fn drop(&mut self) {
            panic!("drop");
        }
    }
    let mut array = ArrayVec::<[DropPanic; 1]>::new();
    array.push(DropPanic);
}
/// Like the plain drop-panic test, but the element is dropped through the
/// into_iter adaptor: the iterator value is discarded immediately, which
/// drops its remaining element and must propagate the panic.
#[test]
#[should_panic]
fn test_drop_panic_into_iter() {
    struct DropPanic;
    impl Drop for DropPanic {
        fn drop(&mut self) {
            panic!("drop");
        }
    }
    let mut array = ArrayVec::<[DropPanic; 1]>::new();
    array.push(DropPanic);
    array.into_iter();
}
/// Insertion behaviour at and beyond capacity.
#[test]
fn test_insert() {
    // Zero-capacity vec: any push must fail.
    let mut v = ArrayVec::from([]);
    assert_matches!(v.try_push(1), Err(_));

    let mut v = ArrayVec::<[_; 3]>::new();
    v.insert(0, 0);
    v.insert(1, 1);
    //let ret1 = v.try_insert(3, 3);
    //assert_matches!(ret1, Err(InsertError::OutOfBounds(_)));
    assert_eq!(&v[..], &[0, 1]);
    v.insert(2, 2);
    assert_eq!(&v[..], &[0, 1, 2]);
    // Full: try_insert reports the error and leaves the contents untouched.
    let ret2 = v.try_insert(1, 9);
    assert_eq!(&v[..], &[0, 1, 2]);
    assert_matches!(ret2, Err(_));

    // Capacity-1 vec that is already full.
    let mut v = ArrayVec::from([2]);
    assert_matches!(v.try_insert(0, 1), Err(CapacityError { .. }));
    assert_matches!(v.try_insert(1, 1), Err(CapacityError { .. }));
    //assert_matches!(v.try_insert(2, 1), Err(CapacityError { .. }));
}
/// A partially-filled ArrayVec cannot yield its backing array:
/// `into_inner` hands the vec itself back as the error value.
#[test]
fn test_into_inner_1() {
    let mut partial = ArrayVec::from([1, 2]);
    partial.pop();
    let expected = partial.clone();
    assert_eq!(partial.into_inner(), Err(expected));
}
/// A vec filled exactly to capacity yields its backing array.
#[test]
fn test_into_inner_2() {
    let mut v = ArrayVec::<[String; 4]>::new();
    for s in &["a", "b", "c", "d"] {
        v.push(String::from(*s));
    }
    assert_eq!(v.into_inner().unwrap(), ["a", "b", "c", "d"]);
}
/// Filling via `extend` from an unbounded range stops at capacity, after
/// which `into_inner` succeeds.
#[test]
fn test_into_inner_3_() {
    let mut filled = ArrayVec::<[i32; 4]>::new();
    filled.extend(1..);
    assert_eq!(filled.into_inner().unwrap(), [1, 2, 3, 4]);
}
/// io::Write for a byte ArrayVec: writes are truncated at capacity and the
/// returned count reflects the bytes actually stored.
#[test]
fn test_write() {
    use std::io::Write;
    let mut v = ArrayVec::<[_; 8]>::new();
    write!(&mut v, "\x01\x02\x03").unwrap();
    assert_eq!(&v[..], &[1, 2, 3]);
    // Only 5 of the 16 offered bytes fit.
    let r = v.write(&[9; 16]).unwrap();
    assert_eq!(r, 5);
    assert_eq!(&v[..], &[1, 2, 3, 9, 9, 9, 9, 9]);
}
/// clone_from must produce an exact copy whether the destination starts
/// empty, longer than the source, or freshly cleared.
#[test]
fn array_clone_from() {
    let mut v = ArrayVec::<[_; 4]>::new();
    v.push(vec![1, 2]);
    v.push(vec![3, 4, 5]);
    v.push(vec![6]);
    let reference = v.to_vec();
    // Destination initially empty.
    let mut u = ArrayVec::<[_; 4]>::new();
    u.clone_from(&v);
    assert_eq!(&u, &reference[..]);
    // Destination longer than the source (4 elements vs 3).
    let mut t = ArrayVec::<[_; 4]>::new();
    t.push(vec![97]);
    t.push(vec![]);
    t.push(vec![5, 6, 2]);
    t.push(vec![2]);
    t.clone_from(&v);
    assert_eq!(&t, &reference[..]);
    // Destination cleared, then cloned into again.
    t.clear();
    t.clone_from(&v);
    assert_eq!(&t, &reference[..]);
}
/// ArrayString basics: fallible push_str, Hash/Eq/Borrow interop with
/// HashMap, DerefMut to `&mut str`, and Error-trait conversion via `try!`.
#[test]
fn test_string() {
    use std::error::Error;
    let text = "hello world";
    let mut s = ArrayString::<[_; 16]>::new();
    s.try_push_str(text).unwrap();
    assert_eq!(&s, text);
    assert_eq!(text, &s);
    // Make sure Hash / Eq / Borrow match up so we can use HashMap
    let mut map = HashMap::new();
    map.insert(s, 1);
    assert_eq!(map[text], 1);
    // An overlong push fails and leaves the string empty.
    let mut t = ArrayString::<[_; 2]>::new();
    assert!(t.try_push_str(text).is_err());
    assert_eq!(&t, "");
    t.push_str("ab");
    // DerefMut
    let tmut: &mut str = &mut t;
    assert_eq!(tmut, "ab");
    // Test Error trait / try
    let t = || -> Result<(), Box<Error>> {
        let mut t = ArrayString::<[_; 2]>::new();
        try!(t.try_push_str(text));
        Ok(())
    }();
    assert!(t.is_err());
}
/// The `from` constructor copies a &str that exactly fits the capacity.
#[test]
fn test_string_from() {
    let text = "hello world";
    let copied = ArrayString::<[_; 11]>::from(text).unwrap();
    assert_eq!(&copied, text);
    assert_eq!(copied.len(), text.len());
}
/// Constructing from a byte-string literal yields the equivalent text.
#[test]
fn test_string_from_bytes() {
    let expected = "hello world";
    let parsed = ArrayString::from_byte_string(b"hello world").unwrap();
    assert_eq!(&parsed, expected);
    assert_eq!(parsed.len(), expected.len());
}
/// clone_from replaces a longer destination string with the source.
#[test]
fn test_string_clone() {
    let text = "hi";
    let mut dst = ArrayString::<[_; 4]>::new();
    dst.push_str("abcd");
    let src = ArrayString::<[_; 4]>::from(text).unwrap();
    dst.clone_from(&src);
    assert_eq!(&src, &dst);
}
/// Pushing chars stops at the byte capacity: "abcαβ" is 7 of the 8 bytes,
/// so the two-byte 'γ' is rejected while a one-byte 'x' still fits.
#[test]
fn test_string_push() {
    let text = "abcαβγ";
    let mut s = ArrayString::<[_; 8]>::new();
    for c in text.chars() {
        if let Err(_) = s.try_push(c) {
            break;
        }
    }
    assert_eq!("abcαβ", &s[..]);
    s.push('x');
    assert_eq!("abcαβx", &s[..]);
    // Now exactly full: even a one-byte char fails.
    assert!(s.try_push('x').is_err());
}
/// Inserting at exactly the current length behaves like push.
#[test]
fn test_insert_at_length() {
    let mut v = ArrayVec::<[_; 8]>::new();
    let first = v.try_insert(0, "a");
    let second = v.try_insert(1, "b");
    assert!(first.is_ok() && second.is_ok());
    assert_eq!(&v[..], &["a", "b"]);
}
/// An insert index greater than the current length panics even through
/// the fallible `try_insert` API.
#[should_panic]
#[test]
fn test_insert_out_of_bounds() {
    let mut empty = ArrayVec::<[_; 8]>::new();
    let _ = empty.try_insert(1, "test");
}
/*
* insert that pushes out the last
let mut u = ArrayVec::from([1, 2, 3, 4]);
let ret = u.try_insert(3, 99);
assert_eq!(&u[..], &[1, 2, 3, 99]);
assert_matches!(ret, Err(_));
let ret = u.try_insert(4, 77);
assert_eq!(&u[..], &[1, 2, 3, 99]);
assert_matches!(ret, Err(_));
*/
/// Count drops around insert/try_insert: a rejected try_insert on a full
/// vec must hand the value back inside the error (dropped only when the
/// caller drops that error), and the stored elements drop with the vec.
#[test]
fn test_drop_in_insert() {
    use std::cell::Cell;
    let flag = &Cell::new(0);
    // Increments the shared counter once per drop.
    struct Bump<'a>(&'a Cell<i32>);
    impl<'a> Drop for Bump<'a> {
        fn drop(&mut self) {
            let n = self.0.get();
            self.0.set(n + 1);
        }
    }
    flag.set(0);
    {
        let mut array = ArrayVec::<[_; 2]>::new();
        array.push(Bump(flag));
        array.insert(0, Bump(flag));
        assert_eq!(flag.get(), 0);
        // Full: the rejected element lives inside `ret`...
        let ret = array.try_insert(1, Bump(flag));
        assert_eq!(flag.get(), 0);
        assert_matches!(ret, Err(_));
        // ...and is dropped exactly once when `ret` is dropped.
        drop(ret);
        assert_eq!(flag.get(), 1);
    }
    // The two stored elements drop with the vec: 1 + 2 = 3.
    assert_eq!(flag.get(), 3);
}
/// pop_at removes by index, shifting later elements down; out-of-range
/// indices return None without modifying the vec.
#[test]
fn test_pop_at() {
    let mut v = ArrayVec::<[String; 4]>::new();
    for name in &["a", "b", "c", "d"] {
        v.push((*name).to_string());
    }
    assert_eq!(v.pop_at(4), None);
    assert_eq!(v.pop_at(1), Some("b".to_string()));
    assert_eq!(v.pop_at(1), Some("c".to_string()));
    assert_eq!(v.pop_at(2), None);
    assert_eq!(&v[..], &["a", "d"]);
}
/// A large (64 KiB) backing array round-trips through `from` intact.
#[test]
fn test_sizes() {
    let big = ArrayVec::from([0u8; 1 << 16]);
    assert_eq!(vec![0u8; big.len()], &big[..]);
}
/// Default yields empty containers even when the element type has no
/// Default impl of its own (TcpStream).
#[test]
fn test_default() {
    use std::net;
    let s: ArrayString<[u8; 4]> = Default::default();
    // Something without `Default` implementation.
    let v: ArrayVec<[net::TcpStream; 4]> = Default::default();
    assert_eq!(s.len(), 0);
    assert_eq!(v.len(), 0);
}
/// Feature-gated check that mid-range array sizes implement `From`.
#[cfg(feature="array-sizes-33-128")]
#[test]
fn test_sizes_33_128() {
    let _ = ArrayVec::from([0u8; 52]);
    let _ = ArrayVec::from([0u8; 127]);
}
/// Feature-gated check that large array sizes implement `From`.
#[cfg(feature="array-sizes-129-255")]
#[test]
fn test_sizes_129_255() {
    let _ = ArrayVec::from([0u8; 237]);
    let _ = ArrayVec::from([0u8; 255]);
}
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use bytecode::function_target::FunctionTarget;
use move_model::ast::Spec;
use crate::workflow::WorkflowOptions;
/// Placeholder expression-inlining pass: currently returns the spec
/// unchanged. Options and target are accepted (and ignored) so the
/// signature matches the rest of the workflow's pass functions.
pub(crate) fn inline_all_exp_in_spec(
    _options: &WorkflowOptions,
    _target: FunctionTarget,
    spec: Spec,
) -> Result<Spec> {
    Ok(spec)
}
[move-prover][spec-flatten] unroll function call, lambda, and block exp
This commit adds an inliner that unrolls
- function calls
- lambda expressions, and
- block expressions
After the unrolling, only function arguments and the global state can be
variables in the expressions.
This serves as the foundation for a more generic simplifier.
To test the effect:
```bash
cd language/move-prover/tools/spec-flatten
cargo run -- \
../../../diem-framework/core/sources/AccountCreationScripts.move \
-d ../../../diem-framework/core/sources \
-d ../../../move-stdlib/sources \
-p inline \
-v
```
To combine the unrolling with the trimming pass, further add
`-p trim_aborts_if` after `-p inline`.
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use std::collections::BTreeMap;
use bytecode::function_target::FunctionTarget;
use move_model::{
ast::{Condition, ConditionKind, Exp, ExpData, Operation, Spec, TempIndex},
model::GlobalEnv,
symbol::Symbol,
};
use crate::workflow::WorkflowOptions;
/// Inline every expression in `spec`: spec-level `let` conditions are folded
/// into a running substitution map and removed from the output, and every
/// remaining condition has its expression (and additional expressions)
/// unrolled by `ExpInliner` against that map.
pub(crate) fn inline_all_exp_in_spec(
    _options: &WorkflowOptions,
    target: FunctionTarget,
    spec: Spec,
) -> Result<Spec> {
    let env = target.global_env();
    let inliner = ExpInliner { env };
    let Spec {
        loc,
        conditions,
        properties,
        on_impl,
    } = spec;
    // Accumulated let-bindings; each binding is inlined against the
    // bindings recorded before it, so later lets may refer to earlier ones.
    let mut local_vars = BTreeMap::new();
    let mut new_conditions = vec![];
    for cond in conditions {
        let Condition {
            loc,
            kind,
            properties,
            exp,
            additional_exps,
        } = cond;
        match &kind {
            // `let` (pre- or post-state): record the substitution and drop
            // the condition from the flattened spec.
            // NOTE(review): the pre/post distinction is discarded here —
            // confirm downstream passes do not rely on it.
            ConditionKind::LetPre(sym) | ConditionKind::LetPost(sym) => {
                let var_exp = inliner.inline_exp(&exp, None, Some(&local_vars));
                local_vars.insert(*sym, var_exp);
            }
            // Any other condition: rewrite its expressions in place.
            _ => {
                let new_exp = inliner.inline_exp(&exp, None, Some(&local_vars));
                let new_additional_exps = additional_exps
                    .into_iter()
                    .map(|e| inliner.inline_exp(&e, None, Some(&local_vars)))
                    .collect();
                let new_cond = Condition {
                    loc,
                    kind,
                    properties,
                    exp: new_exp,
                    additional_exps: new_additional_exps,
                };
                new_conditions.push(new_cond);
            }
        }
    }
    let new_spec = Spec {
        loc,
        conditions: new_conditions,
        properties,
        on_impl,
    };
    Ok(new_spec)
}
/// Expression rewriter that substitutes bound variables and unrolls spec
/// function calls, lambda invocations, and block (let) expressions, using
/// the global model `env` to resolve callees.
struct ExpInliner<'env> {
    env: &'env GlobalEnv,
}
impl ExpInliner<'_> {
    /// Rewrite `exp`, replacing temporaries via `temp_var_repl` and local
    /// variables via `local_var_repl`, while recursively unrolling spec
    /// function calls, lambda invocations, and blocks.
    ///
    /// NOTE(review): the rewriter callback appears to use `Err(e)` to mean
    /// "leave this node unchanged" rather than a failure — confirm against
    /// `ExpData::rewrite`'s contract.
    fn inline_exp(
        &self,
        exp: &Exp,
        temp_var_repl: Option<&BTreeMap<TempIndex, Exp>>,
        local_var_repl: Option<&BTreeMap<Symbol, Exp>>,
    ) -> Exp {
        use Operation::*;
        let mut rewriter = |e: Exp| match e.as_ref() {
            // NOTE(review): assumes every symbol/temp encountered is present
            // in the supplied map — `unwrap` panics otherwise (e.g. for a
            // variable bound outside the map?). TODO confirm the invariant.
            ExpData::LocalVar(_, sym) => match local_var_repl {
                None => Err(e),
                Some(var_map) => Ok(var_map.get(sym).unwrap().clone()),
            },
            ExpData::Temporary(_, idx) => match temp_var_repl {
                None => Err(e),
                Some(var_map) => Ok(var_map.get(idx).unwrap().clone()),
            },
            // Spec function call: splice the callee body in, binding the
            // (already inlined) arguments as the callee's locals.
            ExpData::Call(node_id, Function(mid, fid, _), args) => {
                let callee_menv = self.env.get_module(*mid);
                let callee_decl = callee_menv.get_spec_fun(*fid);
                debug_assert_eq!(args.len(), callee_decl.params.len());
                if callee_decl.is_native || callee_decl.uninterpreted || callee_decl.body.is_none()
                {
                    // Nothing to inline for native/uninterpreted/bodyless functions.
                    Err(e)
                } else {
                    let mut callee_local_vars =
                        local_var_repl.cloned().unwrap_or_else(BTreeMap::new);
                    // Arguments are inlined in the caller's scope first.
                    for (arg_exp, (sym, _)) in args
                        .iter()
                        .map(|e| self.inline_exp(e, temp_var_repl, local_var_repl))
                        .zip(callee_decl.params.iter())
                    {
                        callee_local_vars.insert(*sym, arg_exp);
                    }
                    // Instantiate the callee body's type parameters with the
                    // call-site instantiation before recursing into it.
                    let callee_targs = self.env.get_node_instantiation(*node_id);
                    let callee_body = ExpData::rewrite_node_id(
                        callee_decl.body.as_ref().unwrap().clone(),
                        &mut |id| ExpData::instantiate_node(self.env, id, &callee_targs),
                    );
                    Ok(self.inline_exp(&callee_body, temp_var_repl, Some(&callee_local_vars)))
                }
            }
            // Direct lambda invocation: bind arguments to the lambda's
            // declared locals and inline its body.
            ExpData::Invoke(_, lambda, args) => match lambda.as_ref() {
                ExpData::Lambda(_, locals, body) => {
                    debug_assert_eq!(args.len(), locals.len());
                    let mut lambda_local_vars =
                        local_var_repl.cloned().unwrap_or_else(BTreeMap::new);
                    for (arg_exp, decl) in args
                        .iter()
                        .map(|e| self.inline_exp(e, temp_var_repl, local_var_repl))
                        .zip(locals)
                    {
                        lambda_local_vars.insert(decl.name, arg_exp);
                    }
                    Ok(self.inline_exp(body, temp_var_repl, Some(&lambda_local_vars)))
                }
                _ => Err(e),
            },
            // Block (let-binding): fold each binding into the scope map;
            // later bindings may reference earlier ones.
            ExpData::Block(_, var_decls, body) => {
                let mut block_local_vars = local_var_repl.cloned().unwrap_or_else(BTreeMap::new);
                for var_decl in var_decls {
                    // NOTE(review): assumes every declaration carries a
                    // binding — `unwrap` panics on one without. TODO confirm.
                    let var_exp = self.inline_exp(
                        var_decl.binding.as_ref().unwrap(),
                        temp_var_repl,
                        Some(&block_local_vars),
                    );
                    block_local_vars.insert(var_decl.name, var_exp);
                }
                Ok(self.inline_exp(body, temp_var_repl, Some(&block_local_vars)))
            }
            _ => Err(e),
        };
        ExpData::rewrite(exp.clone(), &mut rewriter)
    }
}
|
use std::io::{Read, Write};
use super::{CountedList, CountedListWriter, CountedWriter, Deserialize, Error, Serialize, VarInt32, VarUint32, VarUint7};
// Relocation entry type tags: the VarUint7 written/read ahead of each
// RelocationEntry variant (see the Serialize/Deserialize impls below).
const FUNCTION_INDEX_LEB: u8 = 0;
const TABLE_INDEX_SLEB: u8 = 1;
const TABLE_INDEX_I32: u8 = 2;
const MEMORY_ADDR_LEB: u8 = 3;
const MEMORY_ADDR_SLEB: u8 = 4;
const MEMORY_ADDR_I32: u8 = 5;
const TYPE_INDEX_LEB: u8 = 6;
const GLOBAL_INDEX_LEB: u8 = 7;
/// Relocation information.
///
/// References a target section — by numeric id, or by name when the id
/// is 0 — together with the relocation entries applying to it.
#[derive(Clone, Debug)]
pub struct RelocSection {
    /// Name of this section.
    name: String,
    /// ID of the section containing the relocations described in this section.
    section_id: u32,
    /// Name of the section containing the relocations described in this section. Only set if section_id is 0.
    relocation_section_name: Option<String>,
    /// Relocation entries.
    entries: Vec<RelocationEntry>,
}
// Plain field accessors; mutation goes through the `_mut` variants.
impl RelocSection {
    /// Name of this section.
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Name of this section (mutable).
    pub fn name_mut(&mut self) -> &mut String {
        &mut self.name
    }
    /// ID of the section containing the relocations described in this section.
    pub fn section_id(&self) -> u32 {
        self.section_id
    }
    /// ID of the section containing the relocations described in this section (mutable).
    pub fn section_id_mut(&mut self) -> &mut u32 {
        &mut self.section_id
    }
    /// Name of the section containing the relocations described in this section.
    pub fn relocation_section_name(&self) -> Option<&str> {
        self.relocation_section_name.as_ref().map(String::as_str)
    }
    /// Name of the section containing the relocations described in this section (mutable).
    pub fn relocation_section_name_mut(&mut self) -> &mut Option<String> {
        &mut self.relocation_section_name
    }
    /// List of relocation entries.
    pub fn entries(&self) -> &[RelocationEntry] {
        &self.entries
    }
    /// List of relocation entries (mutable).
    pub fn entries_mut(&mut self) -> &mut Vec<RelocationEntry> {
        &mut self.entries
    }
}
impl RelocSection {
    /// Deserialize a reloc section.
    ///
    /// The section `name` has already been read by the caller; the payload
    /// consumed here is: target section id (varuint32), the target section's
    /// name when that id is 0, then a counted list of relocation entries.
    pub fn deserialize<R: Read>(
        name: String,
        rdr: &mut R,
    ) -> Result<Self, Error> {
        let section_id = VarUint32::deserialize(rdr)?.into();
        // Id 0 means the target section is identified by name instead.
        let relocation_section_name =
            if section_id == 0 {
                Some(String::deserialize(rdr)?)
            }
            else {
                None
            };
        let entries = CountedList::deserialize(rdr)?.into_inner();
        Ok(RelocSection {
            name,
            section_id,
            relocation_section_name,
            entries,
        })
    }
}
impl Serialize for RelocSection {
    type Error = Error;
    /// Write the section payload: name, target section id, the optional
    /// target-section name, then the counted entry list.
    fn serialize<W: Write>(self, wtr: &mut W) -> Result<(), Error> {
        // NOTE(review): CountedWriter appears to length-prefix the payload,
        // emitting it on done() — confirm against CountedWriter's impl.
        let mut counted_writer = CountedWriter::new(wtr);
        self.name.serialize(&mut counted_writer)?;
        VarUint32::from(self.section_id).serialize(&mut counted_writer)?;
        if let Some(relocation_section_name) = self.relocation_section_name {
            relocation_section_name.serialize(&mut counted_writer)?;
        }
        let counted_list = CountedListWriter(self.entries.len(), self.entries.into_iter());
        counted_list.serialize(&mut counted_writer)?;
        counted_writer.done()?;
        Ok(())
    }
}
/// Relocation entry.
///
/// Each variant corresponds one-to-one to an entry-type constant above
/// (FUNCTION_INDEX_LEB .. GLOBAL_INDEX_LEB); that tag byte is written and
/// read by the Serialize/Deserialize impls below.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RelocationEntry {
    /// Function index.
    FunctionIndexLeb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the function symbol in the symbol table.
        index: u32,
    },
    /// Function table index.
    TableIndexSleb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the function symbol in the symbol table.
        index: u32,
    },
    /// Function table index.
    TableIndexI32 {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the function symbol in the symbol table.
        index: u32,
    },
    /// Linear memory index.
    MemoryAddressLeb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the data symbol in the symbol table.
        index: u32,
        /// Addend to add to the address.
        addend: i32,
    },
    /// Linear memory index.
    MemoryAddressSleb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the data symbol in the symbol table.
        index: u32,
        /// Addend to add to the address.
        addend: i32,
    },
    /// Linear memory index.
    MemoryAddressI32 {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the data symbol in the symbol table.
        index: u32,
        /// Addend to add to the address.
        addend: i32,
    },
    /// Type table index.
    TypeIndexLeb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the type used.
        index: u32,
    },
    /// Global index.
    GlobalIndexLeb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the global symbol in the symbol table.
        index: u32,
    },
}
impl Deserialize for RelocationEntry {
    type Error = Error;
    /// Read one relocation entry: a VarUint7 type tag, then offset and
    /// index, plus a signed addend for the memory-address variants. An
    /// unknown tag fails before any further bytes are consumed.
    fn deserialize<R: Read>(rdr: &mut R) -> Result<Self, Self::Error> {
        match VarUint7::deserialize(rdr)?.into() {
            FUNCTION_INDEX_LEB => Ok(RelocationEntry::FunctionIndexLeb {
                offset: VarUint32::deserialize(rdr)?.into(),
                index: VarUint32::deserialize(rdr)?.into(),
            }),
            TABLE_INDEX_SLEB => Ok(RelocationEntry::TableIndexSleb {
                offset: VarUint32::deserialize(rdr)?.into(),
                index: VarUint32::deserialize(rdr)?.into(),
            }),
            TABLE_INDEX_I32 => Ok(RelocationEntry::TableIndexI32 {
                offset: VarUint32::deserialize(rdr)?.into(),
                index: VarUint32::deserialize(rdr)?.into(),
            }),
            MEMORY_ADDR_LEB => Ok(RelocationEntry::MemoryAddressLeb {
                offset: VarUint32::deserialize(rdr)?.into(),
                index: VarUint32::deserialize(rdr)?.into(),
                addend: VarInt32::deserialize(rdr)?.into(),
            }),
            MEMORY_ADDR_SLEB => Ok(RelocationEntry::MemoryAddressSleb {
                offset: VarUint32::deserialize(rdr)?.into(),
                index: VarUint32::deserialize(rdr)?.into(),
                addend: VarInt32::deserialize(rdr)?.into(),
            }),
            MEMORY_ADDR_I32 => Ok(RelocationEntry::MemoryAddressI32 {
                offset: VarUint32::deserialize(rdr)?.into(),
                index: VarUint32::deserialize(rdr)?.into(),
                addend: VarInt32::deserialize(rdr)?.into(),
            }),
            TYPE_INDEX_LEB => Ok(RelocationEntry::TypeIndexLeb {
                offset: VarUint32::deserialize(rdr)?.into(),
                index: VarUint32::deserialize(rdr)?.into(),
            }),
            GLOBAL_INDEX_LEB => Ok(RelocationEntry::GlobalIndexLeb {
                offset: VarUint32::deserialize(rdr)?.into(),
                index: VarUint32::deserialize(rdr)?.into(),
            }),
            entry_type => Err(Error::UnknownValueType(entry_type as i8)),
        }
    }
}
impl Serialize for RelocationEntry {
type Error = Error;
fn serialize<W: Write>(self, wtr: &mut W) -> Result<(), Error> {
match self {
RelocationEntry::FunctionIndexLeb { offset, index } => {
VarUint7::from(FUNCTION_INDEX_LEB).serialize(wtr)?;
VarUint32::from(offset).serialize(wtr)?;
VarUint32::from(index).serialize(wtr)?;
},
RelocationEntry::TableIndexSleb { offset, index } => {
VarUint7::from(TABLE_INDEX_SLEB).serialize(wtr)?;
VarUint32::from(offset).serialize(wtr)?;
VarUint32::from(index).serialize(wtr)?;
},
RelocationEntry::TableIndexI32 { offset, index } => {
VarUint7::from(TABLE_INDEX_I32).serialize(wtr)?;
VarUint32::from(offset).serialize(wtr)?;
VarUint32::from(index).serialize(wtr)?;
},
RelocationEntry::MemoryAddressLeb { offset, index, addend } => {
VarUint7::from(MEMORY_ADDR_LEB).serialize(wtr)?;
VarUint32::from(offset).serialize(wtr)?;
VarUint32::from(index).serialize(wtr)?;
VarInt32::from(addend).serialize(wtr)?;
},
RelocationEntry::MemoryAddressSleb { offset, index, addend } => {
VarUint7::from(MEMORY_ADDR_SLEB).serialize(wtr)?;
VarUint32::from(offset).serialize(wtr)?;
VarUint32::from(index).serialize(wtr)?;
VarInt32::from(addend).serialize(wtr)?;
},
RelocationEntry::MemoryAddressI32 { offset, index, addend } => {
VarUint7::from(MEMORY_ADDR_I32).serialize(wtr)?;
VarUint32::from(offset).serialize(wtr)?;
VarUint32::from(index).serialize(wtr)?;
VarInt32::from(addend).serialize(wtr)?;
},
RelocationEntry::TypeIndexLeb { offset, index } => {
VarUint7::from(TYPE_INDEX_LEB).serialize(wtr)?;
VarUint32::from(offset).serialize(wtr)?;
VarUint32::from(index).serialize(wtr)?;
},
RelocationEntry::GlobalIndexLeb { offset, index } => {
VarUint7::from(GLOBAL_INDEX_LEB).serialize(wtr)?;
VarUint32::from(offset).serialize(wtr)?;
VarUint32::from(index).serialize(wtr)?;
},
}
Ok(())
}
}
#[cfg(test)]
mod tests {
    use super::super::{Section, deserialize_file};
    use super::RelocationEntry;
    /// The fixture module must contain a reloc section holding exactly the
    /// two entries baked into ./res/cases/v1/relocatable.wasm.
    #[test]
    fn reloc_section() {
        let module =
            deserialize_file("./res/cases/v1/relocatable.wasm").expect("Module should be deserialized")
                .parse_reloc().expect("Reloc section should be deserialized");
        let mut found = false;
        for section in module.sections() {
            match *section {
                Section::Reloc(ref reloc_section) => {
                    assert_eq!(vec![
                        RelocationEntry::MemoryAddressSleb { offset: 4, index: 0, addend: 0 },
                        RelocationEntry::FunctionIndexLeb { offset: 12, index: 0 },
                    ], reloc_section.entries());
                    found = true
                },
                _ => { }
            }
        }
        assert!(found, "There should be a reloc section in relocatable.wasm");
    }
}
Fix the last remaining missing-doc-comment warning.
use std::io::{Read, Write};
use super::{CountedList, CountedListWriter, CountedWriter, Deserialize, Error, Serialize, VarInt32, VarUint32, VarUint7};
// Wire tags identifying the relocation entry kind; one per
// RelocationEntry variant below.
const FUNCTION_INDEX_LEB: u8 = 0;
const TABLE_INDEX_SLEB: u8 = 1;
const TABLE_INDEX_I32: u8 = 2;
const MEMORY_ADDR_LEB: u8 = 3;
const MEMORY_ADDR_SLEB: u8 = 4;
const MEMORY_ADDR_I32: u8 = 5;
const TYPE_INDEX_LEB: u8 = 6;
const GLOBAL_INDEX_LEB: u8 = 7;
/// Relocation information.
///
/// Identifies a target section (by id, or by name when the id is 0) and
/// carries the relocation entries that apply to it.
#[derive(Clone, Debug)]
pub struct RelocSection {
    /// Name of this section.
    name: String,
    /// ID of the section containing the relocations described in this section.
    section_id: u32,
    /// Name of the section containing the relocations described in this section. Only set if section_id is 0.
    relocation_section_name: Option<String>,
    /// Relocation entries.
    entries: Vec<RelocationEntry>,
}
// Field accessors; each has a `_mut` companion for in-place edits.
impl RelocSection {
    /// Name of this section.
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Name of this section (mutable).
    pub fn name_mut(&mut self) -> &mut String {
        &mut self.name
    }
    /// ID of the section containing the relocations described in this section.
    pub fn section_id(&self) -> u32 {
        self.section_id
    }
    /// ID of the section containing the relocations described in this section (mutable).
    pub fn section_id_mut(&mut self) -> &mut u32 {
        &mut self.section_id
    }
    /// Name of the section containing the relocations described in this section.
    pub fn relocation_section_name(&self) -> Option<&str> {
        self.relocation_section_name.as_ref().map(String::as_str)
    }
    /// Name of the section containing the relocations described in this section (mutable).
    pub fn relocation_section_name_mut(&mut self) -> &mut Option<String> {
        &mut self.relocation_section_name
    }
    /// List of relocation entries.
    pub fn entries(&self) -> &[RelocationEntry] {
        &self.entries
    }
    /// List of relocation entries (mutable).
    pub fn entries_mut(&mut self) -> &mut Vec<RelocationEntry> {
        &mut self.entries
    }
}
impl RelocSection {
    /// Deserialize a reloc section.
    ///
    /// The section `name` was read by the caller; this consumes the target
    /// section id, the optional target-section name (only when the id is 0),
    /// and the counted entry list.
    pub fn deserialize<R: Read>(
        name: String,
        rdr: &mut R,
    ) -> Result<Self, Error> {
        let section_id = VarUint32::deserialize(rdr)?.into();
        // Id 0 means the target section is identified by name instead.
        let relocation_section_name =
            if section_id == 0 {
                Some(String::deserialize(rdr)?)
            }
            else {
                None
            };
        let entries = CountedList::deserialize(rdr)?.into_inner();
        Ok(RelocSection {
            name,
            section_id,
            relocation_section_name,
            entries,
        })
    }
}
impl Serialize for RelocSection {
type Error = Error;
fn serialize<W: Write>(self, wtr: &mut W) -> Result<(), Error> {
let mut counted_writer = CountedWriter::new(wtr);
self.name.serialize(&mut counted_writer)?;
VarUint32::from(self.section_id).serialize(&mut counted_writer)?;
if let Some(relocation_section_name) = self.relocation_section_name {
relocation_section_name.serialize(&mut counted_writer)?;
}
let counted_list = CountedListWriter(self.entries.len(), self.entries.into_iter());
counted_list.serialize(&mut counted_writer)?;
counted_writer.done()?;
Ok(())
}
}
/// Relocation entry.
///
/// The variants map one-to-one onto the type-tag constants above; the
/// memory-address variants additionally carry a signed addend.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RelocationEntry {
    /// Function index.
    FunctionIndexLeb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the function symbol in the symbol table.
        index: u32,
    },
    /// Function table index.
    TableIndexSleb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the function symbol in the symbol table.
        index: u32,
    },
    /// Function table index.
    TableIndexI32 {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the function symbol in the symbol table.
        index: u32,
    },
    /// Linear memory index.
    MemoryAddressLeb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the data symbol in the symbol table.
        index: u32,
        /// Addend to add to the address.
        addend: i32,
    },
    /// Linear memory index.
    MemoryAddressSleb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the data symbol in the symbol table.
        index: u32,
        /// Addend to add to the address.
        addend: i32,
    },
    /// Linear memory index.
    MemoryAddressI32 {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the data symbol in the symbol table.
        index: u32,
        /// Addend to add to the address.
        addend: i32,
    },
    /// Type table index.
    TypeIndexLeb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the type used.
        index: u32,
    },
    /// Global index.
    GlobalIndexLeb {
        /// Offset of the value to rewrite.
        offset: u32,
        /// Index of the global symbol in the symbol table.
        index: u32,
    },
}
impl Deserialize for RelocationEntry {
type Error = Error;
fn deserialize<R: Read>(rdr: &mut R) -> Result<Self, Self::Error> {
match VarUint7::deserialize(rdr)?.into() {
FUNCTION_INDEX_LEB => Ok(RelocationEntry::FunctionIndexLeb {
offset: VarUint32::deserialize(rdr)?.into(),
index: VarUint32::deserialize(rdr)?.into(),
}),
TABLE_INDEX_SLEB => Ok(RelocationEntry::TableIndexSleb {
offset: VarUint32::deserialize(rdr)?.into(),
index: VarUint32::deserialize(rdr)?.into(),
}),
TABLE_INDEX_I32 => Ok(RelocationEntry::TableIndexI32 {
offset: VarUint32::deserialize(rdr)?.into(),
index: VarUint32::deserialize(rdr)?.into(),
}),
MEMORY_ADDR_LEB => Ok(RelocationEntry::MemoryAddressLeb {
offset: VarUint32::deserialize(rdr)?.into(),
index: VarUint32::deserialize(rdr)?.into(),
addend: VarInt32::deserialize(rdr)?.into(),
}),
MEMORY_ADDR_SLEB => Ok(RelocationEntry::MemoryAddressSleb {
offset: VarUint32::deserialize(rdr)?.into(),
index: VarUint32::deserialize(rdr)?.into(),
addend: VarInt32::deserialize(rdr)?.into(),
}),
MEMORY_ADDR_I32 => Ok(RelocationEntry::MemoryAddressI32 {
offset: VarUint32::deserialize(rdr)?.into(),
index: VarUint32::deserialize(rdr)?.into(),
addend: VarInt32::deserialize(rdr)?.into(),
}),
TYPE_INDEX_LEB => Ok(RelocationEntry::TypeIndexLeb {
offset: VarUint32::deserialize(rdr)?.into(),
index: VarUint32::deserialize(rdr)?.into(),
}),
GLOBAL_INDEX_LEB => Ok(RelocationEntry::GlobalIndexLeb {
offset: VarUint32::deserialize(rdr)?.into(),
index: VarUint32::deserialize(rdr)?.into(),
}),
entry_type => Err(Error::UnknownValueType(entry_type as i8)),
}
}
}
impl Serialize for RelocationEntry {
    type Error = Error;
    /// Write the entry: type tag first, then offset and index, then the
    /// signed addend for the memory-address variants.
    fn serialize<W: Write>(self, wtr: &mut W) -> Result<(), Error> {
        match self {
            RelocationEntry::FunctionIndexLeb { offset, index } => {
                VarUint7::from(FUNCTION_INDEX_LEB).serialize(wtr)?;
                VarUint32::from(offset).serialize(wtr)?;
                VarUint32::from(index).serialize(wtr)?;
            },
            RelocationEntry::TableIndexSleb { offset, index } => {
                VarUint7::from(TABLE_INDEX_SLEB).serialize(wtr)?;
                VarUint32::from(offset).serialize(wtr)?;
                VarUint32::from(index).serialize(wtr)?;
            },
            RelocationEntry::TableIndexI32 { offset, index } => {
                VarUint7::from(TABLE_INDEX_I32).serialize(wtr)?;
                VarUint32::from(offset).serialize(wtr)?;
                VarUint32::from(index).serialize(wtr)?;
            },
            RelocationEntry::MemoryAddressLeb { offset, index, addend } => {
                VarUint7::from(MEMORY_ADDR_LEB).serialize(wtr)?;
                VarUint32::from(offset).serialize(wtr)?;
                VarUint32::from(index).serialize(wtr)?;
                VarInt32::from(addend).serialize(wtr)?;
            },
            RelocationEntry::MemoryAddressSleb { offset, index, addend } => {
                VarUint7::from(MEMORY_ADDR_SLEB).serialize(wtr)?;
                VarUint32::from(offset).serialize(wtr)?;
                VarUint32::from(index).serialize(wtr)?;
                VarInt32::from(addend).serialize(wtr)?;
            },
            RelocationEntry::MemoryAddressI32 { offset, index, addend } => {
                VarUint7::from(MEMORY_ADDR_I32).serialize(wtr)?;
                VarUint32::from(offset).serialize(wtr)?;
                VarUint32::from(index).serialize(wtr)?;
                VarInt32::from(addend).serialize(wtr)?;
            },
            RelocationEntry::TypeIndexLeb { offset, index } => {
                VarUint7::from(TYPE_INDEX_LEB).serialize(wtr)?;
                VarUint32::from(offset).serialize(wtr)?;
                VarUint32::from(index).serialize(wtr)?;
            },
            RelocationEntry::GlobalIndexLeb { offset, index } => {
                VarUint7::from(GLOBAL_INDEX_LEB).serialize(wtr)?;
                VarUint32::from(offset).serialize(wtr)?;
                VarUint32::from(index).serialize(wtr)?;
            },
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::super::{Section, deserialize_file};
    use super::RelocationEntry;
    /// The fixture module must contain a reloc section holding exactly the
    /// two entries baked into ./res/cases/v1/relocatable.wasm.
    #[test]
    fn reloc_section() {
        let module = deserialize_file("./res/cases/v1/relocatable.wasm")
            .expect("Module should be deserialized")
            .parse_reloc()
            .expect("Reloc section should be deserialized");
        let mut found = false;
        for section in module.sections() {
            if let Section::Reloc(ref reloc_section) = *section {
                let expected = vec![
                    RelocationEntry::MemoryAddressSleb { offset: 4, index: 0, addend: 0 },
                    RelocationEntry::FunctionIndexLeb { offset: 12, index: 0 },
                ];
                assert_eq!(expected, reloc_section.entries());
                found = true;
            }
        }
        assert!(found, "There should be a reloc section in relocatable.wasm");
    }
}
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Entry point. `pub` for consistency with every other item in this file
/// and with the testsuite convention for compiled test programs. The type
/// annotation forces `Matrix::identity` to resolve to the `Mat2` impl.
pub fn main() {
    let id: &Mat2<float> = &Matrix::identity();
}
// NOTE(review): pre-1.0 Rust — `uint`/`float` and the multi-parameter
// `Index<uint, T>` trait are era-specific syntax; left as-is since this is
// a historical compiletest case.
pub trait Index<Index,Result> { }
pub trait Dimensional<T>: Index<uint, T> { }
// Marker types: the `x: ()` payload is unused by the test.
pub struct Mat2<T> { x: () }
pub struct Vec2<T> { x: () }
impl<T> Dimensional<Vec2<T>> for Mat2<T> { }
impl<T> Index<uint, Vec2<T>> for Mat2<T> { }
impl<T> Dimensional<T> for Vec2<T> { }
impl<T> Index<uint, T> for Vec2<T> { }
// Constructor trait exercised from main via return-type-driven selection.
pub trait Matrix<T,V>: Dimensional<V> {
    fn identity() -> Self;
}
impl<T> Matrix<T, Vec2<T>> for Mat2<T> {
    fn identity() -> Mat2<T> { Mat2{ x: () } }
}
testsuite: Make main fn public
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Entry point: `Matrix::identity()` is resolved through the annotated
/// type `&Mat2<float>`, exercising return-type-driven trait selection.
pub fn main() {
    let id: &Mat2<float> = &Matrix::identity();
}
// NOTE(review): historical pre-1.0 syntax (`uint`, `float`,
// multi-parameter `Index`) retained deliberately for this compiletest case.
pub trait Index<Index,Result> { }
pub trait Dimensional<T>: Index<uint, T> { }
// Marker types; the `x: ()` payload is unused by the test.
pub struct Mat2<T> { x: () }
pub struct Vec2<T> { x: () }
impl<T> Dimensional<Vec2<T>> for Mat2<T> { }
impl<T> Index<uint, Vec2<T>> for Mat2<T> { }
impl<T> Dimensional<T> for Vec2<T> { }
impl<T> Index<uint, T> for Vec2<T> { }
// Constructor trait exercised from main.
pub trait Matrix<T,V>: Dimensional<V> {
    fn identity() -> Self;
}
impl<T> Matrix<T, Vec2<T>> for Mat2<T> {
    fn identity() -> Mat2<T> { Mat2{ x: () } }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.