CombinedText stringlengths 4 3.42M |
|---|
// Copyright 2016 Joe Wilm, The Alacritty Project Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The display subsystem including window management, font rasterization, and
//! GPU drawing.
use std::sync::mpsc;
use parking_lot::{MutexGuard};
use Rgb;
use cli;
use config::Config;
use font::{self, Rasterize};
use meter::Meter;
use renderer::{self, GlyphCache, QuadRenderer};
use selection::Selection;
use term::{Term, SizeInfo};
use window::{self, Size, Pixels, Window, SetInnerSize};
/// Errors that can occur while constructing or operating a `Display`,
/// wrapping the error type of each underlying subsystem.
#[derive(Debug)]
pub enum Error {
    /// Error with window management
    Window(window::Error),

    /// Error dealing with fonts
    Font(font::Error),

    /// Error in renderer
    Render(renderer::Error),
}
// NOTE(review): `cause` and `description` are the pre-Rust-1.30 error-trait
// methods; this crate predates `source()`, so they are kept as-is.
impl ::std::error::Error for Error {
    /// Expose the wrapped subsystem error as the underlying cause.
    fn cause(&self) -> Option<&::std::error::Error> {
        match *self {
            Error::Window(ref err) => Some(err),
            Error::Font(ref err) => Some(err),
            Error::Render(ref err) => Some(err),
        }
    }

    /// Delegate the description to the wrapped subsystem error.
    fn description(&self) -> &str {
        match *self {
            Error::Window(ref err) => err.description(),
            Error::Font(ref err) => err.description(),
            Error::Render(ref err) => err.description(),
        }
    }
}
impl ::std::fmt::Display for Error {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match *self {
Error::Window(ref err) => err.fmt(f),
Error::Font(ref err) => err.fmt(f),
Error::Render(ref err) => err.fmt(f),
}
}
}
impl From<window::Error> for Error {
fn from(val: window::Error) -> Error {
Error::Window(val)
}
}
impl From<font::Error> for Error {
fn from(val: font::Error) -> Error {
Error::Font(val)
}
}
impl From<renderer::Error> for Error {
fn from(val: renderer::Error) -> Error {
Error::Render(val)
}
}
/// The display wraps a window, font rasterizer, and GPU renderer
pub struct Display {
    window: Window,
    renderer: QuadRenderer,
    glyph_cache: GlyphCache,
    // Whether to draw the frame-timing overlay (from config).
    render_timer: bool,
    // Receiving end of the resize-event channel; drained in handle_resize().
    rx: mpsc::Receiver<(u32, u32)>,
    // Sending end handed out to other threads via resize_channel().
    tx: mpsc::Sender<(u32, u32)>,
    // Frame-time meter backing the render-timer overlay.
    meter: Meter,
    // Last applied font-size modifier; compared against the terminal's.
    font_size_modifier: i8,
    size_info: SizeInfo,
    // Used to detect background-color changes between frames.
    last_background_color: Rgb,
}
/// Can wakeup the render loop from other threads
pub struct Notifier(window::Proxy);

/// Types that are interested in when the display is resized
pub trait OnResize {
    fn on_resize(&mut self, size: &SizeInfo);
}

impl Notifier {
    /// Wake the window's event loop so pending work gets processed.
    pub fn notify(&self) {
        self.0.wakeup_event_loop();
    }
}
impl Display {
/// Create a `Notifier` that can wake this display's event loop from
/// another thread.
pub fn notifier(&self) -> Notifier {
    Notifier(self.window.create_window_proxy())
}

/// Apply a live config reload; only the render-timer flag is refreshed here.
pub fn update_config(&mut self, config: &Config) {
    self.render_timer = config.render_timer();
}

/// Get size info about the display
pub fn size(&self) -> &SizeInfo {
    &self.size_info
}
/// Create the window, renderer, and glyph cache that make up a `Display`.
///
/// Applies the dimensions from `options` (falling back to the config),
/// sets up the resize channel, and clears the screen to the configured
/// background color before returning.
pub fn new(
    config: &Config,
    options: &cli::Options,
) -> Result<Display, Error> {
    // Extract some properties from config
    let render_timer = config.render_timer();

    // Create the window where Alacritty will be displayed
    let mut window = Window::new(&options.title)?;

    // get window properties for initializing the other subsystems
    let mut viewport_size = window.inner_size_pixels()
        .expect("glutin returns window size");
    let dpr = window.hidpi_factor();
    info!("device_pixel_ratio: {}", dpr);

    // Create renderer
    let mut renderer = QuadRenderer::new(&config, viewport_size)?;

    // Initial font size delta is 0: start from the configured font size.
    let (glyph_cache, cell_width, cell_height) =
        Self::new_glyph_cache(&window, &mut renderer, config, 0)?;

    // CLI-provided dimensions win over the config file.
    let dimensions = options.dimensions()
        .unwrap_or_else(|| config.dimensions());

    // Resize window to specified dimensions unless one or both dimensions are 0
    if dimensions.columns_u32() > 0 && dimensions.lines_u32() > 0 {
        let width = cell_width as u32 * dimensions.columns_u32();
        let height = cell_height as u32 * dimensions.lines_u32();

        // Padding is applied on every side, hence the factor of 2.
        let new_viewport_size = Size {
            width: Pixels(width + 2 * config.padding().x as u32),
            height: Pixels(height + 2 * config.padding().y as u32),
        };

        window.set_inner_size(&new_viewport_size);
        renderer.resize(new_viewport_size.width.0 as _, new_viewport_size.height.0 as _);
        viewport_size = new_viewport_size
    }

    info!("Cell Size: ({} x {})", cell_width, cell_height);

    let size_info = SizeInfo {
        width: viewport_size.width.0 as f32,
        height: viewport_size.height.0 as f32,
        cell_width: cell_width as f32,
        cell_height: cell_height as f32,
        padding_x: config.padding().x.floor(),
        padding_y: config.padding().y.floor(),
    };

    // Channel for resize events
    //
    // macOS has a callback for getting resize events, the channel is used
    // to queue resize events until the next draw call. Unfortunately, it
    // seems that the event loop is blocked until the window is done
    // resizing. If any drawing were to happen during a resize, it would
    // need to be in the callback.
    let (tx, rx) = mpsc::channel();

    // Clear screen
    let background_color = config.colors().primary.background;
    renderer.with_api(config, &size_info, 0. /* visual bell intensity */, |api| {
        api.clear(background_color);
    });

    Ok(Display {
        window: window,
        renderer: renderer,
        glyph_cache: glyph_cache,
        render_timer: render_timer,
        tx: tx,
        rx: rx,
        meter: Meter::new(),
        font_size_modifier: 0,
        size_info: size_info,
        last_background_color: background_color,
    })
}
/// Build the glyph cache for the configured font (adjusted by
/// `font_size_delta`) and return it together with the derived cell
/// dimensions in pixels.
///
/// The rasterizer is created at the window's current hidpi factor so
/// glyphs are rendered at the correct physical size.
fn new_glyph_cache(window: &Window, renderer: &mut QuadRenderer,
                   config: &Config, font_size_delta: i8)
    -> Result<(GlyphCache, f32, f32), Error>
{
    let font = config.font().clone().with_size_delta(font_size_delta as f32);
    let dpr = window.hidpi_factor();
    let rasterizer = font::Rasterizer::new(dpr, config.use_thin_strokes())?;

    // Initialize glyph cache
    let glyph_cache = {
        info!("Initializing glyph cache");
        let init_start = ::std::time::Instant::now();

        let cache = renderer.with_loader(|mut api| {
            GlyphCache::new(rasterizer, &font, &mut api)
        })?;

        let stop = init_start.elapsed();
        let stop_f = stop.as_secs() as f64 + stop.subsec_nanos() as f64 / 1_000_000_000f64;
        info!("Finished initializing glyph cache in {}", stop_f);

        cache
    };

    // Need font metrics to resize the window properly. This suggests to me the
    // font metrics should be computed before creating the window in the first
    // place so that a resize is not needed.
    //
    // The configured font offset is folded into the cell size; truncation to
    // u32 keeps cells on whole pixels.
    let metrics = glyph_cache.font_metrics();
    let cell_width = (metrics.average_advance + font.offset().x as f64) as u32;
    let cell_height = (metrics.line_height + font.offset().y as f64) as u32;

    // Trailing expression instead of `return ...;` (clippy: needless_return).
    Ok((glyph_cache, cell_width as f32, cell_height as f32))
}
/// Rebuild the glyph cache after a font-size change and refresh the cached
/// cell dimensions accordingly.
pub fn update_glyph_cache(&mut self, config: &Config, font_size_delta: i8) {
    let cache = &mut self.glyph_cache;
    // A failed font-size update is intentionally ignored; the previous
    // cache remains usable.
    self.renderer.with_loader(|mut api| {
        let _ = cache.update_font_size(config.font(), font_size_delta, &mut api);
    });

    // Recompute cell dimensions from the (possibly new) font metrics,
    // folding in the configured font offset.
    let metrics = cache.font_metrics();
    self.size_info.cell_width = ((metrics.average_advance + config.font().offset().x as f64) as f32).floor();
    self.size_info.cell_height = ((metrics.line_height + config.font().offset().y as f64) as f32).floor();
}
/// Obtain a sender on which other threads can queue resize events; they
/// are drained by `handle_resize()`.
#[inline]
pub fn resize_channel(&self) -> mpsc::Sender<(u32, u32)> {
    self.tx.clone()
}

/// Mutable access to the underlying window.
pub fn window(&mut self) -> &mut Window {
    &mut self.window
}
/// Process pending resize events
///
/// Drains the resize channel keeping only the most recent event, then
/// applies it (and any pending font-size change) to the terminal, the
/// window, the renderer, and every registered `OnResize` listener.
pub fn handle_resize(
    &mut self,
    terminal: &mut MutexGuard<Term>,
    config: &Config,
    items: &mut [&mut OnResize]
) {
    // Resize events new_size and are handled outside the poll_events
    // iterator. This has the effect of coalescing multiple resize
    // events into one.
    let mut new_size = None;

    // Take most recent resize event, if any
    while let Ok(sz) = self.rx.try_recv() {
        new_size = Some(sz);
    }

    if terminal.font_size_modifier != self.font_size_modifier {
        // Font size modification detected
        self.font_size_modifier = terminal.font_size_modifier;
        self.update_glyph_cache(config, terminal.font_size_modifier);

        // Idiomatic `is_none()` instead of comparing against `None`.
        if new_size.is_none() {
            // Force a resize to refresh things
            new_size = Some((self.size_info.width as u32,
                             self.size_info.height as u32));
        }
    }

    // Receive any resize events; only call gl::Viewport on last
    // available
    if let Some((w, h)) = new_size.take() {
        self.size_info.width = w as f32;
        self.size_info.height = h as f32;

        let size = &self.size_info;
        terminal.resize(size);

        for item in items {
            item.on_resize(size)
        }

        self.window.resize(w, h);
        self.renderer.resize(w as i32, h as i32);
    }
}
/// Draw the screen
///
/// A reference to Term whose state is being drawn must be provided.
///
/// This call may block if vsync is enabled
pub fn draw(&mut self, mut terminal: MutexGuard<Term>, config: &Config, selection: Option<&Selection>) {
    // Clear dirty flag
    terminal.dirty = !terminal.visual_bell.completed();

    // Apply any pending window-title change.
    if let Some(title) = terminal.get_next_title() {
        self.window.set_title(&title);
    }

    if let Some(is_urgent) = terminal.next_is_urgent.take() {
        // We don't need to set the urgent flag if we already have the
        // user's attention.
        if !is_urgent || !self.window.is_focused {
            self.window.set_urgent(is_urgent);
        }
    }

    let size_info = *terminal.size_info();
    let visual_bell_intensity = terminal.visual_bell.intensity();

    // Track background-color changes so the whole screen is re-cleared
    // only when needed.
    let background_color = terminal.background_color();
    let background_color_changed = background_color != self.last_background_color;
    self.last_background_color = background_color;

    {
        let glyph_cache = &mut self.glyph_cache;

        // Draw grid
        {
            // Sampler measures this frame's render time for the overlay.
            let _sampler = self.meter.sampler();

            // Make a copy of size_info since the closure passed here
            // borrows terminal mutably
            //
            // TODO I wonder if the renderable cells iter could avoid the
            // mutable borrow
            self.renderer.with_api(config, &size_info, visual_bell_intensity, |mut api| {
                // Clear screen to update whole background with new color
                if background_color_changed {
                    api.clear(background_color);
                }

                // Draw the grid
                api.render_cells(terminal.renderable_cells(config, selection), glyph_cache);
            });
        }

        // Draw render timer
        if self.render_timer {
            let timing = format!("{:.3} usec", self.meter.average());
            let color = Rgb { r: 0xd5, g: 0x4e, b: 0x53 };
            self.renderer.with_api(config, &size_info, visual_bell_intensity, |mut api| {
                api.render_string(&timing[..], glyph_cache, color);
            });
        }
    }

    // Unlock the terminal mutex; following call to swap_buffers() may block
    drop(terminal);

    self.window
        .swap_buffers()
        .expect("swap buffers");

    // Clear after swap_buffers when terminal mutex isn't held. Mesa for
    // some reason takes a long time to call glClear(). The driver descends
    // into xcb_connect_to_fd() which ends up calling __poll_nocancel()
    // which blocks for a while.
    //
    // By keeping this outside of the critical region, the Mesa bug is
    // worked around to some extent. Since this doesn't actually address the
    // issue of glClear being slow, less time is available for input
    // handling and rendering.
    self.renderer.with_api(config, &size_info, visual_bell_intensity, |api| {
        api.clear(background_color);
    });
}
/// The platform window id, if the windowing backend exposes one.
pub fn get_window_id(&self) -> Option<usize> {
    self.window.get_window_id()
}

/// Adjust the XIM editor position according to the new location of the cursor
pub fn update_ime_position(&mut self, terminal: &Term) {
    use index::{Point, Line, Column};
    use term::SizeInfo;
    let Point{line: Line(row), col: Column(col)} = terminal.cursor().point;
    let SizeInfo{cell_width: cw,
        cell_height: ch,
        padding_x: px,
        padding_y: py, ..} = *terminal.size_info();

    // Convert the cursor's cell coordinates to pixels; +1 row so the IME
    // spot sits just below the cursor line.
    let nspot_y = (py + (row + 1) as f32 * ch) as i16;
    let nspot_x = (px + col as f32 * cw) as i16;

    self.window().send_xim_spot(nspot_x, nspot_y);
}
}
clippy: remove unneeded return statement (needless_return).
// Copyright 2016 Joe Wilm, The Alacritty Project Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The display subsystem including window management, font rasterization, and
//! GPU drawing.
use std::sync::mpsc;
use parking_lot::{MutexGuard};
use Rgb;
use cli;
use config::Config;
use font::{self, Rasterize};
use meter::Meter;
use renderer::{self, GlyphCache, QuadRenderer};
use selection::Selection;
use term::{Term, SizeInfo};
use window::{self, Size, Pixels, Window, SetInnerSize};
/// Errors that can occur while constructing or operating a `Display`,
/// wrapping the error type of each underlying subsystem.
#[derive(Debug)]
pub enum Error {
    /// Error with window management
    Window(window::Error),

    /// Error dealing with fonts
    Font(font::Error),

    /// Error in renderer
    Render(renderer::Error),
}
// NOTE(review): `cause` and `description` are the pre-Rust-1.30 error-trait
// methods; this crate predates `source()`, so they are kept as-is.
impl ::std::error::Error for Error {
    /// Expose the wrapped subsystem error as the underlying cause.
    fn cause(&self) -> Option<&::std::error::Error> {
        match *self {
            Error::Window(ref err) => Some(err),
            Error::Font(ref err) => Some(err),
            Error::Render(ref err) => Some(err),
        }
    }

    /// Delegate the description to the wrapped subsystem error.
    fn description(&self) -> &str {
        match *self {
            Error::Window(ref err) => err.description(),
            Error::Font(ref err) => err.description(),
            Error::Render(ref err) => err.description(),
        }
    }
}
impl ::std::fmt::Display for Error {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match *self {
Error::Window(ref err) => err.fmt(f),
Error::Font(ref err) => err.fmt(f),
Error::Render(ref err) => err.fmt(f),
}
}
}
impl From<window::Error> for Error {
fn from(val: window::Error) -> Error {
Error::Window(val)
}
}
impl From<font::Error> for Error {
fn from(val: font::Error) -> Error {
Error::Font(val)
}
}
impl From<renderer::Error> for Error {
fn from(val: renderer::Error) -> Error {
Error::Render(val)
}
}
/// The display wraps a window, font rasterizer, and GPU renderer
pub struct Display {
    window: Window,
    renderer: QuadRenderer,
    glyph_cache: GlyphCache,
    // Whether to draw the frame-timing overlay (from config).
    render_timer: bool,
    // Receiving end of the resize-event channel; drained in handle_resize().
    rx: mpsc::Receiver<(u32, u32)>,
    // Sending end handed out to other threads via resize_channel().
    tx: mpsc::Sender<(u32, u32)>,
    // Frame-time meter backing the render-timer overlay.
    meter: Meter,
    // Last applied font-size modifier; compared against the terminal's.
    font_size_modifier: i8,
    size_info: SizeInfo,
    // Used to detect background-color changes between frames.
    last_background_color: Rgb,
}
/// Can wakeup the render loop from other threads
pub struct Notifier(window::Proxy);

/// Types that are interested in when the display is resized
pub trait OnResize {
    fn on_resize(&mut self, size: &SizeInfo);
}

impl Notifier {
    /// Wake the window's event loop so pending work gets processed.
    pub fn notify(&self) {
        self.0.wakeup_event_loop();
    }
}
impl Display {
/// Create a `Notifier` that can wake this display's event loop from
/// another thread.
pub fn notifier(&self) -> Notifier {
    Notifier(self.window.create_window_proxy())
}

/// Apply a live config reload; only the render-timer flag is refreshed here.
pub fn update_config(&mut self, config: &Config) {
    self.render_timer = config.render_timer();
}

/// Get size info about the display
pub fn size(&self) -> &SizeInfo {
    &self.size_info
}
/// Create the window, renderer, and glyph cache that make up a `Display`.
///
/// Applies the dimensions from `options` (falling back to the config),
/// sets up the resize channel, and clears the screen to the configured
/// background color before returning.
pub fn new(
    config: &Config,
    options: &cli::Options,
) -> Result<Display, Error> {
    // Extract some properties from config
    let render_timer = config.render_timer();

    // Create the window where Alacritty will be displayed
    let mut window = Window::new(&options.title)?;

    // get window properties for initializing the other subsystems
    let mut viewport_size = window.inner_size_pixels()
        .expect("glutin returns window size");
    let dpr = window.hidpi_factor();
    info!("device_pixel_ratio: {}", dpr);

    // Create renderer
    let mut renderer = QuadRenderer::new(&config, viewport_size)?;

    // Initial font size delta is 0: start from the configured font size.
    let (glyph_cache, cell_width, cell_height) =
        Self::new_glyph_cache(&window, &mut renderer, config, 0)?;

    // CLI-provided dimensions win over the config file.
    let dimensions = options.dimensions()
        .unwrap_or_else(|| config.dimensions());

    // Resize window to specified dimensions unless one or both dimensions are 0
    if dimensions.columns_u32() > 0 && dimensions.lines_u32() > 0 {
        let width = cell_width as u32 * dimensions.columns_u32();
        let height = cell_height as u32 * dimensions.lines_u32();

        // Padding is applied on every side, hence the factor of 2.
        let new_viewport_size = Size {
            width: Pixels(width + 2 * config.padding().x as u32),
            height: Pixels(height + 2 * config.padding().y as u32),
        };

        window.set_inner_size(&new_viewport_size);
        renderer.resize(new_viewport_size.width.0 as _, new_viewport_size.height.0 as _);
        viewport_size = new_viewport_size
    }

    info!("Cell Size: ({} x {})", cell_width, cell_height);

    let size_info = SizeInfo {
        width: viewport_size.width.0 as f32,
        height: viewport_size.height.0 as f32,
        cell_width: cell_width as f32,
        cell_height: cell_height as f32,
        padding_x: config.padding().x.floor(),
        padding_y: config.padding().y.floor(),
    };

    // Channel for resize events
    //
    // macOS has a callback for getting resize events, the channel is used
    // to queue resize events until the next draw call. Unfortunately, it
    // seems that the event loop is blocked until the window is done
    // resizing. If any drawing were to happen during a resize, it would
    // need to be in the callback.
    let (tx, rx) = mpsc::channel();

    // Clear screen
    let background_color = config.colors().primary.background;
    renderer.with_api(config, &size_info, 0. /* visual bell intensity */, |api| {
        api.clear(background_color);
    });

    Ok(Display {
        window: window,
        renderer: renderer,
        glyph_cache: glyph_cache,
        render_timer: render_timer,
        tx: tx,
        rx: rx,
        meter: Meter::new(),
        font_size_modifier: 0,
        size_info: size_info,
        last_background_color: background_color,
    })
}
/// Construct a glyph cache for the configured font (with `font_size_delta`
/// applied) and compute the resulting cell dimensions in pixels.
fn new_glyph_cache(window: &Window, renderer: &mut QuadRenderer,
                   config: &Config, font_size_delta: i8)
    -> Result<(GlyphCache, f32, f32), Error>
{
    let font = config.font().clone().with_size_delta(font_size_delta as f32);
    let dpr = window.hidpi_factor();
    let rasterizer = font::Rasterizer::new(dpr, config.use_thin_strokes())?;

    // Build the cache, logging how long initialization took.
    info!("Initializing glyph cache");
    let init_start = ::std::time::Instant::now();
    let glyph_cache = renderer.with_loader(|mut api| {
        GlyphCache::new(rasterizer, &font, &mut api)
    })?;
    let elapsed = init_start.elapsed();
    let seconds = elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000f64;
    info!("Finished initializing glyph cache in {}", seconds);

    // Need font metrics to resize the window properly. This suggests to me the
    // font metrics should be computed before creating the window in the first
    // place so that a resize is not needed.
    let metrics = glyph_cache.font_metrics();
    let cell_width = (metrics.average_advance + font.offset().x as f64) as u32;
    let cell_height = (metrics.line_height + font.offset().y as f64) as u32;

    Ok((glyph_cache, cell_width as f32, cell_height as f32))
}
/// Rebuild the glyph cache after a font-size change and refresh the cached
/// cell dimensions accordingly.
pub fn update_glyph_cache(&mut self, config: &Config, font_size_delta: i8) {
    let cache = &mut self.glyph_cache;
    // A failed font-size update is intentionally ignored; the previous
    // cache remains usable.
    self.renderer.with_loader(|mut api| {
        let _ = cache.update_font_size(config.font(), font_size_delta, &mut api);
    });

    // Recompute cell dimensions from the (possibly new) font metrics,
    // folding in the configured font offset.
    let metrics = cache.font_metrics();
    self.size_info.cell_width = ((metrics.average_advance + config.font().offset().x as f64) as f32).floor();
    self.size_info.cell_height = ((metrics.line_height + config.font().offset().y as f64) as f32).floor();
}
/// Obtain a sender on which other threads can queue resize events; they
/// are drained by `handle_resize()`.
#[inline]
pub fn resize_channel(&self) -> mpsc::Sender<(u32, u32)> {
    self.tx.clone()
}

/// Mutable access to the underlying window.
pub fn window(&mut self) -> &mut Window {
    &mut self.window
}
/// Process pending resize events
///
/// Drains the resize channel keeping only the most recent event, then
/// applies it (and any pending font-size change) to the terminal, the
/// window, the renderer, and every registered `OnResize` listener.
pub fn handle_resize(
    &mut self,
    terminal: &mut MutexGuard<Term>,
    config: &Config,
    items: &mut [&mut OnResize]
) {
    // Resize events new_size and are handled outside the poll_events
    // iterator. This has the effect of coalescing multiple resize
    // events into one.
    let mut new_size = None;

    // Take most recent resize event, if any
    while let Ok(sz) = self.rx.try_recv() {
        new_size = Some(sz);
    }

    if terminal.font_size_modifier != self.font_size_modifier {
        // Font size modification detected
        self.font_size_modifier = terminal.font_size_modifier;
        self.update_glyph_cache(config, terminal.font_size_modifier);

        // Idiomatic `is_none()` instead of comparing against `None`.
        if new_size.is_none() {
            // Force a resize to refresh things
            new_size = Some((self.size_info.width as u32,
                             self.size_info.height as u32));
        }
    }

    // Receive any resize events; only call gl::Viewport on last
    // available
    if let Some((w, h)) = new_size.take() {
        self.size_info.width = w as f32;
        self.size_info.height = h as f32;

        let size = &self.size_info;
        terminal.resize(size);

        for item in items {
            item.on_resize(size)
        }

        self.window.resize(w, h);
        self.renderer.resize(w as i32, h as i32);
    }
}
/// Draw the screen
///
/// A reference to Term whose state is being drawn must be provided.
///
/// This call may block if vsync is enabled
pub fn draw(&mut self, mut terminal: MutexGuard<Term>, config: &Config, selection: Option<&Selection>) {
    // Clear dirty flag
    terminal.dirty = !terminal.visual_bell.completed();

    // Apply any pending window-title change.
    if let Some(title) = terminal.get_next_title() {
        self.window.set_title(&title);
    }

    if let Some(is_urgent) = terminal.next_is_urgent.take() {
        // We don't need to set the urgent flag if we already have the
        // user's attention.
        if !is_urgent || !self.window.is_focused {
            self.window.set_urgent(is_urgent);
        }
    }

    let size_info = *terminal.size_info();
    let visual_bell_intensity = terminal.visual_bell.intensity();

    // Track background-color changes so the whole screen is re-cleared
    // only when needed.
    let background_color = terminal.background_color();
    let background_color_changed = background_color != self.last_background_color;
    self.last_background_color = background_color;

    {
        let glyph_cache = &mut self.glyph_cache;

        // Draw grid
        {
            // Sampler measures this frame's render time for the overlay.
            let _sampler = self.meter.sampler();

            // Make a copy of size_info since the closure passed here
            // borrows terminal mutably
            //
            // TODO I wonder if the renderable cells iter could avoid the
            // mutable borrow
            self.renderer.with_api(config, &size_info, visual_bell_intensity, |mut api| {
                // Clear screen to update whole background with new color
                if background_color_changed {
                    api.clear(background_color);
                }

                // Draw the grid
                api.render_cells(terminal.renderable_cells(config, selection), glyph_cache);
            });
        }

        // Draw render timer
        if self.render_timer {
            let timing = format!("{:.3} usec", self.meter.average());
            let color = Rgb { r: 0xd5, g: 0x4e, b: 0x53 };
            self.renderer.with_api(config, &size_info, visual_bell_intensity, |mut api| {
                api.render_string(&timing[..], glyph_cache, color);
            });
        }
    }

    // Unlock the terminal mutex; following call to swap_buffers() may block
    drop(terminal);

    self.window
        .swap_buffers()
        .expect("swap buffers");

    // Clear after swap_buffers when terminal mutex isn't held. Mesa for
    // some reason takes a long time to call glClear(). The driver descends
    // into xcb_connect_to_fd() which ends up calling __poll_nocancel()
    // which blocks for a while.
    //
    // By keeping this outside of the critical region, the Mesa bug is
    // worked around to some extent. Since this doesn't actually address the
    // issue of glClear being slow, less time is available for input
    // handling and rendering.
    self.renderer.with_api(config, &size_info, visual_bell_intensity, |api| {
        api.clear(background_color);
    });
}
/// The platform window id, if the windowing backend exposes one.
pub fn get_window_id(&self) -> Option<usize> {
    self.window.get_window_id()
}

/// Adjust the XIM editor position according to the new location of the cursor
pub fn update_ime_position(&mut self, terminal: &Term) {
    use index::{Point, Line, Column};
    use term::SizeInfo;
    let Point{line: Line(row), col: Column(col)} = terminal.cursor().point;
    let SizeInfo{cell_width: cw,
        cell_height: ch,
        padding_x: px,
        padding_y: py, ..} = *terminal.size_info();

    // Convert the cursor's cell coordinates to pixels; +1 row so the IME
    // spot sits just below the cursor line.
    let nspot_y = (py + (row + 1) as f32 * ch) as i16;
    let nspot_x = (px + col as f32 * cw) as i16;

    self.window().send_xim_spot(nspot_x, nspot_y);
}
}
|
//! The generic ELF module, which gives access to ELF constants and other helper functions, which are independent of ELF bithood. Also defines an `Elf` struct which implements a unified parser that returns a wrapped `Elf64` or `Elf32` binary.
//!
//! To access the exact 32-bit or 64-bit versions, use [goblin::elf32::Header](header/header32/struct.Header.html)/[goblin::elf64::Header](header/header64/struct.Header.html), etc., for the various 32/64-bit structs.
//!
//! # Example
//!
//! ```rust
//! use std::fs::File;
//!
//! pub fn read (bytes: &[u8]) {
//! match goblin::elf::Elf::parse(&bytes) {
//! Ok(binary) => {
//! let entry = binary.entry;
//! for ph in binary.program_headers {
//! if ph.p_type == goblin::elf::program_header::PT_LOAD {
//! let mut _buf = vec![0u8; ph.p_filesz as usize];
//! // read responsibly
//! }
//! }
//! },
//! Err(_) => ()
//! }
//! }
//! ```
//!
//! This will properly access the underlying 32-bit or 64-bit binary automatically. Note that since
//! 32-bit binaries typically have shorter 32-bit values in some cases (specifically for addresses and pointer
//! values), these values are upcasted to u64/i64s when appropriate.
//!
//! See [goblin::elf::Elf](struct.Elf.html) for more information.
//!
//! You are still free to use the specific 32-bit or 64-bit versions by accessing them through `goblin::elf64`, etc., but you will have to parse and/or construct the various components yourself.
//! In other words, there is no unified 32/64-bit `Elf` struct.
//!
//! # Note
//! To use the automagic ELF datatype union parser, you _must_ enable/opt-in to the `elf64`, `elf32`, and
//! `endian_fd` features if you disable `default`.
#[macro_use]
mod gnu_hash;
// These are shareable values for the 32/64 bit implementations.
//
// They are publicly re-exported by the pub-using module
pub mod header;
pub mod program_header;
pub mod section_header;
pub mod compression_header;
#[macro_use]
pub mod sym;
pub mod dyn;
#[macro_use]
pub mod reloc;
pub mod note;
// Gate every item passed in behind the features required for the unified
// 32/64-bit, endian-aware `Elf` parser.
macro_rules! if_sylvan {
    ($($i:item)*) => ($(
        #[cfg(all(feature = "elf32", feature = "elf64", feature = "endian_fd"))]
        $i
    )*)
}
if_sylvan! {
use scroll::{self, ctx, Pread, Endian};
use strtab::Strtab;
use error;
use container::{Container, Ctx};
use alloc::vec::Vec;
use core::cmp;
// Convenience aliases exposing the unified (container-agnostic) types under
// short names for consumers of this module.
pub type Header = header::Header;
pub type ProgramHeader = program_header::ProgramHeader;
pub type SectionHeader = section_header::SectionHeader;
pub type Symtab<'a> = sym::Symtab<'a>;
pub type Sym = sym::Sym;
pub type Dyn = dyn::Dyn;
pub type Dynamic = dyn::Dynamic;
pub type Reloc = reloc::Reloc;
pub type RelocSection<'a> = reloc::RelocSection<'a>;
pub type ProgramHeaders = Vec<ProgramHeader>;
pub type SectionHeaders = Vec<SectionHeader>;
pub type ShdrIdx = usize;
#[derive(Debug)]
/// An ELF binary. The underlying data structures are read according to the headers byte order and container size (32 or 64).
pub struct Elf<'a> {
    /// The ELF header, which provides a rudimentary index into the rest of the binary
    pub header: Header,
    /// The program headers; they primarily tell the kernel and the dynamic linker
    /// how to load this binary
    pub program_headers: ProgramHeaders,
    /// The sections headers. These are strippable, never count on them being
    /// here unless you're a static linker!
    pub section_headers: SectionHeaders,
    /// The section header string table
    pub shdr_strtab: Strtab<'a>,
    /// The string table for the dynamically accessible symbols
    pub dynstrtab: Strtab<'a>,
    /// The dynamically accessible symbols, i.e., exports, imports.
    /// This is what the dynamic linker uses to dynamically load and link your binary,
    /// or find imported symbols for binaries which dynamically link against your library
    pub dynsyms: Symtab<'a>,
    /// The debugging symbol table
    pub syms: Symtab<'a>,
    /// The string table for the symbol table
    pub strtab: Strtab<'a>,
    /// Contains dynamic linking information, with the _DYNAMIC array + a preprocessed DynamicInfo for that array
    pub dynamic: Option<Dynamic>,
    /// The dynamic relocation entries (strings, copy-data, etc.) with an addend
    pub dynrelas: RelocSection<'a>,
    /// The dynamic relocation entries without an addend
    pub dynrels: RelocSection<'a>,
    /// The plt relocation entries (procedure linkage table). For 32-bit binaries these are usually Rel (no addend)
    pub pltrelocs: RelocSection<'a>,
    /// Section relocations by section index (only present if this is a relocatable object file)
    pub shdr_relocs: Vec<(ShdrIdx, RelocSection<'a>)>,
    /// The binary's soname, if it has one
    pub soname: Option<&'a str>,
    /// The binary's program interpreter (e.g., dynamic linker), if it has one
    pub interpreter: Option<&'a str>,
    /// A list of this binary's dynamic libraries it uses, if there are any
    pub libraries: Vec<&'a str>,
    /// Whether this binary uses the 64-bit container (ELFCLASS64)
    pub is_64: bool,
    /// Whether this is a shared object or not
    pub is_lib: bool,
    /// The binaries entry point address, if it has one
    pub entry: u64,
    /// Whether the binary is little endian or not
    pub little_endian: bool,
    // Parsing context (container size + endianness) reused when iterating notes.
    ctx: Ctx,
}
impl<'a> Elf<'a> {
/// Try to iterate notes in PT_NOTE program headers; returns `None` if there aren't any note headers in this binary
pub fn iter_note_headers(&self, data: &'a [u8]) -> Option<note::NoteIterator<'a>> {
    // Collect one data iterator per PT_NOTE segment.
    let iters: Vec<_> = self.program_headers
        .iter()
        .filter(|phdr| phdr.p_type == program_header::PT_NOTE)
        .map(|phdr| {
            let offset = phdr.p_offset as usize;
            note::NoteDataIterator {
                data,
                offset,
                size: offset + phdr.p_filesz as usize,
                ctx: (phdr.p_align as usize, self.ctx),
            }
        })
        .collect();

    if iters.is_empty() {
        None
    } else {
        Some(note::NoteIterator { iters: iters, index: 0 })
    }
}
/// Try to iterate notes in SHT_NOTE sections; returns `None` if there aren't any note sections in this binary
///
/// If a section_name is given, only the section with the according name is iterated.
pub fn iter_note_sections(
    &self,
    data: &'a [u8],
    section_name: Option<&str>,
) -> Option<note::NoteIterator<'a>> {
    let mut iters = Vec::new();
    for sect in &self.section_headers {
        // Only SHT_NOTE sections carry notes.
        if sect.sh_type != section_header::SHT_NOTE {
            continue;
        }

        // When a name filter is given, skip sections whose name (resolved
        // via the section-header string table) does not match it.
        if section_name.is_some() {
            let name_matches = self.shdr_strtab
                .get(sect.sh_name)
                .map_or(false, |r| r.ok() == section_name);
            if !name_matches {
                continue;
            }
        }

        let offset = sect.sh_offset as usize;
        iters.push(note::NoteDataIterator {
            data,
            offset,
            size: offset + sect.sh_size as usize,
            ctx: (sect.sh_addralign as usize, self.ctx),
        });
    }

    if iters.is_empty() {
        None
    } else {
        Some(note::NoteIterator { iters: iters, index: 0 })
    }
}
/// Whether this binary is a relocatable object file (`ET_REL`).
pub fn is_object_file(&self) -> bool {
    self.header.e_type == header::ET_REL
}
/// Parses the contents of the byte stream in `bytes`, and maybe returns a unified binary
pub fn parse(bytes: &'a [u8]) -> error::Result<Self> {
let header = bytes.pread::<Header>(0)?;
let entry = header.e_entry as usize;
let is_lib = header.e_type == header::ET_DYN;
let is_lsb = header.e_ident[header::EI_DATA] == header::ELFDATA2LSB;
let endianness = scroll::Endian::from(is_lsb);
let class = header.e_ident[header::EI_CLASS];
if class != header::ELFCLASS64 && class != header::ELFCLASS32 {
return Err(error::Error::Malformed(format!("Unknown values in ELF ident header: class: {} endianness: {}",
class,
header.e_ident[header::EI_DATA])).into());
}
let is_64 = class == header::ELFCLASS64;
let container = if is_64 { Container::Big } else { Container::Little };
let ctx = Ctx::new(container, endianness);
let program_headers = ProgramHeader::parse(bytes, header.e_phoff as usize, header.e_phnum as usize, ctx)?;
let mut interpreter = None;
for ph in &program_headers {
if ph.p_type == program_header::PT_INTERP && ph.p_filesz != 0 {
let count = (ph.p_filesz - 1) as usize;
let offset = ph.p_offset as usize;
interpreter = Some(bytes.pread_with::<&str>(offset, ::scroll::ctx::StrCtx::Length(count))?);
}
}
let section_headers = SectionHeader::parse(bytes, header.e_shoff as usize, header.e_shnum as usize, ctx)?;
let get_strtab = |section_headers: &[SectionHeader], section_idx: usize| {
if section_idx >= section_headers.len() {
// FIXME: warn! here
Ok(Strtab::default())
} else {
let shdr = &section_headers[section_idx];
shdr.check_size(bytes.len())?;
Strtab::parse(bytes, shdr.sh_offset as usize, shdr.sh_size as usize, 0x0)
}
};
let strtab_idx = header.e_shstrndx as usize;
let shdr_strtab = get_strtab(&section_headers, strtab_idx)?;
let mut syms = Symtab::default();
let mut strtab = Strtab::default();
for shdr in &section_headers {
if shdr.sh_type as u32 == section_header::SHT_SYMTAB {
let size = shdr.sh_entsize;
let count = if size == 0 { 0 } else { shdr.sh_size / size };
syms = Symtab::parse(bytes, shdr.sh_offset as usize, count as usize, ctx)?;
strtab = get_strtab(&section_headers, shdr.sh_link as usize)?;
}
}
let mut soname = None;
let mut libraries = vec![];
let mut dynsyms = Symtab::default();
let mut dynrelas = RelocSection::default();
let mut dynrels = RelocSection::default();
let mut pltrelocs = RelocSection::default();
let mut dynstrtab = Strtab::default();
let dynamic = Dynamic::parse(bytes, &program_headers, ctx)?;
if let Some(ref dynamic) = dynamic {
let dyn_info = &dynamic.info;
dynstrtab = Strtab::parse(bytes,
dyn_info.strtab,
dyn_info.strsz,
0x0)?;
if dyn_info.soname != 0 {
// FIXME: warn! here
soname = match dynstrtab.get(dyn_info.soname) { Some(Ok(soname)) => Some(soname), _ => None };
}
if dyn_info.needed_count > 0 {
libraries = dynamic.get_libraries(&dynstrtab);
}
// parse the dynamic relocations
dynrelas = RelocSection::parse(bytes, dyn_info.rela, dyn_info.relasz, true, ctx)?;
dynrels = RelocSection::parse(bytes, dyn_info.rel, dyn_info.relsz, false, ctx)?;
let is_rela = dyn_info.pltrel as u64 == dyn::DT_RELA;
pltrelocs = RelocSection::parse(bytes, dyn_info.jmprel, dyn_info.pltrelsz, is_rela, ctx)?;
let mut num_syms = if let Some(gnu_hash) = dyn_info.gnu_hash {
gnu_hash_len(bytes, gnu_hash as usize, ctx)?
} else if let Some(hash) = dyn_info.hash {
hash_len(bytes, hash as usize, header.e_machine, ctx)?
} else {
0
};
let max_reloc_sym = dynrelas.iter()
.chain(dynrels.iter())
.chain(pltrelocs.iter())
.fold(0, |num, reloc| cmp::max(num, reloc.r_sym));
if max_reloc_sym != 0 {
num_syms = cmp::max(num_syms, max_reloc_sym + 1);
}
dynsyms = Symtab::parse(bytes, dyn_info.symtab, num_syms, ctx)?;
}
let mut shdr_relocs = vec![];
for (idx, section) in section_headers.iter().enumerate() {
if section.sh_type == section_header::SHT_REL {
section.check_size(bytes.len())?;
let sh_relocs = RelocSection::parse(bytes, section.sh_offset as usize, section.sh_size as usize, false, ctx)?;
shdr_relocs.push((idx, sh_relocs));
}
if section.sh_type == section_header::SHT_RELA {
section.check_size(bytes.len())?;
let sh_relocs = RelocSection::parse(bytes, section.sh_offset as usize, section.sh_size as usize, true, ctx)?;
shdr_relocs.push((idx, sh_relocs));
}
}
Ok(Elf {
header: header,
program_headers: program_headers,
section_headers: section_headers,
shdr_strtab: shdr_strtab,
dynamic: dynamic,
dynsyms: dynsyms,
dynstrtab: dynstrtab,
syms: syms,
strtab: strtab,
dynrelas: dynrelas,
dynrels: dynrels,
pltrelocs: pltrelocs,
shdr_relocs: shdr_relocs,
soname: soname,
interpreter: interpreter,
libraries: libraries,
is_64: is_64,
is_lib: is_lib,
entry: entry as u64,
little_endian: is_lsb,
ctx,
})
}
}
impl<'a> ctx::TryFromCtx<'a, (usize, Endian)> for Elf<'a> {
    type Error = ::error::Error;
    type Size = usize;
    /// Parse an `Elf` out of `src`. The hint context is ignored because the
    /// ELF ident bytes themselves carry class and endianness; the whole slice
    /// is reported as consumed.
    fn try_from_ctx(src: &'a [u8], (_, _): (usize, Endian)) -> Result<(Elf<'a>, Self::Size), Self::Error> {
        Ok((Elf::parse(src)?, src.len()))
    }
}
/// Derive the dynamic symbol count from a `DT_GNU_HASH` table.
///
/// The GNU hash section does not store the symbol count directly; this
/// recovers it by finding the highest bucket value and then walking that
/// bucket's hash chain until the terminating entry (bit 0 set).
fn gnu_hash_len(bytes: &[u8], offset: usize, ctx: Ctx) -> error::Result<usize> {
    // Header words: bucket count, index of the first hashed symbol,
    // and the number of bloom-filter words.
    let buckets_num = bytes.pread_with::<u32>(offset, ctx.le)? as usize;
    let min_chain = bytes.pread_with::<u32>(offset + 4, ctx.le)? as usize;
    let bloom_size = bytes.pread_with::<u32>(offset + 8, ctx.le)? as usize;
    // We could handle min_chain==0 if we really had to, but it shouldn't happen.
    if buckets_num == 0 || min_chain == 0 || bloom_size == 0 {
        return Err(error::Error::Malformed(format!("Invalid DT_GNU_HASH: buckets_num={} min_chain={} bloom_size={}",
        buckets_num, min_chain, bloom_size)));
    }
    // Find the last bucket.
    // Bloom words are container-sized: 8 bytes for 64-bit, 4 for 32-bit.
    let buckets_offset = offset + 16 + bloom_size * if ctx.container.is_big() { 8 } else { 4 };
    let mut max_chain = 0;
    for bucket in 0..buckets_num {
        let chain = bytes.pread_with::<u32>(buckets_offset + bucket * 4, ctx.le)? as usize;
        if max_chain < chain {
            max_chain = chain;
        }
    }
    // No bucket points at or beyond the first hashed symbol: nothing hashed.
    if max_chain < min_chain {
        return Ok(0);
    }
    // Find the last chain within the bucket.
    let mut chain_offset = buckets_offset + buckets_num * 4 + (max_chain - min_chain) * 4;
    loop {
        let hash = bytes.pread_with::<u32>(chain_offset, ctx.le)?;
        max_chain += 1;
        chain_offset += 4;
        // Bit 0 set marks the final entry of a chain.
        if hash & 1 != 0 {
            return Ok(max_chain);
        }
    }
}
/// Read the dynamic symbol count (`nchain`) out of a `DT_HASH` table.
fn hash_len(bytes: &[u8], offset: usize, machine: u16, ctx: Ctx) -> error::Result<usize> {
    // Based on readelf code: these targets use 8-byte hash entries in a
    // 64-bit container; everything else uses 4-byte entries.
    let wide_entries = (machine == header::EM_FAKE_ALPHA || machine == header::EM_S390)
        && ctx.container.is_big();
    if wide_entries {
        Ok(bytes.pread_with::<u64>(offset + 4, ctx.le)? as usize)
    } else {
        Ok(bytes.pread_with::<u32>(offset + 4, ctx.le)? as usize)
    }
}
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Parse the bundled 64-bit crt1 object and spot-check the parsed fields.
    #[test]
    fn parse_crt1_64bit() {
        let crt1: Vec<u8> = include!("../../etc/crt1.rs");
        match Elf::parse(&crt1) {
            Ok(binary) => {
                assert!(binary.is_64);
                assert!(!binary.is_lib);
                assert_eq!(binary.entry, 0);
                assert!(binary.syms.get(1000).is_none());
                assert!(binary.syms.get(5).is_some());
                assert!(!binary.section_headers.is_empty());
                let syms = binary.syms.to_vec();
                assert!(!syms.is_empty());
                // Symbol 11 in this fixture is the program entry symbol.
                if let Some(sym) = syms.get(11) {
                    println!("sym: {:?}", sym);
                    assert_eq!(&binary.strtab[sym.st_name], "_start");
                }
            },
            Err(err) => panic!("failed: {}", err),
        }
    }
    /// Parse the bundled 32-bit crt1 object and spot-check the parsed fields.
    #[test]
    fn parse_crt1_32bit() {
        let crt1: Vec<u8> = include!("../../etc/crt132.rs");
        match Elf::parse(&crt1) {
            Ok(binary) => {
                assert!(!binary.is_64);
                assert!(!binary.is_lib);
                assert_eq!(binary.entry, 0);
                assert!(binary.syms.get(1000).is_none());
                assert!(binary.syms.get(5).is_some());
                assert!(!binary.section_headers.is_empty());
                let syms = binary.syms.to_vec();
                assert!(!syms.is_empty());
                // Symbol 11 in this fixture is `__libc_csu_fini`.
                if let Some(sym) = syms.get(11) {
                    println!("sym: {:?}", sym);
                    assert_eq!(&binary.strtab[sym.st_name], "__libc_csu_fini");
                }
            },
            Err(err) => panic!("failed: {}", err),
        }
    }
}
Remove near duplicate branch with boolean flag
//! The generic ELF module, which gives access to ELF constants and other helper functions, which are independent of ELF bithood. Also defines an `Elf` struct which implements a unified parser that returns a wrapped `Elf64` or `Elf32` binary.
//!
//! To access the exact 32-bit or 64-bit versions, use [goblin::elf32::Header](header/header32/struct.Header.html)/[goblin::elf64::Header](header/header64/struct.Header.html), etc., for the various 32/64-bit structs.
//!
//! # Example
//!
//! ```rust
//! use std::fs::File;
//!
//! pub fn read (bytes: &[u8]) {
//! match goblin::elf::Elf::parse(&bytes) {
//! Ok(binary) => {
//! let entry = binary.entry;
//! for ph in binary.program_headers {
//! if ph.p_type == goblin::elf::program_header::PT_LOAD {
//! let mut _buf = vec![0u8; ph.p_filesz as usize];
//! // read responsibly
//! }
//! }
//! },
//! Err(_) => ()
//! }
//! }
//! ```
//!
//! This will properly access the underlying 32-bit or 64-bit binary automatically. Note that since
//! 32-bit binaries typically have shorter 32-bit values in some cases (specifically for addresses and pointer
//! values), these values are upcasted to u64/i64s when appropriate.
//!
//! See [goblin::elf::Elf](struct.Elf.html) for more information.
//!
//! You are still free to use the specific 32-bit or 64-bit versions by accessing them through `goblin::elf64`, etc., but you will have to parse and/or construct the various components yourself.
//! In other words, there is no unified 32/64-bit `Elf` struct.
//!
//! # Note
//! To use the automagic ELF datatype union parser, you _must_ enable/opt-in to the `elf64`, `elf32`, and
//! `endian_fd` features if you disable `default`.
#[macro_use]
mod gnu_hash;
// These are shareable values for the 32/64 bit implementations.
//
// They are publicly re-exported by the pub-using module
pub mod header;
pub mod program_header;
pub mod section_header;
pub mod compression_header;
#[macro_use]
pub mod sym;
pub mod dyn;
#[macro_use]
pub mod reloc;
pub mod note;
/// Emit the wrapped items only when the unified 32/64-bit ELF parser is
/// enabled, i.e. when the `elf32`, `elf64` and `endian_fd` features are all on.
macro_rules! if_sylvan {
    ($($i:item)*) => ($(
        #[cfg(all(feature = "elf32", feature = "elf64", feature = "endian_fd"))]
        $i
    )*)
}
if_sylvan! {
use scroll::{self, ctx, Pread, Endian};
use strtab::Strtab;
use error;
use container::{Container, Ctx};
use alloc::vec::Vec;
use core::cmp;
pub type Header = header::Header;
pub type ProgramHeader = program_header::ProgramHeader;
pub type SectionHeader = section_header::SectionHeader;
pub type Symtab<'a> = sym::Symtab<'a>;
pub type Sym = sym::Sym;
pub type Dyn = dyn::Dyn;
pub type Dynamic = dyn::Dynamic;
pub type Reloc = reloc::Reloc;
pub type RelocSection<'a> = reloc::RelocSection<'a>;
pub type ProgramHeaders = Vec<ProgramHeader>;
pub type SectionHeaders = Vec<SectionHeader>;
pub type ShdrIdx = usize;
#[derive(Debug)]
/// An ELF binary. The underlying data structures are read according to the headers byte order and container size (32 or 64).
pub struct Elf<'a> {
    /// The ELF header, which provides a rudimentary index into the rest of the binary
    pub header: Header,
    /// The program headers; they primarily tell the kernel and the dynamic linker
    /// how to load this binary
    pub program_headers: ProgramHeaders,
    /// The sections headers. These are strippable, never count on them being
    /// here unless you're a static linker!
    pub section_headers: SectionHeaders,
    /// The section header string table
    pub shdr_strtab: Strtab<'a>,
    /// The string table for the dynamically accessible symbols
    pub dynstrtab: Strtab<'a>,
    /// The dynamically accessible symbols, i.e., exports, imports.
    /// This is what the dynamic linker uses to dynamically load and link your binary,
    /// or find imported symbols for binaries which dynamically link against your library
    pub dynsyms: Symtab<'a>,
    /// The debugging symbol table
    pub syms: Symtab<'a>,
    /// The string table for the symbol table
    pub strtab: Strtab<'a>,
    /// Contains dynamic linking information, with the _DYNAMIC array + a preprocessed DynamicInfo for that array
    pub dynamic: Option<Dynamic>,
    /// The dynamic relocation entries (strings, copy-data, etc.) with an addend
    pub dynrelas: RelocSection<'a>,
    /// The dynamic relocation entries without an addend
    pub dynrels: RelocSection<'a>,
    /// The plt relocation entries (procedure linkage table). For 32-bit binaries these are usually Rel (no addend)
    pub pltrelocs: RelocSection<'a>,
    /// Section relocations by section index (only present if this is a relocatable object file)
    pub shdr_relocs: Vec<(ShdrIdx, RelocSection<'a>)>,
    /// The binary's soname, if it has one
    pub soname: Option<&'a str>,
    /// The binary's program interpreter (e.g., dynamic linker), if it has one
    pub interpreter: Option<&'a str>,
    /// A list of this binary's dynamic libraries it uses, if there are any
    pub libraries: Vec<&'a str>,
    /// Whether this is a 64-bit (`ELFCLASS64`) binary, as opposed to 32-bit
    pub is_64: bool,
    /// Whether this is a shared object or not
    pub is_lib: bool,
    /// The binaries entry point address, if it has one
    pub entry: u64,
    /// Whether the binary is little endian or not
    pub little_endian: bool,
    // Container-size + endianness context established at parse time;
    // reused by the note iterator helpers.
    ctx: Ctx,
}
impl<'a> Elf<'a> {
/// Try to iterate notes in PT_NOTE program headers; returns `None` if there aren't any note headers in this binary
pub fn iter_note_headers(&self, data: &'a [u8]) -> Option<note::NoteIterator<'a>> {
let mut iters = vec![];
for phdr in &self.program_headers {
if phdr.p_type == program_header::PT_NOTE {
let offset = phdr.p_offset as usize;
let alignment = phdr.p_align as usize;
iters.push(note::NoteDataIterator {
data,
offset,
size: offset + phdr.p_filesz as usize,
ctx: (alignment, self.ctx)
});
}
}
if iters.is_empty() {
None
} else {
Some(note::NoteIterator {
iters: iters,
index: 0,
})
}
}
/// Try to iterate notes in SHT_NOTE sections; returns `None` if there aren't any note sections in this binary
///
/// If a section_name is given, only the section with the according name is iterated.
pub fn iter_note_sections(
&self,
data: &'a [u8],
section_name: Option<&str>,
) -> Option<note::NoteIterator<'a>> {
let mut iters = vec![];
for sect in &self.section_headers {
if sect.sh_type != section_header::SHT_NOTE {
continue;
}
if section_name.is_some() && !self.shdr_strtab
.get(sect.sh_name)
.map_or(false, |r| r.ok() == section_name) {
continue;
}
let offset = sect.sh_offset as usize;
let alignment = sect.sh_addralign as usize;
iters.push(note::NoteDataIterator {
data,
offset,
size: offset + sect.sh_size as usize,
ctx: (alignment, self.ctx)
});
}
if iters.is_empty() {
None
} else {
Some(note::NoteIterator {
iters: iters,
index: 0,
})
}
}
pub fn is_object_file(&self) -> bool {
self.header.e_type == header::ET_REL
}
/// Parses the contents of the byte stream in `bytes`, and maybe returns a unified binary
pub fn parse(bytes: &'a [u8]) -> error::Result<Self> {
let header = bytes.pread::<Header>(0)?;
let entry = header.e_entry as usize;
let is_lib = header.e_type == header::ET_DYN;
let is_lsb = header.e_ident[header::EI_DATA] == header::ELFDATA2LSB;
let endianness = scroll::Endian::from(is_lsb);
let class = header.e_ident[header::EI_CLASS];
if class != header::ELFCLASS64 && class != header::ELFCLASS32 {
return Err(error::Error::Malformed(format!("Unknown values in ELF ident header: class: {} endianness: {}",
class,
header.e_ident[header::EI_DATA])).into());
}
let is_64 = class == header::ELFCLASS64;
let container = if is_64 { Container::Big } else { Container::Little };
let ctx = Ctx::new(container, endianness);
let program_headers = ProgramHeader::parse(bytes, header.e_phoff as usize, header.e_phnum as usize, ctx)?;
let mut interpreter = None;
for ph in &program_headers {
if ph.p_type == program_header::PT_INTERP && ph.p_filesz != 0 {
let count = (ph.p_filesz - 1) as usize;
let offset = ph.p_offset as usize;
interpreter = Some(bytes.pread_with::<&str>(offset, ::scroll::ctx::StrCtx::Length(count))?);
}
}
let section_headers = SectionHeader::parse(bytes, header.e_shoff as usize, header.e_shnum as usize, ctx)?;
let get_strtab = |section_headers: &[SectionHeader], section_idx: usize| {
if section_idx >= section_headers.len() {
// FIXME: warn! here
Ok(Strtab::default())
} else {
let shdr = §ion_headers[section_idx];
shdr.check_size(bytes.len())?;
Strtab::parse(bytes, shdr.sh_offset as usize, shdr.sh_size as usize, 0x0)
}
};
let strtab_idx = header.e_shstrndx as usize;
let shdr_strtab = get_strtab(§ion_headers, strtab_idx)?;
let mut syms = Symtab::default();
let mut strtab = Strtab::default();
for shdr in §ion_headers {
if shdr.sh_type as u32 == section_header::SHT_SYMTAB {
let size = shdr.sh_entsize;
let count = if size == 0 { 0 } else { shdr.sh_size / size };
syms = Symtab::parse(bytes, shdr.sh_offset as usize, count as usize, ctx)?;
strtab = get_strtab(§ion_headers, shdr.sh_link as usize)?;
}
}
let mut soname = None;
let mut libraries = vec![];
let mut dynsyms = Symtab::default();
let mut dynrelas = RelocSection::default();
let mut dynrels = RelocSection::default();
let mut pltrelocs = RelocSection::default();
let mut dynstrtab = Strtab::default();
let dynamic = Dynamic::parse(bytes, &program_headers, ctx)?;
if let Some(ref dynamic) = dynamic {
let dyn_info = &dynamic.info;
dynstrtab = Strtab::parse(bytes,
dyn_info.strtab,
dyn_info.strsz,
0x0)?;
if dyn_info.soname != 0 {
// FIXME: warn! here
soname = match dynstrtab.get(dyn_info.soname) { Some(Ok(soname)) => Some(soname), _ => None };
}
if dyn_info.needed_count > 0 {
libraries = dynamic.get_libraries(&dynstrtab);
}
// parse the dynamic relocations
dynrelas = RelocSection::parse(bytes, dyn_info.rela, dyn_info.relasz, true, ctx)?;
dynrels = RelocSection::parse(bytes, dyn_info.rel, dyn_info.relsz, false, ctx)?;
let is_rela = dyn_info.pltrel as u64 == dyn::DT_RELA;
pltrelocs = RelocSection::parse(bytes, dyn_info.jmprel, dyn_info.pltrelsz, is_rela, ctx)?;
let mut num_syms = if let Some(gnu_hash) = dyn_info.gnu_hash {
gnu_hash_len(bytes, gnu_hash as usize, ctx)?
} else if let Some(hash) = dyn_info.hash {
hash_len(bytes, hash as usize, header.e_machine, ctx)?
} else {
0
};
let max_reloc_sym = dynrelas.iter()
.chain(dynrels.iter())
.chain(pltrelocs.iter())
.fold(0, |num, reloc| cmp::max(num, reloc.r_sym));
if max_reloc_sym != 0 {
num_syms = cmp::max(num_syms, max_reloc_sym + 1);
}
dynsyms = Symtab::parse(bytes, dyn_info.symtab, num_syms, ctx)?;
}
let mut shdr_relocs = vec![];
for (idx, section) in section_headers.iter().enumerate() {
let is_rela = section.sh_type == section_header::SHT_RELA;
if is_rela || section.sh_type == section_header::SHT_REL {
section.check_size(bytes.len())?;
let sh_relocs = RelocSection::parse(bytes, section.sh_offset as usize, section.sh_size as usize, is_rela, ctx)?;
shdr_relocs.push((idx, sh_relocs));
}
}
Ok(Elf {
header: header,
program_headers: program_headers,
section_headers: section_headers,
shdr_strtab: shdr_strtab,
dynamic: dynamic,
dynsyms: dynsyms,
dynstrtab: dynstrtab,
syms: syms,
strtab: strtab,
dynrelas: dynrelas,
dynrels: dynrels,
pltrelocs: pltrelocs,
shdr_relocs: shdr_relocs,
soname: soname,
interpreter: interpreter,
libraries: libraries,
is_64: is_64,
is_lib: is_lib,
entry: entry as u64,
little_endian: is_lsb,
ctx,
})
}
}
impl<'a> ctx::TryFromCtx<'a, (usize, Endian)> for Elf<'a> {
    type Error = ::error::Error;
    type Size = usize;
    /// Delegates to `Elf::parse`; the hint context is ignored because the ELF
    /// ident bytes determine class and endianness. Reports the full slice as
    /// consumed.
    fn try_from_ctx(src: &'a [u8], (_, _): (usize, Endian)) -> Result<(Elf<'a>, Self::Size), Self::Error> {
        let elf = Elf::parse(src)?;
        Ok((elf, src.len()))
    }
}
fn gnu_hash_len(bytes: &[u8], offset: usize, ctx: Ctx) -> error::Result<usize> {
let buckets_num = bytes.pread_with::<u32>(offset, ctx.le)? as usize;
let min_chain = bytes.pread_with::<u32>(offset + 4, ctx.le)? as usize;
let bloom_size = bytes.pread_with::<u32>(offset + 8, ctx.le)? as usize;
// We could handle min_chain==0 if we really had to, but it shouldn't happen.
if buckets_num == 0 || min_chain == 0 || bloom_size == 0 {
return Err(error::Error::Malformed(format!("Invalid DT_GNU_HASH: buckets_num={} min_chain={} bloom_size={}",
buckets_num, min_chain, bloom_size)));
}
// Find the last bucket.
let buckets_offset = offset + 16 + bloom_size * if ctx.container.is_big() { 8 } else { 4 };
let mut max_chain = 0;
for bucket in 0..buckets_num {
let chain = bytes.pread_with::<u32>(buckets_offset + bucket * 4, ctx.le)? as usize;
if max_chain < chain {
max_chain = chain;
}
}
if max_chain < min_chain {
return Ok(0);
}
// Find the last chain within the bucket.
let mut chain_offset = buckets_offset + buckets_num * 4 + (max_chain - min_chain) * 4;
loop {
let hash = bytes.pread_with::<u32>(chain_offset, ctx.le)?;
max_chain += 1;
chain_offset += 4;
if hash & 1 != 0 {
return Ok(max_chain);
}
}
}
/// Read the dynamic symbol count (`nchain`) from a `DT_HASH` table.
fn hash_len(bytes: &[u8], offset: usize, machine: u16, ctx: Ctx) -> error::Result<usize> {
    // Based on readelf code: Alpha and S/390 use 8-byte hash entries when the
    // container is 64-bit; all other targets use 4-byte entries.
    let uses_wide_entries = ctx.container.is_big()
        && (machine == header::EM_FAKE_ALPHA || machine == header::EM_S390);
    let nchain = match uses_wide_entries {
        true => bytes.pread_with::<u64>(offset + 4, ctx.le)? as usize,
        false => bytes.pread_with::<u32>(offset + 4, ctx.le)? as usize,
    };
    Ok(nchain)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Parse the bundled 64-bit crt1 object and spot-check the parsed fields.
    #[test]
    fn parse_crt1_64bit() {
        let crt1: Vec<u8> = include!("../../etc/crt1.rs");
        match Elf::parse(&crt1) {
            Ok(binary) => {
                assert!(binary.is_64);
                assert!(!binary.is_lib);
                assert_eq!(binary.entry, 0);
                assert!(binary.syms.get(1000).is_none());
                assert!(binary.syms.get(5).is_some());
                assert!(!binary.section_headers.is_empty());
                let syms = binary.syms.to_vec();
                assert!(!syms.is_empty());
                // Symbol 11 in this fixture is the program entry symbol.
                if let Some(sym) = syms.get(11) {
                    println!("sym: {:?}", sym);
                    assert_eq!(&binary.strtab[sym.st_name], "_start");
                }
            },
            Err(err) => panic!("failed: {}", err),
        }
    }
    /// Parse the bundled 32-bit crt1 object and spot-check the parsed fields.
    #[test]
    fn parse_crt1_32bit() {
        let crt1: Vec<u8> = include!("../../etc/crt132.rs");
        match Elf::parse(&crt1) {
            Ok(binary) => {
                assert!(!binary.is_64);
                assert!(!binary.is_lib);
                assert_eq!(binary.entry, 0);
                assert!(binary.syms.get(1000).is_none());
                assert!(binary.syms.get(5).is_some());
                assert!(!binary.section_headers.is_empty());
                let syms = binary.syms.to_vec();
                assert!(!syms.is_empty());
                // Symbol 11 in this fixture is `__libc_csu_fini`.
                if let Some(sym) = syms.get(11) {
                    println!("sym: {:?}", sym);
                    assert_eq!(&binary.strtab[sym.st_name], "__libc_csu_fini");
                }
            },
            Err(err) => panic!("failed: {}", err),
        }
    }
}
|
use std::path::Path;
use std::fs::File;
use image::{DynamicImage, GenericImage, ImageFormat};
use image;
use bitreader::BitReader;
/// An image into which a steganographic payload is embedded.
pub struct CoverImage {
    // The decoded image that the message bits are written into.
    cover_image: DynamicImage,
}
impl CoverImage {
    /// Load the cover image at `file_path`.
    /// NOTE(review): panics if the file is missing or not a decodable format.
    pub fn new(file_path: &str) -> CoverImage {
        CoverImage { cover_image: image::open(&Path::new(&file_path)).unwrap() }
    }
    /// Embed `message` into the cover image and save the result as a PNG.
    /// NOTE(review): the output path is hard-coded to a developer machine and
    /// every failure panics — both should become caller-controlled.
    pub fn encode_with(&mut self, message: &str) {
        let bit_vector = get_bit_vec(message);
        encode(&mut self.cover_image, &bit_vector);
        let ref mut fout = File::create(&Path::new("/home/hugh/Pictures/yurt.png")).unwrap();
        let _ = self.cover_image.save(fout, ImageFormat::PNG).unwrap();
    }
}
// Get a Vector of Bits to Encode the Image with
fn get_bit_vec(message: &str) -> Vec<u8> {
    let mut reader = BitReader::new(&message.as_bytes());
    // Eight single-bit reads per message byte: the result is a Vec of
    // bits (each entry 0 or 1), not bytes.
    (0..message.len() * 8)
        .map(|_| reader.read_u8(1).unwrap())
        .collect()
}
// Encode the Image with the Vector of Bits
//
// 9 is used here because each 8 x 8 block will have a dct for each colour and
// each colour will hold 3 bits of information, i.e. 9 payload bits per block.
fn encode(c_image: &mut DynamicImage, bit_vec: &Vec<u8>) {
    // Number of 8x8 blocks needed — a true ceiling division. The previous
    // `if bit_vec.len() / 9 == 0 { len / 9 } else { len / 9 + 1 }` produced 0
    // blocks for 1..=8 payload bits and one block too many for exact
    // multiples of 9.
    let max_iterations = ((bit_vec.len() + 8) / 9) as u32;
    // The old code also built an unused `image_blocks` vec and took an
    // `as_mut_rgb8` borrow that conflicted with `tile_image`'s argument;
    // pass the image straight through instead.
    let _image_blocks = tile_image(c_image, max_iterations);
}
// Collect `max_iterations` 8 x 8 pixel blocks from the image.
// NOTE(review): `Pixel` is not defined in this file, and `get_pixel_mut`
// yields mutable borrows of `image` — collecting them into a Vec will not
// borrow-check, so this looks like an unfinished draft. Also note the blocks
// are taken from a single 8-pixel-wide strip (x = row, y = column + index),
// not across the whole image — confirm the intended traversal.
fn tile_image(image: &mut DynamicImage, max_iterations: u32) -> Vec<Pixel> {
    let mut blocks = Vec::new();
    for block_number in 0..max_iterations {
        // Each block starts 8 pixels below the previous one.
        let index = block_number * 8;
        for row in 0..8 {
            for column in 0..8 {
                blocks.push(image.get_pixel_mut(row, column + index));
            }
        }
    }
    blocks
}
/**************************************************************************************************
* *
* Tests *
* *
**************************************************************************************************/
// One message byte should expand to exactly eight bit entries.
#[cfg(test)]
#[test]
fn test_get_bit_vec_len() {
    let bits = get_bit_vec("h");
    assert_eq!(8, bits.len());
}
// 'h' is 0x68 = 0b0110_1000, most significant bit first.
#[cfg(test)]
#[test]
fn test_get_bit_vec() {
    let test_bits = vec![0, 1, 1, 0, 1, 0, 0, 0];
    let bits = get_bit_vec("h");
    assert_eq!(bits, test_bits);
}
// NOTE(review): the encode tests below depend on images in a developer's home
// directory, and on LSB embedding that `encode` does not implement yet — they
// document intended behaviour rather than current behaviour.
#[cfg(test)]
#[test]
fn test_encode_red_channel_lsb_set() {
    let mut img = image::open(&Path::new("/home/hugh/Pictures/scenery.jpg")).unwrap();
    let bit_vec = vec![0, 1, 1, 0, 1, 0, 0, 0];
    encode(&mut img, &bit_vec);
    // First payload bit should land in the red channel's least significant bit.
    let pixel = img.get_pixel(0, 0);
    assert_eq!(pixel.data[0] % 2, bit_vec[0]);
}
#[cfg(test)]
#[test]
fn test_encode_green_channel_lsb_set() {
    let mut img = image::open(&Path::new("/home/hugh/Pictures/scenery.jpg")).unwrap();
    let bit_vec = vec![0, 1, 1, 0, 1, 0, 0, 0];
    encode(&mut img, &bit_vec);
    // Second payload bit should land in the green channel's LSB.
    let pixel = img.get_pixel(0, 0);
    assert_eq!(pixel.data[1] % 2, bit_vec[1]);
}
#[cfg(test)]
#[test]
fn test_encode_blue_channel_lsb_set() {
    let mut img = image::open(&Path::new("/home/hugh/Pictures/scenery.jpg")).unwrap();
    let bit_vec = vec![0, 1, 1, 0, 1, 0, 0, 0];
    encode(&mut img, &bit_vec);
    // Third payload bit should land in the blue channel's LSB.
    let pixel = img.get_pixel(0, 0);
    assert_eq!(pixel.data[2] % 2 as u8, bit_vec[2]);
}
#[cfg(test)]
#[test]
fn test_full_byte_is_encoded() {
    let mut img = image::open(&Path::new("/home/hugh/Pictures/scenery.jpg")).unwrap();
    let mut encoded_bit_vec = Vec::new();
    let mut count = 0;
    let bit_vec = vec![0, 1, 1, 0, 1, 0, 0, 0];
    encode(&mut img, &bit_vec);
    // Read back the LSBs of the first eight channels (three per pixel).
    'outer: for x_co_ord in 0..3 {
        let pixel = img.get_pixel(x_co_ord, 0);
        for channel in 0..3 {
            if count >= 8 {
                break 'outer;
            }
            encoded_bit_vec.push(pixel.data[channel] % 2);
            count += 1;
        }
    }
    assert_eq!(encoded_bit_vec, bit_vec);
}
Image can be tiled
use std::path::Path;
use std::fs::File;
use image::{DynamicImage, GenericImage, ImageFormat};
use image;
use bitreader::BitReader;
/// An image into which a steganographic payload is embedded.
pub struct CoverImage {
    // The decoded image that the message bits are written into.
    cover_image: DynamicImage
}
impl CoverImage {
    /// Load the cover image at `file_path`.
    /// NOTE(review): panics if the file is missing or not a decodable format.
    pub fn new(file_path: &str) -> CoverImage {
        CoverImage { cover_image: image::open(&Path::new(&file_path)).unwrap() }
    }
    /// Embed `message` into the cover image and save the result as a PNG.
    /// NOTE(review): the output path is hard-coded to a developer machine, and
    /// `encode` is currently a stub that only tiles the image — the saved PNG
    /// is unmodified for now.
    pub fn encode_with(&self, message: &str) {
        let bit_vector = get_bit_vec(message);
        encode(&self.cover_image, &bit_vector);
        let ref mut fout = File::create(&Path::new("/home/hugh/Pictures/yurt.png")).unwrap();
        let _ = self.cover_image.save(fout, ImageFormat::PNG).unwrap();
    }
}
// Get a Vector of Bits to Encode the Image with
fn get_bit_vec(message: &str) -> Vec<u8> {
    let mut reader = BitReader::new(&message.as_bytes());
    // One entry per bit of the message (hence len() * 8 single-bit reads);
    // each entry is 0 or 1.
    (0..message.len() * 8)
        .map(|_| reader.read_u8(1).unwrap())
        .collect()
}
// Encode the image with the Vector of Bits
//
// Currently a stub: it tiles the image into 8x8 blocks but does not yet embed
// `bit_vec` into them.
fn encode(c_image: &DynamicImage, bit_vec: &Vec<u8>) {
    // Leading underscore: the tiles are intentionally unused until the
    // embedding step is implemented, and this silences the
    // unused-variable warning.
    let _tiled_image = tile_image(c_image);
    // Mark the payload parameter as deliberately unused for now as well.
    let _ = bit_vec;
}
// Tile the image into consecutive 8 x 8 pixel blocks, row-major over blocks
// and row-major within each block. Trailing pixels that don't fill a complete
// block (width or height not divisible by 8) are skipped.
fn tile_image(c_image: &DynamicImage) -> Vec<image::Rgba<u8>> {
    let (width, height) = c_image.dimensions();
    // One entry per pixel of every complete block; reserve up front.
    let mut image_blocks = Vec::with_capacity(((width / 8) * (height / 8) * 64) as usize);
    // `dimensions()` already yields u32, so the old `as u32` casts were
    // redundant; the commented-out debug print has been removed.
    for row_index in 0..height / 8 {
        for col_index in 0..width / 8 {
            for row in 0..8 {
                for column in 0..8 {
                    image_blocks.push(c_image.get_pixel(column + (col_index * 8), row + (row_index * 8)));
                }
            }
        }
    }
    image_blocks
}
/**************************************************************************************************
* *
* Tests *
* *
**************************************************************************************************/
// One message byte should expand to exactly eight bit entries.
#[cfg(test)]
#[test]
fn test_get_bit_vec_len() {
    let bits = get_bit_vec("h");
    assert_eq!(8, bits.len());
}
// 'h' is 0x68 = 0b0110_1000, most significant bit first.
#[cfg(test)]
#[test]
fn test_get_bit_vec() {
    let test_bits = vec![0, 1, 1, 0, 1, 0, 0, 0];
    let bits = get_bit_vec("h");
    assert_eq!(bits, test_bits);
}
// NOTE(review): the encode tests below depend on images in a developer's home
// directory, and on LSB embedding that `encode` does not implement yet — they
// document intended behaviour rather than current behaviour.
#[cfg(test)]
#[test]
fn test_encode_red_channel_lsb_set() {
    let mut img = image::open(&Path::new("/home/hugh/Pictures/scenery.jpg")).unwrap();
    let bit_vec = vec![0, 1, 1, 0, 1, 0, 0, 0];
    encode(&mut img, &bit_vec);
    // First payload bit should land in the red channel's least significant bit.
    let pixel = img.get_pixel(0, 0);
    assert_eq!(pixel.data[0] % 2, bit_vec[0]);
}
#[cfg(test)]
#[test]
fn test_encode_green_channel_lsb_set() {
    let mut img = image::open(&Path::new("/home/hugh/Pictures/scenery.jpg")).unwrap();
    let bit_vec = vec![0, 1, 1, 0, 1, 0, 0, 0];
    encode(&mut img, &bit_vec);
    // Second payload bit should land in the green channel's LSB.
    let pixel = img.get_pixel(0, 0);
    assert_eq!(pixel.data[1] % 2, bit_vec[1]);
}
#[cfg(test)]
#[test]
fn test_encode_blue_channel_lsb_set() {
    let mut img = image::open(&Path::new("/home/hugh/Pictures/scenery.jpg")).unwrap();
    let bit_vec = vec![0, 1, 1, 0, 1, 0, 0, 0];
    encode(&mut img, &bit_vec);
    // Third payload bit should land in the blue channel's LSB.
    let pixel = img.get_pixel(0, 0);
    assert_eq!(pixel.data[2] % 2 as u8, bit_vec[2]);
}
#[cfg(test)]
#[test]
fn test_full_byte_is_encoded() {
    let mut img = image::open(&Path::new("/home/hugh/Pictures/scenery.jpg")).unwrap();
    let mut encoded_bit_vec = Vec::new();
    let mut count = 0;
    let bit_vec = vec![0, 1, 1, 0, 1, 0, 0, 0];
    encode(&mut img, &bit_vec);
    // Read back the LSBs of the first eight channels (three per pixel).
    'outer: for x_co_ord in 0..3 {
        let pixel = img.get_pixel(x_co_ord, 0);
        for channel in 0..3 {
            if count >= 8 {
                break 'outer;
            }
            encoded_bit_vec.push(pixel.data[channel] % 2);
            count += 1;
        }
    }
    assert_eq!(encoded_bit_vec, bit_vec);
}
// A tiled image yields one entry per pixel covered by complete 8x8 blocks.
#[cfg(test)]
#[test]
fn test_tile_image_length() {
    let img = image::open(&Path::new("/home/hugh/Pictures/colour.jpg")).unwrap();
    let vec = tile_image(&img);
    assert_eq!(vec.len(), 91200);
}
// The very first tiled entry is the image's top-left pixel.
#[cfg(test)]
#[test]
fn test_tile_image_first_pixel() {
    let img = image::open(&Path::new("/home/hugh/Pictures/project.jpg")).unwrap();
    let pixel = img.get_pixel(0, 0);
    let vec = tile_image(&img);
    assert_eq!(vec[0], pixel);
}
// (15, 7) is the bottom-right pixel of the second block => index 2*64 - 1 = 127.
#[cfg(test)]
#[test]
fn test_tile_image_random_pixel1() {
    let img = image::open(&Path::new("/home/hugh/Pictures/project.jpg")).unwrap();
    let pixel = img.get_pixel(15, 7);
    let vec = tile_image(&img);
    assert_eq!(vec[127], pixel);
}
// (40, 16): block (col 5, row 2) => block 2*50 + 5 = 105, base 105*64 = 6720.
#[cfg(test)]
#[test]
fn test_tile_image_random_pixel2() {
    let img = image::open(&Path::new("/home/hugh/Pictures/project.jpg")).unwrap();
    let pixel = img.get_pixel(40, 16);
    let vec = tile_image(&img);
    assert_eq!(vec[6720], pixel);
}
// Was an exact duplicate of random_pixel1; now checks a distinct block:
// (20, 30) lies in block (col 2, row 3) => block 3*50 + 2 = 152,
// base 152*64 = 9728, in-block offset (30-24)*8 + (20-16) = 52 => 9780.
// (project.jpg is 400x400, per test_tile_image_last_pixel below.)
#[cfg(test)]
#[test]
fn test_tile_image_random_pixel3() {
    let img = image::open(&Path::new("/home/hugh/Pictures/project.jpg")).unwrap();
    let pixel = img.get_pixel(20, 30);
    let vec = tile_image(&img);
    assert_eq!(vec[9780], pixel);
}
// The last tiled entry of a 400x400 image is pixel (399, 399) at 400*400 - 1.
#[cfg(test)]
#[test]
fn test_tile_image_last_pixel() {
    let img = image::open(&Path::new("/home/hugh/Pictures/project.jpg")).unwrap();
    let pixel = img.get_pixel(399, 399);
    let vec = tile_image(&img);
    assert_eq!(vec[159999], pixel);
}
|
// Copyright(C) 2016 Chris Liebert
extern crate glium;
extern crate nalgebra;
extern crate quick3d;
use glium::glutin;
use glium::glutin::Event;
use glium::glutin::ElementState;
use glium::glutin::VirtualKeyCode;
use glium::DisplayBuild;
use nalgebra::Matrix4;
use nalgebra::identity;
use quick3d::common::{Camera, Mesh, Scene};
use quick3d::loader::DBLoader;
use quick3d::renderer;
fn main() {
    // Window dimensions in pixels.
    let screen_width = 400;
    let screen_height = 300;
    // Scene geometry and shader sources are loaded from DB files via DBLoader.
    // NOTE(review): paths are relative to the working directory — confirm.
    let db_file: &str = "test.db";
    let dbloader: DBLoader = DBLoader::new(db_file);
    let shader_dbloader: DBLoader = DBLoader::new("shaders.db");
    let scene: Scene = dbloader.load_scene();
    // Build the OpenGL window; it stays hidden until loading completes.
    let display = glutin::WindowBuilder::new()
        //.resizable()
        //.with_vsync()
        .with_gl_debug_flag(true)
        .with_visibility(false) // Window is shown when scene finishes loading.
        .with_dimensions(screen_width, screen_height)
        .build_glium()
        .unwrap();
    let camera: Camera = Camera::new(screen_width as f32, screen_height as f32);
    let renderer = renderer::Renderer::new(&display, scene);
    let shader_program = renderer.create_shader_program("default", &shader_dbloader, &display);
    let mut running = true;
    // Show the window once the data is loaded
    match display.get_window() {
        Some(x) => x.show(),
        None => {
            running = false;
            println!("Error retrieving window");
        }
    }
    // The torus will be movable in the scene
    let torus: &Mesh = renderer.get_mesh("Torus").unwrap();
    let mut torus_height = 0.0f32;
    // Main loop: draw a frame, then drain pending window events.
    while running {
        renderer.render(&display, &shader_program, &camera);
        // Check for close events
        for event in display.poll_events() {
            match event {
                Event::Closed => running = false,
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::Escape)) => {
                    running = false
                }
                // W/S raise and lower the torus by 0.05 units per key event.
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::W)) => {
                    torus_height += 0.05f32;
                }
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::S)) => {
                    torus_height -= 0.05f32;
                }
                _ => (),
            }
        }
        // Move the torus based on changes from keyboard input
        // Get existing matrix
        let mut matrix: Matrix4<f32> = *torus.matrix.borrow();
        // NOTE(review): m24 is assumed to be the vertical translation
        // component in nalgebra's storage order — confirm.
        matrix.m24 = torus_height;
        // Mutate the matrix
        *torus.matrix.borrow_mut() = matrix;
    }
}
Added interactive camera to Rust example
// Copyright(C) 2016 Chris Liebert
extern crate glium;
extern crate nalgebra;
extern crate quick3d;
use glium::glutin;
use glium::glutin::{ElementState,Event,MouseButton,VirtualKeyCode};
use glium::DisplayBuild;
use nalgebra::Matrix4;
use quick3d::common::{Camera, Mesh, Scene};
use quick3d::loader::DBLoader;
use quick3d::renderer;
/// Entry point: loads a scene and shaders from SQLite databases, opens a
/// glium window, and runs an interactive render/event loop.
///
/// Controls:
/// * `W`/`A`/`S`/`D` step the camera forward/left/backward/right.
/// * Dragging with the left mouse button aims the camera.
/// * `I`/`J`/`K`/`L` start/stop moving the "Torus" mesh up/left/down/right.
/// * `Escape` (or closing the window) exits.
fn main() {
    let screen_width = 1200;
    let screen_height = 800;
    let db_file: &str = "test.db";
    let dbloader: DBLoader = DBLoader::new(db_file);
    let shader_dbloader: DBLoader = DBLoader::new("shaders.db");
    let scene: Scene = dbloader.load_scene();
    let display = glutin::WindowBuilder::new()
        //.resizable()
        //.with_vsync()
        .with_depth_buffer(24)
        .with_title("Rust Window")
        .with_gl_debug_flag(true)
        .with_visibility(false) // Window is shown when scene finishes loading.
        .with_dimensions(screen_width, screen_height)
        .build_glium()
        .unwrap();
    let camera: Camera = Camera::new(screen_width as f32, screen_height as f32);
    let renderer = renderer::Renderer::new(&display, scene);
    let shader_program = renderer.create_shader_program("default", &shader_dbloader, &display);
    let mut running: bool = true;
    // Show the window once the data is loaded.
    // NOTE: the previous version set `running = false` before panicking in
    // the `None` case, a dead store; `expect` panics with the same message.
    display.get_window().expect("Error retrieving window").show();
    // The torus will be movable in the scene.
    let torus: &Mesh = renderer.get_mesh("Torus").unwrap();
    // Torus position and per-frame velocity, driven by the IJKL keys.
    let mut torus_x = 0.0f32;
    let mut torus_y = 0.0f32;
    let mut torus_vertical_speed = 0.0f32;
    let mut torus_horizontal_speed = 0.0f32;
    // Mouse-drag state used for aiming the camera.
    let mut left_button_pressed = false;
    let mut mouse_last_x: i32 = 0;
    let mut mouse_last_y: i32 = 0;
    let mut mouse_dx: i32 = 0;
    let mut mouse_dy: i32 = 0;
    // (removed unused locals `camera_forward_speed` / `camera_left_speed`)
    while running {
        camera.update();
        renderer.render(&display, &shader_program, &camera);
        // Check for close events
        for event in display.poll_events() {
            match event {
                Event::Closed => running = false,
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::Escape)) => {
                    running = false
                }
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::W)) => {
                    camera.move_forward(1.0);
                }
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::A)) => {
                    camera.move_left(1.0);
                }
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::S)) => {
                    camera.move_backward(1.0);
                }
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::D)) => {
                    camera.move_right(1.0);
                }
                // IJKL set a velocity on key-down and clear it on key-up,
                // so the torus glides while the key is held.
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::I)) => {
                    torus_vertical_speed = 0.001f32;
                }
                Event::KeyboardInput(ElementState::Released, _, Some(VirtualKeyCode::I)) => {
                    torus_vertical_speed = 0.0f32;
                }
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::J)) => {
                    torus_horizontal_speed = -0.001f32;
                }
                Event::KeyboardInput(ElementState::Released, _, Some(VirtualKeyCode::J)) => {
                    torus_horizontal_speed = 0.0f32;
                }
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::K)) => {
                    torus_vertical_speed = -0.001f32;
                }
                Event::KeyboardInput(ElementState::Released, _, Some(VirtualKeyCode::K)) => {
                    torus_vertical_speed = 0.0f32;
                }
                Event::KeyboardInput(ElementState::Pressed, _, Some(VirtualKeyCode::L)) => {
                    torus_horizontal_speed = 0.001f32;
                }
                Event::KeyboardInput(ElementState::Released, _, Some(VirtualKeyCode::L)) => {
                    torus_horizontal_speed = 0.0f32;
                }
                Event::MouseInput(ElementState::Pressed, MouseButton::Left) => {
                    left_button_pressed = true;
                }
                Event::MouseInput(ElementState::Released, MouseButton::Left) => {
                    left_button_pressed = false;
                }
                Event::MouseMoved(x, y) => {
                    mouse_dx = mouse_last_x - x;
                    mouse_dy = mouse_last_y - y;
                    if left_button_pressed {
                        // Rotate the camera if the left mouse button is pressed
                        camera.aim(mouse_dx as f64, mouse_dy as f64);
                    }
                    mouse_last_x = x;
                    mouse_last_y = y;
                }
                _ => (),
            }
        }
        // Move the torus based on changes from keyboard input
        // Get existing matrix
        let mut matrix: Matrix4<f32> = *torus.matrix.borrow();
        torus_x += torus_horizontal_speed;
        torus_y += torus_vertical_speed;
        matrix.m14 = torus_x;
        matrix.m24 = torus_y;
        // Mutate the matrix
        *torus.matrix.borrow_mut() = matrix;
    }
}
|
//! Interval-based annotation features.
use bio::utils::{Interval, IntervalError};
/// A half-open `u64` interval that optionally carries a name.
pub trait NamedInterval: Sized {
    /// Underlying interval struct.
    fn interval(&self) -> &Interval<u64>; // TODO: Generalize over interval types.
    /// Name of the interval.
    fn name(&self) -> Option<&str>;
    /// Name setter that returns the implementor itself.
    ///
    /// This function is expected to mutate the implementing type.
    fn with_name<T: Into<String>>(self, name: T) -> Self;
    /// Coordinate setter that returns the implementor itself.
    ///
    /// This function is expected to mutate the implementing type.
    fn with_coords(self, start: u64, end: u64) -> Result<Self, IntervalError>;
    /// Start coordinate of the interval.
    fn start(&self) -> u64 {
        self.interval().start
    }
    /// End coordinate of the interval.
    fn end(&self) -> u64 {
        self.interval().end
    }
    /// The number of bases covered by the interval.
    fn span(&self) -> u64 {
        self.end() - self.start()
    }
    /// Whether two intervals have an overlap or not.
    ///
    /// Phrased as: neither interval ends at or before the other begins.
    fn overlaps(&self, other: &Self) -> bool {
        !(self.end() <= other.start() || other.end() <= self.start())
    }
    /// Whether one interval completely contains the other.
    fn envelops(&self, other: &Self) -> bool {
        other.start() >= self.start() && other.end() <= self.end()
    }
    /// Whether two intervals cover a contiguous region without any overlaps.
    fn is_adjacent(&self, other: &Self) -> bool {
        self.start() == other.end() || self.end() == other.start()
    }
}
/// Macro for default function implementations of interval types.
///
/// Expects `$struct_ty` to have `interval: Interval<u64>` and
/// `name: Option<String>` fields, which it wires into `NamedInterval`.
macro_rules! impl_ninterval {
    ($struct_ty:ty) => (
        impl NamedInterval for $struct_ty {
            /// Name of the interval.
            fn name(&self) -> Option<&str> {
                self.name.as_ref().map(|n| n.as_str())
            }
            // Consumes `self`, stores the (converted) name, and returns it.
            fn with_name<T>(mut self, name: T) -> $struct_ty
                where T: Into<String>
            {
                self.name = Some(name.into());
                self
            }
            fn interval(&self) -> &Interval<u64> {
                &self.interval
            }
            // Fails with `IntervalError` when `start > end` (propagated
            // from `Interval::new`); `self` is dropped in that case.
            fn with_coords(mut self, start: u64, end: u64) -> Result<$struct_ty, IntervalError> {
                Interval::new(start..end)
                    .map(|iv| {
                        self.interval = iv;
                        self
                    })
            }
        }
    );
}
/// Default implementation of the `Interval` trait.
///
/// This struct also provides static methods for creating exons, transcripts, and genes.
#[derive(Debug)]
pub struct Feature {
    // Genomic coordinates (half-open).
    interval: Interval<u64>,
    // Optional feature identifier.
    name: Option<String>,
}
impl Default for Feature {
    /// An unnamed, zero-length feature at coordinate 0.
    fn default() -> Feature {
        Feature { interval: Interval::new(0..0).unwrap(), name: None }
    }
}
impl_ninterval!(Feature);
#[cfg(test)]
mod test_feature {
    use super::*;
    #[test]
    fn default() {
        let fx = Feature::default();
        assert_eq!(fx.start(), 0);
        assert_eq!(fx.end(), 0);
        assert_eq!(fx.name(), None);
    }
    #[test]
    fn with_name() {
        let fx1 = Feature::default()
            .with_name("fx1");
        assert_eq!(fx1.start(), 0);
        assert_eq!(fx1.end(), 0);
        assert_eq!(fx1.name(), Some("fx1"));
        let fx2 = Feature::default()
            .with_name("fx2".to_owned());
        assert_eq!(fx2.start(), 0);
        assert_eq!(fx2.end(), 0);
        assert_eq!(fx2.name(), Some("fx2"));
    }
    #[test]
    fn with_coords() {
        let fxm = Feature::default()
            .with_coords(1, 3);
        assert!(fxm.is_ok());
        let fx = fxm.unwrap();
        assert_eq!(fx.start(), 1);
        assert_eq!(fx.end(), 3);
        assert_eq!(fx.name(), None);
    }
    #[test]
    fn with_coords_err() {
        let fxm = Feature::default()
            .with_coords(3, 1);
        assert!(fxm.is_err());
    }
    #[test]
    fn with_multiples() {
        let fxm = Feature::default()
            .with_coords(20, 30)
            .map(|f| f.with_name("fx"));
        assert!(fxm.is_ok());
        let fx = fxm.unwrap();
        assert_eq!(fx.start(), 20);
        assert_eq!(fx.end(), 30);
        assert_eq!(fx.name(), Some("fx"));
    }
    /// Helper: a nameless feature spanning `start..end`.
    fn make_feature(start: u64, end: u64) -> Feature {
        Feature::default().with_coords(start, end).unwrap()
    }
    #[test]
    fn span() {
        let fx = make_feature(0, 15);
        assert_eq!(fx.span(), 15);
    }
    #[test]
    fn overlaps() {
        let fx1 = make_feature(100, 115);
        let fx2 = make_feature(110, 120);
        assert!(fx1.overlaps(&fx2));
        // BUGFIX: this line previously repeated `fx1.overlaps(&fx2)`,
        // so the symmetric case was never exercised.
        assert!(fx2.overlaps(&fx1));
        let fx3 = make_feature(115, 120);
        assert!(!fx1.overlaps(&fx3));
        assert!(!fx3.overlaps(&fx1));
        let fx4 = make_feature(90, 100);
        assert!(!fx1.overlaps(&fx4));
        assert!(!fx4.overlaps(&fx1));
        let fx5 = make_feature(200, 300);
        assert!(!fx1.overlaps(&fx5));
        assert!(!fx5.overlaps(&fx1));
    }
    #[test]
    fn envelops() {
        let fx1 = make_feature(100, 120);
        let fx2 = make_feature(105, 115);
        assert!(fx1.envelops(&fx2));
        assert!(!fx2.envelops(&fx1));
        let fx3 = make_feature(100, 105);
        assert!(fx1.envelops(&fx3));
        assert!(!fx3.envelops(&fx1));
        let fx4 = make_feature(115, 120);
        assert!(fx1.envelops(&fx4));
        assert!(!fx4.envelops(&fx1));
        let fx5 = make_feature(90, 105);
        assert!(!fx1.envelops(&fx5));
        assert!(!fx5.envelops(&fx1));
        let fx6 = make_feature(115, 130);
        // BUGFIX: this line previously re-tested `fx5` instead of `fx6`.
        assert!(!fx1.envelops(&fx6));
        assert!(!fx6.envelops(&fx1));
        let fx7 = make_feature(80, 90);
        assert!(!fx1.envelops(&fx7));
        assert!(!fx7.envelops(&fx1));
    }
    #[test]
    fn is_adjacent() {
        let fx1 = make_feature(100, 120);
        let fx2 = make_feature(90, 100);
        assert!(fx1.is_adjacent(&fx2));
        assert!(fx2.is_adjacent(&fx1));
        let fx3 = make_feature(120, 130);
        assert!(fx1.is_adjacent(&fx3));
        assert!(fx3.is_adjacent(&fx1));
        let fx4 = make_feature(90, 99);
        assert!(!fx1.is_adjacent(&fx4));
        assert!(!fx4.is_adjacent(&fx1));
        let fx5 = make_feature(119, 130);
        assert!(!fx1.is_adjacent(&fx5));
        assert!(!fx5.is_adjacent(&fx1));
        let fx6 = make_feature(100, 110);
        assert!(!fx1.is_adjacent(&fx6));
        assert!(!fx6.is_adjacent(&fx1));
        let fx7 = make_feature(110, 120);
        assert!(!fx1.is_adjacent(&fx7));
        assert!(!fx7.is_adjacent(&fx1));
    }
}
Implement genes and transcripts with interval
//! Interval-based annotation features.
use std::collections::HashMap;
use bio::io::Strand;
use bio::utils::{Interval, IntervalError};
/// A half-open `u64` interval that optionally carries a name.
pub trait NamedInterval: Sized {
    /// Underlying interval struct.
    fn interval(&self) -> &Interval<u64>; // TODO: Generalize over interval types.
    /// Name of the interval.
    fn name(&self) -> Option<&str>;
    /// Name setter that returns the implementor itself.
    ///
    /// This function is expected to mutate the implementing type.
    fn with_name<T: Into<String>>(self, name: T) -> Self;
    /// Coordinate setter that returns the implementor itself.
    ///
    /// This function is expected to mutate the implementing type.
    fn with_coords(self, start: u64, end: u64) -> Result<Self, IntervalError>;
    /// Start coordinate of the interval.
    fn start(&self) -> u64 {
        self.interval().start
    }
    /// End coordinate of the interval.
    fn end(&self) -> u64 {
        self.interval().end
    }
    /// The number of bases covered by the interval.
    fn span(&self) -> u64 {
        self.end() - self.start()
    }
    /// Whether two intervals have an overlap or not.
    ///
    /// Phrased as: neither interval ends at or before the other begins.
    fn overlaps(&self, other: &Self) -> bool {
        !(self.end() <= other.start() || other.end() <= self.start())
    }
    /// Whether one interval completely contains the other.
    fn envelops(&self, other: &Self) -> bool {
        other.start() >= self.start() && other.end() <= self.end()
    }
    /// Whether two intervals cover a contiguous region without any overlaps.
    fn is_adjacent(&self, other: &Self) -> bool {
        self.start() == other.end() || self.end() == other.start()
    }
}
/// Macro for default function implementations of interval types.
///
/// Expects `$struct_ty` to have `interval: Interval<u64>` and
/// `name: Option<String>` fields, which it wires into `NamedInterval`.
macro_rules! impl_ninterval {
    ($struct_ty:ty) => (
        impl NamedInterval for $struct_ty {
            /// Name of the interval.
            fn name(&self) -> Option<&str> {
                self.name.as_ref().map(|n| n.as_str())
            }
            // Consumes `self`, stores the (converted) name, and returns it.
            fn with_name<T>(mut self, name: T) -> $struct_ty
                where T: Into<String>
            {
                self.name = Some(name.into());
                self
            }
            fn interval(&self) -> &Interval<u64> {
                &self.interval
            }
            // Fails with `IntervalError` when `start > end` (propagated
            // from `Interval::new`); `self` is dropped in that case.
            fn with_coords(mut self, start: u64, end: u64) -> Result<$struct_ty, IntervalError> {
                Interval::new(start..end)
                    .map(|iv| {
                        self.interval = iv;
                        self
                    })
            }
        }
    );
}
/// Default implementation of the `Interval` trait.
///
/// This struct also provides static methods for creating exons, transcripts, and genes.
#[derive(Debug)]
pub struct Feature {
    // Genomic coordinates (half-open).
    interval: Interval<u64>,
    // Optional feature identifier.
    name: Option<String>,
}
impl Default for Feature {
    /// An unnamed, zero-length feature at coordinate 0.
    fn default() -> Feature {
        Feature { interval: Interval::new(0..0).unwrap(), name: None }
    }
}
impl Feature {
    /// Creates a gene interval with default values.
    ///
    /// A gene interval is a container for transcript intervals.
    ///
    /// # Examples
    ///
    /// ```
    /// let gene = Feature::gene();
    ///
    /// assert_eq!(gene.transcripts().len(), 0);
    /// assert_eq!(gene.start(), 0);
    /// assert_eq!(gene.end(), 0);
    /// assert_eq!(gene.name(), None);
    /// ```
    pub fn gene() -> Gene {
        Gene::default()
    }
    /// Creates a transcript interval with default values.
    ///
    /// A transcript interval is a container for exon intervals.
    ///
    /// # Examples
    ///
    /// ```
    /// use bio::io::Strand;
    ///
    /// let transcript = Feature::transcript();
    ///
    /// assert_eq!(transcript.exons().len(), 0);
    /// assert_eq!(transcript.strand(), &Strand::Unknown);
    /// assert_eq!(transcript.start(), 0);
    /// assert_eq!(transcript.end(), 0);
    /// assert_eq!(transcript.name(), None);
    /// ```
    pub fn transcript() -> Transcript {
        Transcript::default()
    }
    /// Creates an exon interval with default values.
    ///
    /// # Examples
    ///
    /// ```
    /// let exon = Feature::exon();
    ///
    /// assert_eq!(exon.start(), 0);
    /// assert_eq!(exon.end(), 0);
    /// assert_eq!(exon.name(), None);
    /// ```
    pub fn exon() -> Exon {
        Exon::default()
    }
}
impl_ninterval!(Feature);
/// Exon annotation.
#[derive(Debug)]
pub struct Exon {
    // Genomic coordinates of the exon (half-open).
    interval: Interval<u64>,
    // Optional exon identifier.
    name: Option<String>,
}
impl Default for Exon {
    /// An unnamed, zero-length exon at coordinate 0.
    fn default() -> Exon {
        Exon { interval: Interval::new(0..0).unwrap(), name: None }
    }
}
impl_ninterval!(Exon);
/// Transcript annotation.
#[derive(Debug)]
pub struct Transcript {
    // Genomic coordinates of the whole transcript (half-open).
    interval: Interval<u64>,
    // Optional transcript identifier.
    name: Option<String>,
    // Strand the transcript lies on; `Unknown` by default.
    strand: Strand,
    // Coding-region boundaries; `None` for non-coding transcripts.
    cds_start: Option<u64>,
    cds_end: Option<u64>,
    // Exons contained in this transcript, in insertion order.
    exons: Vec<Exon>,
}
impl Default for Transcript {
    /// An unnamed, zero-length, strand-unknown transcript with no exons
    /// and no coding region.
    fn default() -> Transcript {
        Transcript {
            interval: Interval::new(0..0).unwrap(),
            name: None,
            strand: Strand::Unknown,
            cds_start: None,
            cds_end: None,
            exons: Vec::new(),
        }
    }
}
impl_ninterval!(Transcript);
impl Transcript {
    /// The strand on which the transcript lies.
    pub fn strand(&self) -> &Strand {
        &self.strand
    }
    /// Builder-style strand setter.
    pub fn with_strand(mut self, strand: Strand) -> Self {
        self.strand = strand;
        self
    }
    /// Start of the coding region, if any.
    pub fn cds_start(&self) -> Option<u64> {
        self.cds_start
    }
    /// Builder-style setter for the coding-region start.
    pub fn with_cds_start(mut self, cds_start: u64) -> Self {
        self.cds_start = Some(cds_start);
        self
    }
    /// End of the coding region, if any.
    pub fn cds_end(&self) -> Option<u64> {
        self.cds_end
    }
    /// Builder-style setter for the coding-region end.
    pub fn with_cds_end(mut self, cds_end: u64) -> Self {
        self.cds_end = Some(cds_end);
        self
    }
    /// The exons contained in this transcript.
    pub fn exons(&self) -> &Vec<Exon> {
        &self.exons
    }
    /// Builder-style setter replacing all exons at once.
    pub fn with_exons(mut self, exons: Vec<Exon>) -> Self {
        self.exons = exons;
        self
    }
    /// Appends a single exon.
    pub fn insert_exon(&mut self, exon: Exon) {
        self.exons.push(exon);
    }
}
/// Gene annotation.
#[derive(Debug)]
pub struct Gene {
    // Genomic coordinates of the whole gene (half-open).
    interval: Interval<u64>,
    // Optional gene identifier.
    name: Option<String>,
    // Transcripts belonging to this gene, keyed by string id.
    // NOTE(review): the key's relation to `Transcript::name` is not
    // established anywhere in this file — confirm with callers.
    transcripts: HashMap<String, Transcript>,
}
impl Default for Gene {
    /// An unnamed, zero-length gene with no transcripts.
    fn default() -> Gene {
        Gene { interval: Interval::new(0..0).unwrap(),
               name: None,
               transcripts: HashMap::new(),
        }
    }
}
impl_ninterval!(Gene);
impl Gene {
    /// The transcripts belonging to this gene, keyed by string id.
    pub fn transcripts(&self) -> &HashMap<String, Transcript> {
        &self.transcripts
    }
}
#[cfg(test)]
mod test_feature {
    use super::*;
    #[test]
    fn default() {
        let fx = Feature::default();
        assert_eq!(fx.start(), 0);
        assert_eq!(fx.end(), 0);
        assert_eq!(fx.name(), None);
    }
    #[test]
    fn with_name() {
        let fx1 = Feature::default()
            .with_name("fx1");
        assert_eq!(fx1.start(), 0);
        assert_eq!(fx1.end(), 0);
        assert_eq!(fx1.name(), Some("fx1"));
        let fx2 = Feature::default()
            .with_name("fx2".to_owned());
        assert_eq!(fx2.start(), 0);
        assert_eq!(fx2.end(), 0);
        assert_eq!(fx2.name(), Some("fx2"));
    }
    #[test]
    fn with_coords() {
        let fxm = Feature::default()
            .with_coords(1, 3);
        assert!(fxm.is_ok());
        let fx = fxm.unwrap();
        assert_eq!(fx.start(), 1);
        assert_eq!(fx.end(), 3);
        assert_eq!(fx.name(), None);
    }
    #[test]
    fn with_coords_err() {
        let fxm = Feature::default()
            .with_coords(3, 1);
        assert!(fxm.is_err());
    }
    #[test]
    fn with_multiples() {
        let fxm = Feature::default()
            .with_coords(20, 30)
            .map(|f| f.with_name("fx"));
        assert!(fxm.is_ok());
        let fx = fxm.unwrap();
        assert_eq!(fx.start(), 20);
        assert_eq!(fx.end(), 30);
        assert_eq!(fx.name(), Some("fx"));
    }
    /// Helper: a nameless feature spanning `start..end`.
    fn make_feature(start: u64, end: u64) -> Feature {
        Feature::default().with_coords(start, end).unwrap()
    }
    #[test]
    fn span() {
        let fx = make_feature(0, 15);
        assert_eq!(fx.span(), 15);
    }
    #[test]
    fn overlaps() {
        let fx1 = make_feature(100, 115);
        let fx2 = make_feature(110, 120);
        assert!(fx1.overlaps(&fx2));
        // BUGFIX: this line previously repeated `fx1.overlaps(&fx2)`,
        // so the symmetric case was never exercised.
        assert!(fx2.overlaps(&fx1));
        let fx3 = make_feature(115, 120);
        assert!(!fx1.overlaps(&fx3));
        assert!(!fx3.overlaps(&fx1));
        let fx4 = make_feature(90, 100);
        assert!(!fx1.overlaps(&fx4));
        assert!(!fx4.overlaps(&fx1));
        let fx5 = make_feature(200, 300);
        assert!(!fx1.overlaps(&fx5));
        assert!(!fx5.overlaps(&fx1));
    }
    #[test]
    fn envelops() {
        let fx1 = make_feature(100, 120);
        let fx2 = make_feature(105, 115);
        assert!(fx1.envelops(&fx2));
        assert!(!fx2.envelops(&fx1));
        let fx3 = make_feature(100, 105);
        assert!(fx1.envelops(&fx3));
        assert!(!fx3.envelops(&fx1));
        let fx4 = make_feature(115, 120);
        assert!(fx1.envelops(&fx4));
        assert!(!fx4.envelops(&fx1));
        let fx5 = make_feature(90, 105);
        assert!(!fx1.envelops(&fx5));
        assert!(!fx5.envelops(&fx1));
        let fx6 = make_feature(115, 130);
        // BUGFIX: this line previously re-tested `fx5` instead of `fx6`.
        assert!(!fx1.envelops(&fx6));
        assert!(!fx6.envelops(&fx1));
        let fx7 = make_feature(80, 90);
        assert!(!fx1.envelops(&fx7));
        assert!(!fx7.envelops(&fx1));
    }
    #[test]
    fn is_adjacent() {
        let fx1 = make_feature(100, 120);
        let fx2 = make_feature(90, 100);
        assert!(fx1.is_adjacent(&fx2));
        assert!(fx2.is_adjacent(&fx1));
        let fx3 = make_feature(120, 130);
        assert!(fx1.is_adjacent(&fx3));
        assert!(fx3.is_adjacent(&fx1));
        let fx4 = make_feature(90, 99);
        assert!(!fx1.is_adjacent(&fx4));
        assert!(!fx4.is_adjacent(&fx1));
        let fx5 = make_feature(119, 130);
        assert!(!fx1.is_adjacent(&fx5));
        assert!(!fx5.is_adjacent(&fx1));
        let fx6 = make_feature(100, 110);
        assert!(!fx1.is_adjacent(&fx6));
        assert!(!fx6.is_adjacent(&fx1));
        let fx7 = make_feature(110, 120);
        assert!(!fx1.is_adjacent(&fx7));
        assert!(!fx7.is_adjacent(&fx1));
    }
}
#[cfg(test)]
mod test_exon {
    use super::*;
    /// A freshly-constructed exon is unnamed and zero-length.
    #[test]
    fn default() {
        let ex = Feature::exon();
        assert_eq!(ex.name(), None);
        assert_eq!(ex.start(), 0);
        assert_eq!(ex.end(), 0);
    }
}
#[cfg(test)]
mod test_transcript {
    use super::*;
    /// Builds a named exon covering `start..end`.
    fn make_exon<T: Into<String>>(start: u64, end: u64, name: T) -> Exon {
        Feature::exon()
            .with_name(name)
            .with_coords(start, end)
            .unwrap()
    }
    /// A fresh transcript is empty, unnamed, non-coding, strand-unknown.
    #[test]
    fn default() {
        let trx = Feature::transcript();
        assert_eq!(trx.name(), None);
        assert_eq!(trx.start(), 0);
        assert_eq!(trx.end(), 0);
        assert_eq!(trx.strand(), &Strand::Unknown);
        assert_eq!(trx.cds_start(), None);
        assert_eq!(trx.cds_end(), None);
        assert!(trx.exons().is_empty());
    }
    #[test]
    fn with_strand() {
        let fwd = Feature::transcript().with_strand(Strand::Forward);
        assert_eq!(fwd.strand(), &Strand::Forward);
    }
    #[test]
    fn with_cds_start() {
        let trx = Feature::transcript().with_cds_start(20);
        assert_eq!(trx.cds_start(), Some(20));
    }
    #[test]
    fn with_cds_end() {
        let trx = Feature::transcript().with_cds_end(40);
        assert_eq!(trx.cds_end(), Some(40));
    }
    #[test]
    fn with_exons() {
        let exons = vec![
            make_exon(1, 2, "ex1"),
            make_exon(10, 20, "ex2"),
            make_exon(100, 200, "ex3"),
        ];
        let trx = Feature::transcript().with_exons(exons);
        assert_eq!(trx.exons().len(), 3);
    }
    #[test]
    fn insert_exon() {
        let mut trx = Feature::transcript();
        assert!(trx.exons().is_empty());
        trx.insert_exon(make_exon(1, 2, "ex"));
        assert_eq!(trx.exons().len(), 1);
    }
}
#[cfg(test)]
mod test_gene {
    use super::*;
    /// A freshly-constructed gene is unnamed, zero-length, and empty.
    #[test]
    fn default() {
        let gene = Feature::gene();
        assert_eq!(gene.name(), None);
        assert_eq!(gene.start(), 0);
        assert_eq!(gene.end(), 0);
        assert!(gene.transcripts().is_empty());
    }
}
|
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use parking_lot::{Condvar, Mutex};
use super::*;
/// Lifecycle of the background flusher thread.
#[derive(Debug, Clone, Copy)]
pub(crate) enum ShutdownState {
    /// The flusher thread is operating normally.
    Running,
    /// Shutdown has been requested but not yet acknowledged.
    ShuttingDown,
    /// The flusher thread has exited its loop.
    ShutDown,
}
impl ShutdownState {
    /// Whether the flusher is still in its normal operating state.
    // `matches!` replaces the `if let … { true } else { false }` pattern
    // (clippy::match_like_matches_macro).
    fn is_running(self) -> bool {
        matches!(self, ShutdownState::Running)
    }
    /// Whether the flusher thread has fully shut down.
    fn is_shutdown(self) -> bool {
        matches!(self, ShutdownState::ShutDown)
    }
}
/// Handle to a background thread that periodically flushes a pagecache.
///
/// Dropping the `Flusher` signals the thread to shut down and joins it.
#[derive(Debug)]
pub(crate) struct Flusher {
    // Shared shutdown state; guarded by a mutex so state transitions are
    // linearized with the condvar notifications.
    shutdown: Arc<Mutex<ShutdownState>>,
    // Condvar used both to wake the flusher early and to signal back
    // that shutdown has completed.
    sc: Arc<Condvar>,
    // Join handle of the spawned thread; taken (and joined) on drop.
    join_handle: Mutex<Option<std::thread::JoinHandle<()>>>,
}
impl Flusher {
    /// Spawns a named thread that periodically flushes `pagecache` (and
    /// runs related maintenance) every `flush_every_ms` milliseconds until
    /// the returned `Flusher` is dropped.
    pub(crate) fn new(
        name: String,
        pagecache: Arc<PageCache>,
        flush_every_ms: u64,
    ) -> Self {
        #[allow(clippy::mutex_atomic)] // mutex used in CondVar below
        let shutdown = Arc::new(Mutex::new(ShutdownState::Running));
        let sc = Arc::new(Condvar::new());
        // Clones of the shared state are moved into the worker closure;
        // the originals are retained in the returned handle.
        let join_handle = thread::Builder::new()
            .name(name)
            .spawn({
                let shutdown = shutdown.clone();
                let sc = sc.clone();
                move || run(&shutdown, &sc, &pagecache, flush_every_ms)
            })
            .unwrap();
        Self { shutdown, sc, join_handle: Mutex::new(Some(join_handle)) }
    }
}
/// Body of the flusher thread.
///
/// Loops until shutdown: flush the pagecache, opportunistically GC for at
/// most half of the period, fsync the file, then wait on the condvar for
/// the remainder of the period (or until woken by `Drop`). On exit — and
/// on any flush/GC error — it publishes `ShutDown` and notifies waiters.
fn run(
    shutdown: &Arc<Mutex<ShutdownState>>,
    sc: &Arc<Condvar>,
    pagecache: &Arc<PageCache>,
    flush_every_ms: u64,
) {
    let flush_every = Duration::from_millis(flush_every_ms);
    // The guard is held for the whole loop; `wait_for` releases it while
    // sleeping. Holding it otherwise linearizes state changes with the
    // condvar notifications.
    let mut shutdown = shutdown.lock();
    let mut wrote_data = false;
    // Keep looping after a shutdown request for as long as the previous
    // iteration still wrote data, so buffered writes are drained.
    while shutdown.is_running() || wrote_data {
        let before = std::time::Instant::now();
        match pagecache.flush() {
            Ok(0) => {
                // Nothing left to write; safe to stop if shutting down.
                wrote_data = false;
                if !shutdown.is_running() {
                    break;
                }
            }
            Ok(_) => {
                wrote_data = true;
                // at some point, we may want to
                // put adaptive logic here to tune
                // sleeps based on how much work
                // we accomplished
            }
            Err(e) => {
                error!("failed to flush from periodic flush thread: {}", e);
                #[cfg(feature = "failpoints")]
                pagecache.set_failpoint(e);
                *shutdown = ShutdownState::ShutDown;
                // having held the mutex makes this linearized
                // with the notify below.
                drop(shutdown);
                let _notified = sc.notify_all();
                return;
            }
        }
        // so we can spend a little effort
        // cleaning up the segments. try not to
        // spend more than half of our sleep
        // time rewriting pages though.
        //
        // this looks weird because it's a rust-style do-while
        // where the conditional is the full body
        while {
            let made_progress = match pagecache.attempt_gc() {
                Err(e) => {
                    error!(
                        "failed to clean file from periodic flush thread: {}",
                        e
                    );
                    #[cfg(feature = "failpoints")]
                    pagecache.set_failpoint(e);
                    *shutdown = ShutdownState::ShutDown;
                    // having held the mutex makes this linearized
                    // with the notify below.
                    drop(shutdown);
                    let _notified = sc.notify_all();
                    return;
                }
                Ok(false) => false,
                Ok(true) => true,
            };
            made_progress
                && shutdown.is_running()
                && before.elapsed() < flush_every / 2
        } {}
        if let Err(e) = pagecache.config.file.sync_all() {
            error!("failed to fsync from periodic flush thread: {}", e);
        }
        // Sleep for whatever remains of the period; at least 1ms so the
        // wait never gets a zero timeout.
        let sleep_duration = flush_every
            .checked_sub(before.elapsed())
            .unwrap_or_else(|| Duration::from_millis(1));
        let _ = sc.wait_for(&mut shutdown, sleep_duration);
    }
    *shutdown = ShutdownState::ShutDown;
    // having held the mutex makes this linearized
    // with the notify below.
    drop(shutdown);
    let _notified = sc.notify_all();
}
impl Drop for Flusher {
    fn drop(&mut self) {
        // Request shutdown and wake the flusher if it is sleeping.
        let mut shutdown = self.shutdown.lock();
        if shutdown.is_running() {
            *shutdown = ShutdownState::ShuttingDown;
            let _notified = self.sc.notify_all();
        }
        // Wait (re-checking every 100ms) until the thread acknowledges by
        // publishing `ShutDown`; `wait_for` releases the lock while waiting.
        while !shutdown.is_shutdown() {
            let _ = self.sc.wait_for(&mut shutdown, Duration::from_millis(100));
        }
        // The thread has exited its loop, so joining cannot block long.
        let mut join_handle_opt = self.join_handle.lock();
        if let Some(join_handle) = join_handle_opt.take() {
            if let Err(e) = join_handle.join() {
                error!("error joining Periodic thread: {:?}", e);
            }
        }
    }
}
flush data more aggressively during shutdown
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use parking_lot::{Condvar, Mutex};
use super::*;
/// Lifecycle of the background flusher thread.
#[derive(Debug, Clone, Copy)]
pub(crate) enum ShutdownState {
    /// The flusher thread is operating normally.
    Running,
    /// Shutdown has been requested but not yet acknowledged.
    ShuttingDown,
    /// The flusher thread has exited its loop.
    ShutDown,
}
impl ShutdownState {
    /// Whether the flusher is still in its normal operating state.
    // `matches!` replaces the `if let … { true } else { false }` pattern
    // (clippy::match_like_matches_macro).
    fn is_running(self) -> bool {
        matches!(self, ShutdownState::Running)
    }
    /// Whether the flusher thread has fully shut down.
    fn is_shutdown(self) -> bool {
        matches!(self, ShutdownState::ShutDown)
    }
}
/// Handle to a background thread that periodically flushes a pagecache.
///
/// Dropping the `Flusher` signals the thread to shut down and joins it.
#[derive(Debug)]
pub(crate) struct Flusher {
    // Shared shutdown state; guarded by a mutex so state transitions are
    // linearized with the condvar notifications.
    shutdown: Arc<Mutex<ShutdownState>>,
    // Condvar used both to wake the flusher early and to signal back
    // that shutdown has completed.
    sc: Arc<Condvar>,
    // Join handle of the spawned thread; taken (and joined) on drop.
    join_handle: Mutex<Option<std::thread::JoinHandle<()>>>,
}
impl Flusher {
    /// Spawns a named thread that periodically flushes `pagecache` (and
    /// runs related maintenance) every `flush_every_ms` milliseconds until
    /// the returned `Flusher` is dropped.
    pub(crate) fn new(
        name: String,
        pagecache: Arc<PageCache>,
        flush_every_ms: u64,
    ) -> Self {
        #[allow(clippy::mutex_atomic)] // mutex used in CondVar below
        let shutdown = Arc::new(Mutex::new(ShutdownState::Running));
        let sc = Arc::new(Condvar::new());
        // Clones of the shared state are moved into the worker closure;
        // the originals are retained in the returned handle.
        let join_handle = thread::Builder::new()
            .name(name)
            .spawn({
                let shutdown = shutdown.clone();
                let sc = sc.clone();
                move || run(&shutdown, &sc, &pagecache, flush_every_ms)
            })
            .unwrap();
        Self { shutdown, sc, join_handle: Mutex::new(Some(join_handle)) }
    }
}
/// Body of the flusher thread.
///
/// Loops until shutdown: flush the pagecache, opportunistically GC for at
/// most half of the period, fsync the file, then wait on the condvar for
/// the remainder of the period. During shutdown it skips the sleep and
/// loops immediately so buffered data drains quickly. On exit — and on
/// any flush/GC error — it publishes `ShutDown` and notifies waiters.
fn run(
    shutdown: &Arc<Mutex<ShutdownState>>,
    sc: &Arc<Condvar>,
    pagecache: &Arc<PageCache>,
    flush_every_ms: u64,
) {
    let flush_every = Duration::from_millis(flush_every_ms);
    // The guard is held for the whole loop; `wait_for` releases it while
    // sleeping. Holding it otherwise linearizes state changes with the
    // condvar notifications.
    let mut shutdown = shutdown.lock();
    let mut wrote_data = false;
    // Keep looping after a shutdown request for as long as the previous
    // iteration still wrote data, so buffered writes are drained.
    while shutdown.is_running() || wrote_data {
        let before = std::time::Instant::now();
        match pagecache.flush() {
            Ok(0) => {
                wrote_data = false;
                if !shutdown.is_running() {
                    break;
                }
            }
            Ok(_) => {
                wrote_data = true;
                if !shutdown.is_running() {
                    // loop right away if we're in
                    // shutdown mode, to flush data
                    // more quickly.
                    continue;
                }
            }
            Err(e) => {
                error!("failed to flush from periodic flush thread: {}", e);
                #[cfg(feature = "failpoints")]
                pagecache.set_failpoint(e);
                *shutdown = ShutdownState::ShutDown;
                // having held the mutex makes this linearized
                // with the notify below.
                drop(shutdown);
                let _notified = sc.notify_all();
                return;
            }
        }
        // so we can spend a little effort
        // cleaning up the segments. try not to
        // spend more than half of our sleep
        // time rewriting pages though.
        //
        // this looks weird because it's a rust-style do-while
        // where the conditional is the full body
        while {
            let made_progress = match pagecache.attempt_gc() {
                Err(e) => {
                    error!(
                        "failed to clean file from periodic flush thread: {}",
                        e
                    );
                    #[cfg(feature = "failpoints")]
                    pagecache.set_failpoint(e);
                    *shutdown = ShutdownState::ShutDown;
                    // having held the mutex makes this linearized
                    // with the notify below.
                    drop(shutdown);
                    let _notified = sc.notify_all();
                    return;
                }
                Ok(false) => false,
                Ok(true) => true,
            };
            made_progress
                && shutdown.is_running()
                && before.elapsed() < flush_every / 2
        } {}
        if let Err(e) = pagecache.config.file.sync_all() {
            error!("failed to fsync from periodic flush thread: {}", e);
        }
        // Sleep for whatever remains of the period; at least 1ms so the
        // wait never gets a zero timeout.
        let sleep_duration = flush_every
            .checked_sub(before.elapsed())
            .unwrap_or_else(|| Duration::from_millis(1));
        if shutdown.is_running() {
            // only sleep if we have not yet entered the
            // shutdown state yet.
            // CONSISTENCY FIX: explicitly discard the WaitTimeoutResult,
            // matching the other `wait_for` call sites in this file and
            // silencing the unused-result warning.
            let _ = sc.wait_for(&mut shutdown, sleep_duration);
        }
    }
    *shutdown = ShutdownState::ShutDown;
    // having held the mutex makes this linearized
    // with the notify below.
    drop(shutdown);
    let _notified = sc.notify_all();
}
impl Drop for Flusher {
    fn drop(&mut self) {
        // Request shutdown and wake the flusher if it is sleeping.
        let mut shutdown = self.shutdown.lock();
        if shutdown.is_running() {
            *shutdown = ShutdownState::ShuttingDown;
            let _notified = self.sc.notify_all();
        }
        // Wait (re-checking every 100ms) until the thread acknowledges by
        // publishing `ShutDown`; `wait_for` releases the lock while waiting.
        while !shutdown.is_shutdown() {
            let _ = self.sc.wait_for(&mut shutdown, Duration::from_millis(100));
        }
        // The thread has exited its loop, so joining cannot block long.
        let mut join_handle_opt = self.join_handle.lock();
        if let Some(join_handle) = join_handle_opt.take() {
            if let Err(e) = join_handle.join() {
                error!("error joining Periodic thread: {:?}", e);
            }
        }
    }
}
|
use rustc::ty;
use rustc::ty::layout::{Align, LayoutOf, Size};
use rustc::hir::def_id::DefId;
use rustc::mir;
use syntax::attr;
use crate::*;
// Blanket impl: makes every extension method below available on
// `MiriEvalContext` via the `MiriEvalContextExt` supertrait.
impl<'a, 'mir, 'tcx> EvalContextExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a + 'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
    /// Decides how to execute a call to `instance`.
    ///
    /// Returns `Ok(Some(mir))` when the caller should run the given MIR
    /// body, or `Ok(None)` when the call has been fully handled here
    /// (CTFE hook, `align_offset` lang item, or emulated foreign item)
    /// and control flow was already redirected via `goto_block`.
    fn find_fn(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Borrow>],
        dest: Option<PlaceTy<'tcx, Borrow>>,
        ret: Option<mir::BasicBlock>,
    ) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> {
        let this = self.eval_context_mut();
        trace!("eval_fn_call: {:#?}, {:?}", instance, dest.map(|place| *place));
        // First, run the common hooks also supported by CTFE.
        if this.hook_fn(instance, args, dest)? {
            this.goto_block(ret)?;
            return Ok(None);
        }
        // There are some more lang items we want to hook that CTFE does not hook (yet).
        if this.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
            // FIXME: return a real value in case the target allocation has an
            // alignment bigger than the one requested.
            // Writing `u128::max_value()` truncated to the destination's
            // width signals "alignment cannot be reached".
            let n = u128::max_value();
            let dest = dest.unwrap();
            let n = this.truncate(n, dest.layout);
            this.write_scalar(Scalar::from_uint(n, dest.layout.size), dest)?;
            this.goto_block(ret)?;
            return Ok(None);
        }
        // Try to see if we can do something about foreign items.
        if this.tcx.is_foreign_item(instance.def_id()) {
            // An external function that we cannot find MIR for, but we can still run enough
            // of them to make miri viable.
            this.emulate_foreign_item(instance.def_id(), args, dest, ret)?;
            // `goto_block` already handled.
            return Ok(None);
        }
        // Otherwise, load the MIR.
        Ok(Some(this.load_mir(instance.def)?))
    }
/// Emulates calling a foreign item, failing if the item is not supported.
/// This function will handle `goto_block` if needed.
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx> {
let this = self.eval_context_mut();
let attrs = this.tcx.get_attrs(def_id);
let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
Some(name) => name.as_str().get(),
None => this.tcx.item_name(def_id).as_str().get(),
};
// Strip linker suffixes (seen on 32-bit macOS).
let link_name = link_name.trim_end_matches("$UNIX2003");
let tcx = &{this.tcx.tcx};
// First: functions that could diverge.
match link_name {
"__rust_start_panic" | "panic_impl" => {
return err!(MachineError("the evaluated program panicked".to_string()));
}
_ => if dest.is_none() {
return err!(Unimplemented(
format!("can't call diverging foreign function: {}", link_name),
));
}
}
// Next: functions that assume a ret and dest.
let dest = dest.expect("we already checked for a dest");
let ret = ret.expect("dest is `Some` but ret is `None`");
match link_name {
"malloc" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
if size == 0 {
this.write_null(dest)?;
} else {
let align = this.tcx.data_layout.pointer_align.abi;
let ptr = this.memory_mut().allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into());
this.write_scalar(Scalar::Ptr(ptr.with_default_tag()), dest)?;
}
}
"calloc" => {
let items = this.read_scalar(args[0])?.to_usize(this)?;
let count = this.read_scalar(args[1])?.to_usize(this)?;
let size = if let Some(size) = items.checked_add(count) {
size
} else {
return err!(MachineError(format!(
"calloc: overflow of items * count: {} * {}",
items, count,
)));
};
if size == 0 {
this.write_null(dest)?;
} else {
let align = this.tcx.data_layout.pointer_align.abi;
let ptr = this.memory_mut()
.allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into())
.with_default_tag();
this.memory_mut()
.get_mut(ptr.alloc_id)?
.write_repeat(tcx, ptr, 0, Size::from_bytes(size))?;
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
}
"posix_memalign" => {
let ret = this.deref_operand(args[0])?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
let size = this.read_scalar(args[2])?.to_usize(this)?;
// Align must be power of 2, and also at least ptr-sized (POSIX rules).
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
if align < this.pointer_size().bytes() {
return err!(MachineError(format!(
"posix_memalign: alignment must be at least the size of a pointer, but is {}",
align,
)));
}
if size == 0 {
this.write_null(ret.into())?;
} else {
let ptr = this.memory_mut().allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::C.into()
);
this.write_scalar(Scalar::Ptr(ptr.with_default_tag()), ret.into())?;
}
this.write_null(dest)?;
}
"free" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
if !ptr.is_null_ptr(this) {
this.memory_mut().deallocate(
ptr.to_ptr()?,
None,
MiriMemoryKind::C.into(),
)?;
}
}
"__rust_alloc" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = this.memory_mut()
.allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
)
.with_default_tag();
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_alloc_zeroed" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = this.memory_mut()
.allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
)
.with_default_tag();
this.memory_mut()
.get_mut(ptr.alloc_id)?
.write_repeat(tcx, ptr, 0, Size::from_bytes(size))?;
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_dealloc" => {
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let old_size = this.read_scalar(args[1])?.to_usize(this)?;
let align = this.read_scalar(args[2])?.to_usize(this)?;
if old_size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
this.memory_mut().deallocate(
ptr,
Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
MiriMemoryKind::Rust.into(),
)?;
}
"__rust_realloc" => {
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let old_size = this.read_scalar(args[1])?.to_usize(this)?;
let align = this.read_scalar(args[2])?.to_usize(this)?;
let new_size = this.read_scalar(args[3])?.to_usize(this)?;
if old_size == 0 || new_size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let new_ptr = this.memory_mut().reallocate(
ptr,
Size::from_bytes(old_size),
Align::from_bytes(align).unwrap(),
Size::from_bytes(new_size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into(),
)?;
this.write_scalar(Scalar::Ptr(new_ptr.with_default_tag()), dest)?;
}
"syscall" => {
// TODO: read `syscall` IDs like `sysconf` IDs and
// figure out some way to actually process some of them.
//
// `libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)`
// is called if a `HashMap` is created the regular way.
match this.read_scalar(args[0])?.to_usize(this)? {
318 | 511 => {
return err!(Unimplemented(
"miri does not support random number generators".to_owned(),
))
}
id => {
return err!(Unimplemented(
format!("miri does not support syscall ID {}", id),
))
}
}
}
"dlsym" => {
let _handle = this.read_scalar(args[0])?;
let symbol = this.read_scalar(args[1])?.to_ptr()?;
let symbol_name = this.memory().get(symbol.alloc_id)?.read_c_str(tcx, symbol)?;
let err = format!("bad c unicode symbol: {:?}", symbol_name);
let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
return err!(Unimplemented(format!(
"miri does not support dynamically loading libraries (requested symbol: {})",
symbol_name
)));
}
"__rust_maybe_catch_panic" => {
// fn __rust_maybe_catch_panic(
// f: fn(*mut u8),
// data: *mut u8,
// data_ptr: *mut usize,
// vtable_ptr: *mut usize,
// ) -> u32
// We abort on panic, so not much is going on here, but we still have to call the closure.
let f = this.read_scalar(args[0])?.to_ptr()?;
let data = this.read_scalar(args[1])?.not_undef()?;
let f_instance = this.memory().get_fn(f)?;
this.write_null(dest)?;
trace!("__rust_maybe_catch_panic: {:?}", f_instance);
// Now we make a function call.
// TODO: consider making this reusable? `InterpretCx::step` does something similar
// for the TLS destructors, and of course `eval_main`.
let mir = this.load_mir(f_instance.def)?;
let ret_place = MPlaceTy::dangling(this.layout_of(this.tcx.mk_unit())?, this).into();
this.push_stack_frame(
f_instance,
mir.span,
mir,
Some(ret_place),
// Directly return to caller.
StackPopCleanup::Goto(Some(ret)),
)?;
let mut args = this.frame().mir.args_iter();
let arg_local = args.next().ok_or_else(||
InterpError::AbiViolation(
"Argument to __rust_maybe_catch_panic does not take enough arguments."
.to_owned(),
),
)?;
let arg_dest = this.eval_place(&mir::Place::Base(mir::PlaceBase::Local(arg_local)))?;
this.write_scalar(data, arg_dest)?;
assert!(args.next().is_none(), "__rust_maybe_catch_panic argument has more arguments than expected");
// We ourselves will return `0`, eventually (because we will not return if we paniced).
this.write_null(dest)?;
// Don't fall through, we do *not* want to `goto_block`!
return Ok(());
}
"memcmp" => {
let left = this.read_scalar(args[0])?.not_undef()?;
let right = this.read_scalar(args[1])?.not_undef()?;
let n = Size::from_bytes(this.read_scalar(args[2])?.to_usize(this)?);
let result = {
let left_bytes = this.memory().read_bytes(left, n)?;
let right_bytes = this.memory().read_bytes(right, n)?;
use std::cmp::Ordering::*;
match left_bytes.cmp(right_bytes) {
Less => -1i32,
Equal => 0,
Greater => 1,
}
};
this.write_scalar(
Scalar::from_int(result, Size::from_bits(32)),
dest,
)?;
}
"memrchr" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
let val = this.read_scalar(args[1])?.to_i32()? as u8;
let num = this.read_scalar(args[2])?.to_usize(this)?;
if let Some(idx) = this.memory().read_bytes(ptr, Size::from_bytes(num))?
.iter().rev().position(|&c| c == val)
{
let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), this)?;
this.write_scalar(new_ptr, dest)?;
} else {
this.write_null(dest)?;
}
}
"memchr" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
let val = this.read_scalar(args[1])?.to_i32()? as u8;
let num = this.read_scalar(args[2])?.to_usize(this)?;
let idx = this
.memory()
.read_bytes(ptr, Size::from_bytes(num))?
.iter()
.position(|&c| c == val);
if let Some(idx) = idx {
let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), this)?;
this.write_scalar(new_ptr, dest)?;
} else {
this.write_null(dest)?;
}
}
"getenv" => {
let result = {
let name_ptr = this.read_scalar(args[0])?.to_ptr()?;
let name = this.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
match this.machine.env_vars.get(name) {
Some(&var) => Scalar::Ptr(var),
None => Scalar::ptr_null(&*this.tcx),
}
};
this.write_scalar(result, dest)?;
}
"unsetenv" => {
let mut success = None;
{
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
if !name_ptr.is_null_ptr(this) {
let name_ptr = name_ptr.to_ptr()?;
let name = this
.memory()
.get(name_ptr.alloc_id)?
.read_c_str(tcx, name_ptr)?
.to_owned();
if !name.is_empty() && !name.contains(&b'=') {
success = Some(this.machine.env_vars.remove(&name));
}
}
}
if let Some(old) = success {
if let Some(var) = old {
this.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
}
this.write_null(dest)?;
} else {
this.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"setenv" => {
let mut new = None;
{
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
let value_ptr = this.read_scalar(args[1])?.to_ptr()?;
let value = this.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?;
if !name_ptr.is_null_ptr(this) {
let name_ptr = name_ptr.to_ptr()?;
let name = this.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
if !name.is_empty() && !name.contains(&b'=') {
new = Some((name.to_owned(), value.to_owned()));
}
}
}
if let Some((name, value)) = new {
// `+1` for the null terminator.
let value_copy = this.memory_mut().allocate(
Size::from_bytes((value.len() + 1) as u64),
Align::from_bytes(1).unwrap(),
MiriMemoryKind::Env.into(),
).with_default_tag();
{
let alloc = this.memory_mut().get_mut(value_copy.alloc_id)?;
alloc.write_bytes(tcx, value_copy, &value)?;
let trailing_zero_ptr = value_copy.offset(
Size::from_bytes(value.len() as u64),
tcx,
)?;
alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?;
}
if let Some(var) = this.machine.env_vars.insert(
name.to_owned(),
value_copy,
)
{
this.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
}
this.write_null(dest)?;
} else {
this.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"write" => {
let fd = this.read_scalar(args[0])?.to_i32()?;
let buf = this.read_scalar(args[1])?.not_undef()?;
let n = this.read_scalar(args[2])?.to_usize(&*this.tcx)?;
trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
let result = if fd == 1 || fd == 2 {
// stdout/stderr
use std::io::{self, Write};
let buf_cont = this.memory().read_bytes(buf, Size::from_bytes(n))?;
// We need to flush to make sure this actually appears on the screen
let res = if fd == 1 {
// Stdout is buffered, flush to make sure it appears on the screen.
// This is the write() syscall of the interpreted program, we want it
// to correspond to a write() syscall on the host -- there is no good
// in adding extra buffering here.
let res = io::stdout().write(buf_cont);
io::stdout().flush().unwrap();
res
} else {
// No need to flush, stderr is not buffered.
io::stderr().write(buf_cont)
};
match res {
Ok(n) => n as i64,
Err(_) => -1,
}
} else {
eprintln!("Miri: Ignored output to FD {}", fd);
// Pretend it all went well.
n as i64
};
// Now, `result` is the value we return back to the program.
this.write_scalar(
Scalar::from_int(result, dest.layout.size),
dest,
)?;
}
"strlen" => {
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let n = this.memory().get(ptr.alloc_id)?.read_c_str(tcx, ptr)?.len();
this.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
}
// Some things needed for `sys::thread` initialization to go through.
"signal" | "sigaction" | "sigaltstack" => {
this.write_scalar(Scalar::from_int(0, dest.layout.size), dest)?;
}
"sysconf" => {
let name = this.read_scalar(args[0])?.to_i32()?;
trace!("sysconf() called with name {}", name);
// Cache the sysconf integers via Miri's global cache.
let paths = &[
(&["libc", "_SC_PAGESIZE"], Scalar::from_int(4096, dest.layout.size)),
(&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, dest.layout.size)),
(&["libc", "_SC_NPROCESSORS_ONLN"], Scalar::from_int(1, dest.layout.size)),
];
let mut result = None;
for &(path, path_value) in paths {
if let Ok(instance) = this.resolve_path(path) {
let cid = GlobalId {
instance,
promoted: None,
};
let const_val = this.const_eval_raw(cid)?;
let const_val = this.read_scalar(const_val.into())?;
let value = const_val.to_i32()?;
if value == name {
result = Some(path_value);
break;
}
}
}
if let Some(result) = result {
this.write_scalar(result, dest)?;
} else {
return err!(Unimplemented(
format!("Unimplemented sysconf name: {}", name),
));
}
}
"isatty" => {
this.write_null(dest)?;
}
// Hook pthread calls that go to the thread-local storage memory subsystem.
"pthread_key_create" => {
let key_ptr = this.read_scalar(args[0])?.to_ptr()?;
// Extract the function type out of the signature (that seems easier than constructing it ourselves).
let dtor = match this.read_scalar(args[1])?.not_undef()? {
Scalar::Ptr(dtor_ptr) => Some(this.memory().get_fn(dtor_ptr)?),
Scalar::Bits { bits: 0, size } => {
assert_eq!(size as u64, this.memory().pointer_size().bytes());
None
},
Scalar::Bits { .. } => return err!(ReadBytesAsPointer),
};
// Figure out how large a pthread TLS key actually is.
// This is `libc::pthread_key_t`.
let key_type = args[0].layout.ty
.builtin_deref(true)
.ok_or_else(|| InterpError::AbiViolation("wrong signature used for `pthread_key_create`: first argument must be a raw pointer.".to_owned()))?
.ty;
let key_layout = this.layout_of(key_type)?;
// Create key and write it into the memory where `key_ptr` wants it.
let key = this.machine.tls.create_tls_key(dtor, tcx) as u128;
if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
return err!(OutOfTls);
}
this.memory().check_align(key_ptr.into(), key_layout.align.abi)?;
this.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
tcx,
key_ptr,
Scalar::from_uint(key, key_layout.size).into(),
key_layout.size,
)?;
// Return success (`0`).
this.write_null(dest)?;
}
"pthread_key_delete" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
this.machine.tls.delete_tls_key(key)?;
// Return success (0)
this.write_null(dest)?;
}
"pthread_getspecific" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let ptr = this.machine.tls.load_tls(key)?;
this.write_scalar(ptr, dest)?;
}
"pthread_setspecific" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, new_ptr)?;
// Return success (`0`).
this.write_null(dest)?;
}
// Determine stack base address.
"pthread_attr_init" | "pthread_attr_destroy" | "pthread_attr_get_np" |
"pthread_getattr_np" | "pthread_self" | "pthread_get_stacksize_np" => {
this.write_null(dest)?;
}
"pthread_attr_getstack" => {
// Second argument is where we are supposed to write the stack size.
let ptr = this.deref_operand(args[1])?;
// Just any address.
let stack_addr = Scalar::from_int(0x80000, args[1].layout.size);
this.write_scalar(stack_addr, ptr.into())?;
// Return success (`0`).
this.write_null(dest)?;
}
"pthread_get_stackaddr_np" => {
// Just any address.
let stack_addr = Scalar::from_int(0x80000, dest.layout.size);
this.write_scalar(stack_addr, dest)?;
}
// Stub out calls for condvar, mutex and rwlock, to just return `0`.
"pthread_mutexattr_init" | "pthread_mutexattr_settype" | "pthread_mutex_init" |
"pthread_mutexattr_destroy" | "pthread_mutex_lock" | "pthread_mutex_unlock" |
"pthread_mutex_destroy" | "pthread_rwlock_rdlock" | "pthread_rwlock_unlock" |
"pthread_rwlock_wrlock" | "pthread_rwlock_destroy" | "pthread_condattr_init" |
"pthread_condattr_setclock" | "pthread_cond_init" | "pthread_condattr_destroy" |
"pthread_cond_destroy" => {
this.write_null(dest)?;
}
"mmap" => {
// This is a horrible hack, but since the guard page mechanism calls mmap and expects a particular return value, we just give it that value.
let addr = this.read_scalar(args[0])?.not_undef()?;
this.write_scalar(addr, dest)?;
}
"mprotect" => {
this.write_null(dest)?;
}
// macOS API stubs.
"_tlv_atexit" => {
// FIXME: register the destructor.
},
"_NSGetArgc" => {
this.write_scalar(Scalar::Ptr(this.machine.argc.unwrap()), dest)?;
},
"_NSGetArgv" => {
this.write_scalar(Scalar::Ptr(this.machine.argv.unwrap()), dest)?;
},
// Windows API stubs.
"SetLastError" => {
let err = this.read_scalar(args[0])?.to_u32()?;
this.machine.last_error = err;
}
"GetLastError" => {
this.write_scalar(Scalar::from_uint(this.machine.last_error, Size::from_bits(32)), dest)?;
}
"AddVectoredExceptionHandler" => {
// Any non zero value works for the stdlib. This is just used for stack overflows anyway.
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
},
"InitializeCriticalSection" |
"EnterCriticalSection" |
"LeaveCriticalSection" |
"DeleteCriticalSection" => {
// Nothing to do, not even a return value.
},
"GetModuleHandleW" |
"GetProcAddress" |
"TryEnterCriticalSection" |
"GetConsoleScreenBufferInfo" |
"SetConsoleTextAttribute" => {
// Pretend these do not exist / nothing happened, by returning zero.
this.write_null(dest)?;
},
"GetSystemInfo" => {
let system_info = this.deref_operand(args[0])?;
let system_info_ptr = system_info.ptr.to_ptr()?;
// Initialize with `0`.
this.memory_mut().get_mut(system_info_ptr.alloc_id)?
.write_repeat(tcx, system_info_ptr, 0, system_info.layout.size)?;
// Set number of processors to `1`.
let dword_size = Size::from_bytes(4);
let offset = 2*dword_size + 3*tcx.pointer_size();
this.memory_mut().get_mut(system_info_ptr.alloc_id)?
.write_scalar(
tcx,
system_info_ptr.offset(offset, tcx)?,
Scalar::from_int(1, dword_size).into(),
dword_size,
)?;
}
"TlsAlloc" => {
// This just creates a key; Windows does not natively support TLS destructors.
// Create key and return it.
let key = this.machine.tls.create_tls_key(None, tcx) as u128;
// Figure out how large a TLS key actually is. This is `c::DWORD`.
if dest.layout.size.bits() < 128
&& key >= (1u128 << dest.layout.size.bits() as u128) {
return err!(OutOfTls);
}
this.write_scalar(Scalar::from_uint(key, dest.layout.size), dest)?;
}
"TlsGetValue" => {
let key = this.read_scalar(args[0])?.to_u32()? as u128;
let ptr = this.machine.tls.load_tls(key)?;
this.write_scalar(ptr, dest)?;
}
"TlsSetValue" => {
let key = this.read_scalar(args[0])?.to_u32()? as u128;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, new_ptr)?;
// Return success (`1`).
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
}
"GetStdHandle" => {
let which = this.read_scalar(args[0])?.to_i32()?;
// We just make this the identity function, so we know later in `WriteFile`
// which one it is.
this.write_scalar(Scalar::from_int(which, this.pointer_size()), dest)?;
}
"WriteFile" => {
let handle = this.read_scalar(args[0])?.to_isize(this)?;
let buf = this.read_scalar(args[1])?.not_undef()?;
let n = this.read_scalar(args[2])?.to_u32()?;
let written_place = this.deref_operand(args[3])?;
// Spec says to always write `0` first.
this.write_null(written_place.into())?;
let written = if handle == -11 || handle == -12 {
// stdout/stderr
use std::io::{self, Write};
let buf_cont = this.memory().read_bytes(buf, Size::from_bytes(u64::from(n)))?;
let res = if handle == -11 {
io::stdout().write(buf_cont)
} else {
io::stderr().write(buf_cont)
};
res.ok().map(|n| n as u32)
} else {
eprintln!("Miri: Ignored output to handle {}", handle);
// Pretend it all went well.
Some(n)
};
// If there was no error, write back how much was written.
if let Some(n) = written {
this.write_scalar(Scalar::from_uint(n, Size::from_bits(32)), written_place.into())?;
}
// Return whether this was a success.
this.write_scalar(
Scalar::from_int(if written.is_some() { 1 } else { 0 }, dest.layout.size),
dest,
)?;
}
"GetConsoleMode" => {
// Everything is a pipe.
this.write_null(dest)?;
}
"GetEnvironmentVariableW" => {
// This is not the env var you are looking for.
this.machine.last_error = 203; // ERROR_ENVVAR_NOT_FOUND
this.write_null(dest)?;
}
"GetCommandLineW" => {
this.write_scalar(Scalar::Ptr(this.machine.cmd_line.unwrap()), dest)?;
}
// We can't execute anything else.
_ => {
return err!(Unimplemented(
format!("can't call foreign function: {}", link_name),
));
}
}
this.goto_block(Some(ret))?;
this.dump_place(*dest);
Ok(())
}
/// Convenience helper: store the integer `0`, sized to the destination's
/// layout, into `dest`. Used by many syscall/pthread stubs that report
/// "success" by returning zero.
fn write_null(&mut self, dest: PlaceTy<'tcx, Borrow>) -> EvalResult<'tcx> {
    // Build a zero scalar of exactly the destination's size, then write it.
    let zero = Scalar::from_int(0, dest.layout.size);
    self.eval_context_mut().write_scalar(zero, dest)
}
}
Tidy up calloc code
use rustc::ty;
use rustc::ty::layout::{Align, LayoutOf, Size};
use rustc::hir::def_id::DefId;
use rustc::mir;
use syntax::attr;
use crate::*;
// Blanket implementation: the concrete Miri evaluation context gets every
// method of `EvalContextExt` from the trait's default method bodies below,
// so the impl block itself is intentionally empty.
impl<'a, 'mir, 'tcx> EvalContextExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a + 'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
/// Resolves a function call made by the interpreted program.
///
/// Returns `Ok(None)` when the call was fully handled here (a CTFE hook, the
/// `align_offset` lang item, or an emulated foreign item) — in that case the
/// jump to `ret` has already been performed. Otherwise returns `Ok(Some(mir))`
/// so the caller can push a regular stack frame for `instance`.
///
/// `dest` is `None` for diverging calls; `ret` is the basic block to continue
/// in after the call returns.
fn find_fn(
    &mut self,
    instance: ty::Instance<'tcx>,
    args: &[OpTy<'tcx, Borrow>],
    dest: Option<PlaceTy<'tcx, Borrow>>,
    ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> {
    let this = self.eval_context_mut();
    trace!("eval_fn_call: {:#?}, {:?}", instance, dest.map(|place| *place));
    // First, run the common hooks also supported by CTFE.
    if this.hook_fn(instance, args, dest)? {
        // Hook handled the call; jump straight to the return block.
        this.goto_block(ret)?;
        return Ok(None);
    }
    // There are some more lang items we want to hook that CTFE does not hook (yet).
    if this.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
        // FIXME: return a real value in case the target allocation has an
        // alignment bigger than the one requested.
        // Writing `u128::MAX` (truncated to the destination's width) signals
        // "offset not computable", which is always a conservative answer.
        let n = u128::max_value();
        let dest = dest.unwrap();
        let n = this.truncate(n, dest.layout);
        this.write_scalar(Scalar::from_uint(n, dest.layout.size), dest)?;
        this.goto_block(ret)?;
        return Ok(None);
    }
    // Try to see if we can do something about foreign items.
    if this.tcx.is_foreign_item(instance.def_id()) {
        // An external function that we cannot find MIR for, but we can still run enough
        // of them to make miri viable.
        this.emulate_foreign_item(instance.def_id(), args, dest, ret)?;
        // `goto_block` already handled.
        return Ok(None);
    }
    // Otherwise, load the MIR.
    Ok(Some(this.load_mir(instance.def)?))
}
/// Emulates calling a foreign item, failing if the item is not supported.
/// This function will handle `goto_block` if needed.
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx> {
let this = self.eval_context_mut();
let attrs = this.tcx.get_attrs(def_id);
let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
Some(name) => name.as_str().get(),
None => this.tcx.item_name(def_id).as_str().get(),
};
// Strip linker suffixes (seen on 32-bit macOS).
let link_name = link_name.trim_end_matches("$UNIX2003");
let tcx = &{this.tcx.tcx};
// First: functions that could diverge.
match link_name {
"__rust_start_panic" | "panic_impl" => {
return err!(MachineError("the evaluated program panicked".to_string()));
}
_ => if dest.is_none() {
return err!(Unimplemented(
format!("can't call diverging foreign function: {}", link_name),
));
}
}
// Next: functions that assume a ret and dest.
let dest = dest.expect("we already checked for a dest");
let ret = ret.expect("dest is `Some` but ret is `None`");
match link_name {
"malloc" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
if size == 0 {
this.write_null(dest)?;
} else {
let align = this.tcx.data_layout.pointer_align.abi;
let ptr = this.memory_mut().allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into());
this.write_scalar(Scalar::Ptr(ptr.with_default_tag()), dest)?;
}
}
"calloc" => {
let items = this.read_scalar(args[0])?.to_usize(this)?;
let len = this.read_scalar(args[1])?.to_usize(this)?;
let bytes = items.checked_mul(len).ok_or_else(|| InterpError::Overflow(mir::BinOp::Mul))?;
if bytes== 0 {
this.write_null(dest)?;
} else {
let size = Size::from_bytes(bytes);
let align = this.tcx.data_layout.pointer_align.abi;
let ptr = this.memory_mut().allocate(size, align, MiriMemoryKind::C.into()).with_default_tag();
this.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, size)?;
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
}
"posix_memalign" => {
let ret = this.deref_operand(args[0])?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
let size = this.read_scalar(args[2])?.to_usize(this)?;
// Align must be power of 2, and also at least ptr-sized (POSIX rules).
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
if align < this.pointer_size().bytes() {
return err!(MachineError(format!(
"posix_memalign: alignment must be at least the size of a pointer, but is {}",
align,
)));
}
if size == 0 {
this.write_null(ret.into())?;
} else {
let ptr = this.memory_mut().allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::C.into()
);
this.write_scalar(Scalar::Ptr(ptr.with_default_tag()), ret.into())?;
}
this.write_null(dest)?;
}
"free" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
if !ptr.is_null_ptr(this) {
this.memory_mut().deallocate(
ptr.to_ptr()?,
None,
MiriMemoryKind::C.into(),
)?;
}
}
"__rust_alloc" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = this.memory_mut()
.allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
)
.with_default_tag();
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_alloc_zeroed" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = this.memory_mut()
.allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
)
.with_default_tag();
this.memory_mut()
.get_mut(ptr.alloc_id)?
.write_repeat(tcx, ptr, 0, Size::from_bytes(size))?;
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_dealloc" => {
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let old_size = this.read_scalar(args[1])?.to_usize(this)?;
let align = this.read_scalar(args[2])?.to_usize(this)?;
if old_size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
this.memory_mut().deallocate(
ptr,
Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
MiriMemoryKind::Rust.into(),
)?;
}
"__rust_realloc" => {
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let old_size = this.read_scalar(args[1])?.to_usize(this)?;
let align = this.read_scalar(args[2])?.to_usize(this)?;
let new_size = this.read_scalar(args[3])?.to_usize(this)?;
if old_size == 0 || new_size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let new_ptr = this.memory_mut().reallocate(
ptr,
Size::from_bytes(old_size),
Align::from_bytes(align).unwrap(),
Size::from_bytes(new_size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into(),
)?;
this.write_scalar(Scalar::Ptr(new_ptr.with_default_tag()), dest)?;
}
"syscall" => {
// TODO: read `syscall` IDs like `sysconf` IDs and
// figure out some way to actually process some of them.
//
// `libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)`
// is called if a `HashMap` is created the regular way.
match this.read_scalar(args[0])?.to_usize(this)? {
318 | 511 => {
return err!(Unimplemented(
"miri does not support random number generators".to_owned(),
))
}
id => {
return err!(Unimplemented(
format!("miri does not support syscall ID {}", id),
))
}
}
}
"dlsym" => {
let _handle = this.read_scalar(args[0])?;
let symbol = this.read_scalar(args[1])?.to_ptr()?;
let symbol_name = this.memory().get(symbol.alloc_id)?.read_c_str(tcx, symbol)?;
let err = format!("bad c unicode symbol: {:?}", symbol_name);
let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
return err!(Unimplemented(format!(
"miri does not support dynamically loading libraries (requested symbol: {})",
symbol_name
)));
}
"__rust_maybe_catch_panic" => {
// fn __rust_maybe_catch_panic(
// f: fn(*mut u8),
// data: *mut u8,
// data_ptr: *mut usize,
// vtable_ptr: *mut usize,
// ) -> u32
// We abort on panic, so not much is going on here, but we still have to call the closure.
let f = this.read_scalar(args[0])?.to_ptr()?;
let data = this.read_scalar(args[1])?.not_undef()?;
let f_instance = this.memory().get_fn(f)?;
this.write_null(dest)?;
trace!("__rust_maybe_catch_panic: {:?}", f_instance);
// Now we make a function call.
// TODO: consider making this reusable? `InterpretCx::step` does something similar
// for the TLS destructors, and of course `eval_main`.
let mir = this.load_mir(f_instance.def)?;
let ret_place = MPlaceTy::dangling(this.layout_of(this.tcx.mk_unit())?, this).into();
this.push_stack_frame(
f_instance,
mir.span,
mir,
Some(ret_place),
// Directly return to caller.
StackPopCleanup::Goto(Some(ret)),
)?;
let mut args = this.frame().mir.args_iter();
let arg_local = args.next().ok_or_else(||
InterpError::AbiViolation(
"Argument to __rust_maybe_catch_panic does not take enough arguments."
.to_owned(),
),
)?;
let arg_dest = this.eval_place(&mir::Place::Base(mir::PlaceBase::Local(arg_local)))?;
this.write_scalar(data, arg_dest)?;
assert!(args.next().is_none(), "__rust_maybe_catch_panic argument has more arguments than expected");
// We ourselves will return `0`, eventually (because we will not return if we paniced).
this.write_null(dest)?;
// Don't fall through, we do *not* want to `goto_block`!
return Ok(());
}
"memcmp" => {
let left = this.read_scalar(args[0])?.not_undef()?;
let right = this.read_scalar(args[1])?.not_undef()?;
let n = Size::from_bytes(this.read_scalar(args[2])?.to_usize(this)?);
let result = {
let left_bytes = this.memory().read_bytes(left, n)?;
let right_bytes = this.memory().read_bytes(right, n)?;
use std::cmp::Ordering::*;
match left_bytes.cmp(right_bytes) {
Less => -1i32,
Equal => 0,
Greater => 1,
}
};
this.write_scalar(
Scalar::from_int(result, Size::from_bits(32)),
dest,
)?;
}
"memrchr" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
let val = this.read_scalar(args[1])?.to_i32()? as u8;
let num = this.read_scalar(args[2])?.to_usize(this)?;
if let Some(idx) = this.memory().read_bytes(ptr, Size::from_bytes(num))?
.iter().rev().position(|&c| c == val)
{
let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), this)?;
this.write_scalar(new_ptr, dest)?;
} else {
this.write_null(dest)?;
}
}
"memchr" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
let val = this.read_scalar(args[1])?.to_i32()? as u8;
let num = this.read_scalar(args[2])?.to_usize(this)?;
let idx = this
.memory()
.read_bytes(ptr, Size::from_bytes(num))?
.iter()
.position(|&c| c == val);
if let Some(idx) = idx {
let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), this)?;
this.write_scalar(new_ptr, dest)?;
} else {
this.write_null(dest)?;
}
}
"getenv" => {
let result = {
let name_ptr = this.read_scalar(args[0])?.to_ptr()?;
let name = this.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
match this.machine.env_vars.get(name) {
Some(&var) => Scalar::Ptr(var),
None => Scalar::ptr_null(&*this.tcx),
}
};
this.write_scalar(result, dest)?;
}
"unsetenv" => {
let mut success = None;
{
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
if !name_ptr.is_null_ptr(this) {
let name_ptr = name_ptr.to_ptr()?;
let name = this
.memory()
.get(name_ptr.alloc_id)?
.read_c_str(tcx, name_ptr)?
.to_owned();
if !name.is_empty() && !name.contains(&b'=') {
success = Some(this.machine.env_vars.remove(&name));
}
}
}
if let Some(old) = success {
if let Some(var) = old {
this.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
}
this.write_null(dest)?;
} else {
this.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"setenv" => {
let mut new = None;
{
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
let value_ptr = this.read_scalar(args[1])?.to_ptr()?;
let value = this.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?;
if !name_ptr.is_null_ptr(this) {
let name_ptr = name_ptr.to_ptr()?;
let name = this.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
if !name.is_empty() && !name.contains(&b'=') {
new = Some((name.to_owned(), value.to_owned()));
}
}
}
if let Some((name, value)) = new {
// `+1` for the null terminator.
let value_copy = this.memory_mut().allocate(
Size::from_bytes((value.len() + 1) as u64),
Align::from_bytes(1).unwrap(),
MiriMemoryKind::Env.into(),
).with_default_tag();
{
let alloc = this.memory_mut().get_mut(value_copy.alloc_id)?;
alloc.write_bytes(tcx, value_copy, &value)?;
let trailing_zero_ptr = value_copy.offset(
Size::from_bytes(value.len() as u64),
tcx,
)?;
alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?;
}
if let Some(var) = this.machine.env_vars.insert(
name.to_owned(),
value_copy,
)
{
this.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
}
this.write_null(dest)?;
} else {
this.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"write" => {
let fd = this.read_scalar(args[0])?.to_i32()?;
let buf = this.read_scalar(args[1])?.not_undef()?;
let n = this.read_scalar(args[2])?.to_usize(&*this.tcx)?;
trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
let result = if fd == 1 || fd == 2 {
// stdout/stderr
use std::io::{self, Write};
let buf_cont = this.memory().read_bytes(buf, Size::from_bytes(n))?;
// We need to flush to make sure this actually appears on the screen
let res = if fd == 1 {
// Stdout is buffered, flush to make sure it appears on the screen.
// This is the write() syscall of the interpreted program, we want it
// to correspond to a write() syscall on the host -- there is no good
// in adding extra buffering here.
let res = io::stdout().write(buf_cont);
io::stdout().flush().unwrap();
res
} else {
// No need to flush, stderr is not buffered.
io::stderr().write(buf_cont)
};
match res {
Ok(n) => n as i64,
Err(_) => -1,
}
} else {
eprintln!("Miri: Ignored output to FD {}", fd);
// Pretend it all went well.
n as i64
};
// Now, `result` is the value we return back to the program.
this.write_scalar(
Scalar::from_int(result, dest.layout.size),
dest,
)?;
}
"strlen" => {
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let n = this.memory().get(ptr.alloc_id)?.read_c_str(tcx, ptr)?.len();
this.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
}
// Some things needed for `sys::thread` initialization to go through.
"signal" | "sigaction" | "sigaltstack" => {
this.write_scalar(Scalar::from_int(0, dest.layout.size), dest)?;
}
"sysconf" => {
let name = this.read_scalar(args[0])?.to_i32()?;
trace!("sysconf() called with name {}", name);
// Cache the sysconf integers via Miri's global cache.
let paths = &[
(&["libc", "_SC_PAGESIZE"], Scalar::from_int(4096, dest.layout.size)),
(&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, dest.layout.size)),
(&["libc", "_SC_NPROCESSORS_ONLN"], Scalar::from_int(1, dest.layout.size)),
];
let mut result = None;
for &(path, path_value) in paths {
if let Ok(instance) = this.resolve_path(path) {
let cid = GlobalId {
instance,
promoted: None,
};
let const_val = this.const_eval_raw(cid)?;
let const_val = this.read_scalar(const_val.into())?;
let value = const_val.to_i32()?;
if value == name {
result = Some(path_value);
break;
}
}
}
if let Some(result) = result {
this.write_scalar(result, dest)?;
} else {
return err!(Unimplemented(
format!("Unimplemented sysconf name: {}", name),
));
}
}
"isatty" => {
this.write_null(dest)?;
}
// Hook pthread calls that go to the thread-local storage memory subsystem.
"pthread_key_create" => {
let key_ptr = this.read_scalar(args[0])?.to_ptr()?;
// Extract the function type out of the signature (that seems easier than constructing it ourselves).
let dtor = match this.read_scalar(args[1])?.not_undef()? {
Scalar::Ptr(dtor_ptr) => Some(this.memory().get_fn(dtor_ptr)?),
Scalar::Bits { bits: 0, size } => {
assert_eq!(size as u64, this.memory().pointer_size().bytes());
None
},
Scalar::Bits { .. } => return err!(ReadBytesAsPointer),
};
// Figure out how large a pthread TLS key actually is.
// This is `libc::pthread_key_t`.
let key_type = args[0].layout.ty
.builtin_deref(true)
.ok_or_else(|| InterpError::AbiViolation("wrong signature used for `pthread_key_create`: first argument must be a raw pointer.".to_owned()))?
.ty;
let key_layout = this.layout_of(key_type)?;
// Create key and write it into the memory where `key_ptr` wants it.
let key = this.machine.tls.create_tls_key(dtor, tcx) as u128;
if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
return err!(OutOfTls);
}
this.memory().check_align(key_ptr.into(), key_layout.align.abi)?;
this.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
tcx,
key_ptr,
Scalar::from_uint(key, key_layout.size).into(),
key_layout.size,
)?;
// Return success (`0`).
this.write_null(dest)?;
}
"pthread_key_delete" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
this.machine.tls.delete_tls_key(key)?;
// Return success (0)
this.write_null(dest)?;
}
"pthread_getspecific" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let ptr = this.machine.tls.load_tls(key)?;
this.write_scalar(ptr, dest)?;
}
"pthread_setspecific" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, new_ptr)?;
// Return success (`0`).
this.write_null(dest)?;
}
// Determine stack base address.
"pthread_attr_init" | "pthread_attr_destroy" | "pthread_attr_get_np" |
"pthread_getattr_np" | "pthread_self" | "pthread_get_stacksize_np" => {
this.write_null(dest)?;
}
"pthread_attr_getstack" => {
// Second argument is where we are supposed to write the stack size.
let ptr = this.deref_operand(args[1])?;
// Just any address.
let stack_addr = Scalar::from_int(0x80000, args[1].layout.size);
this.write_scalar(stack_addr, ptr.into())?;
// Return success (`0`).
this.write_null(dest)?;
}
"pthread_get_stackaddr_np" => {
// Just any address.
let stack_addr = Scalar::from_int(0x80000, dest.layout.size);
this.write_scalar(stack_addr, dest)?;
}
// Stub out calls for condvar, mutex and rwlock, to just return `0`.
"pthread_mutexattr_init" | "pthread_mutexattr_settype" | "pthread_mutex_init" |
"pthread_mutexattr_destroy" | "pthread_mutex_lock" | "pthread_mutex_unlock" |
"pthread_mutex_destroy" | "pthread_rwlock_rdlock" | "pthread_rwlock_unlock" |
"pthread_rwlock_wrlock" | "pthread_rwlock_destroy" | "pthread_condattr_init" |
"pthread_condattr_setclock" | "pthread_cond_init" | "pthread_condattr_destroy" |
"pthread_cond_destroy" => {
this.write_null(dest)?;
}
"mmap" => {
// This is a horrible hack, but since the guard page mechanism calls mmap and expects a particular return value, we just give it that value.
let addr = this.read_scalar(args[0])?.not_undef()?;
this.write_scalar(addr, dest)?;
}
"mprotect" => {
this.write_null(dest)?;
}
// macOS API stubs.
"_tlv_atexit" => {
// FIXME: register the destructor.
},
"_NSGetArgc" => {
this.write_scalar(Scalar::Ptr(this.machine.argc.unwrap()), dest)?;
},
"_NSGetArgv" => {
this.write_scalar(Scalar::Ptr(this.machine.argv.unwrap()), dest)?;
},
// Windows API stubs.
"SetLastError" => {
let err = this.read_scalar(args[0])?.to_u32()?;
this.machine.last_error = err;
}
"GetLastError" => {
this.write_scalar(Scalar::from_uint(this.machine.last_error, Size::from_bits(32)), dest)?;
}
"AddVectoredExceptionHandler" => {
// Any non zero value works for the stdlib. This is just used for stack overflows anyway.
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
},
"InitializeCriticalSection" |
"EnterCriticalSection" |
"LeaveCriticalSection" |
"DeleteCriticalSection" => {
// Nothing to do, not even a return value.
},
"GetModuleHandleW" |
"GetProcAddress" |
"TryEnterCriticalSection" |
"GetConsoleScreenBufferInfo" |
"SetConsoleTextAttribute" => {
// Pretend these do not exist / nothing happened, by returning zero.
this.write_null(dest)?;
},
"GetSystemInfo" => {
let system_info = this.deref_operand(args[0])?;
let system_info_ptr = system_info.ptr.to_ptr()?;
// Initialize with `0`.
this.memory_mut().get_mut(system_info_ptr.alloc_id)?
.write_repeat(tcx, system_info_ptr, 0, system_info.layout.size)?;
// Set number of processors to `1`.
let dword_size = Size::from_bytes(4);
let offset = 2*dword_size + 3*tcx.pointer_size();
this.memory_mut().get_mut(system_info_ptr.alloc_id)?
.write_scalar(
tcx,
system_info_ptr.offset(offset, tcx)?,
Scalar::from_int(1, dword_size).into(),
dword_size,
)?;
}
"TlsAlloc" => {
// This just creates a key; Windows does not natively support TLS destructors.
// Create key and return it.
let key = this.machine.tls.create_tls_key(None, tcx) as u128;
// Figure out how large a TLS key actually is. This is `c::DWORD`.
if dest.layout.size.bits() < 128
&& key >= (1u128 << dest.layout.size.bits() as u128) {
return err!(OutOfTls);
}
this.write_scalar(Scalar::from_uint(key, dest.layout.size), dest)?;
}
"TlsGetValue" => {
let key = this.read_scalar(args[0])?.to_u32()? as u128;
let ptr = this.machine.tls.load_tls(key)?;
this.write_scalar(ptr, dest)?;
}
"TlsSetValue" => {
let key = this.read_scalar(args[0])?.to_u32()? as u128;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, new_ptr)?;
// Return success (`1`).
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
}
"GetStdHandle" => {
let which = this.read_scalar(args[0])?.to_i32()?;
// We just make this the identity function, so we know later in `WriteFile`
// which one it is.
this.write_scalar(Scalar::from_int(which, this.pointer_size()), dest)?;
}
"WriteFile" => {
let handle = this.read_scalar(args[0])?.to_isize(this)?;
let buf = this.read_scalar(args[1])?.not_undef()?;
let n = this.read_scalar(args[2])?.to_u32()?;
let written_place = this.deref_operand(args[3])?;
// Spec says to always write `0` first.
this.write_null(written_place.into())?;
let written = if handle == -11 || handle == -12 {
// stdout/stderr
use std::io::{self, Write};
let buf_cont = this.memory().read_bytes(buf, Size::from_bytes(u64::from(n)))?;
let res = if handle == -11 {
io::stdout().write(buf_cont)
} else {
io::stderr().write(buf_cont)
};
res.ok().map(|n| n as u32)
} else {
eprintln!("Miri: Ignored output to handle {}", handle);
// Pretend it all went well.
Some(n)
};
// If there was no error, write back how much was written.
if let Some(n) = written {
this.write_scalar(Scalar::from_uint(n, Size::from_bits(32)), written_place.into())?;
}
// Return whether this was a success.
this.write_scalar(
Scalar::from_int(if written.is_some() { 1 } else { 0 }, dest.layout.size),
dest,
)?;
}
"GetConsoleMode" => {
// Everything is a pipe.
this.write_null(dest)?;
}
"GetEnvironmentVariableW" => {
// This is not the env var you are looking for.
this.machine.last_error = 203; // ERROR_ENVVAR_NOT_FOUND
this.write_null(dest)?;
}
"GetCommandLineW" => {
this.write_scalar(Scalar::Ptr(this.machine.cmd_line.unwrap()), dest)?;
}
// We can't execute anything else.
_ => {
return err!(Unimplemented(
format!("can't call foreign function: {}", link_name),
));
}
}
this.goto_block(Some(ret))?;
this.dump_place(*dest);
Ok(())
}
/// Writes a zero (`0`) of the destination's size into `dest`.
///
/// Convenience helper for the many shims above that report success by
/// returning `0`; avoids repeating the `Scalar::from_int(0, ...)` dance.
fn write_null(&mut self, dest: PlaceTy<'tcx, Borrow>) -> EvalResult<'tcx> {
    self.eval_context_mut().write_scalar(Scalar::from_int(0, dest.layout.size), dest)
}
}
|
// Copyright 2020 Google LLC
//
// Use of this source code is governed by an MIT-style license that can be found
// in the LICENSE file or at https://opensource.org/licenses/MIT.
//! Unix-specific utilities for working with the filesystem.
use std::ffi::OsString;
use std::path::Path;
/// An extended attribute of a file.
///
/// On Linux, extended attributes can be obtained using `getfattr` and set with
/// `setfattr` utilities. On macOS, they can be manipulated through the `xattr`
/// utility.
///
/// See the [Wikipedia] article for more details.
///
/// [Wikipedia]: https://en.wikipedia.org/wiki/Extended_file_attributes
#[derive(Debug)]
pub struct ExtAttr {
    /// A name of the extended attribute.
    pub name: OsString,
    /// A value of the extended attribute.
    ///
    /// Values are raw bytes and are not guaranteed to be valid UTF-8.
    pub value: Vec<u8>,
}
/// Returns an iterator over extended attributes of the specified file.
///
/// # Errors
///
/// The function will fail if a list of extended attributes of the file cannot
/// be obtained (e.g. when the file doesn't exist). Each iterator element is a
/// result itself and errors are possible e.g. when the attribute has been dele-
/// ted since we first listed it.
///
/// # Examples
///
/// ```no_run
/// use std::path::Path;
///
/// for attr in rrg::fs::unix::ext_attrs(Path::new("/tmp/foo")).unwrap() {
/// let attr = attr.unwrap();
/// let name = attr.name.to_string_lossy();
/// let value = String::from_utf8_lossy(&attr.value);
/// println!("{}: {}", name, value);
/// }
/// ```
pub fn ext_attrs<'p>(path: &'p std::path::Path) -> std::io::Result<ExtAttrs<'p>> {
let names = ext_attr_names(path)?;
Ok(ExtAttrs {
path: path.as_ref(),
names: names.into_iter(),
})
}
/// Iterator over extended attributes of a file.
///
/// The iterator can be constructed with the [`ext_attrs`] function.
///
/// [`ext_attrs`]: fn.ext_attrs.html
pub struct ExtAttrs<'p> {
    // File whose attribute values are fetched on each `next()` call.
    path: &'p Path,
    // Attribute names collected up-front by `ext_attrs`.
    names: std::vec::IntoIter<std::ffi::OsString>,
}
impl<'p> Iterator for ExtAttrs<'p> {

    type Item = std::io::Result<ExtAttr>;

    /// Looks up the value for the next recorded attribute name.
    ///
    /// Yields `Some(Err(..))` when the value can no longer be read (for
    /// instance when the attribute was deleted after being listed).
    fn next(&mut self) -> Option<std::io::Result<ExtAttr>> {
        let name = self.names.next()?;
        let result = ext_attr_value(self.path, &name)
            .map(|value| ExtAttr { name, value });

        Some(result)
    }
}
/// Collects names of all extended attributes for the specified file.
///
/// This function is an idiomatic wrapper over lower-level system calls (like
/// `llistxattr` on Linux or `listxattr` on macOS).
///
/// # Errors
///
/// This function will fail if the specified file does not exist, the process
/// does not have permission to access the file or any other system error is
/// raised.
///
/// # Examples
///
/// ```no_run
/// let names = rrg::fs::unix::ext_attr_names("/tmp/foo").unwrap();
///
/// println!("{} attributes found", names.len());
/// for name in names {
///     println!("'{}'", name.to_string_lossy());
/// }
/// ```
pub fn ext_attr_names<P>(path: P) -> std::io::Result<Vec<std::ffi::OsString>>
where
    P: AsRef<Path>,
{
    // Dispatch to the platform-specific implementation at compile time; the
    // `use` shadows the name so the call below resolves per target OS.
    #[cfg(target_os = "linux")]
    use super::linux::ext_attr_names;

    #[cfg(target_os = "macos")]
    use super::macos::ext_attr_names;

    ext_attr_names(path)
}
/// Collects value of a file extended attribute with the specified name.
///
/// This function is an idiomatic wrapper over lower-level system calls (like
/// `lgetxattr` on Linux or `getxattr` on macOS).
///
/// # Errors
///
/// This function will fail if the specified file or the attribute do not exist,
/// the process does not have permission to access the file or any other system
/// error is raised.
///
/// # Examples
///
/// ```no_run
/// let value = rrg::fs::unix::ext_attr_value("/tmp/foo", "user.bar").unwrap();
/// println!("'user.bar': '{}'", String::from_utf8_lossy(&value));
/// ```
pub fn ext_attr_value<P, S>(path: P, name: S) -> std::io::Result<Vec<u8>>
where
    P: AsRef<Path>,
    S: AsRef<std::ffi::OsStr>,
{
    // Dispatch to the platform-specific implementation at compile time.
    #[cfg(target_os = "linux")]
    use super::linux::ext_attr_value;

    #[cfg(target_os = "macos")]
    use super::macos::ext_attr_value;

    ext_attr_value(path, name)
}
#[cfg(test)]
mod tests {

    use super::*;

    // Listing attributes of a path that does not exist must surface an error.
    #[test]
    fn ext_attrs_non_existing() {
        let tempdir = tempfile::tempdir().unwrap();

        assert!(ext_attrs(&tempdir.path().join("foo")).is_err());
    }

    // Gated behind the `test-setfattr` feature because the `setfattr` utility
    // (and xattr support in the filesystem) is not available on every setup.
    #[cfg(all(target_os = "linux", feature = "test-setfattr"))]
    #[test]
    fn ext_attrs_with_multiple_values() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        setfattr(tempfile.path(), "user.abc", b"quux");
        setfattr(tempfile.path(), "user.def", b"norf");

        // The OS does not guarantee attribute order, so sort before asserting.
        let mut results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();
        results.sort_by_key(|attr| attr.name.clone());

        assert_eq!(results.len(), 2);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"quux");
        assert_eq!(results[1].name, "user.def");
        assert_eq!(results[1].value, b"norf");
    }

    // macOS variant of the test above, using the built-in `xattr` tool.
    #[cfg(target_os = "macos")]
    #[test]
    fn ext_attrs_with_multiple_values() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        xattr(tempfile.path(), "user.abc", b"quux");
        xattr(tempfile.path(), "user.def", b"norf");

        // The OS does not guarantee attribute order, so sort before asserting.
        let mut results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();
        results.sort_by_key(|attr| attr.name.clone());

        assert_eq!(results.len(), 2);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"quux");
        assert_eq!(results[1].name, "user.def");
        assert_eq!(results[1].value, b"norf");
    }

    // An attribute may legitimately carry an empty value.
    #[cfg(all(target_os = "linux", feature = "test-setfattr"))]
    #[test]
    fn ext_attrs_with_empty_value() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        setfattr(tempfile.path(), "user.abc", b"");

        let results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();

        assert_eq!(results.len(), 1);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"");
    }

    // macOS variant of the empty-value test.
    #[cfg(target_os = "macos")]
    #[test]
    fn ext_attrs_with_empty_value() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        xattr(tempfile.path(), "user.abc", b"");

        let results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();

        assert_eq!(results.len(), 1);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"");
    }

    // Values are raw bytes, not strings — non-UTF-8 content must round-trip.
    #[cfg(all(target_os = "linux", feature = "test-setfattr"))]
    #[test]
    fn ext_attrs_with_bytes_value() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        setfattr(tempfile.path(), "user.abc", b"\xff\xfe\xff\xfe\xff");

        let results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();

        assert_eq!(results.len(), 1);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"\xff\xfe\xff\xfe\xff");
    }

    // macOS variant of the raw-bytes test.
    #[cfg(target_os = "macos")]
    #[test]
    fn ext_attrs_with_bytes_value() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        xattr(tempfile.path(), "user.abc", b"\xff\xfe\xff\xfe\xff");

        let results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();

        assert_eq!(results.len(), 1);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"\xff\xfe\xff\xfe\xff");
    }

    // Test helper: shells out to `setfattr` to attach an attribute to `path`.
    // Panics (via `assert!`) if the command fails.
    #[cfg(all(target_os = "linux", feature = "test-setfattr"))]
    fn setfattr<P, S>(path: P, name: S, value: &[u8])
    where
        P: AsRef<std::path::Path>,
        S: AsRef<std::ffi::OsStr>,
    {
        use std::os::unix::ffi::OsStrExt as _;

        assert! {
            std::process::Command::new("setfattr")
                .arg("--name").arg(name)
                .arg("--value").arg(std::ffi::OsStr::from_bytes(value))
                .arg(path.as_ref().as_os_str())
                .status()
                .unwrap()
                .success()
        };
    }

    // Test helper: shells out to the macOS `xattr` tool to attach an
    // attribute to `path`. Panics (via `assert!`) if the command fails.
    #[cfg(target_os = "macos")]
    fn xattr<P, S>(path: P, name: S, value: &[u8])
    where
        P: AsRef<std::path::Path>,
        S: AsRef<std::ffi::OsStr>,
    {
        use std::os::unix::ffi::OsStrExt as _;

        assert! {
            std::process::Command::new("xattr")
                .arg(name)
                .arg(std::ffi::OsStr::from_bytes(value))
                .arg(path.as_ref().as_os_str())
                .status()
                .unwrap()
                .success()
        };
    }

    // TODO: Document and add tests for collecting attributes of a symlink.
}
// TODO: Move this into the `rrg-proto` crate once generic purpose utilities are
// moved to a separate crate.
impl From<ExtAttr> for rrg_proto::jobs::StatEntry_ExtAttr {

    /// Converts an extended attribute into its protobuf representation.
    ///
    /// Implemented as `From` (rather than a hand-written `Into`) per Rust
    /// convention; callers still get `Into` for free via the blanket impl.
    fn from(attr: ExtAttr) -> rrg_proto::jobs::StatEntry_ExtAttr {
        use std::os::unix::ffi::OsStringExt as _;

        let mut proto = rrg_proto::jobs::StatEntry_ExtAttr::new();
        // Attribute names are `OsString`s; the proto stores raw bytes.
        proto.set_name(attr.name.into_vec());
        proto.set_value(attr.value);

        proto
    }
}
// Specify behaviour of xattr functions on symlinks.
// Copyright 2020 Google LLC
//
// Use of this source code is governed by an MIT-style license that can be found
// in the LICENSE file or at https://opensource.org/licenses/MIT.
//! Unix-specific utilities for working with the filesystem.
use std::ffi::OsString;
use std::path::Path;
/// An extended attribute of a file.
///
/// On Linux, extended attributes can be obtained using `getfattr` and set with
/// `setfattr` utilities. On macOS, they can be manipulated through the `xattr`
/// utility.
///
/// See the [Wikipedia] article for more details.
///
/// [Wikipedia]: https://en.wikipedia.org/wiki/Extended_file_attributes
#[derive(Debug)]
pub struct ExtAttr {
    /// A name of the extended attribute.
    pub name: OsString,
    /// A value of the extended attribute.
    ///
    /// Values are raw bytes and are not guaranteed to be valid UTF-8.
    pub value: Vec<u8>,
}
/// Returns an iterator over extended attributes of the specified file.
///
/// In case of a symlink this function returns the extended attributes of the
/// link itself and not the file pointed by it.
///
/// # Errors
///
/// The function will fail if a list of extended attributes of the file cannot
/// be obtained (e.g. when the file doesn't exist). Each iterator element is a
/// result itself and errors are possible e.g. when the attribute has been dele-
/// ted since we first listed it.
///
/// # Examples
///
/// ```no_run
/// use std::path::Path;
///
/// for attr in rrg::fs::unix::ext_attrs(Path::new("/tmp/foo")).unwrap() {
/// let attr = attr.unwrap();
/// let name = attr.name.to_string_lossy();
/// let value = String::from_utf8_lossy(&attr.value);
/// println!("{}: {}", name, value);
/// }
/// ```
pub fn ext_attrs<'p>(path: &'p std::path::Path) -> std::io::Result<ExtAttrs<'p>> {
let names = ext_attr_names(path)?;
Ok(ExtAttrs {
path: path.as_ref(),
names: names.into_iter(),
})
}
/// Iterator over extended attributes of a file.
///
/// The iterator can be constructed with the [`ext_attrs`] function.
///
/// [`ext_attrs`]: fn.ext_attrs.html
pub struct ExtAttrs<'p> {
    // File whose attribute values are fetched on each `next()` call.
    path: &'p Path,
    // Attribute names collected up-front by `ext_attrs`.
    names: std::vec::IntoIter<std::ffi::OsString>,
}
impl<'p> Iterator for ExtAttrs<'p> {

    type Item = std::io::Result<ExtAttr>;

    /// Looks up the value for the next recorded attribute name.
    ///
    /// Yields `Some(Err(..))` when the value can no longer be read (for
    /// instance when the attribute was deleted after being listed).
    fn next(&mut self) -> Option<std::io::Result<ExtAttr>> {
        let name = self.names.next()?;
        let result = ext_attr_value(self.path, &name)
            .map(|value| ExtAttr { name, value });

        Some(result)
    }
}
/// Collects names of all extended attributes for the specified file.
///
/// This function is an idiomatic wrapper over lower-level system calls (like
/// `llistxattr` on Linux or `listxattr` on macOS).
///
/// In case of a symlink this function returns the extended attributes of the
/// link itself and not the file pointed by it.
///
/// # Errors
///
/// This function will fail if the specified file does not exist, the process
/// does not have permission to access the file or any other system error is
/// raised.
///
/// # Examples
///
/// ```no_run
/// let names = rrg::fs::unix::ext_attr_names("/tmp/foo").unwrap();
///
/// println!("{} attributes found", names.len());
/// for name in names {
///     println!("'{}'", name.to_string_lossy());
/// }
/// ```
pub fn ext_attr_names<P>(path: P) -> std::io::Result<Vec<std::ffi::OsString>>
where
    P: AsRef<Path>,
{
    // Dispatch to the platform-specific implementation at compile time; the
    // `use` shadows the name so the call below resolves per target OS.
    #[cfg(target_os = "linux")]
    use super::linux::ext_attr_names;

    #[cfg(target_os = "macos")]
    use super::macos::ext_attr_names;

    ext_attr_names(path)
}
/// Collects value of a file extended attribute with the specified name.
///
/// This function is an idiomatic wrapper over lower-level system calls (like
/// `lgetxattr` on Linux or `getxattr` on macOS).
///
/// In case of a symlink this function returns the extended attributes of the
/// link itself and not the file pointed by it.
///
/// # Errors
///
/// This function will fail if the specified file or the attribute do not exist,
/// the process does not have permission to access the file or any other system
/// error is raised.
///
/// # Examples
///
/// ```no_run
/// let value = rrg::fs::unix::ext_attr_value("/tmp/foo", "user.bar").unwrap();
/// println!("'user.bar': '{}'", String::from_utf8_lossy(&value));
/// ```
pub fn ext_attr_value<P, S>(path: P, name: S) -> std::io::Result<Vec<u8>>
where
    P: AsRef<Path>,
    S: AsRef<std::ffi::OsStr>,
{
    // Dispatch to the platform-specific implementation at compile time.
    #[cfg(target_os = "linux")]
    use super::linux::ext_attr_value;

    #[cfg(target_os = "macos")]
    use super::macos::ext_attr_value;

    ext_attr_value(path, name)
}
#[cfg(test)]
mod tests {

    use super::*;

    // Listing attributes of a path that does not exist must surface an error.
    #[test]
    fn ext_attrs_non_existing() {
        let tempdir = tempfile::tempdir().unwrap();

        assert!(ext_attrs(&tempdir.path().join("foo")).is_err());
    }

    // Gated behind the `test-setfattr` feature because the `setfattr` utility
    // (and xattr support in the filesystem) is not available on every setup.
    #[cfg(all(target_os = "linux", feature = "test-setfattr"))]
    #[test]
    fn ext_attrs_with_multiple_values() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        setfattr(tempfile.path(), "user.abc", b"quux");
        setfattr(tempfile.path(), "user.def", b"norf");

        // The OS does not guarantee attribute order, so sort before asserting.
        let mut results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();
        results.sort_by_key(|attr| attr.name.clone());

        assert_eq!(results.len(), 2);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"quux");
        assert_eq!(results[1].name, "user.def");
        assert_eq!(results[1].value, b"norf");
    }

    // macOS variant of the test above, using the built-in `xattr` tool.
    #[cfg(target_os = "macos")]
    #[test]
    fn ext_attrs_with_multiple_values() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        xattr(tempfile.path(), "user.abc", b"quux");
        xattr(tempfile.path(), "user.def", b"norf");

        // The OS does not guarantee attribute order, so sort before asserting.
        let mut results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();
        results.sort_by_key(|attr| attr.name.clone());

        assert_eq!(results.len(), 2);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"quux");
        assert_eq!(results[1].name, "user.def");
        assert_eq!(results[1].value, b"norf");
    }

    // An attribute may legitimately carry an empty value.
    #[cfg(all(target_os = "linux", feature = "test-setfattr"))]
    #[test]
    fn ext_attrs_with_empty_value() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        setfattr(tempfile.path(), "user.abc", b"");

        let results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();

        assert_eq!(results.len(), 1);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"");
    }

    // macOS variant of the empty-value test.
    #[cfg(target_os = "macos")]
    #[test]
    fn ext_attrs_with_empty_value() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        xattr(tempfile.path(), "user.abc", b"");

        let results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();

        assert_eq!(results.len(), 1);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"");
    }

    // Values are raw bytes, not strings — non-UTF-8 content must round-trip.
    #[cfg(all(target_os = "linux", feature = "test-setfattr"))]
    #[test]
    fn ext_attrs_with_bytes_value() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        setfattr(tempfile.path(), "user.abc", b"\xff\xfe\xff\xfe\xff");

        let results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();

        assert_eq!(results.len(), 1);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"\xff\xfe\xff\xfe\xff");
    }

    // macOS variant of the raw-bytes test.
    #[cfg(target_os = "macos")]
    #[test]
    fn ext_attrs_with_bytes_value() {
        let tempfile = tempfile::NamedTempFile::new().unwrap();
        xattr(tempfile.path(), "user.abc", b"\xff\xfe\xff\xfe\xff");

        let results = ext_attrs(&tempfile.path()).unwrap()
            .map(Result::unwrap)
            .collect::<Vec<_>>();

        assert_eq!(results.len(), 1);
        assert_eq!(results[0].name, "user.abc");
        assert_eq!(results[0].value, b"\xff\xfe\xff\xfe\xff");
    }

    // Ideally, we would like to have tests for symlinks but turns out that it
    // is not possible (at least currently) to have extended attributes on them
    // as the kernel simply does not allow that [1].
    //
    // [1]: https://unix.stackexchange.com/questions/16537/extended-attribute-on-symbolic-link

    // Test helper: shells out to `setfattr` to attach an attribute to `path`.
    // `--no-dereference` keeps the operation on the link itself, matching the
    // `l*xattr` semantics documented above. Panics if the command fails.
    #[cfg(all(target_os = "linux", feature = "test-setfattr"))]
    fn setfattr<P, S>(path: P, name: S, value: &[u8])
    where
        P: AsRef<std::path::Path>,
        S: AsRef<std::ffi::OsStr>,
    {
        use std::os::unix::ffi::OsStrExt as _;

        assert! {
            std::process::Command::new("setfattr")
                .arg("--no-dereference")
                .arg("--name").arg(name)
                .arg("--value").arg(std::ffi::OsStr::from_bytes(value))
                .arg(path.as_ref().as_os_str())
                .status()
                .unwrap()
                .success()
        };
    }

    // Test helper: shells out to the macOS `xattr` tool to attach an
    // attribute to `path`. Panics (via `assert!`) if the command fails.
    #[cfg(target_os = "macos")]
    fn xattr<P, S>(path: P, name: S, value: &[u8])
    where
        P: AsRef<std::path::Path>,
        S: AsRef<std::ffi::OsStr>,
    {
        use std::os::unix::ffi::OsStrExt as _;

        assert! {
            std::process::Command::new("xattr")
                .arg(name)
                .arg(std::ffi::OsStr::from_bytes(value))
                .arg(path.as_ref().as_os_str())
                .status()
                .unwrap()
                .success()
        };
    }
}
// TODO: Move this into the `rrg-proto` crate once generic purpose utilities are
// moved to a separate crate.
impl From<ExtAttr> for rrg_proto::jobs::StatEntry_ExtAttr {

    /// Converts an extended attribute into its protobuf representation.
    ///
    /// Implemented as `From` (rather than a hand-written `Into`) per Rust
    /// convention; callers still get `Into` for free via the blanket impl.
    fn from(attr: ExtAttr) -> rrg_proto::jobs::StatEntry_ExtAttr {
        use std::os::unix::ffi::OsStringExt as _;

        let mut proto = rrg_proto::jobs::StatEntry_ExtAttr::new();
        // Attribute names are `OsString`s; the proto stores raw bytes.
        proto.set_name(attr.name.into_vec());
        proto.set_value(attr.value);

        proto
    }
}
|
use std;
use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet};
use std::fmt::Display;
use std::fs::{DirEntry, metadata, read_link};
use std::path::{Components, Path, PathBuf};
use chrono::{DateTime, Local, UTC};
use walkdir;
use walkdir::WalkDir;
use errors::*;
use notify::DebouncedEvent;
use query::{FileResult, Query, QueryExpression, QueryResult};
use times::system_time_to_date_time;
#[cfg(test)]
use std::fs::{OpenOptions, create_dir_all};
/// A single node (file, directory, symlink or the synthetic root) in the
/// in-memory view of the filesystem.
#[derive(Clone, Debug, Eq, PartialEq, Serialize)]
pub struct FsNode {
    /// Full path of the node.
    pub path: PathBuf,
    /// Final path component; used as the key in the parent's children map.
    pub basename: String,
    /// The kind of entry (and, for directories, its children).
    pub entry: FsEntryType,
    /// Last modification time.
    pub mtime: DateTime<Local>,
}
/// The kind of change that happened to a file, as delivered to clients.
#[derive(Clone, Copy, Debug, Serialize)]
pub enum ChangeEvent {
    /// A file or directory appeared (also the "new name" half of a rename).
    Create,
    /// A file or directory was removed (also the "old name" half of a rename).
    Delete,
    /// File contents were written.
    Write,
    /// Metadata changed (mapped from `Chmod` events).
    Metadata,
}
/// A single change paired with a snapshot of the affected file.
#[derive(Debug, Serialize)]
pub struct FileEvent {
    // What happened to the file.
    event: ChangeEvent,
    // Snapshot of the file produced via `FileResult::make`.
    file: FileResult,
}
/// A batch of file events delivered for a single registered query root.
#[derive(Debug, Serialize)]
pub struct Notification {
    // Events that matched a registered query.
    changes: Vec<FileEvent>,
    // The registered root the events belong to (empty after a rescan).
    root: PathBuf,
}
/// The in-memory filesystem view: a synthetic root node plus the set of
/// registered query roots.
#[derive(Debug, Serialize)]
pub struct FsRootNode {
    // Synthetic top-level node that all watched trees hang off of.
    base: FsNode,
    // Registered query roots mapped to their filter expressions.
    roots: BTreeMap<PathBuf, QueryExpression>,
}
impl FsRootNode {
    /// Returns a depth-first iterator over every node in the view, starting
    /// at (and including) the synthetic root.
    ///
    /// The impl-level lifetime parameter of the original (`impl<'a>`) was
    /// unnecessary; standard elision ties the iterator's borrow to `&self`.
    pub fn iter(&self) -> FsIterator {
        FsIterator {
            to_visit: Vec::new(),
            current: Some(&self.base),
        }
    }
}
impl<'a> IntoIterator for &'a FsRootNode {
    type Item = &'a FsNode;
    type IntoIter = FsIterator<'a>;

    /// Enables `for node in &root` syntax; delegates to `FsRootNode::iter`.
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// Depth-first iterator over the nodes of an `FsRootNode` tree.
pub struct FsIterator<'a> {
    // Stack of nodes still to be visited.
    to_visit: Vec<&'a FsNode>,
    // Node to be returned by the next call to `next()`.
    current: Option<&'a FsNode>,
}
impl<'a> Iterator for FsIterator<'a> {
    type Item = &'a FsNode;

    /// Yields the current node and schedules its children (if it is a
    /// directory or the synthetic root) for subsequent visits.
    fn next(&mut self) -> Option<Self::Item> {
        // If the current node can have children, push them onto the stack.
        // (`if let` replaces the original `match` with an empty `None` arm.)
        if let Some(node) = self.current {
            match node.entry {
                FsEntryType::RootRoot { ref children } |
                FsEntryType::Directory { ref children } => {
                    // Reversed so that the lexicographically first child is
                    // popped (and therefore visited) first.
                    self.to_visit.extend(children.values().rev());
                }
                FsEntryType::File { .. } |
                FsEntryType::Symlink { .. } => (),
            }
        }

        // Return the previous current node and advance to the next one.
        let returned = self.current;
        self.current = self.to_visit.pop();
        returned
    }
}
impl FsRootNode {
/// Creates an empty view containing only the synthetic, path-less root.
pub fn new() -> Self {
    FsRootNode {
        base: FsNode {
            // The synthetic root has an empty path so every path added
            // later nests beneath it.
            path: PathBuf::from(""),
            basename: String::new(),
            entry: FsEntryType::RootRoot { children: BTreeMap::new(), },
            mtime: Local::now(),
        },
        roots: BTreeMap::new(),
    }
}
/// Inserts `node` into the tree, creating any missing ancestor
/// directories along its path.
fn insert_node(&mut self, node: FsNode) -> Result<()> {
    let basename = node.basename.clone();
    let path = node.path.clone();
    let mut components = path.components();
    // Drop the final component: the remaining prefix addresses the parent.
    components.next_back();

    match self.base
        .ensure_and_return_parent(PathBuf::new(), components)
        .chain_err(|| "unable to ensure parent for new node insertion")?
        .entry {
        // Replaces any existing child with the same basename.
        FsEntryType::RootRoot { ref mut children } |
        FsEntryType::Directory { ref mut children } => children.insert(basename, node),
        _ => bail!("Found a non-directory node when ensuring parent of a node was in the tree"),
    };

    Ok(())
}
/// Removes the node at `path` from the tree, returning it if it was
/// present. Returns `None` for an empty path or when no parent is found.
fn remove_node(&mut self, path: &Path) -> Option<FsNode> {
    let mut components = path.components();
    // The final component is the child's key; the prefix addresses the parent.
    let basename = match components.next_back() {
        Some(b) => b.as_os_str().to_string_lossy(),
        None => return None,
    };

    // NOTE(review): `ensure_and_return_parent` creates missing ancestors as
    // empty directories even on this removal path — confirm that is intended.
    if let Ok(parent) = self.base.ensure_and_return_parent(PathBuf::new(), components) {
        match parent.entry {
            FsEntryType::RootRoot { ref mut children } |
            FsEntryType::Directory { ref mut children } => children.remove(&*basename),
            _ => None,
        }
    } else {
        None
    }
}
/// Returns `true` if `node` lies under some registered root and matches
/// that root's query expression (i.e. a client should be notified).
fn needs_notification(&self, node: &FsNode) -> bool {
    self.roots
        .iter()
        .any(|(root, expr)| node.path.starts_with(root) && expr.matches(node))
}
/// Applies a single debounced filesystem `event` to the in-memory tree.
///
/// Pure notice events (`NoticeWrite`/`NoticeRemove`) yield `Ok(None)`.
/// Every other event updates the tree and yields a `Notification` whose
/// `changes` holds one `FileEvent` per affected node matching a registered
/// query, and whose `root` is the registered root containing the event
/// path (empty for a full `Rescan`).
pub fn consume_event(&mut self, event: DebouncedEvent) -> Result<Option<Notification>> {
    let mut changes = Vec::new();
    let event_root;

    // Locates the registered query root that is a path-prefix of `p`.
    fn find_matching_root(
        p: &PathBuf,
        roots: &BTreeMap<PathBuf, QueryExpression>
    ) -> Result<PathBuf> {
        match roots.iter().map(|t| t.0).filter(|r| p.starts_with(r)).next() {
            Some(r) => Ok(r.clone()),
            None => {
                // NOTE(review): replaced the original joke message with an
                // actionable one; failure semantics are unchanged.
                bail!("no registered root is a prefix of {:?} (known roots: {:?})",
                      &p,
                      roots)
            }
        }
    }

    match event {
        // TODO decide how to handle I/O notices vs. confirmations
        DebouncedEvent::NoticeWrite(_) => return Ok(None),
        DebouncedEvent::NoticeRemove(_) => return Ok(None),
        DebouncedEvent::Create(ref p) => {
            let node = FsNode::try_from_node_source(&p).chain_err(|| "constructing node from path")?;
            if self.needs_notification(&node) {
                changes.push(FileEvent {
                    event: ChangeEvent::Create,
                    file: FileResult::make(&node),
                });
            }
            self.insert_node(node)?;
            event_root = find_matching_root(p, &self.roots)?;
        }
        DebouncedEvent::Write(ref p) => {
            let node = FsNode::try_from_node_source(&p).chain_err(|| "constructing node from path")?;
            if self.needs_notification(&node) {
                changes.push(FileEvent {
                    event: ChangeEvent::Write,
                    file: FileResult::make(&node),
                });
            }
            self.insert_node(node)?;
            event_root = find_matching_root(p, &self.roots)?;
        }
        DebouncedEvent::Chmod(ref p) => {
            let node = FsNode::try_from_node_source(&p).chain_err(|| "constructing node from path")?;
            if self.needs_notification(&node) {
                changes.push(FileEvent {
                    event: ChangeEvent::Metadata,
                    file: FileResult::make(&node),
                });
            }
            self.insert_node(node)?;
            event_root = find_matching_root(p, &self.roots)?;
        }
        DebouncedEvent::Remove(ref p) => {
            // Only notify if the node was actually tracked.
            if let Some(old_node) = self.remove_node(p) {
                if self.needs_notification(&old_node) {
                    changes.push(FileEvent {
                        event: ChangeEvent::Delete,
                        file: FileResult::make(&old_node),
                    });
                }
            }
            event_root = find_matching_root(p, &self.roots)?;
        }
        DebouncedEvent::Rename(ref p, ref to) => {
            // A rename is modelled as a create of the target followed by a
            // delete of the source.
            let new_node =
                FsNode::try_from_node_source(&to).chain_err(|| "constructing node from path")?;
            if self.needs_notification(&new_node) {
                changes.push(FileEvent {
                    event: ChangeEvent::Create,
                    file: FileResult::make(&new_node),
                });
            }
            // NOTE(review): was `.unwrap()`; propagate the error like every
            // other arm instead of panicking.
            self.insert_node(new_node)?;
            if let Some(old_node) = self.remove_node(p) {
                if self.needs_notification(&old_node) {
                    changes.push(FileEvent {
                        event: ChangeEvent::Delete,
                        file: FileResult::make(&old_node),
                    });
                }
            }
            event_root = find_matching_root(p, &self.roots)?;
        }
        DebouncedEvent::Rescan => {
            // The old tree is invalid: rebuild it from the registered roots.
            let mut new_fake_root = FsRootNode::new();
            for ref root in self.roots.keys() {
                new_fake_root.add_root(root)?;
            }
            // and make a best effort to notify our client of changes there
            self.base.diff(&new_fake_root.base, &mut changes);
            self.base = new_fake_root.base;
            // TODO segment this into multiple notifications, one per root?
            event_root = PathBuf::new();
        }
        DebouncedEvent::Error(e, opt_p) => {
            bail!("notification error at path {:?}: {:?}", opt_p, e);
        }
    }

    Ok(Some(Notification {
        changes: changes,
        root: event_root,
    }))
}
/// Evaluates `query` against the view, registering its root for future
/// change notifications and returning the currently matching files.
pub fn eval(&mut self, query: Query) -> Result<QueryResult> {
    // Populate the tree lazily: walk the root only the first time it is seen.
    if !self.roots.contains_key(&query.root) {
        let started = UTC::now();
        info!("Adding {:?} root to file system view...", &query.root);
        self.add_root(&query.root)?;
        let elapsed = UTC::now().signed_duration_since(started);
        info!("Added {:?} to file system view, took {} seconds.",
              &query.root,
              elapsed);
    }

    // We only have one client right now, so materializing the matches is
    // cheap enough that watchman-style generators are not needed.
    let mut files = Vec::new();
    for node in self.iter() {
        if node.path.starts_with(&query.root) && query.expr.matches(node) {
            files.push(FileResult::make(node));
        }
    }

    self.roots.insert(query.root, query.expr);

    Ok(QueryResult {
        id: query.id,
        files: files,
    })
}
/// Walks `path` recursively and inserts every visited entry into the tree.
///
/// NOTE(review): the original implementation wrapped the per-entry work in a
/// nested closure, producing `Result<Result<()>, walkdir::Error>`; collecting
/// only propagated the *outer* walkdir error, silently discarding failures
/// from node construction and parent lookup. This version propagates both.
pub fn add_root(&mut self, path: &Path) -> Result<()> {
    for entry in WalkDir::new(path) {
        // Propagate walk errors (permissions, concurrent deletions, ...).
        let f = entry?;
        let node = FsNode::try_from_node_source(&f)?;
        let mut components = f.path().components();
        // we need to remove the final element from the path, as it corresponds to the DirEntry
        components.next_back();
        let parent = self.base.ensure_and_return_parent(PathBuf::new(), components)?;
        match parent.entry {
            FsEntryType::Directory { ref mut children } => {
                let basename = node.basename.to_owned();
                children.insert(basename, node);
            }
            _ => bail!("Found a non-directory as the parent of a node."),
        }
    }
    Ok(())
}
}
impl FsNode {
/// This should only be called if the node is a root node.
///
/// Walks `components`, creating empty directory nodes for any missing
/// intermediate components, and returns a mutable reference to the node the
/// (already-truncated) component iterator addresses. Callers strip the final
/// component first, so the returned node is the *parent* of the entry of
/// interest.
// TODO move this to FsRootNode, so this can be statically enforced.
fn ensure_and_return_parent<'a, 'b>(
    &'a mut self,
    mut path_so_far: PathBuf,
    mut components: Components<'b>
) -> Result<&'a mut FsNode> {
    // Recursion terminates when the component iterator is exhausted.
    let component = components.next();
    if let Some(c) = component {
        let comp_str = match c.as_os_str().to_str() {
            Some(s) => s,
            None => bail!("non unicode filename found"),
        };
        path_so_far.push(comp_str);
        match self.entry {
            FsEntryType::Directory { ref mut children } |
            FsEntryType::RootRoot { ref mut children } => {
                // Missing intermediate directories are synthesized on the fly.
                children.entry(comp_str.to_owned())
                    .or_insert_with(|| FsNode::empty_dir(path_so_far.clone(), comp_str.to_owned()))
                    .ensure_and_return_parent(path_so_far, components)
            }
            _ => bail!("Files should not exist in the middle of a path component."),
        }
    } else {
        Ok(self)
    }
}
/// Recursively diffs `self` (the old tree) against `new_node` (the new tree),
/// pushing a `FileEvent` into `results` for every created, deleted, or
/// modified entry.
///
/// Both nodes must describe the same path and basename; a mismatch is a
/// caller bug and panics.
pub fn diff(&self, new_node: &FsNode, results: &mut Vec<FileEvent>) {
    if self.path != new_node.path {
        panic!("bug: called diff on nodes without equal paths");
    }
    if self.basename != new_node.basename {
        panic!("bug: called diff on nodes without equal basenames");
    }
    // we'll ignore mtime at this level, because it only really matters for files
    match (&self.entry, &new_node.entry) {
        (&FsEntryType::RootRoot { children: ref old_children },
         &FsEntryType::RootRoot { children: ref new_children }) |
        (&FsEntryType::Directory { children: ref old_children },
         &FsEntryType::Directory { children: ref new_children }) => {
            let old_files = old_children.keys().collect::<BTreeSet<_>>();
            let new_files = new_children.keys().collect::<BTreeSet<_>>();
            let in_both = old_files.intersection(&new_files).map(|s| *s).collect::<BTreeSet<_>>();
            for &deleted_basename in old_files.difference(&in_both) {
                let deleted_node = old_children.get(deleted_basename).unwrap();
                results.push(FileEvent {
                    event: ChangeEvent::Delete,
                    file: FileResult::make(deleted_node),
                });
            }
            for &created_basename in new_files.difference(&in_both) {
                let created_node = new_children.get(created_basename).unwrap();
                results.push(FileEvent {
                    event: ChangeEvent::Create,
                    file: FileResult::make(created_node),
                });
            }
            for basename in in_both {
                let before = old_children.get(basename).unwrap();
                // BUG FIX: `after` must come from `new_children`; it was
                // previously read from `old_children`, so modifications to
                // entries present in both trees were never detected.
                let after = new_children.get(basename).unwrap();
                before.diff(after, results);
            }
        }
        (&FsEntryType::File { len: old_length }, &FsEntryType::File { len: new_length }) => {
            match self.mtime.cmp(&new_node.mtime) {
                // a strictly newer mtime always counts as a write
                ::std::cmp::Ordering::Less => {
                    results.push(FileEvent {
                        event: ChangeEvent::Write,
                        file: FileResult::make(&new_node),
                    });
                }
                _ => {
                    // only generate a notification b/c of length if the mtimes are the same
                    if old_length != new_length {
                        results.push(FileEvent {
                            event: ChangeEvent::Write,
                            file: FileResult::make(&new_node),
                        });
                    }
                }
            }
        }
        (&FsEntryType::Symlink { target: ref old_target, ty: old_ty },
         &FsEntryType::Symlink { target: ref new_target, ty: new_ty }) => {
            if old_target != new_target || old_ty != new_ty {
                results.push(FileEvent {
                    event: ChangeEvent::Write,
                    file: FileResult::make(&new_node),
                });
            }
        }
        // if the node types differ, just generate create/delete events for everything
        _ => {
            self.gen_events_for_self_and_children(ChangeEvent::Delete, results);
            new_node.gen_events_for_self_and_children(ChangeEvent::Create, results);
        }
    }
}
/// Emits `event` for this node and, recursively, for every node beneath it.
fn gen_events_for_self_and_children(&self, event: ChangeEvent, changes: &mut Vec<FileEvent>) {
    changes.push(FileEvent {
        event: event,
        file: FileResult::make(self),
    });
    // Only directory-like entries carry children to recurse into.
    match self.entry {
        FsEntryType::RootRoot { ref children } |
        FsEntryType::Directory { ref children } => {
            for child in children.values() {
                child.gen_events_for_self_and_children(event, changes);
            }
        }
        FsEntryType::File { .. } |
        FsEntryType::Symlink { .. } => (),
    }
}
/// Builds a directory node with no children, stamped with the current time.
pub fn empty_dir(path: PathBuf, dirname: String) -> Self {
    FsNode {
        path: path,
        basename: dirname,
        mtime: Local::now(),
        entry: FsEntryType::Directory { children: BTreeMap::new(), },
    }
}
/// Builds an `FsNode` from anything that can supply a path plus metadata
/// (a walkdir entry, a std `DirEntry`, or a bare path from a notify event).
fn try_from_node_source<S: NodeSource>(entry: &S) -> Result<Self> {
    // Non-unicode basenames and paths without a final component are rejected.
    let basename = match entry.path().file_name() {
        Some(n) => {
            match n.to_str() {
                Some(b) => b.to_owned(),
                None => bail!("non-unicode filename"),
            }
        }
        None => bail!("walkdir gave us a relative path"),
    };
    let metadata = entry.metadata()?;
    let file_type = metadata.ty;
    let entry_ty = match file_type {
        FsItemType::Directory => FsEntryType::Directory { children: BTreeMap::new(), },
        FsItemType::File => FsEntryType::File { len: metadata.len, },
        FsItemType::SymlinkUgh => {
            // Resolve the link and classify its target; an unreadable target
            // is recorded as `Other` rather than treated as an error.
            let sym_path = read_link(entry.path()).chain_err(|| "unable to read through symlink")?;
            let sym_target_type = match sym_path.metadata() {
                Ok(sym_meta) => {
                    if sym_meta.file_type().is_file() {
                        FsItemType::File
                    } else if sym_meta.file_type().is_dir() {
                        FsItemType::Directory
                    } else if sym_meta.file_type().is_symlink() {
                        FsItemType::SymlinkUgh
                    } else {
                        FsItemType::Other
                    }
                }
                Err(_) => FsItemType::Other,
            };
            FsEntryType::Symlink {
                target: sym_path,
                ty: sym_target_type,
            }
        }
        // `Other` is only produced when classifying symlink targets above.
        FsItemType::Other => {
            panic!("This is only ever used when reading symlink targets, this is a bug.");
        }
    };
    Ok(FsNode {
        path: entry.path().into_owned(),
        basename: basename,
        entry: entry_ty,
        mtime: metadata.mtime,
    })
}
/// Renders this node (and, recursively, its children — two spaces of indent
/// per depth level) into `buf`.
fn format_into_buffer(&self, buf: &mut String, depth: u32) {
    let indent = depth * 2;
    for _ in 0..indent {
        buf.push(' ');
    }
    buf.push_str(&self.basename);
    match self.entry {
        FsEntryType::RootRoot { ref children } |
        FsEntryType::Directory { ref children } => {
            buf.push('\n');
            for child in children.values() {
                child.format_into_buffer(buf, depth + 1);
            }
        }
        FsEntryType::File { len } => {
            buf.push_str(&format!(" {} {}\n", len, self.mtime));
        }
        FsEntryType::Symlink { ref target, ty } => {
            buf.push_str(&format!(" -> {:?} ({:?})\n", target, ty));
        }
    }
}
}
impl Display for FsNode {
    /// Renders the whole subtree via `format_into_buffer`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
        let mut rendered = String::new();
        self.format_into_buffer(&mut rendered, 0);
        // Write errors are deliberately ignored here, as in the original.
        let _ = f.write_str(&rendered);
        Ok(())
    }
}
// Unix has a single symlink kind, so `ty` is irrelevant here.
#[cfg(all(test, not(windows)))]
#[allow(unused_variables)]
fn make_symlink(src: &Path, dst: &Path, ty: FsItemType) -> Result<()> {
    use std::os::unix::fs::symlink;
    symlink(src, dst).chain_err(|| "unable to create symlink")?;
    Ok(())
}
// Windows distinguishes file vs. directory symlinks at creation time.
#[cfg(all(test, windows))]
fn make_symlink(src: &Path, dst: &Path, ty: FsItemType) -> Result<()> {
    use std::os::windows::fs::{symlink_dir, symlink_file};
    match ty {
        FsItemType::Directory => {
            symlink_dir(src, dst).chain_err(|| "unable to create directory symlink")?;
        }
        FsItemType::File | FsItemType::SymlinkUgh => {
            symlink_file(src, dst).chain_err(|| "unable to create file symlink")?;
        }
        // we don't want to try to write any of this stuff to disk
        FsItemType::Other => (),
    }
    Ok(())
}
/// The payload of a node in the in-memory filesystem tree.
#[derive(Clone, Debug, Eq, PartialEq, Serialize)]
pub enum FsEntryType {
    /// A directory and its children, keyed by basename.
    Directory { children: BTreeMap<String, FsNode>, },
    /// A regular file; only its length is tracked here.
    File { len: u64, },
    /// A symlink: where it points and what kind of thing it points at.
    Symlink { target: PathBuf, ty: FsItemType, },
    /// The synthetic super-root that sits above all watched roots.
    // different than a root path element (thanks, windows)
    RootRoot { children: BTreeMap<String, FsNode>, },
}
/// Coarse classification of a filesystem item (serialized for queries).
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum FsItemType {
    #[serde(rename="file")]
    File,
    #[serde(rename="dir")]
    Directory,
    #[serde(rename="symlink")]
    SymlinkUgh,
    /// Only produced when classifying symlink *targets* that are neither
    /// plain files nor directories (or whose metadata is unreadable).
    #[serde(rename="other")]
    Other,
}
/// The subset of filesystem metadata the tree cares about.
struct MetadataFromFs {
    ty: FsItemType,
    mtime: DateTime<Local>,
    len: u64,
}
/// Anything that can supply a path plus metadata for building an `FsNode`.
trait NodeSource {
    fn path(&self) -> Cow<Path>;
    fn metadata(&self) -> Result<MetadataFromFs>;
}
// Lets a bare path (e.g. from a notify event) act as a node source.
impl<'a> NodeSource for &'a PathBuf {
    fn path(&self) -> Cow<Path> {
        Cow::Borrowed(self.as_ref())
    }
    fn metadata(&self) -> Result<MetadataFromFs> {
        let md = metadata(self).chain_err(|| "unable to read path metadata")?;
        let mtime = system_time_to_date_time(md.modified().chain_err(|| "unable to read mtime")?);
        Ok(MetadataFromFs {
            mtime: mtime,
            len: md.len(),
            // NOTE(review): `fs::metadata` follows links, so the symlink arm
            // may only be reachable for special/broken entries — confirm.
            ty: if md.is_file() {
                FsItemType::File
            } else if md.is_dir() {
                FsItemType::Directory
            } else {
                FsItemType::SymlinkUgh
            },
        })
    }
}
// `self.path()` / `self.metadata()` below resolve to walkdir's *inherent*
// methods (inherent methods take precedence over trait methods), so these
// are delegations, not recursion.
impl NodeSource for walkdir::DirEntry {
    fn path(&self) -> Cow<Path> {
        Cow::Borrowed(&self.path())
    }
    fn metadata(&self) -> Result<MetadataFromFs> {
        let md = self.metadata().chain_err(|| "unable to read file metadata")?;
        let mtime = system_time_to_date_time(md.modified()
            .chain_err(|| "unable to read modification time")?);
        Ok(MetadataFromFs {
            mtime: mtime,
            len: md.len(),
            // anything neither file nor directory is lumped in as a symlink
            ty: if md.is_file() {
                FsItemType::File
            } else if md.is_dir() {
                FsItemType::Directory
            } else {
                FsItemType::SymlinkUgh
            },
        })
    }
}
// Node source backed by std's directory iteration; std `DirEntry::path`
// returns an owned `PathBuf`, hence `Cow::Owned` here.
impl NodeSource for DirEntry {
    fn path(&self) -> Cow<Path> {
        Cow::Owned(self.path())
    }
    fn metadata(&self) -> Result<MetadataFromFs> {
        // resolves to the inherent `DirEntry::metadata`, not this trait method
        let md = self.metadata().chain_err(|| "unable to read file metadata")?;
        let mtime = system_time_to_date_time(md.modified()
            .chain_err(|| "unable to read file modified time")?);
        Ok(MetadataFromFs {
            mtime: mtime,
            len: md.len(),
            // anything neither file nor directory is lumped in as a symlink
            ty: if md.is_file() {
                FsItemType::File
            } else if md.is_dir() {
                FsItemType::Directory
            } else {
                FsItemType::SymlinkUgh
            },
        })
    }
}
#[cfg(test)]
mod test {
use chrono::Local;
use std::fs::File;
use std::io::prelude::*;
use std::path::PathBuf;
use tempdir::TempDir;
use walkdir::WalkDir;
use super::*;
impl FsNode {
/// WARNING: Only call this in tests when you've carefully constructed a tree inside a
/// temporary directory. Potentially destructive, and not easily reversed.
///
/// Also, requires that the process has `SeCreateSymbolicLinkPrivilege`.
fn mirror_to_disk(&self) -> Result<()> {
    match self.entry {
        // The synthetic super-root has no on-disk counterpart; just recurse.
        FsEntryType::RootRoot { ref children } => {
            for child in children.values() {
                child.mirror_to_disk()?;
            }
        }
        FsEntryType::Directory { ref children } => {
            create_dir_all(&self.path).chain_err(|| "unable to recursively create directories")?;
            for (_, child) in children {
                child.mirror_to_disk()?
            }
        }
        FsEntryType::File { len } => {
            // Content is irrelevant for these tests; only the length matters,
            // so create the file and extend it with `set_len`.
            let file = OpenOptions::new().read(true)
                .write(true)
                .create(true)
                .open(&self.path)
                .chain_err(|| "unable to open/create file")?;
            file.set_len(len).chain_err(|| "unable to set file length")?;
        }
        FsEntryType::Symlink { ref target, ty } => {
            make_symlink(target, &self.path, ty)?;
        }
    }
    Ok(())
}
/// Structural equality assertion that tolerates mtimes differing by up to
/// `acceptable_time_gap_millis` (disk timestamps lose precision relative to
/// the in-memory `Local::now()` values). Panics with a descriptive message
/// on the first mismatch.
fn assert_eq_with_mtime_epsilon(&self, other: &FsNode, acceptable_time_gap_millis: i64) {
    if self.path != other.path {
        panic!("paths are not equal\nlhs: {:?}\nrhs: {:?}", self, other);
    }
    if self.basename != other.basename {
        panic!("basenames are not equal\nlhs: {:?}\nrhs: {:?}", self, other);
    }
    if self.mtime.signed_duration_since(other.mtime).num_milliseconds().abs() >
       acceptable_time_gap_millis {
        panic!("mtimes are not equal (w/in {:?} tolerance): {} and {}\nlhs:{:?}\nrhs:{:?}",
               acceptable_time_gap_millis,
               self.mtime,
               other.mtime,
               self,
               other);
    }
    match (&self.entry, &other.entry) {
        (&FsEntryType::File { len: len1 }, &FsEntryType::File { len: len2 }) => {
            if len1 != len2 {
                panic!("file lengths are not equal\nlhs: {:?}\nrhs: {:?}",
                       self,
                       other);
            }
        }
        (&FsEntryType::RootRoot { children: ref children1 },
         &FsEntryType::RootRoot { children: ref children2 }) |
        (&FsEntryType::Directory { children: ref children1 },
         &FsEntryType::Directory { children: ref children2 }) => {
            // these are btreemaps so will have the same sort order
            for ((basename1, child1), (basename2, child2)) in children1.iter().zip(children2.iter()) {
                if basename1 != basename2 {
                    panic!("children basenames are not equal\nlhs: {:?}\nrhs: {:?}",
                           child1,
                           child2);
                }
                child1.assert_eq_with_mtime_epsilon(child2, acceptable_time_gap_millis);
            }
        }
        (&FsEntryType::Symlink { target: ref target1, ty: ty1 },
         &FsEntryType::Symlink { target: ref target2, ty: ty2 }) => {
            if ty1 != ty2 {
                panic!("symlink types are not equal\nlhs: {:?}\nrhs: {:?}",
                       self,
                       other);
            }
            if target1 != target2 {
                panic!("symlink targets are not equal\nlhs: {:?}\nrhs: {:?}",
                       self,
                       other);
            }
        }
        _ => {
            panic!("node entry types are not equal\nlhs: {:?}\nrhs: {:?}",
                   self,
                   other)
        }
    }
}
}
// Symlink targets that exist on effectively every unix install.
#[cfg(not(windows))]
mod symlink_target_paths {
    pub const FILE: &'static str = "/bin/bash";
    pub const DIRECTORY: &'static str = "/usr";
}
// Windows equivalents; unused unless the symlink test paths are exercised.
#[cfg(windows)]
mod symlink_target_paths {
    #[allow(dead_code)]
    pub const FILE: &'static str = "C:\\Windows\\explorer.exe";
    #[allow(dead_code)]
    pub const DIRECTORY: &'static str = "C:\\Windows";
}
// End-to-end roundtrip: build an in-memory tree by hand, mirror it to a temp
// directory on disk, then rebuild a second tree from disk and assert the two
// agree (within an mtime tolerance).
#[test]
fn fixed_roundtrip() {
    let start_time = Local::now();
    let tmp = TempDir::new("mirroring").expect("couldn't create temp dir");
    // make the fake filesystem, keeping a list of paths created
    let many_child_dir_path = tmp.path().join("many_child_dir");
    let single_child_dir_path = many_child_dir_path.join("single_child_dir");
    let empty_dir_path = many_child_dir_path.join("empty_dir");
    let empty_file_path = single_child_dir_path.join("empty_file");
    let single_byte_file_path = many_child_dir_path.join("single_byte_file");
    let twenty_two_byte_file_path = many_child_dir_path.join("twenty_two_byte_file");
    let symlinks_path = many_child_dir_path.join("symlinks");
    let symlink_to_file_path = symlinks_path.join("symlink_to_file");
    let symlink_to_dir_path = symlinks_path.join("symlink_to_dir");
    let to_find = {
        let mut to_find = vec![ tmp.path().to_path_buf(),
                                many_child_dir_path.clone(),
                                single_child_dir_path.clone(),
                                empty_dir_path.clone(),
                                empty_file_path.clone(),
                                single_byte_file_path.clone(),
                                twenty_two_byte_file_path.clone() ];
        // symlinks are only exercised off-windows (privilege requirements)
        #[cfg(not(windows))]
        {
            to_find.push(symlinks_path);
            to_find.push(symlink_to_file_path.clone());
            to_find.push(symlink_to_dir_path.clone());
        }
        to_find.sort();
        to_find
    };
    let mut root_node = FsRootNode::new();
    root_node.insert_node(FsNode {
        path: many_child_dir_path,
        basename: String::from("many_child_dir"),
        entry: FsEntryType::Directory { children: BTreeMap::new(), },
        mtime: Local::now(),
    })
    .expect("couldn't insert many_child_dir");
    root_node.insert_node(FsNode {
        path: single_child_dir_path,
        basename: String::from("single_child_dir"),
        entry: FsEntryType::Directory { children: BTreeMap::new(), },
        mtime: Local::now(),
    })
    .expect("couldn't insert single_child_dir");
    root_node.insert_node(FsNode {
        path: empty_dir_path,
        basename: String::from("empty_dir"),
        entry: FsEntryType::Directory { children: BTreeMap::new(), },
        mtime: Local::now(),
    })
    .expect("couldn't insert empty_dir");
    root_node.insert_node(FsNode {
        path: empty_file_path,
        basename: String::from("empty_file"),
        entry: FsEntryType::File { len: 0, },
        mtime: Local::now(),
    })
    .expect("couldn't insert empty_file");
    root_node.insert_node(FsNode {
        path: single_byte_file_path,
        basename: String::from("single_byte_file"),
        entry: FsEntryType::File { len: 1, },
        mtime: Local::now(),
    })
    .expect("couldn't insert single_byte_file");
    root_node.insert_node(FsNode {
        path: twenty_two_byte_file_path,
        basename: String::from("twenty_two_byte_file"),
        entry: FsEntryType::File { len: 22, },
        mtime: Local::now(),
    })
    .expect("couldn't insert twenty_two_byte_file");
    #[cfg(not(windows))]
    {
        root_node.insert_node(FsNode {
            path: symlink_to_file_path,
            basename: String::from("symlink_to_file"),
            entry: FsEntryType::Symlink {
                target: PathBuf::from(symlink_target_paths::FILE),
                ty: FsItemType::File,
            },
            mtime: Local::now(),
        })
        .expect("couldn't insert symlink_to_file");
        root_node.insert_node(FsNode {
            path: symlink_to_dir_path,
            basename: String::from("symlink_to_dir"),
            entry: FsEntryType::Symlink {
                target: PathBuf::from(symlink_target_paths::DIRECTORY),
                ty: FsItemType::Directory,
            },
            mtime: Local::now(),
        })
        .expect("couldn't insert symlink_to_dir");
    }
    // write to disk
    let res = root_node.base.mirror_to_disk();
    match res {
        Ok(_) => (),
        Err(why) => {
            panic!("unable to write tree to disk: {:?}", why);
        }
    }
    // figure there's maybe a second of wobble on either side in addition to however slowly
    // this test has run so far
    let acceptable_epsilon =
        Local::now().signed_duration_since(start_time).num_milliseconds().abs() + 1000;
    // assemble list of paths from walkdir
    let mut found_items = WalkDir::new(tmp.path())
        .into_iter()
        .map(|r| r.expect("unable to read from walkdir iterator").path().to_path_buf())
        .collect::<Vec<_>>();
    // the tree iterator must visit in the same (sorted) order walkdir reports
    let iteration_order = root_node.iter()
        .filter(|n| n.path.starts_with(tmp.path()))
        .map(|n| n.path.clone())
        .collect::<Vec<_>>();
    found_items.sort();
    assert_eq!(iteration_order, found_items);
    assert_eq!(to_find, found_items);
    // construct a fsrootnode, see if we round-tripped correctly
    let mut second_root_node = FsRootNode::new();
    second_root_node.add_root(tmp.path()).expect("unable to construct pair fs view");
    root_node.base.assert_eq_with_mtime_epsilon(&second_root_node.base, acceptable_epsilon)
}
// Walks a single file, checking the node built from it before and after a
// write changes its length.
#[test]
fn single_file_tmp_dir() {
    let tmp = TempDir::new("single").expect("unable to create temp directory");
    let tmp_path = tmp.path().join("test-file");
    let mut tmp_file = File::create(&tmp_path).expect("unable to create test file");
    let mut traversed = 0;
    for entry in WalkDir::new(&tmp_path) {
        let entry = entry.expect("walking directory");
        let node = FsNode::try_from_node_source(&entry).expect("creating node");
        assert_eq!(node.basename, "test-file");
        assert_eq!(node.path, tmp_path);
        assert_eq!(node.entry, FsEntryType::File { len: 0, });
        traversed += 1;
    }
    // walking a file path yields exactly that one entry
    assert_eq!(traversed, 1);
    write!(tmp_file, "7 bytes").expect("writing to temp file");
    traversed = 0;
    for entry in WalkDir::new(&tmp_path) {
        let entry = entry.expect("walking directory");
        let node = FsNode::try_from_node_source(&entry).expect("creating node");
        assert_eq!(node.basename, "test-file");
        assert_eq!(node.path, tmp_path);
        assert_eq!(node.entry, FsEntryType::File { len: 7, });
        traversed += 1;
    }
    assert_eq!(traversed, 1);
}
}
Only branch on matching the root a single time.
use std;
use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet};
use std::fmt::Display;
use std::fs::{DirEntry, metadata, read_link};
use std::path::{Components, Path, PathBuf};
use chrono::{DateTime, Local, UTC};
use walkdir;
use walkdir::WalkDir;
use errors::*;
use notify::DebouncedEvent;
use query::{FileResult, Query, QueryExpression, QueryResult};
use times::system_time_to_date_time;
#[cfg(test)]
use std::fs::{OpenOptions, create_dir_all};
/// One entry in the in-memory filesystem mirror.
#[derive(Clone, Debug, Eq, PartialEq, Serialize)]
pub struct FsNode {
    /// Full path of this entry.
    pub path: PathBuf,
    /// Final path component, duplicated for convenient lookup.
    pub basename: String,
    /// Directory/file/symlink payload.
    pub entry: FsEntryType,
    /// Last-modified time captured when the node was built.
    pub mtime: DateTime<Local>,
}
/// The kind of filesystem change reported to clients.
#[derive(Clone, Copy, Debug, Serialize)]
pub enum ChangeEvent {
    Create,
    Delete,
    Write,
    /// Metadata-only change (produced for chmod events).
    Metadata,
}
/// A single change entry: what happened, and to which file.
#[derive(Debug, Serialize)]
pub struct FileEvent {
    event: ChangeEvent,
    file: FileResult,
}
/// A batch of changes produced by consuming one notify event.
#[derive(Debug, Serialize)]
pub struct Notification {
    changes: Vec<FileEvent>,
    /// The registered root the changes fall under, when one matched.
    root: Option<PathBuf>,
}
/// The whole tree: a synthetic super-root plus the registered query roots.
#[derive(Debug, Serialize)]
pub struct FsRootNode {
    base: FsNode,
    /// Watched root path -> query expression registered for it.
    roots: BTreeMap<PathBuf, QueryExpression>,
}
impl FsRootNode {
    /// Returns a depth-first iterator over every node in the tree, starting
    /// at the synthetic super-root.
    // The previous version declared an `'a` parameter on the impl itself;
    // lifetime elision expresses the identical signature without it.
    pub fn iter(&self) -> FsIterator {
        FsIterator {
            to_visit: Vec::new(),
            current: Some(&self.base),
        }
    }
}
/// Lets `&FsRootNode` be used directly in `for` loops; delegates to `iter`.
impl<'a> IntoIterator for &'a FsRootNode {
    type Item = &'a FsNode;
    type IntoIter = FsIterator<'a>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// Depth-first traversal state: an explicit stack plus the node to yield next.
pub struct FsIterator<'a> {
    to_visit: Vec<&'a FsNode>,
    current: Option<&'a FsNode>,
}
impl<'a> Iterator for FsIterator<'a> {
    type Item = &'a FsNode;
    /// Pre-order, depth-first traversal. The current node's children are
    /// pushed in reverse so that popping the stack visits keys in ascending
    /// (BTreeMap) order; the current node itself is then yielded.
    fn next(&mut self) -> Option<Self::Item> {
        if let Some(node) = self.current {
            match node.entry {
                FsEntryType::RootRoot { ref children } |
                FsEntryType::Directory { ref children } => {
                    self.to_visit.extend(children.values().rev());
                }
                FsEntryType::File { .. } |
                FsEntryType::Symlink { .. } => (),
            }
        }
        let yielded = self.current;
        self.current = self.to_visit.pop();
        yielded
    }
}
impl FsRootNode {
/// Creates an empty tree with a synthetic "root of roots" node so that
/// multiple watched roots can live under one parent.
pub fn new() -> Self {
    let base = FsNode {
        path: PathBuf::from(""),
        basename: String::new(),
        entry: FsEntryType::RootRoot { children: BTreeMap::new(), },
        mtime: Local::now(),
    };
    FsRootNode {
        base: base,
        roots: BTreeMap::new(),
    }
}
/// Inserts `node` into the tree, creating any missing ancestor directories.
/// An existing node at the same path is replaced (BTreeMap insert).
fn insert_node(&mut self, node: FsNode) -> Result<()> {
    let basename = node.basename.clone();
    let path = node.path.clone();
    let mut components = path.components();
    // Drop the final component: we want the *parent* of the new node.
    components.next_back();
    match self.base
        .ensure_and_return_parent(PathBuf::new(), components)
        .chain_err(|| "unable to ensure parent for new node insertion")?
        .entry {
        FsEntryType::RootRoot { ref mut children } |
        FsEntryType::Directory { ref mut children } => children.insert(basename, node),
        _ => bail!("Found a non-directory node when ensuring parent of a node was in the tree"),
    };
    Ok(())
}
/// Removes and returns the node at `path`, if present; `None` for an empty
/// path or when no such node exists.
fn remove_node(&mut self, path: &Path) -> Option<FsNode> {
    let mut components = path.components();
    let basename = match components.next_back() {
        Some(last) => last.as_os_str().to_string_lossy(),
        None => return None,
    };
    match self.base.ensure_and_return_parent(PathBuf::new(), components) {
        Ok(parent) => {
            match parent.entry {
                FsEntryType::RootRoot { ref mut children } |
                FsEntryType::Directory { ref mut children } => children.remove(&*basename),
                _ => None,
            }
        }
        Err(_) => None,
    }
}
fn matching_root_if_needs_notification(&self, node: &FsNode) -> Option<&Path> {
for (ref root, expr) in &self.roots {
if node.path.starts_with(root) && expr.matches(&node) {
return Some(root);
}
}
None
}
/// Applies one debounced notify event to the in-memory tree.
///
/// Returns `Ok(None)` for notice-only events; otherwise a `Notification`
/// whose `changes` hold the events that matched a registered root's query
/// expression and whose `root` identifies that root when one matched.
pub fn consume_event(&mut self, event: DebouncedEvent) -> Result<Option<Notification>> {
    let mut changes = Vec::new();
    let mut event_root = None;
    match event {
        // TODO decide how to handle I/O notices vs. confirmations
        DebouncedEvent::NoticeWrite(_) => return Ok(None),
        DebouncedEvent::NoticeRemove(_) => return Ok(None),
        DebouncedEvent::Create(ref p) => {
            let node = FsNode::try_from_node_source(&p).chain_err(|| "constructing node from path")?;
            if let Some(root) = self.matching_root_if_needs_notification(&node) {
                changes.push(FileEvent {
                    event: ChangeEvent::Create,
                    file: FileResult::make(&node),
                });
                event_root = Some(root.to_owned());
            }
            self.insert_node(node)?;
        }
        DebouncedEvent::Write(ref p) => {
            let node = FsNode::try_from_node_source(&p).chain_err(|| "constructing node from path")?;
            if let Some(root) = self.matching_root_if_needs_notification(&node) {
                changes.push(FileEvent {
                    event: ChangeEvent::Write,
                    file: FileResult::make(&node),
                });
                event_root = Some(root.to_owned());
            }
            self.insert_node(node)?;
        }
        DebouncedEvent::Chmod(ref p) => {
            let node = FsNode::try_from_node_source(&p).chain_err(|| "constructing node from path")?;
            if let Some(root) = self.matching_root_if_needs_notification(&node) {
                changes.push(FileEvent {
                    event: ChangeEvent::Metadata,
                    file: FileResult::make(&node),
                });
                event_root = Some(root.to_owned());
            }
            self.insert_node(node)?;
        }
        DebouncedEvent::Remove(ref p) => {
            if let Some(old_node) = self.remove_node(p) {
                if let Some(root) = self.matching_root_if_needs_notification(&old_node) {
                    changes.push(FileEvent {
                        event: ChangeEvent::Delete,
                        file: FileResult::make(&old_node),
                    });
                    event_root = Some(root.to_owned());
                }
            }
        }
        DebouncedEvent::Rename(ref p, ref to) => {
            let new_node =
                FsNode::try_from_node_source(&to).chain_err(|| "constructing node from path")?;
            // Resolves the old FIXME: record the event root for the create
            // half of the rename too, consistent with the other branches.
            if let Some(root) = self.matching_root_if_needs_notification(&new_node) {
                changes.push(FileEvent {
                    event: ChangeEvent::Create,
                    file: FileResult::make(&new_node),
                });
                event_root = Some(root.to_owned());
            }
            // propagate insertion errors instead of panicking (was `.unwrap()`,
            // inconsistent with every other branch)
            self.insert_node(new_node)?;
            if let Some(old_node) = self.remove_node(p) {
                if let Some(root) = self.matching_root_if_needs_notification(&old_node) {
                    changes.push(FileEvent {
                        event: ChangeEvent::Delete,
                        file: FileResult::make(&old_node),
                    });
                    event_root = Some(root.to_owned());
                }
            }
        }
        DebouncedEvent::Rescan => {
            // ok so the old tree is invalid! yay!
            // build a new tree
            let mut new_fake_root = FsRootNode::new();
            for ref root in self.roots.keys() {
                new_fake_root.add_root(root)?;
            }
            // and make a best effort to notify our client of changes there
            self.base.diff(&new_fake_root.base, &mut changes);
            self.base = new_fake_root.base;
            // TODO segment this into multiple notifications, one per root?
            // event_root stays `None`: a rescan is not tied to a single root.
        }
        DebouncedEvent::Error(e, opt_p) => {
            bail!("notification error at path {:?}: {:?}", opt_p, e);
        }
    }
    Ok(Some(Notification {
        changes: changes,
        root: event_root,
    }))
}
/// Evaluates `query`, lazily adding its root to the tree on first use, and
/// returns every currently-known file matching the query expression. The
/// root/expression pair is then registered for future change notifications.
pub fn eval(&mut self, query: Query) -> Result<QueryResult> {
    // make sure all the files we care about are in the tree
    if !self.roots.contains_key(&query.root) {
        let start = UTC::now();
        info!("Adding {:?} root to file system view...", &query.root);
        self.add_root(&query.root)?;
        let duration = UTC::now().signed_duration_since(start);
        info!("Added {:?} to file system view, took {} seconds.",
              &query.root,
              duration);
    }
    // we only have one client right now, so the filesystem view should have very low overhead
    // compared to what watchman does, so i don't think we need generators
    let matched = self.iter()
        .filter(|n| n.path.starts_with(&query.root) && query.expr.matches(n))
        .map(|n| FileResult::make(n))
        .collect::<Vec<_>>();
    self.roots.insert(query.root, query.expr);
    Ok(QueryResult {
        id: query.id,
        files: matched,
    })
}
/// Walks `path` recursively and inserts a node for every entry found.
///
/// BUG FIX: the previous version mapped each entry to a nested
/// `Result<Result<()>, walkdir::Error>` and only propagated the *outer*
/// (walkdir) error through `collect`; errors produced while converting an
/// entry into a node were collected into a `Vec<Result<()>>` and silently
/// discarded. This version propagates both error sources.
pub fn add_root(&mut self, path: &Path) -> Result<()> {
    for entry in WalkDir::new(path) {
        // same walkdir-error conversion the original relied on via `collect()?`
        let f = entry?;
        let node = FsNode::try_from_node_source(&f)?;
        let mut components = f.path().components();
        // we need to remove the final element from the path, as it corresponds to the DirEntry
        components.next_back();
        let parent = self.base.ensure_and_return_parent(PathBuf::new(), components)?;
        match parent.entry {
            FsEntryType::Directory { ref mut children } => {
                let basename = node.basename.to_owned();
                children.insert(basename, node);
            }
            _ => bail!("Found a non-directory as the parent of a node."),
        }
    }
    Ok(())
}
}
impl FsNode {
/// This should only be called if the node is a root node.
///
/// Walks `components`, creating empty directory nodes for any missing
/// intermediate components, and returns a mutable reference to the node the
/// (already-truncated) component iterator addresses. Callers strip the final
/// component first, so the returned node is the *parent* of the entry of
/// interest.
// TODO move this to FsRootNode, so this can be statically enforced.
fn ensure_and_return_parent<'a, 'b>(
    &'a mut self,
    mut path_so_far: PathBuf,
    mut components: Components<'b>
) -> Result<&'a mut FsNode> {
    // Recursion terminates when the component iterator is exhausted.
    let component = components.next();
    if let Some(c) = component {
        let comp_str = match c.as_os_str().to_str() {
            Some(s) => s,
            None => bail!("non unicode filename found"),
        };
        path_so_far.push(comp_str);
        match self.entry {
            FsEntryType::Directory { ref mut children } |
            FsEntryType::RootRoot { ref mut children } => {
                // Missing intermediate directories are synthesized on the fly.
                children.entry(comp_str.to_owned())
                    .or_insert_with(|| FsNode::empty_dir(path_so_far.clone(), comp_str.to_owned()))
                    .ensure_and_return_parent(path_so_far, components)
            }
            _ => bail!("Files should not exist in the middle of a path component."),
        }
    } else {
        Ok(self)
    }
}
/// Recursively diffs `self` (the old tree) against `new_node` (the new tree),
/// pushing a `FileEvent` into `results` for every created, deleted, or
/// modified entry.
///
/// Both nodes must describe the same path and basename; a mismatch is a
/// caller bug and panics.
pub fn diff(&self, new_node: &FsNode, results: &mut Vec<FileEvent>) {
    if self.path != new_node.path {
        panic!("bug: called diff on nodes without equal paths");
    }
    if self.basename != new_node.basename {
        panic!("bug: called diff on nodes without equal basenames");
    }
    // we'll ignore mtime at this level, because it only really matters for files
    match (&self.entry, &new_node.entry) {
        (&FsEntryType::RootRoot { children: ref old_children },
         &FsEntryType::RootRoot { children: ref new_children }) |
        (&FsEntryType::Directory { children: ref old_children },
         &FsEntryType::Directory { children: ref new_children }) => {
            let old_files = old_children.keys().collect::<BTreeSet<_>>();
            let new_files = new_children.keys().collect::<BTreeSet<_>>();
            let in_both = old_files.intersection(&new_files).map(|s| *s).collect::<BTreeSet<_>>();
            for &deleted_basename in old_files.difference(&in_both) {
                let deleted_node = old_children.get(deleted_basename).unwrap();
                results.push(FileEvent {
                    event: ChangeEvent::Delete,
                    file: FileResult::make(deleted_node),
                });
            }
            for &created_basename in new_files.difference(&in_both) {
                let created_node = new_children.get(created_basename).unwrap();
                results.push(FileEvent {
                    event: ChangeEvent::Create,
                    file: FileResult::make(created_node),
                });
            }
            for basename in in_both {
                let before = old_children.get(basename).unwrap();
                // BUG FIX: `after` must come from `new_children`; it was
                // previously read from `old_children`, so modifications to
                // entries present in both trees were never detected.
                let after = new_children.get(basename).unwrap();
                before.diff(after, results);
            }
        }
        (&FsEntryType::File { len: old_length }, &FsEntryType::File { len: new_length }) => {
            match self.mtime.cmp(&new_node.mtime) {
                // a strictly newer mtime always counts as a write
                ::std::cmp::Ordering::Less => {
                    results.push(FileEvent {
                        event: ChangeEvent::Write,
                        file: FileResult::make(&new_node),
                    });
                }
                _ => {
                    // only generate a notification b/c of length if the mtimes are the same
                    if old_length != new_length {
                        results.push(FileEvent {
                            event: ChangeEvent::Write,
                            file: FileResult::make(&new_node),
                        });
                    }
                }
            }
        }
        (&FsEntryType::Symlink { target: ref old_target, ty: old_ty },
         &FsEntryType::Symlink { target: ref new_target, ty: new_ty }) => {
            if old_target != new_target || old_ty != new_ty {
                results.push(FileEvent {
                    event: ChangeEvent::Write,
                    file: FileResult::make(&new_node),
                });
            }
        }
        // if the node types differ, just generate create/delete events for everything
        _ => {
            self.gen_events_for_self_and_children(ChangeEvent::Delete, results);
            new_node.gen_events_for_self_and_children(ChangeEvent::Create, results);
        }
    }
}
/// Emits `event` for this node and, recursively, for every node beneath it.
fn gen_events_for_self_and_children(&self, event: ChangeEvent, changes: &mut Vec<FileEvent>) {
    changes.push(FileEvent {
        event: event,
        file: FileResult::make(self),
    });
    // Only directory-like entries carry children to recurse into.
    match self.entry {
        FsEntryType::RootRoot { ref children } |
        FsEntryType::Directory { ref children } => {
            for child in children.values() {
                child.gen_events_for_self_and_children(event, changes);
            }
        }
        FsEntryType::File { .. } |
        FsEntryType::Symlink { .. } => (),
    }
}
/// Builds a directory node with no children, stamped with the current time.
pub fn empty_dir(path: PathBuf, dirname: String) -> Self {
    FsNode {
        path: path,
        basename: dirname,
        mtime: Local::now(),
        entry: FsEntryType::Directory { children: BTreeMap::new(), },
    }
}
/// Builds an `FsNode` from anything that can supply a path plus metadata
/// (a walkdir entry, a std `DirEntry`, or a bare path from a notify event).
fn try_from_node_source<S: NodeSource>(entry: &S) -> Result<Self> {
    // Non-unicode basenames and paths without a final component are rejected.
    let basename = match entry.path().file_name() {
        Some(n) => {
            match n.to_str() {
                Some(b) => b.to_owned(),
                None => bail!("non-unicode filename"),
            }
        }
        None => bail!("walkdir gave us a relative path"),
    };
    let metadata = entry.metadata()?;
    let file_type = metadata.ty;
    let entry_ty = match file_type {
        FsItemType::Directory => FsEntryType::Directory { children: BTreeMap::new(), },
        FsItemType::File => FsEntryType::File { len: metadata.len, },
        FsItemType::SymlinkUgh => {
            // Resolve the link and classify its target; an unreadable target
            // is recorded as `Other` rather than treated as an error.
            let sym_path = read_link(entry.path()).chain_err(|| "unable to read through symlink")?;
            let sym_target_type = match sym_path.metadata() {
                Ok(sym_meta) => {
                    if sym_meta.file_type().is_file() {
                        FsItemType::File
                    } else if sym_meta.file_type().is_dir() {
                        FsItemType::Directory
                    } else if sym_meta.file_type().is_symlink() {
                        FsItemType::SymlinkUgh
                    } else {
                        FsItemType::Other
                    }
                }
                Err(_) => FsItemType::Other,
            };
            FsEntryType::Symlink {
                target: sym_path,
                ty: sym_target_type,
            }
        }
        // `Other` is only produced when classifying symlink targets above.
        FsItemType::Other => {
            panic!("This is only ever used when reading symlink targets, this is a bug.");
        }
    };
    Ok(FsNode {
        path: entry.path().into_owned(),
        basename: basename,
        entry: entry_ty,
        mtime: metadata.mtime,
    })
}
/// Renders this node (and, recursively, its children — two spaces of indent
/// per depth level) into `buf`.
fn format_into_buffer(&self, buf: &mut String, depth: u32) {
    let indent = depth * 2;
    for _ in 0..indent {
        buf.push(' ');
    }
    buf.push_str(&self.basename);
    match self.entry {
        FsEntryType::RootRoot { ref children } |
        FsEntryType::Directory { ref children } => {
            buf.push('\n');
            for child in children.values() {
                child.format_into_buffer(buf, depth + 1);
            }
        }
        FsEntryType::File { len } => {
            buf.push_str(&format!(" {} {}\n", len, self.mtime));
        }
        FsEntryType::Symlink { ref target, ty } => {
            buf.push_str(&format!(" -> {:?} ({:?})\n", target, ty));
        }
    }
}
}
impl Display for FsNode {
    /// Renders the whole subtree via `format_into_buffer`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
        let mut rendered = String::new();
        self.format_into_buffer(&mut rendered, 0);
        // Write errors are deliberately ignored here, as in the original.
        let _ = f.write_str(&rendered);
        Ok(())
    }
}
// Unix has a single symlink kind, so `ty` is irrelevant here.
#[cfg(all(test, not(windows)))]
#[allow(unused_variables)]
fn make_symlink(src: &Path, dst: &Path, ty: FsItemType) -> Result<()> {
    use std::os::unix::fs::symlink;
    symlink(src, dst).chain_err(|| "unable to create symlink")?;
    Ok(())
}
// Windows distinguishes file vs. directory symlinks at creation time.
#[cfg(all(test, windows))]
fn make_symlink(src: &Path, dst: &Path, ty: FsItemType) -> Result<()> {
    use std::os::windows::fs::{symlink_dir, symlink_file};
    match ty {
        FsItemType::Directory => {
            symlink_dir(src, dst).chain_err(|| "unable to create directory symlink")?;
        }
        FsItemType::File | FsItemType::SymlinkUgh => {
            symlink_file(src, dst).chain_err(|| "unable to create file symlink")?;
        }
        // we don't want to try to write any of this stuff to disk
        FsItemType::Other => (),
    }
    Ok(())
}
/// The kind-specific payload of an `FsNode`: what the path *is*, plus the
/// data needed to describe it (children, length, or link target).
#[derive(Clone, Debug, Eq, PartialEq, Serialize)]
pub enum FsEntryType {
    /// A directory; children are keyed by basename, kept sorted by the map.
    Directory { children: BTreeMap<String, FsNode>, },
    /// A regular file; only its byte length is recorded here (the mtime
    /// lives on the owning `FsNode`).
    File { len: u64, },
    /// A symbolic link: the path it points at and the type of that target.
    Symlink { target: PathBuf, ty: FsItemType, },
    // different than a root path element (thanks, windows)
    /// Synthetic super-root holding every filesystem root as a child.
    RootRoot { children: BTreeMap<String, FsNode>, },
}
/// Coarse filesystem-object classification, mirroring the file/dir/symlink
/// split of `std::fs::FileType` plus a catch-all `Other` for anything else
/// (devices, sockets, unreadable entries, ...).
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum FsItemType {
    #[serde(rename="file")]
    File,
    #[serde(rename="dir")]
    Directory,
    #[serde(rename="symlink")]
    SymlinkUgh,
    #[serde(rename="other")]
    Other,
}
/// The subset of filesystem metadata this module cares about, normalized
/// from the various `NodeSource` implementations below.
struct MetadataFromFs {
    /// What kind of filesystem object the path refers to.
    ty: FsItemType,
    /// Last modification time, in the local timezone.
    mtime: DateTime<Local>,
    /// Size in bytes, as reported by `Metadata::len`.
    len: u64,
}
/// Abstraction over the different directory-entry sources a node can be
/// built from (`&PathBuf`, `walkdir::DirEntry`, `std::fs::DirEntry`).
trait NodeSource {
    /// The path this source refers to; borrowed when the source already
    /// holds a path, owned when one must be allocated.
    fn path(&self) -> Cow<Path>;
    /// Read and normalize the filesystem metadata for this path.
    fn metadata(&self) -> Result<MetadataFromFs>;
}
impl<'a> NodeSource for &'a PathBuf {
    fn path(&self) -> Cow<Path> {
        // We already hold a path, so no allocation is needed.
        Cow::Borrowed(self.as_ref())
    }
    fn metadata(&self) -> Result<MetadataFromFs> {
        // `metadata` follows symlinks, so a symlink to a file reports as a
        // file here; the non-file/non-dir fallback is classified as a link.
        let md = metadata(self).chain_err(|| "unable to read path metadata")?;
        let modified = md.modified().chain_err(|| "unable to read mtime")?;
        let ty = if md.is_file() {
            FsItemType::File
        } else if md.is_dir() {
            FsItemType::Directory
        } else {
            FsItemType::SymlinkUgh
        };
        Ok(MetadataFromFs {
            ty: ty,
            mtime: system_time_to_date_time(modified),
            len: md.len(),
        })
    }
}
impl NodeSource for walkdir::DirEntry {
    fn path(&self) -> Cow<Path> {
        // NOTE: `self.path()` here resolves to walkdir's *inherent* method
        // (inherent methods win over trait methods), so this does not
        // recurse. The inherent method already returns `&Path`; the previous
        // extra `&` produced a `&&Path` that only worked via deref coercion.
        Cow::Borrowed(self.path())
    }
    fn metadata(&self) -> Result<MetadataFromFs> {
        // As above, `self.metadata()` resolves to walkdir's inherent
        // accessor, not to this trait method.
        let md = self.metadata().chain_err(|| "unable to read file metadata")?;
        let mtime = system_time_to_date_time(md.modified()
            .chain_err(|| "unable to read modification time")?);
        Ok(MetadataFromFs {
            mtime: mtime,
            len: md.len(),
            ty: if md.is_file() {
                FsItemType::File
            } else if md.is_dir() {
                FsItemType::Directory
            } else {
                FsItemType::SymlinkUgh
            },
        })
    }
}
impl NodeSource for DirEntry {
    fn path(&self) -> Cow<Path> {
        // `std::fs::DirEntry::path()` allocates a fresh `PathBuf`, so hand
        // ownership straight to the Cow.
        Cow::Owned(self.path())
    }
    fn metadata(&self) -> Result<MetadataFromFs> {
        // `self.metadata()` resolves to the inherent `std::fs::DirEntry`
        // accessor (inherent methods beat trait methods), not this fn.
        let md = self.metadata().chain_err(|| "unable to read file metadata")?;
        let modified = md.modified().chain_err(|| "unable to read file modified time")?;
        let ty = if md.is_file() {
            FsItemType::File
        } else if md.is_dir() {
            FsItemType::Directory
        } else {
            FsItemType::SymlinkUgh
        };
        Ok(MetadataFromFs {
            ty: ty,
            mtime: system_time_to_date_time(modified),
            len: md.len(),
        })
    }
}
#[cfg(test)]
mod test {
    //! Round-trip tests: build an `FsNode` tree in memory, mirror it onto a
    //! real (temporary) filesystem, re-scan it, and check both views agree.
    use chrono::Local;
    use std::fs::File;
    use std::io::prelude::*;
    use std::path::PathBuf;
    use tempdir::TempDir;
    use walkdir::WalkDir;
    use super::*;
    impl FsNode {
        /// WARNING: Only call this in tests when you've carefully constructed a tree inside a
        /// temporary directory. Potentially destructive, and not easily reversed.
        ///
        /// Also, requires that the process has `SeCreateSymbolicLinkPrivilege`.
        fn mirror_to_disk(&self) -> Result<()> {
            match self.entry {
                // The synthetic super-root has no on-disk representation;
                // just recurse into the real roots.
                FsEntryType::RootRoot { ref children } => {
                    for child in children.values() {
                        child.mirror_to_disk()?;
                    }
                }
                FsEntryType::Directory { ref children } => {
                    create_dir_all(&self.path).chain_err(|| "unable to recursively create directories")?;
                    for (_, child) in children {
                        child.mirror_to_disk()?
                    }
                }
                FsEntryType::File { len } => {
                    // Only the length matters for the round-trip, so create
                    // an empty file and stretch it with set_len.
                    let file = OpenOptions::new().read(true)
                        .write(true)
                        .create(true)
                        .open(&self.path)
                        .chain_err(|| "unable to open/create file")?;
                    file.set_len(len).chain_err(|| "unable to set file length")?;
                }
                FsEntryType::Symlink { ref target, ty } => {
                    make_symlink(target, &self.path, ty)?;
                }
            }
            Ok(())
        }
        /// Tree-wide equality assertion that tolerates mtime differences up
        /// to `acceptable_time_gap_millis` (filesystem timestamps are fuzzy
        /// relative to `Local::now()` captured during tree construction).
        fn assert_eq_with_mtime_epsilon(&self, other: &FsNode, acceptable_time_gap_millis: i64) {
            if self.path != other.path {
                panic!("paths are not equal\nlhs: {:?}\nrhs: {:?}", self, other);
            }
            if self.basename != other.basename {
                panic!("basenames are not equal\nlhs: {:?}\nrhs: {:?}", self, other);
            }
            if self.mtime.signed_duration_since(other.mtime).num_milliseconds().abs() >
               acceptable_time_gap_millis {
                panic!("mtimes are not equal (w/in {:?} tolerance): {} and {}\nlhs:{:?}\nrhs:{:?}",
                       acceptable_time_gap_millis,
                       self.mtime,
                       other.mtime,
                       self,
                       other);
            }
            match (&self.entry, &other.entry) {
                (&FsEntryType::File { len: len1 }, &FsEntryType::File { len: len2 }) => {
                    if len1 != len2 {
                        panic!("file lengths are not equal\nlhs: {:?}\nrhs: {:?}",
                               self,
                               other);
                    }
                }
                (&FsEntryType::RootRoot { children: ref children1 },
                 &FsEntryType::RootRoot { children: ref children2 }) |
                (&FsEntryType::Directory { children: ref children1 },
                 &FsEntryType::Directory { children: ref children2 }) => {
                    // these are btreemaps so will have the same sort order
                    // NOTE(review): zip stops at the shorter map, so extra
                    // children on one side go undetected — confirm intended.
                    for ((basename1, child1), (basename2, child2)) in children1.iter().zip(children2.iter()) {
                        if basename1 != basename2 {
                            panic!("children basenames are not equal\nlhs: {:?}\nrhs: {:?}",
                                   child1,
                                   child2);
                        }
                        child1.assert_eq_with_mtime_epsilon(child2, acceptable_time_gap_millis);
                    }
                }
                (&FsEntryType::Symlink { target: ref target1, ty: ty1 },
                 &FsEntryType::Symlink { target: ref target2, ty: ty2 }) => {
                    if ty1 != ty2 {
                        panic!("symlink types are not equal\nlhs: {:?}\nrhs: {:?}",
                               self,
                               other);
                    }
                    if target1 != target2 {
                        panic!("symlink targets are not equal\nlhs: {:?}\nrhs: {:?}",
                               self,
                               other);
                    }
                }
                _ => {
                    panic!("node entry types are not equal\nlhs: {:?}\nrhs: {:?}",
                           self,
                           other)
                }
            }
        }
    }
    // Symlink targets that should exist on any test machine for the platform.
    #[cfg(not(windows))]
    mod symlink_target_paths {
        pub const FILE: &'static str = "/bin/bash";
        pub const DIRECTORY: &'static str = "/usr";
    }
    #[cfg(windows)]
    mod symlink_target_paths {
        #[allow(dead_code)]
        pub const FILE: &'static str = "C:\\Windows\\explorer.exe";
        #[allow(dead_code)]
        pub const DIRECTORY: &'static str = "C:\\Windows";
    }
    /// Full round-trip: build a fixed tree in memory, mirror it to a temp
    /// dir, walk the temp dir, and check both path order and tree equality.
    /// Symlink cases are unix-only (Windows needs a special privilege).
    #[test]
    fn fixed_roundtrip() {
        let start_time = Local::now();
        let tmp = TempDir::new("mirroring").expect("couldn't create temp dir");
        // make the fake filesystem, keeping a list of paths created
        let many_child_dir_path = tmp.path().join("many_child_dir");
        let single_child_dir_path = many_child_dir_path.join("single_child_dir");
        let empty_dir_path = many_child_dir_path.join("empty_dir");
        let empty_file_path = single_child_dir_path.join("empty_file");
        let single_byte_file_path = many_child_dir_path.join("single_byte_file");
        let twenty_two_byte_file_path = many_child_dir_path.join("twenty_two_byte_file");
        let symlinks_path = many_child_dir_path.join("symlinks");
        let symlink_to_file_path = symlinks_path.join("symlink_to_file");
        let symlink_to_dir_path = symlinks_path.join("symlink_to_dir");
        // Expected walk results, sorted for comparison with the sorted
        // walkdir output below.
        let to_find = {
            let mut to_find = vec![ tmp.path().to_path_buf(),
                                    many_child_dir_path.clone(),
                                    single_child_dir_path.clone(),
                                    empty_dir_path.clone(),
                                    empty_file_path.clone(),
                                    single_byte_file_path.clone(),
                                    twenty_two_byte_file_path.clone() ];
            #[cfg(not(windows))]
            {
                to_find.push(symlinks_path);
                to_find.push(symlink_to_file_path.clone());
                to_find.push(symlink_to_dir_path.clone());
            }
            to_find.sort();
            to_find
        };
        let mut root_node = FsRootNode::new();
        root_node.insert_node(FsNode {
            path: many_child_dir_path,
            basename: String::from("many_child_dir"),
            entry: FsEntryType::Directory { children: BTreeMap::new(), },
            mtime: Local::now(),
        })
        .expect("couldn't insert many_child_dir");
        root_node.insert_node(FsNode {
            path: single_child_dir_path,
            basename: String::from("single_child_dir"),
            entry: FsEntryType::Directory { children: BTreeMap::new(), },
            mtime: Local::now(),
        })
        .expect("couldn't insert single_child_dir");
        root_node.insert_node(FsNode {
            path: empty_dir_path,
            basename: String::from("empty_dir"),
            entry: FsEntryType::Directory { children: BTreeMap::new(), },
            mtime: Local::now(),
        })
        .expect("couldn't insert empty_dir");
        root_node.insert_node(FsNode {
            path: empty_file_path,
            basename: String::from("empty_file"),
            entry: FsEntryType::File { len: 0, },
            mtime: Local::now(),
        })
        .expect("couldn't insert empty_file");
        root_node.insert_node(FsNode {
            path: single_byte_file_path,
            basename: String::from("single_byte_file"),
            entry: FsEntryType::File { len: 1, },
            mtime: Local::now(),
        })
        .expect("couldn't insert single_byte_file");
        root_node.insert_node(FsNode {
            path: twenty_two_byte_file_path,
            basename: String::from("twenty_two_byte_file"),
            entry: FsEntryType::File { len: 22, },
            mtime: Local::now(),
        })
        .expect("couldn't insert twenty_two_byte_file");
        #[cfg(not(windows))]
        {
            root_node.insert_node(FsNode {
                path: symlink_to_file_path,
                basename: String::from("symlink_to_file"),
                entry: FsEntryType::Symlink {
                    target: PathBuf::from(symlink_target_paths::FILE),
                    ty: FsItemType::File,
                },
                mtime: Local::now(),
            })
            .expect("couldn't insert symlink_to_file");
            root_node.insert_node(FsNode {
                path: symlink_to_dir_path,
                basename: String::from("symlink_to_dir"),
                entry: FsEntryType::Symlink {
                    target: PathBuf::from(symlink_target_paths::DIRECTORY),
                    ty: FsItemType::Directory,
                },
                mtime: Local::now(),
            })
            .expect("couldn't insert symlink_to_dir");
        }
        // write to disk
        let res = root_node.base.mirror_to_disk();
        match res {
            Ok(_) => (),
            Err(why) => {
                panic!("unable to write tree to disk: {:?}", why);
            }
        }
        // figure there's maybe a second of wobble on either side in addition to however slowly
        // this test has run so far
        let acceptable_epsilon =
            Local::now().signed_duration_since(start_time).num_milliseconds().abs() + 1000;
        // assemble list of paths from walkdir
        let mut found_items = WalkDir::new(tmp.path())
            .into_iter()
            .map(|r| r.expect("unable to read from walkdir iterator").path().to_path_buf())
            .collect::<Vec<_>>();
        // Only compare nodes under the temp dir; the root node also tracks
        // the temp dir's ancestors.
        let iteration_order = root_node.iter()
            .filter(|n| n.path.starts_with(tmp.path()))
            .map(|n| n.path.clone())
            .collect::<Vec<_>>();
        found_items.sort();
        assert_eq!(iteration_order, found_items);
        assert_eq!(to_find, found_items);
        // construct a fsrootnode, see if we round-tripped correctly
        let mut second_root_node = FsRootNode::new();
        second_root_node.add_root(tmp.path()).expect("unable to construct pair fs view");
        root_node.base.assert_eq_with_mtime_epsilon(&second_root_node.base, acceptable_epsilon)
    }
    /// Walk a single file twice — once empty, once after writing 7 bytes —
    /// and check `try_from_node_source` reports the right basename/len.
    #[test]
    fn single_file_tmp_dir() {
        let tmp = TempDir::new("single").expect("unable to create temp directory");
        let tmp_path = tmp.path().join("test-file");
        let mut tmp_file = File::create(&tmp_path).expect("unable to create test file");
        let mut traversed = 0;
        for entry in WalkDir::new(&tmp_path) {
            let entry = entry.expect("walking directory");
            let node = FsNode::try_from_node_source(&entry).expect("creating node");
            assert_eq!(node.basename, "test-file");
            assert_eq!(node.path, tmp_path);
            assert_eq!(node.entry, FsEntryType::File { len: 0, });
            traversed += 1;
        }
        // Walking a file path should yield exactly that one entry.
        assert_eq!(traversed, 1);
        write!(tmp_file, "7 bytes").expect("writing to temp file");
        traversed = 0;
        for entry in WalkDir::new(&tmp_path) {
            let entry = entry.expect("walking directory");
            let node = FsNode::try_from_node_source(&entry).expect("creating node");
            assert_eq!(node.basename, "test-file");
            assert_eq!(node.path, tmp_path);
            assert_eq!(node.entry, FsEntryType::File { len: 7, });
            traversed += 1;
        }
        assert_eq!(traversed, 1);
    }
}
|
//! Game Boy GPU emulation
use std::fmt::{Show, Formatter, Error};
use ui::Display;
use gpu::sprite::Sprite;
mod sprite;
/// GPU state.
///
/// Holds LCD control/status configuration, palettes, scroll/window
/// registers, OAM and VRAM, plus a per-line sprite cache.
pub struct Gpu<'a> {
    /// Emulator Display
    display: &'a mut (Display + 'a),
    /// Current line. [0,143] is active video, [144,153] is blanking.
    line: u8,
    /// Counter for the horizontal period
    htick: u16,
    /// Object attritube memory
    // NOTE(review): 0xa0 is the OAM size in *bytes* (40 sprites x 4 bytes),
    // but this array holds 0xa0 Sprite structs; only indices 0..40 are ever
    // addressed through `oam()`/`set_oam` — confirm whether 40 was intended.
    oam: [Sprite, ..0xa0],
    /// Video Ram
    vram: [u8, ..0x2000],
    /// `true` if the LCD is enabled.
    enabled: bool,
    /// Which tile map the window uses
    window_tile_map: TileMap,
    /// `true` if window display is enabled
    window_enabled: bool,
    /// Which tile set Background and Window use
    bg_win_tile_set: TileSet,
    /// Which tile map background uses
    bg_tile_map: TileMap,
    /// Resolution of the sprites
    sprite_size: SpriteSize,
    /// `true` if sprites are displayed
    sprites_enabled: bool,
    /// `true` if background display is enabled
    bg_enabled: bool,
    /// Background palette
    bgp: Palette,
    /// Object palette 0
    obp0: Palette,
    /// Object palette 1
    obp1: Palette,
    /// Line compare
    lyc: u8,
    /// VBlank interrupt status
    it_vblank: bool,
    /// LYC match interrupt enable (IT when LY == LYC)
    iten_lyc: bool,
    /// Interrupt during prelude (mode == 2)
    iten_prelude: bool,
    /// Interrupt during vblank (mode == 1). This is not the same as
    /// `it_vblank` above: it_vblank fires with a higher priority and
    /// is not shared with other interrupt sources like this one.
    iten_vblank: bool,
    /// Interrupt during hblank (mode == 0)
    iten_hblank: bool,
    /// Lcdc interrupt status
    lcd_it_status: LcdItStatus,
    /// Background y position
    scy: u8,
    /// Background x position
    scx: u8,
    /// Window top-left x position + 7
    wx: u8,
    /// Window top-left y position.
    wy: u8,
    /// Sprites displayed on each line. Contains an index into OAM or
    /// None. There can't be more than 10 sprites displayed on each
    /// line.
    line_cache: [[Option<u8>, ..10], ..144],
}
/// Current GPU mode
///
/// The discriminant values match the 2-bit mode field of the STAT
/// register (see `Gpu::stat`, which ORs `mode() as u8` in directly).
#[deriving(Show, PartialEq)]
pub enum Mode {
    /// In horizontal blanking
    HBlank = 0,
    /// In vertical blanking
    VBlank = 1,
    /// Accessing sprite memory, Sprite attributes RAM [0xfe00, 0xfe9f]
    /// can't be accessed
    Prelude = 2,
    /// Accessing sprite memory and video memory [0x8000, 0x9fff],
    /// both can't be accessed from CPU
    Active = 3,
}
/// State of the LCD interrupt (as controlled by the STAT
/// register).
///
/// I'm not absolutely certain I got things right but if I understand
/// correctly: the interrupt source is configurable (LYC, prelude,
/// vblank, hblank). The way I see it all those interrupt sources are
/// ORed together and an interrupt is only signaled on a rising edge
/// of the ORed signal.
///
/// So for instance if the LYC and HBlank interrupts are enabled and
/// we're at the matched line, the interrupt will trigger at the
/// beginning of the line (LY == LYC) but not at the beginning of
/// hblank (since the IT line is already high).
/// However, if the LYC register value is changed in the middle of the
/// line and the LY == LYC is no longer true, the IT signal will go
/// low and can be triggered again in the same line.
///
/// The transitions between these states are driven by
/// `Gpu::update_ldc_interrupt` and `Gpu::ack_it_lcd`.
#[deriving(PartialEq)]
enum LcdItStatus {
    /// Interrupt is inactive
    Inactive,
    /// Interrupt event occured
    Triggered,
    /// Interrupt event occured and has been acknowledged. It will be
    /// rearmed when the signal goes low.
    Acked,
}
impl<'a> Gpu<'a> {
    /// Create a new Gpu instance.
    pub fn new<'n>(display: &'n mut Display) -> Gpu<'n> {
        Gpu { line: 0,
              htick: 0,
              oam: [Sprite::new(), ..0xa0],
              vram: [0xca, ..0x2000],
              display: display,
              enabled: false,
              window_tile_map: TileMap::Low,
              window_enabled: false,
              bg_win_tile_set: TileSet::Set0,
              bg_tile_map: TileMap::Low,
              sprite_size: SpriteSize::Sz8x8,
              sprites_enabled: false,
              bg_enabled: false,
              bgp: Palette::from_reg(0xff),
              obp0: Palette::from_reg(0xff),
              obp1: Palette::from_reg(0xff),
              lyc: 0x00,
              it_vblank: false,
              iten_lyc: false,
              iten_prelude: false,
              iten_vblank: false,
              iten_hblank: false,
              lcd_it_status: LcdItStatus::Inactive,
              scy: 0,
              scx: 0,
              wx: 0,
              wy: 0,
              line_cache: [[None, ..10], ..144],
        }
    }
    /// Called at each tick of the system clock. Move the emulated
    /// state one step forward.
    pub fn step(&mut self) {
        if !self.enabled {
            return;
        }
        self.htick = (self.htick + 1) % timings::HTOTAL;
        if self.htick == timings::HSYNC_ON {
            // Entering horizontal blanking
            self.line = (self.line + 1) % timings::VTOTAL;
            if self.line == timings::VSYNC_ON {
                // We're entering vertical blanking, we're done drawing the
                // current frame
                self.end_of_frame()
            }
        }
        // Compute at which cycle the first pixel will actually be
        // output on the screen. I don't know where this comes from
        // but it's what GearBoy seems to use. Using 48 for the first
        // line messes up The Legend of Zelda's intro.
        let line_start = match self.line {
            0 => 160,
            _ => 48,
        };
        if self.htick == line_start && self.line < timings::VSYNC_ON {
            // It's time to draw the current line
            let y = self.line;
            for x in range(0, 160) {
                self.render_pixel(x, y);
            }
        }
        self.update_ldc_interrupt();
    }
    /// Return current GPU mode
    pub fn mode(&self) -> Mode {
        if self.line < timings::VSYNC_ON {
            if self.htick < timings::HACTIVE_ON {
                Mode::Prelude
            } else if self.htick < timings::HSYNC_ON {
                Mode::Active
            } else {
                Mode::HBlank
            }
        } else {
            Mode::VBlank
        }
    }
    /// Handle reconfig through LCDC register
    pub fn set_lcdc(&mut self, lcdc: u8) {
        self.enabled = lcdc & 0x80 != 0;
        self.window_tile_map = match lcdc & 0x40 != 0 {
            true => TileMap::High,
            false => TileMap::Low,
        };
        self.window_enabled = lcdc & 0x20 != 0;
        self.bg_win_tile_set = match lcdc & 0x10 != 0 {
            true => TileSet::Set1,
            false => TileSet::Set0,
        };
        self.bg_tile_map = match lcdc & 0x08 != 0 {
            true => TileMap::High,
            false => TileMap::Low,
        };
        let new_sprite_size = match lcdc & 0x04 != 0 {
            false => SpriteSize::Sz8x8,
            true => SpriteSize::Sz8x16,
        };
        self.sprites_enabled = lcdc & 0x02 != 0;
        self.bg_enabled = lcdc & 0x01 != 0;
        if !self.enabled {
            // Reset to the first pixel to start back here once we're
            // re-enabled.
            self.line = 0;
            self.htick = 0;
        }
        if new_sprite_size != self.sprite_size {
            self.sprite_size = new_sprite_size;
            self.rebuild_line_cache();
        }
    }
    /// Generate value of lcdc register
    pub fn lcdc(&self) -> u8 {
        let mut r = 0;
        r |= (self.enabled as u8) << 7;
        r |= match self.window_tile_map {
            TileMap::High => 1,
            TileMap::Low => 0,
        } << 6;
        r |= (self.window_enabled as u8) << 5;
        r |= match self.bg_win_tile_set {
            TileSet::Set1 => 1,
            TileSet::Set0 => 0
        } << 4;
        r |= match self.bg_tile_map {
            TileMap::High => 1,
            TileMap::Low => 0,
        } << 3;
        r |= match self.sprite_size {
            SpriteSize::Sz8x16 => 1,
            SpriteSize::Sz8x8 => 0,
        } << 2;
        r |= (self.sprites_enabled as u8) << 1;
        r |= (self.bg_enabled as u8) << 0;
        r
    }
    /// Generate value of the STAT register: interrupt enables in bits
    /// 6-3, LY == LYC flag in bit 2, current mode in bits 1-0.
    pub fn stat(&self) -> u8 {
        let mut r = 0;
        let c = self.lyc == self.line;
        r |= (self.iten_lyc as u8) << 6;
        r |= (self.iten_prelude as u8) << 5;
        r |= (self.iten_vblank as u8) << 4;
        r |= (self.iten_hblank as u8) << 3;
        r |= (c as u8) << 2;
        // Apparently mode is 0 when disabled
        if self.enabled {
            r |= self.mode() as u8;
        }
        r
    }
    /// Handle reconfig through the STAT register (interrupt enable
    /// bits only, the other fields are read-only).
    pub fn set_stat(&mut self, stat: u8) {
        self.iten_lyc = stat & 0x40 != 0;
        self.iten_prelude = stat & 0x20 != 0;
        self.iten_vblank = stat & 0x10 != 0;
        // BUGFIX: the hblank (mode 0) interrupt enable is bit 3 (0x08),
        // mirroring the `<< 3` in `stat()` above; this previously tested
        // `stat & 0x03`, i.e. the read-only mode bits.
        self.iten_hblank = stat & 0x08 != 0;
        // Other fields are R/O
        // Update interrupt status with new stat params
        self.update_ldc_interrupt();
    }
    /// Return value of SCY register
    pub fn scy(&self) -> u8 {
        self.scy
    }
    /// Handle reconfiguration of SCY register
    pub fn set_scy(&mut self, scy: u8) {
        self.scy = scy;
    }
    /// Return value of SCX register
    pub fn scx(&self) -> u8 {
        self.scx
    }
    /// Handle reconfiguration of SCX register
    pub fn set_scx(&mut self, scx: u8) {
        self.scx = scx;
    }
    /// Handle reconfiguration of the lyc register
    pub fn set_lyc(&mut self, lyc: u8) {
        self.lyc = lyc;
    }
    /// Return value of the lyc register
    pub fn lyc(&self) -> u8 {
        self.lyc
    }
    /// Handle reconfiguration of the background palette
    pub fn set_bgp(&mut self, bgp: u8) {
        self.bgp = Palette::from_reg(bgp);
    }
    /// Return value of the background palette register
    pub fn bgp(&self) -> u8 {
        self.bgp.into_reg()
    }
    /// Handle reconfiguration of the sprite palette 0
    pub fn set_obp0(&mut self, obp0: u8) {
        self.obp0 = Palette::from_reg(obp0);
    }
    /// Return value of the sprite palette 0 register
    pub fn obp0(&self) -> u8 {
        self.obp0.into_reg()
    }
    /// Handle reconfiguration of the sprite palette 1
    pub fn set_obp1(&mut self, obp1: u8) {
        self.obp1 = Palette::from_reg(obp1);
    }
    /// Return value of the sprite palette 1 register
    pub fn obp1(&self) -> u8 {
        self.obp1.into_reg()
    }
    /// Return number of line currently being drawn
    pub fn line(&self) -> u8 {
        self.line
    }
    /// Return value of wy register
    pub fn wy(&self) -> u8 {
        self.wy
    }
    /// Handle reconfiguration of wy register
    pub fn set_wy(&mut self, wy: u8) {
        self.wy = wy
    }
    /// Return value of wx register
    pub fn wx(&self) -> u8 {
        self.wx
    }
    /// Handle reconfiguration of wx register
    pub fn set_wx(&mut self, wx: u8) {
        self.wx = wx
    }
    /// Called when the last line of the active display has been drawn
    fn end_of_frame(&mut self) {
        self.it_vblank = true;
        self.display.flip();
    }
    /// Get byte from VRAM
    pub fn vram(&self, addr: u16) -> u8 {
        self.vram[addr as uint]
    }
    /// Set byte in VRAM
    pub fn set_vram(&mut self, addr: u16, val: u8) {
        self.vram[addr as uint] = val;
    }
    /// Get byte from OAM
    pub fn oam(&self, addr: u16) -> u8 {
        // Each sprite takes 4 byte in OAM
        let index = (addr / 4) as uint;
        let attribute = addr % 4;
        let sprite = self.sprite(index);
        match attribute {
            0 => sprite.y_pos(),
            1 => sprite.x_pos(),
            2 => sprite.tile(),
            3 => sprite.flags(),
            _ => panic!("unreachable"),
        }
    }
    /// Set byte in OAM
    pub fn set_oam(&mut self, addr: u16, val: u8) {
        // Each sprite takes 4 byte in OAM
        let index = (addr / 4) as uint;
        let attribute = addr % 4;
        let update_cache = {
            let sprite = self.sprite_mut(index);
            match attribute {
                0 => {
                    if sprite.y_pos() != val {
                        sprite.set_y_pos(val);
                        true
                    } else {
                        false
                    }
                }
                1 => {
                    if sprite.x_pos() != val {
                        sprite.set_x_pos(val);
                        true
                    } else {
                        false
                    }
                }
                2 => {
                    sprite.set_tile(val);
                    false
                }
                3 => {
                    sprite.set_flags(val);
                    false
                }
                _ => panic!("unreachable"),
            }
        };
        // We need to invalidate the cache only if the sprite location
        // has changed
        if update_cache {
            self.rebuild_line_cache();
        }
    }
    /// Return status of VBlank interrupt
    pub fn it_vblank(&self) -> bool {
        self.it_vblank
    }
    /// Acknowledge VBlank interrupt
    pub fn ack_it_vblank(&mut self) {
        self.it_vblank = false;
    }
    /// Force VBlank interrupt state
    pub fn force_it_vblank(&mut self, set: bool) {
        self.it_vblank = set;
    }
    /// Return status of Lcd interrupt
    pub fn it_lcd(&self) -> bool {
        self.lcd_it_status == LcdItStatus::Triggered
    }
    /// Acknowledge Lcd interrupt
    pub fn ack_it_lcd(&mut self) {
        if self.lcd_it_status == LcdItStatus::Triggered {
            self.lcd_it_status = LcdItStatus::Acked;
        }
    }
    /// Force Lcd interrupt state. As with all the rest of the Lcd
    /// interrupt state machine, I'm not sure if that's right.
    pub fn force_it_lcd(&mut self, set: bool) {
        match set {
            true => self.lcd_it_status = LcdItStatus::Triggered,
            false => self.ack_it_lcd(),
        }
    }
    /// Return the current level of the LCD interrupt (`true` if one
    /// of the interrupt conditions is met and is enabled).
    fn lcd_interrupt_level(&self) -> bool {
        let mode = self.mode();
        (self.iten_lyc && self.lyc == self.line) ||
            (self.iten_prelude && mode == Mode::Prelude) ||
            (self.iten_vblank && mode == Mode::VBlank) ||
            (self.iten_hblank && mode == Mode::HBlank)
    }
    /// Look for a transition in the LCD interrupt to see if we should
    /// trigger a new one (or rearm it)
    // NOTE: "ldc" is a longstanding typo for "lcd"; name kept to avoid
    // churn at the call sites.
    fn update_ldc_interrupt(&mut self) {
        let level = self.lcd_interrupt_level();
        match level {
            true => {
                if self.lcd_it_status == LcdItStatus::Inactive {
                    // Rising edge of IT line, we trigger a new interrupt.
                    self.lcd_it_status = LcdItStatus::Triggered;
                }
            }
            false => {
                // Not entirely sure about that one. If the interrupt
                // has not been acked yet, what should be done? At the
                // moment I just assume it's shadowed somewhere and
                // won't go down until acked.
                if self.lcd_it_status == LcdItStatus::Acked {
                    // IT line returned to low, it could trigger again
                    // within the same line.
                    self.lcd_it_status = LcdItStatus::Inactive;
                }
            }
        }
    }
    /// Borrow the sprite at `index` in OAM
    fn sprite(&self, index: uint) -> &Sprite {
        &self.oam[index]
    }
    /// Mutably borrow the sprite at `index` in OAM
    fn sprite_mut(&mut self, index: uint) -> &mut Sprite {
        &mut self.oam[index]
    }
    /// Return `true` if the pixel at (`x`, `y`) is in the window
    fn in_window(&self, x: u8, y: u8) -> bool {
        let x = x as i32;
        let y = y as i32;
        let wx = (self.wx as i32) - 7;
        let wy = self.wy as i32;
        x >= wx && y >= wy
    }
    /// Get pixel in the window. Assumes (`x`, `y`) is inside the
    /// window.
    fn window_color(&mut self, x: u8, y: u8) -> AlphaColor {
        // Window X value is offset by 7 for some reason
        let px = x - self.wx + 7;
        let py = y - self.wy;
        let map = self.window_tile_map;
        let set = self.bg_win_tile_set;
        self.bg_win_color(px, py, map, set)
    }
    /// Get pixel in the background, applying the SCX/SCY scroll
    fn background_color(&mut self, x: u8, y: u8) -> AlphaColor {
        let px = x + self.scx;
        let py = y + self.scy;
        let map = self.bg_tile_map;
        let set = self.bg_win_tile_set;
        self.bg_win_color(px, py, map, set)
    }
    /// Get one pixel from either the window or the background.
    fn bg_win_color(&self,
                    x: u8,
                    y: u8,
                    map: TileMap,
                    set: TileSet) -> AlphaColor {
        let tile_map_x = x / 8;
        let tile_map_y = y / 8;
        let tile_x = x % 8;
        let tile_y = y % 8;
        // The screen is divided in 8x8 pixel tiles. It creates a
        // matrix of 32x32 tiles (As far as the GPU is concerned the
        // screen resolution is 256x256). The tile map contains one u8
        // per tile which is the index of the tile to use in the tile
        // set.
        let tile_index = self.tile_index(tile_map_x, tile_map_y, map);
        let tile_color = self.pix_color(tile_index, tile_x, tile_y, set);
        AlphaColor {
            // Transform tile_color through the palette
            color: self.bgp.transform(tile_color),
            // The pixel is transparent if the value pre-palette is white
            opaque: tile_color != Color::White,
        }
    }
    /// Return the tile index for the tile at (`tx`, `ty`) in `map`
    fn tile_index(&self, tx: u8, ty: u8, map: TileMap) -> u8 {
        let base = map.base();
        let tx = tx as u16;
        let ty = ty as u16;
        let map_addr = base + (ty * 32) + tx;
        self.vram[map_addr as uint]
    }
    /// Get the color of pixel (`x`, `y`) in `tile`.
    fn pix_color(&self, tile: u8, x: u8, y: u8, set: TileSet) -> Color {
        if x >= 8 || y >= 16 {
            panic!("tile pos out of range ({}, {})", x, y);
        }
        let base = set.tile_addr(tile);
        let addr = base + 2 * (y as u16);
        let addr = addr as uint;
        let x = (7 - x) as uint;
        // Each row of 8 pixels is split across two contiguous bytes:
        // the first for the LSB, the 2nd for the MSB
        let lsb = (self.vram[addr] >> x) & 1;
        let msb = (self.vram[addr + 1] >> x) & 1;
        Color::from_u8(msb << 1 | lsb)
    }
    /// Rebuild the entire Sprite cache for each line. This is pretty
    /// expensive.
    fn rebuild_line_cache(&mut self) {
        // Clear the cache
        self.line_cache = [[None, ..10], ..144];
        // Rebuild it
        for i in range(0, self.oam.len()) {
            self.cache_sprite(i as u8);
        }
    }
    /// Insert sprite at `index` into the line cache
    fn cache_sprite(&mut self, index: u8) {
        let sprite = self.oam[index as uint];
        let height = self.sprite_size.height();
        let start = sprite.top_line();
        let end = start + (height as i32);
        for y in range(start, end) {
            if y < 0 || y >= 144 {
                // Sprite line is not displayed
                continue;
            }
            let y = y as uint;
            let l = self.line_cache[y].len();
            if self.line_cache[y][l - 1].is_some() {
                // We reached the sprite limit for that line, we can
                // display no more.
                continue;
            }
            // Insert sprite into the cache for this line. We order
            // the sprites from left to right and from highest to
            // lowest priority.
            for i in range(0u, l) {
                match self.line_cache[y][i] {
                    None => {
                        // This cache entry is empty, use it to hold
                        // our sprite and move on to the next line
                        self.line_cache[y][i] = Some(index);
                        break;
                    }
                    Some(other) => {
                        let other_sprite = &self.oam[other as uint];
                        // When sprites overlap the one with the
                        // smallest x pos is on top. If the x values
                        // are equal then the offset in OAM is used.
                        if sprite.x_pos() < other_sprite.x_pos() ||
                           (sprite.x_pos() == other_sprite.x_pos() &&
                            index < other) {
                            // Our sprite is higher priority, move the
                            // rest of the cacheline one place. We
                            // know that the last item is None since
                            // it's checked above.
                            for j in range(i, l - 1).rev() {
                                self.line_cache[y][j + 1] =
                                    self.line_cache[y][j];
                            }
                            self.line_cache[y][i] = Some(index);
                            break;
                        }
                    }
                }
            }
        }
    }
    /// Render a single pixel from the display
    fn render_pixel(&mut self, x: u8, y: u8) {
        let bg_col =
            // Window is always on top of background
            if self.window_enabled && self.in_window(x, y) {
                self.window_color(x, y)
            // BUGFIX: this previously tested `self.bg_enabled &&
            // self.bg_enabled` (duplicated operand); a single check is
            // what's meant.
            } else if self.bg_enabled {
                self.background_color(x, y)
            } else {
                // No background or window
                AlphaColor { color: Color::White, opaque: false }
            };
        let col = if self.sprites_enabled {
            self.render_sprite(x, y, bg_col)
        } else {
            bg_col.color
        };
        self.display.set_pixel(x as u32, y as u32, col);
    }
    /// Return the color of the topmost visible sprite pixel at
    /// (`x`, `y`), or `bg_col.color` if no sprite covers it.
    fn render_sprite(&self, x: u8, y: u8, bg_col: AlphaColor) -> Color {
        for &entry in self.line_cache[y as uint].iter() {
            match entry {
                None => break, // Nothing left in cache
                Some(index) => {
                    let sprite = &self.oam[index as uint];
                    let sprite_x = (x as i32) - sprite.left_column();
                    if sprite_x >= 8 {
                        // Sprite was earlier on the line
                        continue
                    }
                    if sprite_x < 0 {
                        // It's too early to draw that sprite. Since
                        // sprites are in order on the line we know
                        // there's no sprite remaining to be drawn
                        break;
                    }
                    if sprite.background() && bg_col.opaque {
                        // Sprite is covered by the background
                        continue;
                    }
                    let sprite_y = (y as i32) - sprite.top_line();
                    let (height, tile) = match self.sprite_size {
                        SpriteSize::Sz8x8 => (7, sprite.tile()),
                        // For 16pix tiles the LSB is ignored
                        SpriteSize::Sz8x16 => (15, sprite.tile() & 0xfe),
                    };
                    let sprite_y = match sprite.y_flip() {
                        true => height - sprite_y,
                        false => sprite_y,
                    };
                    let sprite_x = match sprite.x_flip() {
                        true => 7 - sprite_x,
                        false => sprite_x,
                    };
                    // Sprites always use TileSet 1
                    let pix = self.pix_color(tile,
                                             sprite_x as u8,
                                             sprite_y as u8,
                                             TileSet::Set1);
                    // White color (0) pre-palette denotes a
                    // transparent pixel
                    if pix != Color::White {
                        // Pixel is not transparent, compute the color
                        // and return that
                        let palette = match sprite.palette() {
                            sprite::Palette::Obp0 => self.obp0,
                            sprite::Palette::Obp1 => self.obp1,
                        };
                        return palette.transform(pix);
                    }
                }
            }
        }
        bg_col.color
    }
}
impl<'a> Show for Gpu<'a> {
    /// Debug formatting: horizontal tick, current line and GPU mode.
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        try!(write!(f, "Gpu at ({}, {}) [{}] ",
                    self.htick, self.line, self.mode()));
        Ok(())
    }
}
/// All possible color values on the original game boy
///
/// The discriminants double as the 2-bit encoding used in the palette
/// registers (see `Palette::from_reg`/`into_reg`).
#[deriving(PartialEq,Eq,Copy)]
pub enum Color {
    White = 0,
    LightGrey = 1,
    DarkGrey = 2,
    Black = 3,
}
impl Color {
    /// Create a color from a u8 in the range 0...3
    ///
    /// Panics if `c` is greater than 3.
    fn from_u8(c: u8) -> Color {
        match c {
            0 => Color::White,
            1 => Color::LightGrey,
            2 => Color::DarkGrey,
            3 => Color::Black,
            _ => panic!("Invalid color: 0x{:02x}", c),
        }
    }
}
/// Palette description
#[deriving(Copy)]
struct Palette {
    /// Each color can be mapped to an other one independently of the
    /// others
    // Indexed by the raw (pre-palette) color value, 0..=3.
    map: [Color, ..4],
}
impl Palette {
    /// Build a palette from a register value.
    ///
    /// Register value is 0bC3C2C1C0 where CX is the output
    /// value for a given value X. So for instance
    /// 0b00_01_10_11 is a palette that reverses the colors.
    fn from_reg(r: u8) -> Palette {
        let mut p = Palette {
            map: [ Color::White,
                   Color::White,
                   Color::White,
                   Color::White, ]
        };
        // Each entry occupies two bits of the register.
        for i in range(0, p.map.len()) {
            p.map[i] = Color::from_u8((r >> (i * 2)) & 0x3)
        }
        p
    }
    /// Convert palette into register value
    ///
    /// Exact inverse of `from_reg` (see the `palette_conversion` test).
    fn into_reg(&self) -> u8 {
        let mut p = 0u8;
        for i in range(0, self.map.len()) {
            p |= (self.map[i] as u8) << (i * 2);
        }
        p
    }
    /// Transform color `c` through the palette
    fn transform(&self, c: Color) -> Color {
        self.map[c as uint]
    }
}
/// Struct used to describe colors that can be transparent
struct AlphaColor {
    /// Pixel color
    color: Color,
    /// If `true` the color is fully opaque, otherwise fully
    /// transparent.
    opaque: bool,
}
/// There are two tile maps available on the GameBoy. Each map is
/// 32x32x8bits large and contain index values into the tile set for
/// each map.
#[deriving(Copy)]
enum TileMap {
    /// Low map at address range [0x9800, 0x9bff]
    Low,
    /// High map at address range [0x9c00, 0x9fff]
    High,
}
impl TileMap {
    /// Return tile map base offset in VRAM
    ///
    /// Offsets are relative to the start of VRAM (0x8000), hence
    /// 0x1800 for the low map (CPU address 0x9800) and 0x1c00 for the
    /// high map (0x9c00).
    fn base(self) -> u16 {
        match self {
            TileMap::Low => 0x1800,
            TileMap::High => 0x1c00,
        }
    }
}
/// There are two overlapping tile sets on the Game Boy. Tile sets are
/// 256x16byte large; the `TileMap` entries hold indexes into the
/// active tile set.
#[deriving(Copy)]
enum TileSet {
    /// Tile set #0 in [0x8800, 0x97ff], index is signed [-128, 127]
    // (range follows from `tile_addr`: 0x1000 + [-128, 127] * 16 bytes,
    // i.e. VRAM offsets 0x0800-0x17ff == CPU 0x8800-0x97ff)
    Set0,
    /// Tile set #1 in [0x8000, 0x8fff], index is unsigned [0, 255]
    Set1,
}
impl TileSet {
    /// Return VRAM offset of `tile` for the tileset.
    ///
    /// Each tile occupies 16 bytes (8 rows x 2 bytes per row).
    fn tile_addr(self, tile: u8) -> u16 {
        match self {
            // For `Set0` `tile` is signed and in the range [-128,
            // 127]. Tile 0 is at offset 0x1000.
            TileSet::Set0 => (0x1000 + (((tile as i8) as i16) * 16)) as u16,
            // `Set1` is unsigned and starts at offset 0x0000
            TileSet::Set1 => 0x0 + (tile as u16) * 16,
        }
    }
}
/// Sprites can be 8x8 pixels or 8x16 pixels (a pair of 8x8
/// tiles). The setting is global for all sprites.
#[deriving(PartialEq,Eq,Copy)]
enum SpriteSize {
    /// Sprites resolution is 8x8 (i.e. single tile)
    Sz8x8,
    /// Sprites resolution is 8x16 (i.e. two tiles)
    Sz8x16,
}
impl SpriteSize {
    /// Return the height of sprites depending on the SpriteSize
    /// setting
    fn height(self) -> uint {
        match self {
            SpriteSize::Sz8x8 => 8,
            SpriteSize::Sz8x16 => 16,
        }
    }
}
mod timings {
    //! LCD timings
    //!
    //! Horizontal values are in ticks of the system clock, vertical
    //! values in lines.
    /// Total line size (including hblank)
    pub const HTOTAL: u16 = 456;
    /// Beginning of Active period
    pub const HACTIVE_ON: u16 = 80;
    /// Beginning of HSync period
    // NOTE(review): mode-3 length varies per line on real hardware;
    // this fixed cutover is an approximation — confirm against timing
    // references if accuracy matters.
    pub const HSYNC_ON: u16 = 173;
    /// Total number of lines (including vblank)
    pub const VTOTAL: u8 = 154;
    /// Beginning of VSync period
    pub const VSYNC_ON: u8 = 144;
}
#[cfg(test)]
mod tests {
/// Make sure the palette conversion to and from register values works
/// as expected
#[test]
fn palette_conversion() {
// Round-trip every possible 8 bit register value
for i in range(0u, 0x100) {
let r = i as u8;
let p = super::Palette::from_reg(r);
assert!(p.into_reg() == r);
}
}
/// Make sure color conversion to and from symbolic values works
#[test]
fn color_conversion() {
// Round-trip the four valid color values
for v in range(0, 4) {
let c = super::Color::from_u8(v);
assert!(c as u8 == v);
}
}
}
// Fix LCDC HBLANK interrupt
//! Game Boy GPU emulation
use std::fmt::{Show, Formatter, Error};
use ui::Display;
use gpu::sprite::Sprite;
mod sprite;
/// GPU state.
pub struct Gpu<'a> {
/// Emulator Display
display: &'a mut (Display + 'a),
/// Current line. [0,143] is active video, [144,153] is blanking.
line: u8,
/// Counter for the horizontal period
htick: u16,
/// Object attribute memory.
/// NOTE(review): sized 0xa0 entries although OAM is 0xa0 *bytes*
/// (40 sprites x 4 bytes); `oam()`/`set_oam()` only ever address
/// the first 40 entries — confirm whether 40 was intended.
oam: [Sprite, ..0xa0],
/// Video Ram
vram: [u8, ..0x2000],
/// `true` if the LCD is enabled.
enabled: bool,
/// Which tile map the window uses
window_tile_map: TileMap,
/// `true` if window display is enabled
window_enabled: bool,
/// Which tile set Background and Window use
bg_win_tile_set: TileSet,
/// Which tile map background uses
bg_tile_map: TileMap,
/// Resolution of the sprites
sprite_size: SpriteSize,
/// `true` if sprites are displayed
sprites_enabled: bool,
/// `true` if background display is enabled
bg_enabled: bool,
/// Background palette
bgp: Palette,
/// Object palette 0
obp0: Palette,
/// Object palette 1
obp1: Palette,
/// Line compare
lyc: u8,
/// VBlank interrupt status
it_vblank: bool,
/// LYC match interrupt enable (IT when LY == LYC)
iten_lyc: bool,
/// Interrupt during prelude (mode == 2)
iten_prelude: bool,
/// Interrupt during vblank (mode == 1). This is not the same as
/// `it_vblank` above: it_vblank fires with a higher priority and
/// is not shared with other interrupt sources like this one.
iten_vblank: bool,
/// Interrupt during hblank (mode == 0)
iten_hblank: bool,
/// Lcdc interrupt status
lcd_it_status: LcdItStatus,
/// Background y position
scy: u8,
/// Background x position
scx: u8,
/// Window top-left x position + 7
wx: u8,
/// Window top-left y position.
wy: u8,
/// Sprites displayed on each line. Contains an index into OAM or
/// None. There can't be more than 10 sprites displayed on each
/// line.
line_cache: [[Option<u8>, ..10], ..144],
}
/// Current GPU mode. The discriminant values are the mode bits
/// reported in the low two bits of the STAT register.
#[deriving(Show, PartialEq)]
pub enum Mode {
/// In horizontal blanking
HBlank = 0,
/// In vertical blanking
VBlank = 1,
/// Accessing sprite memory, Sprite attributes RAM [0xfe00, 0xfe9f]
/// can't be accessed
Prelude = 2,
/// Accessing sprite memory and video memory [0x8000, 0x9fff],
/// both can't be accessed from CPU
Active = 3,
}
/// State of the LCD interrupt (as controlled by the STAT
/// register).
///
/// I'm not absolutely certain I got things right but if I understand
/// correctly: the interrupt source is configurable (LYC, prelude,
/// vblank, hblank). The way I see it all those interrupt sources are
/// ORed together and an interrupt is only signaled on a rising edge
/// of the ORed signal.
///
/// So for instance if the LYC and HBlank interrupts are enabled and
/// we're at the matched line, the interrupt will trigger at the
/// beginning of the line (LY == LYC) but not at the beginning of
/// hblank (since the IT line is already high).
/// However, if the LYC register value is changed in the middle of the
/// line and the LY == LYC is no longer true, the IT signal will go
/// low and can be triggered again in the same line.
#[deriving(PartialEq)]
enum LcdItStatus {
/// Interrupt is inactive
Inactive,
/// Interrupt event occurred
Triggered,
/// Interrupt event occurred and has been acknowledged. It will be
/// rearmed when the signal goes low.
Acked,
}
impl<'a> Gpu<'a> {
/// Create a new Gpu instance.
///
/// Everything starts disabled with the LCD off; palettes are set
/// to all-black (0xff).
pub fn new<'n>(display: &'n mut Display) -> Gpu<'n> {
// VRAM is filled with 0xca — presumably a recognizable
// "uninitialized memory" marker; TODO confirm intent.
Gpu { line: 0,
htick: 0,
oam: [Sprite::new(), ..0xa0],
vram: [0xca, ..0x2000],
display: display,
enabled: false,
window_tile_map: TileMap::Low,
window_enabled: false,
bg_win_tile_set: TileSet::Set0,
bg_tile_map: TileMap::Low,
sprite_size: SpriteSize::Sz8x8,
sprites_enabled: false,
bg_enabled: false,
bgp: Palette::from_reg(0xff),
obp0: Palette::from_reg(0xff),
obp1: Palette::from_reg(0xff),
lyc: 0x00,
it_vblank: false,
iten_lyc: false,
iten_prelude: false,
iten_vblank: false,
iten_hblank: false,
lcd_it_status: LcdItStatus::Inactive,
scy: 0,
scx: 0,
wx: 0,
wy: 0,
line_cache: [[None, ..10], ..144],
}
}
/// Called at each tick of the system clock. Move the emulated
/// state one step forward.
pub fn step(&mut self) {
// A disabled LCD does not advance at all
if !self.enabled {
return;
}
// Advance the horizontal counter, wrapping at end of line
self.htick = (self.htick + 1) % timings::HTOTAL;
if self.htick == timings::HSYNC_ON {
// Entering horizontal blanking
self.line = (self.line + 1) % timings::VTOTAL;
if self.line == timings::VSYNC_ON {
// We're entering vertical blanking, we're done drawing the
// current frame
self.end_of_frame()
}
}
// Compute at which cycle the first pixel will actually be
// output on the screen. I don't know where this comes from
// but it's what GearBoy seems to use. Using 48 for the first
// line messes up The Legend of Zelda's intro.
let line_start = match self.line {
0 => 160,
_ => 48,
};
if self.htick == line_start && self.line < timings::VSYNC_ON {
// It's time to draw the current line
let y = self.line;
for x in range(0, 160) {
self.render_pixel(x, y);
}
}
// Mode and/or line may have changed: re-evaluate the STAT
// interrupt line on every tick
self.update_ldc_interrupt();
}
/// Return current GPU mode, derived from the horizontal and
/// vertical counters (Prelude -> Active -> HBlank within a line,
/// VBlank for lines past the active display).
pub fn mode(&self) -> Mode {
if self.line < timings::VSYNC_ON {
if self.htick < timings::HACTIVE_ON {
Mode::Prelude
} else if self.htick < timings::HSYNC_ON {
Mode::Active
} else {
Mode::HBlank
}
} else {
Mode::VBlank
}
}
/// Handle reconfig through LCDC register.
///
/// Bit 7: LCD enable, 6: window tile map, 5: window enable,
/// 4: bg/window tile set, 3: bg tile map, 2: sprite size,
/// 1: sprites enable, 0: bg enable.
pub fn set_lcdc(&mut self, lcdc: u8) {
self.enabled = lcdc & 0x80 != 0;
self.window_tile_map = match lcdc & 0x40 != 0 {
true => TileMap::High,
false => TileMap::Low,
};
self.window_enabled = lcdc & 0x20 != 0;
self.bg_win_tile_set = match lcdc & 0x10 != 0 {
true => TileSet::Set1,
false => TileSet::Set0,
};
self.bg_tile_map = match lcdc & 0x08 != 0 {
true => TileMap::High,
false => TileMap::Low,
};
let new_sprite_size = match lcdc & 0x04 != 0 {
false => SpriteSize::Sz8x8,
true => SpriteSize::Sz8x16,
};
self.sprites_enabled = lcdc & 0x02 != 0;
self.bg_enabled = lcdc & 0x01 != 0;
if !self.enabled {
// Reset to the first pixel to start back here once we're
// re-enabled.
self.line = 0;
self.htick = 0;
}
// A new sprite height changes which lines each sprite covers,
// so the per-line sprite cache must be rebuilt
if new_sprite_size != self.sprite_size {
self.sprite_size = new_sprite_size;
self.rebuild_line_cache();
}
}
/// Generate value of lcdc register.
///
/// Exact inverse of `set_lcdc`: each configuration field is
/// encoded back into its LCDC bit position.
pub fn lcdc(&self) -> u8 {
// Encode each enum-valued field as its register bit first,
// then assemble the byte in one expression.
let window_map_bit: u8 = match self.window_tile_map {
TileMap::High => 1,
TileMap::Low => 0,
};
let tile_set_bit: u8 = match self.bg_win_tile_set {
TileSet::Set1 => 1,
TileSet::Set0 => 0,
};
let bg_map_bit: u8 = match self.bg_tile_map {
TileMap::High => 1,
TileMap::Low => 0,
};
let sprite_size_bit: u8 = match self.sprite_size {
SpriteSize::Sz8x16 => 1,
SpriteSize::Sz8x8 => 0,
};
((self.enabled as u8) << 7)
| (window_map_bit << 6)
| ((self.window_enabled as u8) << 5)
| (tile_set_bit << 4)
| (bg_map_bit << 3)
| (sprite_size_bit << 2)
| ((self.sprites_enabled as u8) << 1)
| (self.bg_enabled as u8)
}
/// Generate value of the STAT register
pub fn stat(&self) -> u8 {
let mut r = 0;
// Coincidence flag, set while LY == LYC
let c = self.lyc == self.line;
r |= (self.iten_lyc as u8) << 6;
r |= (self.iten_prelude as u8) << 5;
r |= (self.iten_vblank as u8) << 4;
r |= (self.iten_hblank as u8) << 3;
r |= (c as u8) << 2;
// Apparently mode is 0 when disabled
if self.enabled {
r |= self.mode() as u8;
}
r
}
/// Handle reconfig through the STAT register. Only the four
/// interrupt-enable bits (3-6) are writable.
pub fn set_stat(&mut self, stat: u8) {
self.iten_lyc = stat & 0x40 != 0;
self.iten_prelude = stat & 0x20 != 0;
self.iten_vblank = stat & 0x10 != 0;
self.iten_hblank = stat & 0x08 != 0;
// Other fields are R/O
// Update interrupt status with new stat params
self.update_ldc_interrupt();
}
/// Return value of SCY register
pub fn scy(&self) -> u8 {
self.scy
}
/// Handle reconfiguration of SCY register
pub fn set_scy(&mut self, scy: u8) {
self.scy = scy;
}
/// Return value of SCX register
pub fn scx(&self) -> u8 {
self.scx
}
/// Handle reconfiguration of SCX register
pub fn set_scx(&mut self, scx: u8) {
self.scx = scx;
}
/// Handle reconfiguration of the lyc register
pub fn set_lyc(&mut self, lyc: u8) {
self.lyc = lyc;
}
/// Return value of the lyc register
pub fn lyc(&self) -> u8 {
self.lyc
}
/// Handle reconfiguration of the background palette
pub fn set_bgp(&mut self, bgp: u8) {
self.bgp = Palette::from_reg(bgp);
}
/// Return value of the background palette register
pub fn bgp(&self) -> u8 {
self.bgp.into_reg()
}
/// Handle reconfiguration of the sprite palette 0
pub fn set_obp0(&mut self, obp0: u8) {
self.obp0 = Palette::from_reg(obp0);
}
/// Return value of the sprite palette 0 register
pub fn obp0(&self) -> u8 {
self.obp0.into_reg()
}
/// Handle reconfiguration of the sprite palette 1
pub fn set_obp1(&mut self, obp1: u8) {
self.obp1 = Palette::from_reg(obp1);
}
/// Return value of the sprite palette 1 register
pub fn obp1(&self) -> u8 {
self.obp1.into_reg()
}
/// Return number of line currently being drawn
pub fn line(&self) -> u8 {
self.line
}
/// Return value of wy register
pub fn wy(&self) -> u8 {
self.wy
}
/// Handle reconfiguration of wy register
pub fn set_wy(&mut self, wy: u8) {
self.wy = wy
}
/// Return value of wx register
pub fn wx(&self) -> u8 {
self.wx
}
/// Handle reconfiguration of wx register
pub fn set_wx(&mut self, wx: u8) {
self.wx = wx
}
/// Called when the last line of the active display has been drawn
fn end_of_frame(&mut self) {
// Raise the (high priority) VBlank interrupt and push the
// finished frame to the display backend
self.it_vblank = true;
self.display.flip();
}
/// Get byte from VRAM. `addr` is an offset into VRAM, not a bus
/// address.
pub fn vram(&self, addr: u16) -> u8 {
self.vram[addr as uint]
}
/// Set byte in VRAM. `addr` is an offset into VRAM, not a bus
/// address.
pub fn set_vram(&mut self, addr: u16, val: u8) {
self.vram[addr as uint] = val;
}
/// Get byte from OAM. `addr` is an offset into OAM.
pub fn oam(&self, addr: u16) -> u8 {
// Each sprite takes 4 byte in OAM
let index = (addr / 4) as uint;
let attribute = addr % 4;
let sprite = self.sprite(index);
match attribute {
0 => sprite.y_pos(),
1 => sprite.x_pos(),
2 => sprite.tile(),
3 => sprite.flags(),
// attribute is addr % 4, so 0-3 are the only values
_ => panic!("unreachable"),
}
}
/// Set byte in OAM. `addr` is an offset into OAM.
pub fn set_oam(&mut self, addr: u16, val: u8) {
// Each sprite takes 4 byte in OAM
let index = (addr / 4) as uint;
let attribute = addr % 4;
// The block returns `true` when the write moved the sprite
// (position change), which invalidates the line cache
let update_cache = {
let sprite = self.sprite_mut(index);
match attribute {
0 => {
if sprite.y_pos() != val {
sprite.set_y_pos(val);
true
} else {
false
}
}
1 => {
if sprite.x_pos() != val {
sprite.set_x_pos(val);
true
} else {
false
}
}
2 => {
sprite.set_tile(val);
false
}
3 => {
sprite.set_flags(val);
false
}
_ => panic!("unreachable"),
}
};
// We need to invalidate the cache only if the sprite location
// has changed
if update_cache {
self.rebuild_line_cache();
}
}
/// Return status of VBlank interrupt
pub fn it_vblank(&self) -> bool {
self.it_vblank
}
/// Acknowledge VBlank interrupt
pub fn ack_it_vblank(&mut self) {
self.it_vblank = false;
}
/// Force VBlank interrupt state
pub fn force_it_vblank(&mut self, set: bool) {
self.it_vblank = set;
}
/// Return status of Lcd interrupt (pending and not yet acked)
pub fn it_lcd(&self) -> bool {
self.lcd_it_status == LcdItStatus::Triggered
}
/// Acknowledge Lcd interrupt. The interrupt stays in `Acked`
/// until the IT line goes low again (see `update_ldc_interrupt`).
pub fn ack_it_lcd(&mut self) {
if self.lcd_it_status == LcdItStatus::Triggered {
self.lcd_it_status = LcdItStatus::Acked;
}
}
/// Force Lcd interrupt state. As with all the rest of the Lcd
/// interrupt state machine, I'm not sure if that's right.
pub fn force_it_lcd(&mut self, set: bool) {
match set {
true => self.lcd_it_status = LcdItStatus::Triggered,
false => self.ack_it_lcd(),
}
}
/// Return the current level of the LCD interrupt (`true` if one
/// of the interrupt conditions is met and is enabled).
fn lcd_interrupt_level(&self) -> bool {
// All enabled sources are ORed into a single level; edges of
// this level drive the state machine in `update_ldc_interrupt`
let mode = self.mode();
(self.iten_lyc && self.lyc == self.line) ||
(self.iten_prelude && mode == Mode::Prelude) ||
(self.iten_vblank && mode == Mode::VBlank) ||
(self.iten_hblank && mode == Mode::HBlank)
}
/// Look for a transition in the LCD interrupt to see if we should
/// trigger a new one (or rearm it).
/// (sic: "ldc" in the name is a typo for "lcd"; kept unchanged
/// because call sites use it)
fn update_ldc_interrupt(&mut self) {
let level = self.lcd_interrupt_level();
match level {
true => {
if self.lcd_it_status == LcdItStatus::Inactive {
// Rising edge of IT line, we trigger a new interrupt.
self.lcd_it_status = LcdItStatus::Triggered;
}
}
false => {
// Not entirely sure about that one. If the interrupt
// has not been acked yet, what should be done? At the
// moment I just assume it's shadowed somewhere and
// won't go down until acked.
if self.lcd_it_status == LcdItStatus::Acked {
// IT line returned to low, it could trigger again
// within the same line.
self.lcd_it_status = LcdItStatus::Inactive;
}
}
}
}
/// Borrow sprite `index` from OAM
fn sprite(&self, index: uint) -> &Sprite {
&self.oam[index]
}
/// Mutably borrow sprite `index` from OAM
fn sprite_mut(&mut self, index: uint) -> &mut Sprite {
&mut self.oam[index]
}
/// Return `true` if the pixel at (`x`, `y`) is in the window
fn in_window(&self, x: u8, y: u8) -> bool {
let x = x as i32;
let y = y as i32;
// The window X register is offset by 7
let wx = (self.wx as i32) - 7;
let wy = self.wy as i32;
// Only the top-left corner is checked: the window always
// extends to the bottom-right edge of the screen
x >= wx && y >= wy
}
/// Get pixel in the window. Assumes (`x`, `y`) is inside the
/// window.
fn window_color(&mut self, x: u8, y: u8) -> AlphaColor {
// Window X value is offset by 7 for some reason
let px = x - self.wx + 7;
let py = y - self.wy;
let map = self.window_tile_map;
let set = self.bg_win_tile_set;
self.bg_win_color(px, py, map, set)
}
/// Get pixel in the background. Scrolling is applied here; the
/// u8 additions wrap around the 256x256 virtual screen.
fn background_color(&mut self, x: u8, y: u8) -> AlphaColor {
let px = x + self.scx;
let py = y + self.scy;
let map = self.bg_tile_map;
let set = self.bg_win_tile_set;
self.bg_win_color(px, py, map, set)
}
/// Get one pixel from either the window or the background.
/// Both layers share the background palette (`bgp`).
fn bg_win_color(&self,
x: u8,
y: u8,
map: TileMap,
set: TileSet) -> AlphaColor {
let tile_map_x = x / 8;
let tile_map_y = y / 8;
let tile_x = x % 8;
let tile_y = y % 8;
// The screen is divided in 8x8 pixel tiles. It creates a
// matrix of 32x32 tiles (As far as the GPU is concerned the
// screen resolution is 256x256). The tile map contains one u8
// per tile which is the index of the tile to use in the tile
// set.
let tile_index = self.tile_index(tile_map_x, tile_map_y, map);
let tile_color = self.pix_color(tile_index, tile_x, tile_y, set);
AlphaColor {
// Transform tile_color through the palette
color: self.bgp.transform(tile_color),
// The pixel is transparent if the value pre-palette is white
opaque: tile_color != Color::White,
}
}
/// Return the tile index for the tile at (`tx`, `ty`) in `map`
fn tile_index(&self, tx: u8, ty: u8, map: TileMap) -> u8 {
let base = map.base();
let tx = tx as u16;
let ty = ty as u16;
// Maps are 32 tiles wide, one byte per tile
let map_addr = base + (ty * 32) + tx;
self.vram[map_addr as uint]
}
/// Get the color of pixel (`x`, `y`) in `tile`.
/// `y` may go up to 15 to accomodate 8x16 sprites.
fn pix_color(&self, tile: u8, x: u8, y: u8, set: TileSet) -> Color {
if x >= 8 || y >= 16 {
panic!("tile pos out of range ({}, {})", x, y);
}
let base = set.tile_addr(tile);
let addr = base + 2 * (y as u16);
let addr = addr as uint;
// Bit 7 is the leftmost pixel
let x = (7 - x) as uint;
// Each row of 8 pixels is split across two contiguous bytes:
// the first for the LSB, the 2nd for the MSB
let lsb = (self.vram[addr] >> x) & 1;
let msb = (self.vram[addr + 1] >> x) & 1;
Color::from_u8(msb << 1 | lsb)
}
/// Rebuild the entire Sprite cache for each line. This is pretty
/// expensive.
fn rebuild_line_cache(&mut self) {
// Clear the cache
self.line_cache = [[None, ..10], ..144];
// Rebuild it
for i in range(0, self.oam.len()) {
self.cache_sprite(i as u8);
}
}
/// Insert sprite at `index` into the line cache
fn cache_sprite(&mut self, index: u8) {
let sprite = self.oam[index as uint];
let height = self.sprite_size.height();
let start = sprite.top_line();
let end = start + (height as i32);
for y in range(start, end) {
if y < 0 || y >= 144 {
// Sprite line is not displayed
continue;
}
let y = y as uint;
let l = self.line_cache[y].len();
// If the last (lowest priority) slot is taken the line is
// already full (10 sprites)
if self.line_cache[y][l - 1].is_some() {
// We reached the sprite limit for that line, we can
// display no more.
continue;
}
// Insert sprite into the cache for this line. We order
// the sprites from left to right and from highest to
// lowest priority.
for i in range(0u, l) {
match self.line_cache[y][i] {
None => {
// This cache entry is empty, use it to hold
// our sprite and move on to the next line
self.line_cache[y][i] = Some(index);
break;
}
Some(other) => {
let other_sprite = &self.oam[other as uint];
// When sprites overlap the one with the
// smallest x pos is on top. If the x values
// are equal then the offset in OAM is used.
if sprite.x_pos() < other_sprite.x_pos() ||
(sprite.x_pos() == other_sprite.x_pos() &&
index < other) {
// Our sprite is higher priority, move the
// rest of the cacheline one place. We
// know that the last item is None since
// it's checked above.
for j in range(i, l - 1).rev() {
self.line_cache[y][j + 1] =
self.line_cache[y][j];
}
self.line_cache[y][i] = Some(index);
break;
}
}
}
}
}
}
/// Render a single pixel from the display.
///
/// The background/window color is computed first (window on top
/// of background), then sprites are composited on top of it.
fn render_pixel(&mut self, x: u8, y: u8) {
let bg_col =
// Window is always on top of background
if self.window_enabled && self.in_window(x, y) {
self.window_color(x, y)
// Fixed: the condition was duplicated as
// `self.bg_enabled && self.bg_enabled`
} else if self.bg_enabled {
self.background_color(x, y)
} else {
// No background or window: transparent white
AlphaColor { color: Color::White, opaque: false }
};
let col = if self.sprites_enabled {
self.render_sprite(x, y, bg_col)
} else {
bg_col.color
};
self.display.set_pixel(x as u32, y as u32, col);
}
/// Composite the sprites over the background color `bg_col` for
/// the pixel at (`x`, `y`). Returns the final pixel color.
fn render_sprite(&self, x: u8, y: u8, bg_col: AlphaColor) -> Color {
// The line cache is ordered left-to-right, highest priority
// first, with all `None`s at the end
for &entry in self.line_cache[y as uint].iter() {
match entry {
None => break, // Nothing left in cache
Some(index) => {
let sprite = &self.oam[index as uint];
let sprite_x = (x as i32) - sprite.left_column();
if sprite_x >= 8 {
// Sprite was earlier on the line
continue
}
if sprite_x < 0 {
// It's too early to draw that sprite. Since
// sprites are in order on the line we know
// there's no sprite remaining to be drawn
break;
}
if sprite.background() && bg_col.opaque {
// Sprite is covered by the background
continue;
}
let sprite_y = (y as i32) - sprite.top_line();
let (height, tile) = match self.sprite_size {
SpriteSize::Sz8x8 => (7, sprite.tile()),
// For 16pix tiles the LSB is ignored
SpriteSize::Sz8x16 => (15, sprite.tile() & 0xfe),
};
let sprite_y = match sprite.y_flip() {
true => height - sprite_y,
false => sprite_y,
};
let sprite_x = match sprite.x_flip() {
true => 7 - sprite_x,
false => sprite_x,
};
// Sprites always use TileSet 1
let pix = self.pix_color(tile,
sprite_x as u8,
sprite_y as u8,
TileSet::Set1);
// White color (0) pre-palette denotes a
// transparent pixel
if pix != Color::White {
// Pixel is not transparent, compute the color
// and return that
let palette = match sprite.palette() {
sprite::Palette::Obp0 => self.obp0,
sprite::Palette::Obp1 => self.obp1,
};
return palette.transform(pix);
}
}
}
}
// All sprites at this pixel were transparent
bg_col.color
}
}
// Debug formatting: horizontal tick, line and current mode
impl<'a> Show for Gpu<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
try!(write!(f, "Gpu at ({}, {}) [{}] ",
self.htick, self.line, self.mode()));
Ok(())
}
}
/// All possible color values on the original game boy
#[deriving(PartialEq,Eq,Copy)]
pub enum Color {
White = 0,
LightGrey = 1,
DarkGrey = 2,
Black = 3,
}
impl Color {
/// Create a color from a u8 in the range 0...3.
/// Panics if `c` is greater than 3.
fn from_u8(c: u8) -> Color {
match c {
0 => Color::White,
1 => Color::LightGrey,
2 => Color::DarkGrey,
3 => Color::Black,
_ => panic!("Invalid color: 0x{:02x}", c),
}
}
}
/// Palette description
#[deriving(Copy)]
struct Palette {
/// Each color can be mapped to an other one independently of the
/// others
map: [Color, ..4],
}
impl Palette {
/// Build a palette from a register value.
///
/// Register value is 0bC3C2C1C0 where CX is the output
/// value for a given value X. So for instance
/// 0b00_01_10_11 is a palette that reverses the colors.
fn from_reg(r: u8) -> Palette {
let mut p = Palette {
map: [ Color::White,
Color::White,
Color::White,
Color::White, ]
};
// Each entry is 2 bits wide
for i in range(0, p.map.len()) {
p.map[i] = Color::from_u8((r >> (i * 2)) & 0x3)
}
p
}
/// Convert palette into register value (inverse of `from_reg`)
fn into_reg(&self) -> u8 {
let mut p = 0u8;
for i in range(0, self.map.len()) {
p |= (self.map[i] as u8) << (i * 2);
}
p
}
/// Transform color `c` through the palette
fn transform(&self, c: Color) -> Color {
self.map[c as uint]
}
}
/// Struct used to describe colors that can be transparent
struct AlphaColor {
/// Pixel color
color: Color,
/// If `true` the color is fully opaque, otherwise fully
/// transparent.
opaque: bool,
}
/// There are two tile maps available on the GameBoy. Each map is
/// 32x32x8bits large and contain index values into the tile set for
/// each map.
#[deriving(Copy)]
enum TileMap {
/// Low map at address range [0x9800, 0x9bff]
Low,
/// High map at address range [0x9c00, 0x9fff]
High,
}
impl TileMap {
/// Return tile map base offset in VRAM
fn base(self) -> u16 {
// Offsets are relative to the start of VRAM (bus address
// 0x8000), so 0x1800 corresponds to bus address 0x9800.
match self {
TileMap::Low => 0x1800,
TileMap::High => 0x1c00,
}
}
}
/// There are two overlapping tile sets on the Game Boy. Each tile
/// set holds 256 tiles of 16 bytes each; `TileMap` entries are
/// indices into the active tile set.
#[deriving(Copy)]
enum TileSet {
/// Tile set #0 in [0x8800, 0x97ff], index is signed [-128, 127]
Set0,
/// Tile set #1 in [0x8000, 0x8fff], index is unsigned [0, 255]
Set1,
}
impl TileSet {
/// Return VRAM offset of `tile` for the tileset.
fn tile_addr(self, tile: u8) -> u16 {
match self {
// For `Set0` `tile` is signed and in the range [-128,
// 127]. Tile 0 is at offset 0x1000.
TileSet::Set0 => (0x1000 + (((tile as i8) as i16) * 16)) as u16,
// `Set1` is unsigned and starts at offset 0x0000
TileSet::Set1 => 0x0 + (tile as u16) * 16,
}
}
}
/// Sprites can be 8x8 pixels or 8x16 pixels (a pair of 8x8
/// tiles). The setting is global for all sprites.
#[deriving(PartialEq,Eq,Copy)]
enum SpriteSize {
/// Sprites resolution is 8x8 (i.e. single tile)
Sz8x8,
/// Sprites resolution is 8x16 (i.e. two tiles)
Sz8x16,
}
impl SpriteSize {
/// Return the height of sprites in pixels depending on the
/// SpriteSize setting
fn height(self) -> uint {
match self {
SpriteSize::Sz8x8 => 8,
SpriteSize::Sz8x16 => 16,
}
}
}
mod timings {
//! LCD timings, expressed in GPU step ticks (horizontal) and in
//! lines (vertical).
/// Total line size (including hblank)
pub const HTOTAL: u16 = 456;
/// Beginning of Active period
pub const HACTIVE_ON: u16 = 80;
/// Beginning of HSync period
pub const HSYNC_ON: u16 = 173;
/// Total number of lines (including vblank)
pub const VTOTAL: u8 = 154;
/// Beginning of VSync period
pub const VSYNC_ON: u8 = 144;
}
#[cfg(test)]
mod tests {
/// Make sure the palette conversion to and from register values works
/// as expected
#[test]
fn palette_conversion() {
// Round-trip every possible 8 bit register value
for i in range(0u, 0x100) {
let r = i as u8;
let p = super::Palette::from_reg(r);
assert!(p.into_reg() == r);
}
}
/// Make sure color conversion to and from symbolic values works
#[test]
fn color_conversion() {
// Round-trip the four valid color values
for v in range(0, 4) {
let c = super::Color::from_u8(v);
assert!(c as u8 == v);
}
}
}
mod render;
mod menu;
mod button;
mod geom;
mod component;
mod event;
use self::render::*;
use super::module::*;
use self::menu::{Menu, MenuUpdate, MenuView};
use self::button::*;
use self::geom::*;
use self::component::*;
use self::event::*;
use glutin::{self, ContextBuilder, EventsLoop, GlContext, WindowBuilder};
use modular_flow as mf;
use std::time::Instant;
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use std::cmp::Ordering;
use gfx::Device;
use gfx_window_glutin as gfx_glutin;
use gfx_device_gl as gl;
// A GUI module is shared between the flow graph and the renderer,
// hence the mutex around the boxed component.
type OwnedModule = Mutex<Box<GuiComponent<GuiModuleUpdate>>>;
type Graph = mf::Graph<OwnedModule>;
type Node = mf::Node<OwnedModule>;
/// Model holds info about GUI state
/// and program state
pub struct Model {
// Shared handle on the rendering pipeline
ctx: RenderContext,
// Seconds since startup, refreshed once per frame by `gui_main`
time: f32,
// The modular-flow graph holding every module node
graph: Arc<Graph>,
// Window size in pixels
window_size: Pt2,
// Latest cursor position
mouse_pos: Pt2,
// Module factories selectable from the context menu
module_types: Vec<mf::MetaModule<OwnedModule>>,
// Right-click context menu, when open
context_menu: Option<MenuView>,
}
impl Model {
/// Build an empty model: no nodes, no open menu, zeroed
/// window/mouse state.
fn new(ctx: RenderContext) -> Model {
Model {
graph: Graph::new(),
time: 0.0,
window_size: Pt2::fill(0.0),
mouse_pos: Pt2::fill(0.0),
module_types: load_metamodules(ctx.clone()),
context_menu: None,
ctx,
}
}
/// Dispatch a mouse press/release at the current cursor position.
/// Priority order: open context menu first, then the topmost
/// intersecting module; finally right-press opens the module menu
/// and left-press dismisses it.
fn handle_mouse_input(&mut self, state: ButtonState, button: MouseButton) {
let event = Event {
time: self.time,
data: EventData::Click(self.mouse_pos, button.into(), state.into()),
};
// intersect menu
if let Some(menu) = self.context_menu.as_mut() {
if menu.intersect(self.mouse_pos) {
let status = menu.handle(&event);
match status {
MenuUpdate::Select(path) => {
// Menu items are module type names; instantiate
// the selected type under the cursor
let name = path[0].as_ref();
let module = self.module_types.iter().find(|ty| ty.name == name).unwrap();
self.new_module(module, Rect2::new(self.mouse_pos, Pt2::fill(256.0)));
self.context_menu = None;
}
_ => (),
}
// Events over the menu never reach the nodes below
return;
}
}
// intersect nodes, front-most (lowest z) first
let mut nodes = self.graph.nodes();
nodes.sort_by(Self::compare_node_z);
for node in &nodes {
let mut module = node.module().lock().unwrap();
if module.intersect(self.mouse_pos) {
let status = module.handle(&event);
// release the lock before mutating the graph
drop(module);
match status {
GuiModuleUpdate::Closed => {
self.graph.remove_node(node.id()).unwrap();
}
_ => {}
}
self.move_to_front(node.id());
break;
}
}
// right click - open menu
if ButtonState::Pressed == state && MouseButton::Right == button {
self.open_new_module_menu();
}
// left click - abort menu
if ButtonState::Pressed == state && MouseButton::Left == button {
self.context_menu = None;
}
}
/// Order nodes by z coordinate, front-most first (smaller z is
/// drawn on top, see `move_to_front`).
fn compare_node_z(a: &Arc<Node>, b: &Arc<Node>) -> Ordering {
let a_z = a.module().lock().unwrap().bounds().pos.z;
let b_z = b.module().lock().unwrap().bounds().pos.z;
a_z.partial_cmp(&b_z).unwrap()
}
/// Bring node `id` to the front and redistribute all node z
/// positions evenly over [0, 1) (front node gets z = 0).
fn move_to_front(&self, id: mf::NodeId) {
let mut nodes = self.graph.nodes();
nodes.sort_by(|a, b| {
// force given id to front
if a.id() == id {
Ordering::Less
} else if b.id() == id {
Ordering::Greater
} else {
Self::compare_node_z(a, b)
}
});
let max = nodes.len() as f32;
// Each node gets a slice of the z range matching its rank
for (idx, node) in nodes.iter().enumerate() {
let mut module = node.module().lock().unwrap();
let mut bounds = module.bounds();
bounds.pos.z = idx as f32 / max;
bounds.size.z = 1.0 / max;
module.set_bounds(bounds);
}
}
/// Instantiate `meta` as a new graph node occupying `rect`
/// (z coordinates are assigned by `move_to_front`).
fn new_module(&self, meta: &mf::MetaModule<OwnedModule>, rect: Rect2) {
let bounds = Box3::new(rect.pos.with_z(0.0), rect.size.with_z(0.0));
let node = self.graph.add_node(meta, bounds);
self.move_to_front(node.id());
}
/// Open the "new module" context menu at the cursor, sized to
/// reach the bottom-right corner of the window, listing one item
/// per known module type.
fn open_new_module_menu(&mut self) {
self.context_menu = Some(MenuView::new(
self.ctx.clone(),
Box3::new(
self.mouse_pos.with_z(0.0),
(self.window_size - self.mouse_pos).with_z(0.0),
),
Menu::new(&self.module_types
.iter()
.map(|ty| menu::item(&ty.name))
.collect::<Vec<_>>()),
));
}
/// Broadcast an event to every module in the graph and to the
/// open context menu, if any. Return values are ignored.
fn generate_event(&mut self, data: EventData) {
let event = Event {
time: self.time,
data,
};
for node in self.graph.nodes() {
let mut module = node.module().lock().unwrap();
module.handle(&event);
}
if let Some(menu) = self.context_menu.as_mut() {
menu.handle(&event);
}
}
/// Translate raw glutin window events into model state updates
/// and internal GUI events.
fn handle(&mut self, event: &glutin::Event) {
use glutin::WindowEvent::*;
//println!("{:?}", event);
match event {
glutin::Event::WindowEvent {
window_id: _,
event,
} => match event {
Resized(w, h) => {
self.window_size = Pt2::new(*w as f32, *h as f32);
}
CursorMoved {
device_id: _,
position,
modifiers: _,
} => {
// Snap to whole pixels before broadcasting
self.mouse_pos = Pt2::new((position.0 as f32).floor(), (position.1 as f32).floor());
self.generate_event(EventData::MouseMove(self.mouse_pos));
}
MouseInput {
device_id: _,
state,
button,
modifiers: _,
} => {
self.handle_mouse_input(state.into(), button.into());
}
_ => (),
},
_ => (),
}
}
}
/// Build the list of module types available to the user. Each
/// MetaModule carries a factory closure producing a GUI-wrapped
/// module instance.
fn load_metamodules(ctx: RenderContext) -> Vec<mf::MetaModule<OwnedModule>> {
let mut modules = Vec::new();
// The closure captures its own clone of the render context
let mod_ctx = ctx;
let test_module = mf::MetaModule::new(
"TestModule",
Arc::new(move |ifc, bounds| {
Mutex::new(Box::new(GuiModuleWrapper::new(
TestModule::new(ifc),
mod_ctx.clone(),
bounds,
)) as Box<GuiComponent<GuiModuleUpdate>>)
}),
);
modules.push(test_module);
modules
}
/// Program entry point for the GUI: opens the window, then runs
/// the event/render loop until the window is closed.
pub fn gui_main() {
// init window
let mut events_loop = EventsLoop::new();
let context = ContextBuilder::new().with_gl_profile(glutin::GlProfile::Core);
let builder = WindowBuilder::new().with_title(String::from("flow-synth"));
let (window, mut device, factory, main_color, main_depth) =
gfx_glutin::init::<ColorFormat, DepthFormat>(builder, context, &events_loop);
let mut target = Target {
color: main_color,
depth: main_depth,
};
// init rendering pipeline
let mut ctx = RenderContext::new(factory.clone());
let mut model = Model::new(ctx.clone());
// begin main loop
let mut running = true;
let timer = Instant::now();
let mut frames = VecDeque::new();
while running {
// update fps
let now = timer.elapsed();
model.time = now.as_secs() as f32 + now.subsec_nanos() as f32 / 1_000_000_000.0;
frames.push_back(model.time);
// Keep only timestamps from the last second: the queue
// length is then the FPS figure drawn below
while let Some(&old_frame) = frames.front() {
if old_frame < model.time - 1.0 {
frames.pop_front();
} else {
break;
}
}
// handle events
events_loop.poll_events(|event| {
model.handle(&event);
use glutin::WindowEvent::*;
match event {
glutin::Event::WindowEvent {
window_id: _,
event,
} => match event {
Closed => running = false,
Resized(_, _) => {
gfx_glutin::update_views(&window, &mut target.color, &mut target.depth);
}
_ => (),
},
_ => (),
}
});
ctx.begin_frame(&target);
// render nodes
// NOTE(review): `id` is unused — `(_id, node)` would silence
// the unused-variable warning
let graph_nodes = model.graph.node_map();
for (id, node) in &graph_nodes {
let mut module = node.module().lock().unwrap();
module.render(&mut device, &mut ctx);
}
// render global widgets
if let Some(menu) = model.context_menu.as_mut() {
menu.render(&mut device, &mut ctx);
}
// debug text
ctx.draw_text(
&format!("FPS={} Time={}", frames.len(), model.time),
Pt3::new(0.0, 0.0, 0.0),
[1.0, 1.0, 1.0],
);
ctx.end_frame(&mut device, &target);
window.swap_buffers().unwrap();
device.cleanup();
}
}
// Chrome dimensions, in pixels
const TITLE_BAR_HEIGHT: f32 = 24.0;
const BORDER_SIZE: f32 = 1.0;
/// Wraps a `Module` with window chrome (title bar, border, close
/// button) rendered into an offscreen texture.
struct GuiModuleWrapper<T: Module> {
// The wrapped flow module
module: T,
// Offscreen texture the chrome and body are drawn into
target: TextureTarget,
// "X" close button in the title bar
delete_button: Button,
// Position/size within the window (z used for stacking)
bounds: Box3,
// While dragging: cursor offset from the module origin
drag: Option<Pt2>,
// `true` when the texture must be redrawn
dirty: bool,
}
impl<T: Module> GuiModuleWrapper<T> {
/// Wrap `module`, allocating an offscreen render target of the
/// module's size and placing the close button in the title bar.
fn new(module: T, ctx: RenderContext, bounds: Box3) -> GuiModuleWrapper<T> {
let target = TextureTarget::new(ctx.clone(), bounds.size.drop_z());
GuiModuleWrapper {
module,
target,
delete_button: Button::new(
ctx,
"X".into(),
Box3 {
// Top-right corner, inside the border
pos: Pt3::new(
bounds.size.x - TITLE_BAR_HEIGHT - BORDER_SIZE,
BORDER_SIZE,
0.0,
),
size: Pt3::new(TITLE_BAR_HEIGHT, TITLE_BAR_HEIGHT, 0.0),
},
),
bounds,
drag: None,
dirty: true,
}
}
/// Draw the window chrome (border, background, title bar and
/// title text) into the offscreen target.
fn render_self(&mut self) {
let title = &self.title();
let ctx = self.target.ctx();
// borders
ctx.draw_rect(
Rect3::new(Pt3::new(0.0, 0.0, 1.0), self.bounds.size.drop_z()),
[1.0, 1.0, 1.0],
);
// background
ctx.draw_rect(
Rect3::new(
Pt3::new(BORDER_SIZE, BORDER_SIZE + TITLE_BAR_HEIGHT, 0.9),
self.bounds.size.drop_z() - Pt2::new(BORDER_SIZE * 2.0, BORDER_SIZE * 2.0 + TITLE_BAR_HEIGHT),
),
[0.1, 0.1, 0.1],
);
// title bar
ctx.draw_rect(
Rect3::new(
Pt3::new(BORDER_SIZE, BORDER_SIZE, 0.9),
Pt2::new(self.bounds.size.x - BORDER_SIZE * 2.0, TITLE_BAR_HEIGHT),
),
[0.0, 0.0, 0.0],
);
ctx.draw_text(title, Pt3::new(4.0, 4.0, 0.8), [1.0, 1.0, 1.0]);
}
}
/// Result of feeding an event to a GUI module
enum GuiModuleUpdate {
/// Nothing happened that the caller must act on
Unchanged,
/// The close button was clicked; remove the node from the graph
Closed,
}
impl<T> GuiComponent<GuiModuleUpdate> for GuiModuleWrapper<T>
where
T: Module,
{
fn bounds(&self) -> Box3 {
self.bounds
}
fn set_bounds(&mut self, bounds: Box3) {
self.bounds = bounds;
}
// Hit test in window space (z ignored)
fn intersect(&self, pos: Pt2) -> bool {
self.bounds.flatten().drop_z().intersect(pos)
}
fn render(&mut self, device: &mut gl::Device, ctx: &mut RenderContext) {
// Redraw the offscreen texture only when marked dirty, then
// blit it at the module's bounds
if self.dirty {
self.target.begin_frame();
self.render_self();
self.delete_button.render(device, self.target.ctx());
self.target.end_frame(device);
self.dirty = false;
}
ctx.draw_textured_rect(self.bounds.flatten(), self.target.shader_resource().clone());
}
fn handle(&mut self, event: &Event) -> GuiModuleUpdate {
// Child widgets live in module-local coordinates, so events
// are translated by the module origin before forwarding
let origin = self.bounds.pos.drop_z();
match event.data {
EventData::MouseMove(pos) => {
self.dirty |=
ButtonUpdate::NeedRender == self.delete_button.handle(&event.translate(-origin));
// While dragging, follow the cursor keeping the
// initial grab offset
if let Some(drag) = self.drag {
self.bounds.pos.x = -drag.x + pos.x;
self.bounds.pos.y = -drag.y + pos.y;
}
GuiModuleUpdate::Unchanged
}
EventData::Click(pos, button, state) => {
if self.delete_button.intersect(pos - origin) {
if ButtonUpdate::Clicked == self.delete_button.handle(&event.translate(-origin)) {
GuiModuleUpdate::Closed
} else {
GuiModuleUpdate::Unchanged
}
} else {
match button {
MouseButton::Left => match state {
ButtonState::Pressed => {
// A left press on the title bar starts a drag
let mut title_rect = self.bounds.flatten().drop_z();
title_rect.size = Pt2::new(title_rect.size.x, TITLE_BAR_HEIGHT + BORDER_SIZE);
if title_rect.intersect(pos) {
self.drag = Some(pos - origin);
}
}
ButtonState::Released => {
self.drag = None;
}
},
_ => {}
}
GuiModuleUpdate::Unchanged
}
}
}
}
}
/// `GuiModuleWrapper` is itself a `Module`: the trait is forwarded straight
/// through to the wrapped inner module.
impl<T: Module> Module for GuiModuleWrapper<T> {
fn start(&mut self) {
self.module.start();
}
fn title(&self) -> String {
self.module.title()
}
}
factor
mod render;
mod menu;
mod button;
mod geom;
mod component;
mod event;
use self::render::*;
use super::module::*;
use self::menu::{Menu, MenuUpdate, MenuView};
use self::button::*;
use self::geom::*;
use self::component::*;
use self::event::*;
use glutin::{self, ContextBuilder, EventsLoop, GlContext, WindowBuilder};
use modular_flow as mf;
use std::time::Instant;
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use std::cmp::Ordering;
use gfx::Device;
use gfx_window_glutin as gfx_glutin;
use gfx_device_gl as gl;
/// A GUI module: a boxed component behind a mutex so graph nodes can share it.
type OwnedModule = Mutex<Box<GuiComponent<GuiModuleUpdate>>>;
/// The flow graph whose nodes own GUI modules.
type Graph = mf::Graph<OwnedModule>;
/// A single node of that graph.
type Node = mf::Node<OwnedModule>;
/// Model holds info about GUI state and program state: the module graph,
/// input state, the context menu, and the shared render context.
pub struct Model {
ctx: RenderContext,
// Seconds since startup; updated once per frame by `gui_main`.
time: f32,
graph: Arc<Graph>,
window_size: Pt2,
// Last cursor position, floored to whole units.
mouse_pos: Pt2,
// Module types offered by the right-click "new module" menu.
module_types: Vec<mf::MetaModule<OwnedModule>>,
// Right-click context menu, when open.
context_menu: Option<MenuView>,
}
impl Model {
fn new(ctx: RenderContext) -> Model {
Model {
graph: Graph::new(),
time: 0.0,
window_size: Pt2::fill(0.0),
mouse_pos: Pt2::fill(0.0),
module_types: load_metamodules(ctx.clone()),
context_menu: None,
ctx,
}
}
/// Dispatches a mouse click: first to an open context menu, then to the
/// topmost intersecting node, and finally opens/aborts the context menu.
fn handle_mouse_input(&mut self, state: ButtonState, button: MouseButton) {
let event = Event {
time: self.time,
data: EventData::Click(self.mouse_pos, button.into(), state.into()),
};
// intersect menu
if let Some(menu) = self.context_menu.as_mut() {
if menu.intersect(self.mouse_pos) {
let status = menu.handle(&event);
match status {
MenuUpdate::Select(path) => {
// The menu is flat here: path[0] names the chosen module type.
let name = path[0].as_ref();
let module = self.module_types.iter().find(|ty| ty.name == name).unwrap();
self.new_module(module, Rect2::new(self.mouse_pos, Pt2::fill(256.0)));
self.context_menu = None;
}
_ => (),
}
// Clicks inside the menu never fall through to the nodes below.
return;
}
}
// intersect nodes
// Sort by z so the topmost intersecting node receives the event.
let mut nodes = self.graph.nodes();
nodes.sort_by(Self::compare_node_z);
for node in &nodes {
let mut module = node.module().lock().unwrap();
if module.intersect(self.mouse_pos) {
let status = module.handle(&event);
// Release the module lock before mutating the graph below.
drop(module);
match status {
GuiModuleUpdate::Closed => {
self.graph.remove_node(node.id()).unwrap();
}
_ => {}
}
self.move_to_front(node.id());
break;
}
}
// right click - open menu
if ButtonState::Pressed == state && MouseButton::Right == button {
self.open_new_module_menu();
}
// left click - abort menu
if ButtonState::Pressed == state && MouseButton::Left == button {
self.context_menu = None;
}
}
/// Orders two nodes by the z of their bounds.
/// Panics if either z is NaN (`partial_cmp().unwrap()`).
fn compare_node_z(a: &Arc<Node>, b: &Arc<Node>) -> Ordering {
let a_z = a.module().lock().unwrap().bounds().pos.z;
let b_z = b.module().lock().unwrap().bounds().pos.z;
a_z.partial_cmp(&b_z).unwrap()
}
/// Restacks all nodes: `id` gets the frontmost slot, the rest keep their
/// relative order, and z values are re-normalized over (0, 1).
fn move_to_front(&self, id: mf::NodeId) {
let mut nodes = self.graph.nodes();
nodes.sort_by(|a, b| {
// force given id to front
if a.id() == id {
Ordering::Less
} else if b.id() == id {
Ordering::Greater
} else {
Self::compare_node_z(a, b)
}
});
let max = nodes.len() as f32;
for (idx, node) in nodes.iter().enumerate() {
let mut module = node.module().lock().unwrap();
let mut bounds = module.bounds();
// Each node gets an even 1/max slice of the z range.
bounds.pos.z = idx as f32 / max;
bounds.size.z = 1.0 / max;
module.set_bounds(bounds);
}
}
/// Adds a node of type `meta` at `rect` and brings it to the front.
fn new_module(&self, meta: &mf::MetaModule<OwnedModule>, rect: Rect2) {
// dummy z, overwritten by move_to_front
let bounds = Box3::new(rect.pos.with_z(0.0), rect.size.with_z(0.0));
let node = self.graph.add_node(meta, bounds);
self.move_to_front(node.id());
}
/// Opens the "new module" menu at the cursor, sized to reach the window's
/// bottom-right corner.
fn open_new_module_menu(&mut self) {
self.context_menu = Some(MenuView::new(
self.ctx.clone(),
Box3::new(
self.mouse_pos.with_z(0.0),
(self.window_size - self.mouse_pos).with_z(0.0),
),
Menu::new(&self.module_types
.iter()
.map(|ty| menu::item(&ty.name))
.collect::<Vec<_>>()),
));
}
/// Broadcasts an event to every node and the context menu, ignoring their
/// status results.
fn generate_event(&mut self, data: EventData) {
let event = Event {
time: self.time,
data,
};
for node in self.graph.nodes() {
let mut module = node.module().lock().unwrap();
module.handle(&event);
}
if let Some(menu) = self.context_menu.as_mut() {
menu.handle(&event);
}
}
/// Translates glutin window events into model updates.
fn handle(&mut self, event: &glutin::Event) {
use glutin::WindowEvent::*;
//println!("{:?}", event);
match event {
glutin::Event::WindowEvent {
window_id: _,
event,
} => match event {
Resized(w, h) => {
self.window_size = Pt2::new(*w as f32, *h as f32);
}
CursorMoved {
device_id: _,
position,
modifiers: _,
} => {
// Floored to whole units — presumably to keep hit tests on
// pixel boundaries; confirm.
self.mouse_pos = Pt2::new((position.0 as f32).floor(), (position.1 as f32).floor());
self.generate_event(EventData::MouseMove(self.mouse_pos));
}
MouseInput {
device_id: _,
state,
button,
modifiers: _,
} => {
self.handle_mouse_input(state.into(), button.into());
}
_ => (),
},
_ => (),
}
}
/// Draws all nodes, then any open context menu on top.
fn render(&mut self, ctx: &mut RenderContext, device: &mut gl::Device) {
// render nodes
let graph_nodes = self.graph.node_map();
for (id, node) in &graph_nodes {
let mut module = node.module().lock().unwrap();
module.render(device, ctx);
}
// render global widgets
if let Some(menu) = self.context_menu.as_mut() {
menu.render(device, ctx);
}
}
}
/// Builds the list of module types offered by the "new module" menu.
/// Each entry carries a factory closure that wraps a freshly constructed
/// module in GUI chrome.
fn load_metamodules(ctx: RenderContext) -> Vec<mf::MetaModule<OwnedModule>> {
let module_ctx = ctx;
vec![
mf::MetaModule::new(
"TestModule",
Arc::new(move |ifc, bounds| {
Mutex::new(Box::new(GuiModuleWrapper::new(
TestModule::new(ifc),
module_ctx.clone(),
bounds,
)) as Box<GuiComponent<GuiModuleUpdate>>)
}),
),
]
}
/// GUI entry point: opens the window, then runs the event/render loop until
/// the window is closed.
pub fn gui_main() {
// init window
let mut events_loop = EventsLoop::new();
let context = ContextBuilder::new().with_gl_profile(glutin::GlProfile::Core);
let builder = WindowBuilder::new().with_title(String::from("flow-synth"));
let (window, mut device, factory, main_color, main_depth) =
gfx_glutin::init::<ColorFormat, DepthFormat>(builder, context, &events_loop);
let mut target = Target {
color: main_color,
depth: main_depth,
};
// init rendering pipeline
let mut ctx = RenderContext::new(factory.clone());
let mut model = Model::new(ctx.clone());
// begin main loop
let mut running = true;
let timer = Instant::now();
// Timestamps of the frames rendered in the last second, for the FPS readout.
let mut frames = VecDeque::new();
while running {
// update fps
let now = timer.elapsed();
model.time = now.as_secs() as f32 + now.subsec_nanos() as f32 / 1_000_000_000.0;
frames.push_back(model.time);
// Drop timestamps older than one second.
while let Some(&old_frame) = frames.front() {
if old_frame < model.time - 1.0 {
frames.pop_front();
} else {
break;
}
}
// handle events
events_loop.poll_events(|event| {
model.handle(&event);
use glutin::WindowEvent::*;
match event {
glutin::Event::WindowEvent {
window_id: _,
event,
} => match event {
Closed => running = false,
Resized(_, _) => {
// Rebuild the render-target views at the new window size.
gfx_glutin::update_views(&window, &mut target.color, &mut target.depth);
}
_ => (),
},
_ => (),
}
});
ctx.begin_frame(&target);
model.render(&mut ctx, &mut device);
// debug text
// `frames.len()` = frames seen in the last second, i.e. the FPS.
ctx.draw_text(
&format!("FPS={} Time={}", frames.len(), model.time),
Pt3::new(0.0, 0.0, 0.0),
[1.0, 1.0, 1.0],
);
ctx.end_frame(&mut device, &target);
window.swap_buffers().unwrap();
device.cleanup();
}
}
/// Height of a module's title bar, in window units.
const TITLE_BAR_HEIGHT: f32 = 24.0;
/// Width of a module's frame border, in window units.
const BORDER_SIZE: f32 = 1.0;
/// Wraps a `Module` with window chrome: a cached offscreen texture,
/// a delete button, draggable bounds, and a dirty flag for redraws.
struct GuiModuleWrapper<T: Module> {
module: T,
// Offscreen render target caching the module's chrome.
target: TextureTarget,
delete_button: Button,
// Position/size in window space; z encodes stacking order.
bounds: Box3,
// Grab offset while the title bar is being dragged, else `None`.
drag: Option<Pt2>,
// When set, the chrome is re-rendered into `target` on the next `render`.
dirty: bool,
}
impl<T: Module> GuiModuleWrapper<T> {
/// Wraps `module` in window chrome, rendering into an offscreen texture
/// sized to `bounds`.
fn new(module: T, ctx: RenderContext, bounds: Box3) -> GuiModuleWrapper<T> {
let target = TextureTarget::new(ctx.clone(), bounds.size.drop_z());
GuiModuleWrapper {
module,
target,
delete_button: Button::new(
ctx,
"X".into(),
// Square button in the top-right corner, inside the border.
Box3 {
pos: Pt3::new(
bounds.size.x - TITLE_BAR_HEIGHT - BORDER_SIZE,
BORDER_SIZE,
0.0,
),
size: Pt3::new(TITLE_BAR_HEIGHT, TITLE_BAR_HEIGHT, 0.0),
},
),
bounds,
drag: None,
// Start dirty so the first `render` draws the chrome.
dirty: true,
}
}
/// Redraws this module's chrome (border, background, title bar, title text)
/// into the module's offscreen texture target.
///
/// The z values layer the draws within the texture: border 1.0, background
/// and title bar 0.9, text 0.8 — presumably smaller z is in front; confirm
/// the renderer's depth convention.
fn render_self(&mut self) {
let title = &self.title();
let ctx = self.target.ctx();
// borders
// Full-size rect behind everything; the inset background drawn over it
// leaves only a BORDER_SIZE-wide white frame visible.
ctx.draw_rect(
Rect3::new(Pt3::new(0.0, 0.0, 1.0), self.bounds.size.drop_z()),
[1.0, 1.0, 1.0],
);
// background
ctx.draw_rect(
Rect3::new(
Pt3::new(BORDER_SIZE, BORDER_SIZE + TITLE_BAR_HEIGHT, 0.9),
self.bounds.size.drop_z() - Pt2::new(BORDER_SIZE * 2.0, BORDER_SIZE * 2.0 + TITLE_BAR_HEIGHT),
),
[0.1, 0.1, 0.1],
);
// title bar
ctx.draw_rect(
Rect3::new(
Pt3::new(BORDER_SIZE, BORDER_SIZE, 0.9),
Pt2::new(self.bounds.size.x - BORDER_SIZE * 2.0, TITLE_BAR_HEIGHT),
),
[0.0, 0.0, 0.0],
);
// title text, drawn in front of the bar
ctx.draw_text(title, Pt3::new(4.0, 4.0, 0.8), [1.0, 1.0, 1.0]);
}
}
/// Status returned by a module wrapper after handling an event.
enum GuiModuleUpdate {
/// Nothing the parent needs to act on.
Unchanged,
/// The delete button was clicked; the owning node should be removed.
Closed,
}
impl<T> GuiComponent<GuiModuleUpdate> for GuiModuleWrapper<T>
where
T: Module,
{
fn bounds(&self) -> Box3 {
self.bounds
}
fn set_bounds(&mut self, bounds: Box3) {
self.bounds = bounds;
}
/// Hit test in window coordinates against the module's 2D footprint.
fn intersect(&self, pos: Pt2) -> bool {
self.bounds.flatten().drop_z().intersect(pos)
}
/// Draws the module. The chrome is re-rendered into the offscreen texture
/// only while `dirty`; otherwise the cached texture is blitted as-is.
fn render(&mut self, device: &mut gl::Device, ctx: &mut RenderContext) {
if self.dirty {
self.target.begin_frame();
self.render_self();
self.delete_button.render(device, self.target.ctx());
self.target.end_frame(device);
self.dirty = false;
}
ctx.draw_textured_rect(self.bounds.flatten(), self.target.shader_resource().clone());
}
/// Routes mouse events to the delete button and the title-bar drag logic.
fn handle(&mut self, event: &Event) -> GuiModuleUpdate {
// Events arrive in window coordinates; children expect module-local ones.
let origin = self.bounds.pos.drop_z();
match event.data {
EventData::MouseMove(pos) => {
// Forward to the delete button; redraw if its visual state changed.
self.dirty |=
ButtonUpdate::NeedRender == self.delete_button.handle(&event.translate(-origin));
// While dragging, keep the original grab point under the cursor.
if let Some(drag) = self.drag {
self.bounds.pos.x = -drag.x + pos.x;
self.bounds.pos.y = -drag.y + pos.y;
}
GuiModuleUpdate::Unchanged
}
EventData::Click(pos, button, state) => {
if self.delete_button.intersect(pos - origin) {
if ButtonUpdate::Clicked == self.delete_button.handle(&event.translate(-origin)) {
GuiModuleUpdate::Closed
} else {
GuiModuleUpdate::Unchanged
}
} else {
match button {
MouseButton::Left => match state {
ButtonState::Pressed => {
// Start a drag only when the press lands on the title-bar strip.
let mut title_rect = self.bounds.flatten().drop_z();
title_rect.size = Pt2::new(title_rect.size.x, TITLE_BAR_HEIGHT + BORDER_SIZE);
if title_rect.intersect(pos) {
// Remember the grab offset within the module.
self.drag = Some(pos - origin);
}
}
ButtonState::Released => {
self.drag = None;
}
},
_ => {}
}
GuiModuleUpdate::Unchanged
}
}
}
}
}
/// `GuiModuleWrapper` is itself a `Module`: the trait is forwarded straight
/// through to the wrapped inner module.
impl<T: Module> Module for GuiModuleWrapper<T> {
fn start(&mut self) {
self.module.start();
}
fn title(&self) -> String {
self.module.title()
}
}
|
use fnv::{FnvHashMap, FnvHashSet};
use std::collections::vec_deque::IntoIter;
#[cfg(feature = "display")]
use std::fmt::Display;
use std::fmt::{self, Debug, Formatter};
use {Command, Error, Record, RecordBuilder, Signal};
const ORIGIN: usize = 0;
/// A history of commands.
#[derive(Debug)]
pub struct History<R> {
// Id of the currently active branch (`ORIGIN` for the root).
id: usize,
// Next branch id to hand out.
next: usize,
// Parent of the current branch; `None` when rooted at `ORIGIN`.
parent: Option<usize>,
// Linear undo/redo record of the active branch.
record: Record<R>,
// All inactive branches, keyed by branch id.
branches: FnvHashMap<usize, Branch<R>>,
}
impl<R> History<R> {
/// Returns a new history.
#[inline]
pub fn new(receiver: impl Into<R>) -> History<R> {
History {
id: ORIGIN,
next: 1,
parent: None,
record: Record::new(receiver),
branches: FnvHashMap::default(),
}
}
/// Returns a builder for a history.
#[inline]
pub fn builder() -> HistoryBuilder<R> {
HistoryBuilder {
inner: Record::builder(),
capacity_for_branching: 0,
}
}
/// Reserves capacity for at least `additional` more commands.
///
/// # Panics
/// Panics if the new capacity overflows usize.
#[inline]
pub fn reserve(&mut self, additional: usize) {
self.record.reserve(additional);
}
/// Returns the capacity of the history.
#[inline]
pub fn capacity(&self) -> usize {
self.record.capacity()
}
/// Returns the number of commands in the current branch of the history.
#[inline]
pub fn len(&self) -> usize {
self.record.len()
}
/// Returns `true` if the current branch of the history is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.record.is_empty()
}
/// Returns the limit of the history.
#[inline]
pub fn limit(&self) -> usize {
self.record.limit()
}
/// Sets the limit of the history and returns the new limit.
///
/// If `limit < len` the first commands will be removed until `len == limit`.
/// However, if the current active command is going to be removed, the limit is instead
/// adjusted to `len - active` so the active command is not removed.
#[inline]
pub fn set_limit(&mut self, limit: usize) -> usize {
self.record.set_limit(limit)
}
/// Sets how the signal should be handled when the state changes.
#[inline]
pub fn set_signal(&mut self, f: impl FnMut(Signal) + Send + Sync + 'static) {
self.record.set_signal(f);
}
/// Returns `true` if the history can undo.
#[inline]
pub fn can_undo(&self) -> bool {
self.record.can_undo()
}
/// Returns `true` if the history can redo.
#[inline]
pub fn can_redo(&self) -> bool {
self.record.can_redo()
}
/// Marks the receiver as currently being in a saved or unsaved state.
#[inline]
pub fn set_saved(&mut self, saved: bool) {
self.record.set_saved(saved)
}
/// Returns `true` if the receiver is in a saved state, `false` otherwise.
#[inline]
pub fn is_saved(&self) -> bool {
self.record.is_saved()
}
/// Returns the position of the current command.
#[inline]
pub fn cursor(&self) -> usize {
self.record.cursor()
}
/// Removes all commands from the history without undoing them.
#[inline]
pub fn clear(&mut self) {
self.record.clear();
// Reset branch bookkeeping back to a fresh, single-branch history.
self.id = ORIGIN;
self.parent = None;
self.branches.clear();
}
/// Pushes the command to the top of the history and executes its [`apply`] method.
/// The command is merged with the previous top command if they have the same [`id`].
///
/// # Errors
/// If an error occur when executing [`apply`] the error is returned together with the command.
///
/// [`apply`]: trait.Command.html#tymethod.apply
/// [`id`]: trait.Command.html#method.id
#[inline]
pub fn apply(&mut self, cmd: impl Command<R> + 'static) -> Result<Option<usize>, Error<R>>
where
R: 'static,
{
let cursor = self.record.cursor();
// NOTE(review): `__apply` appears to hand back the commands that were cut
// off ahead of the cursor — confirm against `Record::__apply`.
let commands = self.record.__apply(cmd)?;
if commands.len() > 0 {
// The cut-off commands become a new branch forked from the current
// one; its fresh id is returned to the caller.
let id = self.next;
self.next += 1;
self.branches.insert(
id,
Branch {
parent: self.id,
cursor,
commands,
},
);
Ok(Some(id))
} else {
Ok(None)
}
}
/// Calls the [`undo`] method for the active command and sets the previous one as the new active one.
///
/// # Errors
/// If an error occur when executing [`undo`] the error is returned together with the command.
///
/// [`undo`]: trait.Command.html#tymethod.undo
#[inline]
#[must_use]
pub fn undo(&mut self) -> Option<Result<(), Error<R>>> {
self.record.undo()
}
/// Calls the [`redo`] method for the active command and sets the next one as the
/// new active one.
///
/// # Errors
/// If an error occur when executing [`redo`] the error is returned together with the command.
///
/// [`redo`]: trait.Command.html#method.redo
#[inline]
#[must_use]
pub fn redo(&mut self) -> Option<Result<(), Error<R>>> {
self.record.redo()
}
/// Jumps to the command in `branch` at `cursor`.
#[inline]
#[must_use]
pub fn jump_to(&mut self, mut branch: usize, cursor: usize) -> Option<Result<(), Error<R>>>
where
R: 'static,
{
// Fast path: the target lies on the currently active branch.
if self.id == branch {
return self.record.jump_to(cursor);
}
// All visited nodes.
let visited = {
let mut visited =
FnvHashSet::with_capacity_and_hasher(self.record.capacity(), Default::default());
// Find the path from `dest` to `ORIGIN`.
let mut dest = self.branches.get(&branch)?;
while dest.parent != ORIGIN {
// NOTE: the insert happens inside `assert!`; the side effect is
// load-bearing, so this must never become a `debug_assert!`.
assert!(visited.insert(dest.parent));
dest = self.branches.get(&dest.parent).unwrap();
}
visited
};
let mut path = Vec::with_capacity(visited.len() + self.record.len());
// Find the path from `start` to the lowest common ancestor of `dest`.
if let Some(ref parent) = self.parent {
let mut start = self.branches.remove(parent).unwrap();
branch = start.parent;
while !visited.contains(&branch) {
path.push(start);
start = self.branches.remove(&branch).unwrap();
branch = start.parent;
}
}
// Find the path from `dest` to the lowest common ancestor of `start`.
// NOTE(review): a `None` here returns after branches were already removed
// above, leaving the history partially rewritten — confirm this is intended.
let mut dest = self.branches.remove(&branch)?;
branch = dest.parent;
let len = path.len();
path.push(dest);
let last = path.last().map_or(ORIGIN, |last| last.parent);
while branch != last {
dest = self.branches.remove(&branch).unwrap();
branch = dest.parent;
path.push(dest);
}
// The `dest`-side half was collected root-ward; flip it so the whole
// path runs from `start` towards `dest`.
path[len..].reverse();
// Walk the path from `start` to `dest`.
let old = self.id;
for branch in path {
// Move to `dest.cursor` either by undoing or redoing.
if let Err(err) = self.record.jump_to(branch.cursor).unwrap() {
return Some(Err(err));
}
// Apply the commands in the branch and move older commands into their own branch.
for cmd in branch.commands {
let cursor = self.record.cursor();
let commands = match self.record.__apply(cmd) {
Ok(commands) => commands,
Err(err) => return Some(Err(err)),
};
if commands.len() > 0 {
self.branches.insert(
self.id,
Branch {
parent: branch.parent,
cursor,
commands,
},
);
self.parent = if branch.parent == ORIGIN {
None
} else {
Some(self.id)
};
self.id = branch.parent;
}
}
}
// Notify listeners that the active branch changed.
if let Some(ref mut f) = self.record.signal {
f(Signal::Branch { old, new: self.id });
}
Some(Ok(()))
}
/// Returns the string of the command which will be undone in the next call to [`undo`].
///
/// [`undo`]: struct.History.html#method.undo
#[inline]
#[must_use]
#[cfg(feature = "display")]
pub fn to_undo_string(&self) -> Option<String> {
self.record.to_undo_string()
}
/// Returns the string of the command which will be redone in the next call to [`redo`].
///
/// [`redo`]: struct.History.html#method.redo
#[inline]
#[must_use]
#[cfg(feature = "display")]
pub fn to_redo_string(&self) -> Option<String> {
self.record.to_redo_string()
}
/// Returns a reference to the `receiver`.
#[inline]
pub fn as_receiver(&self) -> &R {
self.record.as_receiver()
}
/// Returns a mutable reference to the `receiver`.
///
/// This method should **only** be used when doing changes that should not be able to be undone.
#[inline]
pub fn as_mut_receiver(&mut self) -> &mut R {
self.record.as_mut_receiver()
}
/// Consumes the history, returning the `receiver`.
#[inline]
pub fn into_receiver(self) -> R {
self.record.into_receiver()
}
}
impl<R: Default> Default for History<R> {
#[inline]
fn default() -> History<R> {
History::new(R::default())
}
}
// Cheap borrows of the receiver, mirroring `as_receiver`/`as_mut_receiver`.
impl<R> AsRef<R> for History<R> {
#[inline]
fn as_ref(&self) -> &R {
self.as_receiver()
}
}
impl<R> AsMut<R> for History<R> {
#[inline]
fn as_mut(&mut self) -> &mut R {
self.as_mut_receiver()
}
}
impl<R> From<R> for History<R> {
#[inline]
fn from(receiver: R) -> Self {
History::new(receiver)
}
}
// Delegates `Display` to the inner record.
#[cfg(feature = "display")]
impl<R> Display for History<R> {
#[inline]
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
(&self.record as &Display).fmt(f)
}
}
/// A stored, currently inactive branch: where it forked from and the
/// commands that replay it.
struct Branch<R> {
// Id of the branch this one forked from.
parent: usize,
// Record cursor position at the fork point.
cursor: usize,
// Commands to re-apply when walking onto this branch.
commands: IntoIter<Box<dyn Command<R> + 'static>>,
}
// Hand-written so `Branch` is `Debug` without requiring a `Debug` bound on `R`.
impl<R> Debug for Branch<R> {
#[inline]
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("Branch")
.field("parent", &self.parent)
.field("cursor", &self.cursor)
.field("commands", &self.commands)
.finish()
}
}
/// Builder for a history.
#[derive(Debug)]
pub struct HistoryBuilder<R> {
// Builder for the inner record; most settings are forwarded to it.
inner: RecordBuilder<R>,
// Initial capacity of the branch map, applied in `build`.
capacity_for_branching: usize,
}
impl<R> HistoryBuilder<R> {
/// Sets the specified [capacity] for the history.
///
/// [capacity]: https://doc.rust-lang.org/std/vec/struct.Vec.html#capacity-and-reallocation
#[inline]
pub fn capacity(mut self, capacity: usize) -> HistoryBuilder<R> {
self.inner = self.inner.capacity(capacity);
self
}
/// Sets the specified [capacity] for the branches in the history.
///
/// [capacity]: https://doc.rust-lang.org/std/vec/struct.Vec.html#capacity-and-reallocation
#[inline]
pub fn capacity_for_branching(mut self, capacity: usize) -> HistoryBuilder<R> {
self.capacity_for_branching = capacity;
self
}
/// Sets the `limit` for the history.
///
/// If this limit is reached it will start popping of commands at the beginning
/// of the history when pushing new commands on to the stack. No limit is set by
/// default which means it may grow indefinitely.
#[inline]
pub fn limit(mut self, limit: usize) -> HistoryBuilder<R> {
self.inner = self.inner.limit(limit);
self
}
/// Sets if the receiver is initially in a saved state.
#[inline]
pub fn saved(mut self, saved: bool) -> HistoryBuilder<R> {
self.inner = self.inner.saved(saved);
self
}
/// Decides how the signal should be handled when the state changes.
/// By default the history does nothing.
#[inline]
pub fn signal(mut self, f: impl FnMut(Signal) + Send + Sync + 'static) -> HistoryBuilder<R> {
self.inner = self.inner.signal(f);
self
}
/// Creates the history.
#[inline]
pub fn build(self, receiver: impl Into<R>) -> History<R> {
History {
id: ORIGIN,
next: 1,
parent: None,
record: self.inner.build(receiver),
// Pre-size the branch map with the requested branching capacity.
branches: FnvHashMap::with_capacity_and_hasher(
self.capacity_for_branching,
Default::default(),
),
}
}
}
impl<R: Default> HistoryBuilder<R> {
/// Creates the history with a default `receiver`.
#[inline]
pub fn default(self) -> History<R> {
self.build(R::default())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::error::Error;
/// Test command: pushes a char on apply, pops it back on undo.
#[derive(Debug)]
struct Add(char);
impl Command<String> for Add {
fn apply(&mut self, receiver: &mut String) -> Result<(), Box<dyn Error>> {
receiver.push(self.0);
Ok(())
}
fn undo(&mut self, receiver: &mut String) -> Result<(), Box<dyn Error>> {
self.0 = receiver.pop().ok_or("`receiver` is empty")?;
Ok(())
}
}
#[test]
fn jump_to() {
let mut history = History::default();
history.apply(Add('a')).unwrap();
history.apply(Add('b')).unwrap();
history.apply(Add('c')).unwrap();
history.apply(Add('d')).unwrap();
history.apply(Add('e')).unwrap();
// Undo twice, then apply: the cut-off 'd'/'e' become branch `b`.
history.undo().unwrap().unwrap();
history.undo().unwrap().unwrap();
let b = history.apply(Add('f')).unwrap().unwrap();
history.apply(Add('g')).unwrap();
// Jump back onto the forked branch at cursor 5.
history.jump_to(b, 5).unwrap().unwrap();
}
}
Fix typos in docs
use fnv::{FnvHashMap, FnvHashSet};
use std::collections::vec_deque::IntoIter;
#[cfg(feature = "display")]
use std::fmt::Display;
use std::fmt::{self, Debug, Formatter};
use {Command, Error, Record, RecordBuilder, Signal};
const ORIGIN: usize = 0;
/// A history of commands.
#[derive(Debug)]
pub struct History<R> {
// Id of the currently active branch (`ORIGIN` for the root).
id: usize,
// Next branch id to hand out.
next: usize,
// Parent of the current branch; `None` when rooted at `ORIGIN`.
parent: Option<usize>,
// Linear undo/redo record of the active branch.
record: Record<R>,
// All inactive branches, keyed by branch id.
branches: FnvHashMap<usize, Branch<R>>,
}
impl<R> History<R> {
/// Returns a new history.
#[inline]
pub fn new(receiver: impl Into<R>) -> History<R> {
History {
id: ORIGIN,
next: 1,
parent: None,
record: Record::new(receiver),
branches: FnvHashMap::default(),
}
}
/// Returns a builder for a history.
#[inline]
pub fn builder() -> HistoryBuilder<R> {
HistoryBuilder {
inner: Record::builder(),
capacity_for_branching: 0,
}
}
/// Reserves capacity for at least `additional` more commands.
///
/// # Panics
/// Panics if the new capacity overflows usize.
#[inline]
pub fn reserve(&mut self, additional: usize) {
self.record.reserve(additional);
}
/// Returns the capacity of the history.
#[inline]
pub fn capacity(&self) -> usize {
self.record.capacity()
}
/// Returns the number of commands in the current branch of the history.
#[inline]
pub fn len(&self) -> usize {
self.record.len()
}
/// Returns `true` if the current branch of the history is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.record.is_empty()
}
/// Returns the limit of the history.
#[inline]
pub fn limit(&self) -> usize {
self.record.limit()
}
/// Sets the limit of the history and returns the new limit.
///
/// If `limit < len` the first commands will be removed until `len == limit`.
/// However, if the current active command is going to be removed, the limit is instead
/// adjusted to `len - active` so the active command is not removed.
#[inline]
pub fn set_limit(&mut self, limit: usize) -> usize {
self.record.set_limit(limit)
}
/// Sets how the signal should be handled when the state changes.
#[inline]
pub fn set_signal(&mut self, f: impl FnMut(Signal) + Send + Sync + 'static) {
self.record.set_signal(f);
}
/// Returns `true` if the history can undo.
#[inline]
pub fn can_undo(&self) -> bool {
self.record.can_undo()
}
/// Returns `true` if the history can redo.
#[inline]
pub fn can_redo(&self) -> bool {
self.record.can_redo()
}
/// Marks the receiver as currently being in a saved or unsaved state.
#[inline]
pub fn set_saved(&mut self, saved: bool) {
self.record.set_saved(saved)
}
/// Returns `true` if the receiver is in a saved state, `false` otherwise.
#[inline]
pub fn is_saved(&self) -> bool {
self.record.is_saved()
}
/// Returns the position of the current command.
#[inline]
pub fn cursor(&self) -> usize {
self.record.cursor()
}
/// Removes all commands from the history without undoing them.
#[inline]
pub fn clear(&mut self) {
self.record.clear();
// Reset branch bookkeeping back to a fresh, single-branch history.
self.id = ORIGIN;
self.parent = None;
self.branches.clear();
}
/// Pushes the command to the top of the history and executes its [`apply`] method.
/// The command is merged with the previous top command if they have the same [`id`].
///
/// # Errors
/// If an error occur when executing [`apply`] the error is returned together with the command.
///
/// [`apply`]: trait.Command.html#tymethod.apply
/// [`id`]: trait.Command.html#method.id
#[inline]
pub fn apply(&mut self, cmd: impl Command<R> + 'static) -> Result<Option<usize>, Error<R>>
where
R: 'static,
{
let cursor = self.record.cursor();
// NOTE(review): `__apply` appears to hand back the commands that were cut
// off ahead of the cursor — confirm against `Record::__apply`.
let commands = self.record.__apply(cmd)?;
if commands.len() > 0 {
// The cut-off commands become a new branch forked from the current
// one; its fresh id is returned to the caller.
let id = self.next;
self.next += 1;
self.branches.insert(
id,
Branch {
parent: self.id,
cursor,
commands,
},
);
Ok(Some(id))
} else {
Ok(None)
}
}
/// Calls the [`undo`] method for the active command and sets the previous one as the new active one.
///
/// # Errors
/// If an error occur when executing [`undo`] the error is returned together with the command.
///
/// [`undo`]: trait.Command.html#tymethod.undo
#[inline]
#[must_use]
pub fn undo(&mut self) -> Option<Result<(), Error<R>>> {
self.record.undo()
}
/// Calls the [`redo`] method for the active command and sets the next one as the
/// new active one.
///
/// # Errors
/// If an error occur when executing [`redo`] the error is returned together with the command.
///
/// [`redo`]: trait.Command.html#method.redo
#[inline]
#[must_use]
pub fn redo(&mut self) -> Option<Result<(), Error<R>>> {
self.record.redo()
}
/// Jumps to the command in `branch` at `cursor`.
#[inline]
#[must_use]
pub fn jump_to(&mut self, mut branch: usize, cursor: usize) -> Option<Result<(), Error<R>>>
where
R: 'static,
{
// Fast path: the target lies on the currently active branch.
if self.id == branch {
return self.record.jump_to(cursor);
}
// All visited nodes.
let visited = {
let mut visited =
FnvHashSet::with_capacity_and_hasher(self.record.capacity(), Default::default());
// Find the path from `dest` to `ORIGIN`.
let mut dest = self.branches.get(&branch)?;
while dest.parent != ORIGIN {
// NOTE: the insert happens inside `assert!`; the side effect is
// load-bearing, so this must never become a `debug_assert!`.
assert!(visited.insert(dest.parent));
dest = self.branches.get(&dest.parent).unwrap();
}
visited
};
let mut path = Vec::with_capacity(visited.len() + self.record.len());
// Find the path from `start` to the lowest common ancestor of `dest`.
if let Some(ref parent) = self.parent {
let mut start = self.branches.remove(parent).unwrap();
branch = start.parent;
while !visited.contains(&branch) {
path.push(start);
start = self.branches.remove(&branch).unwrap();
branch = start.parent;
}
}
// Find the path from `dest` to the lowest common ancestor of `start`.
// NOTE(review): a `None` here returns after branches were already removed
// above, leaving the history partially rewritten — confirm this is intended.
let mut dest = self.branches.remove(&branch)?;
branch = dest.parent;
let len = path.len();
path.push(dest);
let last = path.last().map_or(ORIGIN, |last| last.parent);
while branch != last {
dest = self.branches.remove(&branch).unwrap();
branch = dest.parent;
path.push(dest);
}
// The `dest`-side half was collected root-ward; flip it so the whole
// path runs from `start` towards `dest`.
path[len..].reverse();
// Walk the path from `start` to `dest`.
let old = self.id;
for branch in path {
// Move to `dest.cursor` either by undoing or redoing.
if let Err(err) = self.record.jump_to(branch.cursor).unwrap() {
return Some(Err(err));
}
// Apply the commands in the branch and move older commands into their own branch.
for cmd in branch.commands {
let cursor = self.record.cursor();
let commands = match self.record.__apply(cmd) {
Ok(commands) => commands,
Err(err) => return Some(Err(err)),
};
if commands.len() > 0 {
self.branches.insert(
self.id,
Branch {
parent: branch.parent,
cursor,
commands,
},
);
self.parent = if branch.parent == ORIGIN {
None
} else {
Some(self.id)
};
self.id = branch.parent;
}
}
}
// Notify listeners that the active branch changed.
if let Some(ref mut f) = self.record.signal {
f(Signal::Branch { old, new: self.id });
}
Some(Ok(()))
}
/// Returns the string of the command which will be undone in the next call to [`undo`].
///
/// [`undo`]: struct.History.html#method.undo
#[inline]
#[must_use]
#[cfg(feature = "display")]
pub fn to_undo_string(&self) -> Option<String> {
self.record.to_undo_string()
}
/// Returns the string of the command which will be redone in the next call to [`redo`].
///
/// [`redo`]: struct.History.html#method.redo
#[inline]
#[must_use]
#[cfg(feature = "display")]
pub fn to_redo_string(&self) -> Option<String> {
self.record.to_redo_string()
}
/// Returns a reference to the `receiver`.
#[inline]
pub fn as_receiver(&self) -> &R {
self.record.as_receiver()
}
/// Returns a mutable reference to the `receiver`.
///
/// This method should **only** be used when doing changes that should not be able to be undone.
#[inline]
pub fn as_mut_receiver(&mut self) -> &mut R {
self.record.as_mut_receiver()
}
/// Consumes the history, returning the `receiver`.
#[inline]
pub fn into_receiver(self) -> R {
self.record.into_receiver()
}
}
impl<R: Default> Default for History<R> {
#[inline]
fn default() -> History<R> {
History::new(R::default())
}
}
// Cheap borrows of the receiver, mirroring `as_receiver`/`as_mut_receiver`.
impl<R> AsRef<R> for History<R> {
#[inline]
fn as_ref(&self) -> &R {
self.as_receiver()
}
}
impl<R> AsMut<R> for History<R> {
#[inline]
fn as_mut(&mut self) -> &mut R {
self.as_mut_receiver()
}
}
impl<R> From<R> for History<R> {
#[inline]
fn from(receiver: R) -> Self {
History::new(receiver)
}
}
// Delegates `Display` to the inner record.
#[cfg(feature = "display")]
impl<R> Display for History<R> {
#[inline]
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
(&self.record as &Display).fmt(f)
}
}
/// A stored, currently inactive branch: where it forked from and the
/// commands that replay it.
struct Branch<R> {
// Id of the branch this one forked from.
parent: usize,
// Record cursor position at the fork point.
cursor: usize,
// Commands to re-apply when walking onto this branch.
commands: IntoIter<Box<dyn Command<R> + 'static>>,
}
// Hand-written so `Branch` is `Debug` without requiring a `Debug` bound on `R`.
impl<R> Debug for Branch<R> {
#[inline]
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("Branch")
.field("parent", &self.parent)
.field("cursor", &self.cursor)
.field("commands", &self.commands)
.finish()
}
}
/// Builder for a history.
#[derive(Debug)]
pub struct HistoryBuilder<R> {
// Builder for the inner record; most settings are forwarded to it.
inner: RecordBuilder<R>,
// Initial capacity of the branch map, applied in `build`.
capacity_for_branching: usize,
}
impl<R> HistoryBuilder<R> {
/// Sets the specified [capacity] for the history.
///
/// [capacity]: https://doc.rust-lang.org/std/vec/struct.Vec.html#capacity-and-reallocation
#[inline]
pub fn capacity(mut self, capacity: usize) -> HistoryBuilder<R> {
self.inner = self.inner.capacity(capacity);
self
}
/// Sets the specified [capacity] for the branches in the history.
///
/// [capacity]: https://doc.rust-lang.org/std/vec/struct.Vec.html#capacity-and-reallocation
#[inline]
pub fn capacity_for_branching(mut self, capacity: usize) -> HistoryBuilder<R> {
self.capacity_for_branching = capacity;
self
}
/// Sets the `limit` for the history.
///
/// If this limit is reached it will start popping of commands at the beginning
/// of the history when pushing new commands on to the stack. No limit is set by
/// default which means it may grow indefinitely.
#[inline]
pub fn limit(mut self, limit: usize) -> HistoryBuilder<R> {
self.inner = self.inner.limit(limit);
self
}
/// Sets if the receiver is initially in a saved state.
#[inline]
pub fn saved(mut self, saved: bool) -> HistoryBuilder<R> {
self.inner = self.inner.saved(saved);
self
}
/// Decides how the signal should be handled when the state changes.
/// By default the history does nothing.
#[inline]
pub fn signal(mut self, f: impl FnMut(Signal) + Send + Sync + 'static) -> HistoryBuilder<R> {
self.inner = self.inner.signal(f);
self
}
/// Creates the history.
#[inline]
pub fn build(self, receiver: impl Into<R>) -> History<R> {
History {
id: ORIGIN,
next: 1,
parent: None,
record: self.inner.build(receiver),
// Pre-size the branch map with the requested branching capacity.
branches: FnvHashMap::with_capacity_and_hasher(
self.capacity_for_branching,
Default::default(),
),
}
}
}
impl<R: Default> HistoryBuilder<R> {
/// Creates the history with a default `receiver`.
#[inline]
pub fn default(self) -> History<R> {
// Shorthand for `build(R::default())`.
self.build(R::default())
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::error::Error;
    // Minimal test command: pushes a char on apply, pops it back on undo.
    #[derive(Debug)]
    struct Add(char);
    impl Command<String> for Add {
        fn apply(&mut self, receiver: &mut String) -> Result<(), Box<dyn Error>> {
            receiver.push(self.0);
            Ok(())
        }
        fn undo(&mut self, receiver: &mut String) -> Result<(), Box<dyn Error>> {
            // Remember the popped char so a later redo re-applies the same value.
            self.0 = receiver.pop().ok_or("`receiver` is empty")?;
            Ok(())
        }
    }
    /// Builds a history, undoes into the past, branches off it, and jumps
    /// back to the recorded branch.
    #[test]
    fn jump_to() {
        let mut history = History::default();
        history.apply(Add('a')).unwrap();
        history.apply(Add('b')).unwrap();
        history.apply(Add('c')).unwrap();
        history.apply(Add('d')).unwrap();
        history.apply(Add('e')).unwrap();
        history.undo().unwrap().unwrap();
        history.undo().unwrap().unwrap();
        // Applying after undo creates a new branch; keep its id for jump_to.
        let b = history.apply(Add('f')).unwrap().unwrap();
        history.apply(Add('g')).unwrap();
        history.jump_to(b, 5).unwrap().unwrap();
    }
}
|
// hex encoder and decoder used by rust-protobuf unittests
#![allow(dead_code)]
use std::char;
/// Converts one hexadecimal digit (`0-9`, `a-f`, `A-F`) to its value.
///
/// # Panics
/// Panics on any character that is not a hex digit.
fn decode_hex_digit(digit: char) -> u8 {
    match digit {
        // `...` range patterns and `'0' as u8` are pre-1.0 idioms; use
        // inclusive ranges and byte literals.
        '0'..='9' => digit as u8 - b'0',
        'a'..='f' => digit as u8 - b'a' + 10,
        'A'..='F' => digit as u8 - b'A' + 10,
        _ => panic!("not a hex digit: {:?}", digit),
    }
}
/// Decodes a hex string such as `"01 a2 1a fe"` into raw bytes.
///
/// Spaces are ignored; every remaining pair of characters is parsed as one
/// byte (high nibble first).
///
/// # Panics
/// Panics on a non-hex character or an odd number of hex digits.
pub fn decode_hex(hex: &str) -> Vec<u8> {
    // The original used `str::char_at` (removed from Rust) and re-counted
    // `hex.chars().count()` on every loop iteration (O(n^2)); a single
    // iterator pass does the same job.
    let nibbles: Vec<u8> = hex
        .chars()
        .filter(|c| *c != ' ')
        .map(|c| c.to_digit(16).unwrap_or_else(|| panic!("not a hex digit: {:?}", c)) as u8)
        .collect();
    assert!(nibbles.len() % 2 == 0, "odd number of hex digits");
    // Combine each (high, low) nibble pair into a byte.
    nibbles
        .chunks(2)
        .map(|pair| (pair[0] << 4) | pair[1])
        .collect()
}
/// Converts a nibble value (0-15) to its lowercase hex character.
///
/// # Panics
/// Panics if `digit` is 16 or greater.
fn encode_hex_digit(digit: u8) -> char {
    // `uint` no longer exists; widen explicitly to u32 for `from_digit`.
    char::from_digit(u32::from(digit), 16).expect("nibble out of range")
}
/// Splits a byte into its two lowercase hex characters, high nibble first.
fn encode_hex_byte(byte: u8) -> [char; 2] {
    // `[char, ..2]` is pre-1.0 array syntax; also convert nibbles inline so
    // the function stands alone.
    let nibble = |d: u8| char::from_digit(u32::from(d), 16).expect("nibble out of range");
    [nibble(byte >> 4), nibble(byte & 0x0F)]
}
/// Encodes bytes as lowercase hex, one two-character pair per byte,
/// separated by single spaces (e.g. `[0x01, 0xa2]` -> `"01 a2"`).
pub fn encode_hex(bytes: &[u8]) -> String {
    // `String::from_chars`, `as_slice` and `connect` are all gone from the
    // standard library; format each byte directly and `join`.
    bytes
        .iter()
        .map(|byte| format!("{:02x}", byte))
        .collect::<Vec<String>>()
        .join(" ")
}
// Unit tests for the hex codec above.
#[cfg(test)]
mod test {
    use super::decode_hex;
    use super::encode_hex;
    // Decoding: empty input, single bytes, mixed case, space-separated pairs.
    #[test]
    fn test_decode_hex() {
        assert_eq!([].to_vec(), decode_hex(""));
        assert_eq!([0x00u8].to_vec(), decode_hex("00"));
        assert_eq!([0xffu8].to_vec(), decode_hex("ff"));
        assert_eq!([0xabu8].to_vec(), decode_hex("AB"));
        assert_eq!([0xfau8, 0x19].to_vec(), decode_hex("fa 19"));
    }
    // Encoding: output is lowercase, space-separated byte pairs.
    #[test]
    fn test_encode_hex() {
        assert_eq!("".to_string(), encode_hex(&[]));
        assert_eq!("00".to_string(), encode_hex(&[0x00]));
        assert_eq!("ab".to_string(), encode_hex(&[0xab]));
        assert_eq!("01 a2 1a fe".to_string(), encode_hex(&[0x01, 0xa2, 0x1a, 0xfe]));
    }
}
Replace deprecated function
// hex encoder and decoder used by rust-protobuf unittests
#![allow(dead_code)]
use std::char;
/// Converts one hexadecimal digit (`0-9`, `a-f`, `A-F`) to its value.
///
/// # Panics
/// Panics on any character that is not a hex digit.
fn decode_hex_digit(digit: char) -> u8 {
    match digit {
        // Replace the removed `...` range patterns and `'0' as u8` casts
        // with inclusive ranges and byte literals.
        '0'..='9' => digit as u8 - b'0',
        'a'..='f' => digit as u8 - b'a' + 10,
        'A'..='F' => digit as u8 - b'A' + 10,
        _ => panic!("not a hex digit: {:?}", digit),
    }
}
/// Decodes a hex string such as `"01 a2 1a fe"` into raw bytes.
///
/// Spaces are ignored; every remaining pair of characters is parsed as one
/// byte (high nibble first).
///
/// # Panics
/// Panics on a non-hex character or an odd number of hex digits.
pub fn decode_hex(hex: &str) -> Vec<u8> {
    // `str::char_at` was removed from Rust, and the old loop recomputed
    // `hex.chars().count()` each iteration (O(n^2)); one iterator pass is
    // both current and linear.
    let nibbles: Vec<u8> = hex
        .chars()
        .filter(|c| *c != ' ')
        .map(|c| c.to_digit(16).unwrap_or_else(|| panic!("not a hex digit: {:?}", c)) as u8)
        .collect();
    assert!(nibbles.len() % 2 == 0, "odd number of hex digits");
    // Combine each (high, low) nibble pair into a byte.
    nibbles
        .chunks(2)
        .map(|pair| (pair[0] << 4) | pair[1])
        .collect()
}
/// Converts a nibble value (0-15) to its lowercase hex character.
///
/// # Panics
/// Panics if `digit` is 16 or greater.
fn encode_hex_digit(digit: u8) -> char {
    // `uint` no longer exists; widen explicitly to u32 for `from_digit`.
    char::from_digit(u32::from(digit), 16).expect("nibble out of range")
}
/// Splits a byte into its two lowercase hex characters, high nibble first.
fn encode_hex_byte(byte: u8) -> [char; 2] {
    // `[char, ..2]` is pre-1.0 array syntax; convert nibbles inline so the
    // function stands alone.
    let nibble = |d: u8| char::from_digit(u32::from(d), 16).expect("nibble out of range");
    [nibble(byte >> 4), nibble(byte & 0x0F)]
}
/// Encodes bytes as lowercase hex, one two-character pair per byte,
/// separated by single spaces (e.g. `[0x01, 0xa2]` -> `"01 a2"`).
pub fn encode_hex(bytes: &[u8]) -> String {
    // `connect` was deprecated in favour of `join`; formatting each byte
    // with `{:02x}` also avoids the char-array round trip.
    bytes
        .iter()
        .map(|byte| format!("{:02x}", byte))
        .collect::<Vec<String>>()
        .join(" ")
}
// Unit tests for the hex codec above.
#[cfg(test)]
mod test {
    use super::decode_hex;
    use super::encode_hex;
    // Decoding: empty input, single bytes, mixed case, space-separated pairs.
    #[test]
    fn test_decode_hex() {
        assert_eq!([].to_vec(), decode_hex(""));
        assert_eq!([0x00u8].to_vec(), decode_hex("00"));
        assert_eq!([0xffu8].to_vec(), decode_hex("ff"));
        assert_eq!([0xabu8].to_vec(), decode_hex("AB"));
        assert_eq!([0xfau8, 0x19].to_vec(), decode_hex("fa 19"));
    }
    // Encoding: output is lowercase, space-separated byte pairs.
    #[test]
    fn test_encode_hex() {
        assert_eq!("".to_string(), encode_hex(&[]));
        assert_eq!("00".to_string(), encode_hex(&[0x00]));
        assert_eq!("ab".to_string(), encode_hex(&[0xab]));
        assert_eq!("01 a2 1a fe".to_string(), encode_hex(&[0x01, 0xa2, 0x1a, 0xfe]));
    }
}
|
// Copyright (C) 2014 The 6502-rs Developers
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the names of the copyright holders nor the names of any
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
use log;
use std;
use address::{AddressDiff};
use instruction;
use instruction::{DecodedInstr};
use memory::Memory;
use registers::{ Registers, Status, StatusArgs };
use registers::{ PS_NEGATIVE, PS_OVERFLOW, PS_ZERO, PS_CARRY };
/// A complete 6502 machine: CPU registers plus addressable memory.
pub struct Machine {
    pub registers: Registers,
    pub memory: Memory
}
impl Machine {
pub fn new() -> Machine {
Machine{
registers: Registers::new(),
memory: Memory::new()
}
}
pub fn reset(&mut self) {
*self = Machine::new();
}
pub fn fetch_next_and_decode(&mut self) -> Option<DecodedInstr> {
let x: u8 = self.memory.get_byte(self.registers.program_counter);
match instruction::OPCODES[x as uint] {
Some((instr, am)) => {
let extra_bytes = am.extra_bytes();
let num_bytes = AddressDiff(1) + extra_bytes;
let data_start = self.registers.program_counter
+ AddressDiff(1);
let slice = self.memory.get_slice(data_start, extra_bytes);
let am_out = am.process(self, slice);
// Increment program counter
self.registers.program_counter =
self.registers.program_counter + num_bytes;
Some((instr, am_out))
}
_ => None
}
}
pub fn execute_instruction(&mut self, decoded_instr: DecodedInstr) {
match decoded_instr {
(instruction::ADC, instruction::UseImmediate(val)) => {
log!(log::DEBUG, "add with carry immediate: {}", val);
self.add_with_carry(val as i8);
},
(instruction::ADC, instruction::UseAddress(addr)) => {
let val = self.memory.get_byte(addr) as i8;
log!(log::DEBUG, "add with carry. address: {}. value: {}",
addr, val);
self.add_with_carry(val);
},
(instruction::DEX, instruction::UseImplied) => {
self.dec_x();
}
(instruction::LDA, instruction::UseImmediate(val)) => {
log!(log::DEBUG, "load A immediate: {}", val);
self.load_accumulator(val as i8);
},
(instruction::LDA, instruction::UseAddress(addr)) => {
let val = self.memory.get_byte(addr);
log!(log::DEBUG, "load A. address: {}. value: {}", addr, val);
self.load_accumulator(val as i8);
},
(instruction::LDX, instruction::UseImmediate(val)) => {
log!(log::DEBUG, "load X immediate: {}", val);
self.load_x_register(val as i8);
},
(instruction::LDX, instruction::UseAddress(addr)) => {
let val = self.memory.get_byte(addr);
log!(log::DEBUG, "load X. address: {}. value: {}", addr, val);
self.load_x_register(val as i8);
},
(instruction::LDY, instruction::UseImmediate(val)) => {
log!(log::DEBUG, "load Y immediate: {}", val);
self.load_y_register(val as i8);
},
(instruction::LDY, instruction::UseAddress(addr)) => {
let val = self.memory.get_byte(addr);
log!(log::DEBUG, "load Y. address: {}. value: {}", addr, val);
self.load_y_register(val as i8);
},
(instruction::NOP, _) => {
log!(log::DEBUG, "nop instr");
},
(_, _) => {
log!(log::DEBUG, "attempting to execute unimplemented \
instruction");
},
};
}
pub fn run(&mut self) {
loop {
if let Some(decoded_instr) = self.fetch_next_and_decode() {
self.execute_instruction(decoded_instr);
} else {
break
}
}
}
fn load_register_with_flags(register: &mut i8,
status: &mut Status,
value: i8) {
*register = value;
let is_zero = value == 0;
let is_negative = value < 0;
status.set_with_mask(
PS_ZERO | PS_NEGATIVE,
Status::new(StatusArgs { zero: is_zero,
negative: is_negative,
..StatusArgs::none() } ));
}
pub fn load_x_register(&mut self, value: i8) {
Machine::load_register_with_flags(&mut self.registers.index_x,
&mut self.registers.status,
value);
}
pub fn load_y_register(&mut self, value: i8) {
Machine::load_register_with_flags(&mut self.registers.index_y,
&mut self.registers.status,
value);
}
pub fn load_accumulator(&mut self, value: i8) {
Machine::load_register_with_flags(&mut self.registers.accumulator,
&mut self.registers.status,
value);
}
// TODO akeeton: Implement binary-coded decimal.
pub fn add_with_carry(&mut self, value: i8) {
let a_before: i8 = self.registers.accumulator;
let c_before: i8 = self.registers.status.get_carry();
let a_after: i8 = a_before + c_before + value;
debug_assert_eq!(a_after as u8, a_before as u8 + c_before as u8
+ value as u8);
let did_carry = (a_after as u8) < (a_before as u8);
let did_overflow =
(a_before < 0 && value < 0 && a_after >= 0)
|| (a_before > 0 && value > 0 && a_after <= 0);
let mask = PS_CARRY | PS_OVERFLOW;
self.registers.status.set_with_mask(mask,
Status::new(StatusArgs { carry: did_carry,
overflow: did_overflow,
..StatusArgs::none() } ));
self.load_accumulator(a_after);
log!(log::DEBUG, "accumulator: {}", self.registers.accumulator);
}
pub fn dec_x(&mut self) {
let x_before = self.registers.index_x;
let value = 1i8;
let x_after = x_before - value;
self.registers.index_x = x_before - 1;
let is_negative = x_after < 0;
let is_zero = x_after == 0;
let mask = ps_negative | ps_zero;
self.registers.status.set_with_mask(
mask,
Status::new(StatusArgs {
negative: is_negative,
zero: is_zero,
..StatusArgs::none()
})
);
}
}
// Human-readable dump of the machine (currently just the accumulator).
// NOTE(review): `std::fmt::Show` is pre-1.0 Rust; it was later split into
// `Display`/`Debug`. This file targets an old toolchain.
impl std::fmt::Show for Machine {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "Machine Dump:\n\nAccumulator: {}",
               self.registers.accumulator)
    }
}
#[test]
fn add_with_carry_test() {
    // Asserts the accumulator value and the four relevant status flags in
    // one call; the expectations are identical to checking each flag with
    // `status.contains(...)` individually.
    fn check(machine: &Machine, acc: i8,
             carry: bool, zero: bool, negative: bool, overflow: bool) {
        assert_eq!(machine.registers.accumulator, acc);
        assert_eq!(machine.registers.status.contains(PS_CARRY), carry);
        assert_eq!(machine.registers.status.contains(PS_ZERO), zero);
        assert_eq!(machine.registers.status.contains(PS_NEGATIVE), negative);
        assert_eq!(machine.registers.status.contains(PS_OVERFLOW), overflow);
    }
    // Simple add, then cancel back to zero (sets carry + zero), then add again.
    let mut machine = Machine::new();
    machine.add_with_carry(1);
    check(&machine, 1, false, false, false, false);
    machine.add_with_carry(-1);
    check(&machine, 0, true, true, false, false);
    machine.add_with_carry(1);
    check(&machine, 2, false, false, false, false);
    // Climb to +127, cancel, then walk through the negative range.
    let mut machine = Machine::new();
    machine.add_with_carry(127);
    check(&machine, 127, false, false, false, false);
    machine.add_with_carry(-127);
    check(&machine, 0, true, true, false, false);
    machine.registers.status.remove(PS_CARRY);
    machine.add_with_carry(-128);
    check(&machine, -128, false, false, true, false);
    machine.add_with_carry(127);
    check(&machine, -1, false, false, true, false);
    // Signed overflow: 127 + 1 wraps to -128 and sets the overflow flag.
    let mut machine = Machine::new();
    machine.add_with_carry(127);
    check(&machine, 127, false, false, false, false);
    machine.add_with_carry(1);
    check(&machine, -128, false, false, true, true);
}
/// Exercises DEX: decrementing wraps below zero and updates the zero and
/// negative flags (carry and overflow are never touched).
#[test]
fn dec_x_test() {
    let mut machine = Machine::new();
    machine.dec_x();
    assert_eq!(machine.registers.index_x, -1);
    // The status constants are the SCREAMING_SNAKE_CASE `PS_*` items
    // imported at the top of the file; the lowercase `ps_*` names used
    // previously do not exist and fail to compile.
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    machine.dec_x();
    assert_eq!(machine.registers.index_x, -2);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    machine.load_x_register(5);
    machine.dec_x();
    assert_eq!(machine.registers.index_x, 4);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    machine.dec_x();
    machine.dec_x();
    machine.dec_x();
    machine.dec_x();
    assert_eq!(machine.registers.index_x, 0);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), true);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    machine.dec_x();
    assert_eq!(machine.registers.index_x, -1);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
}
Rebase onto master to pick up the compiler-error fixes, and use load_x_register to simplify the implementation of dec_x, since load_x_register already sets the zero and negative status flags.
// Copyright (C) 2014 The 6502-rs Developers
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the names of the copyright holders nor the names of any
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
use log;
use std;
use address::{AddressDiff};
use instruction;
use instruction::{DecodedInstr};
use memory::Memory;
use registers::{ Registers, Status, StatusArgs };
use registers::{ PS_NEGATIVE, PS_OVERFLOW, PS_ZERO, PS_CARRY };
/// A complete 6502 machine: CPU registers plus addressable memory.
pub struct Machine {
    pub registers: Registers,
    pub memory: Memory
}
impl Machine {
    /// Creates a machine with power-on registers and zeroed memory.
    pub fn new() -> Machine {
        Machine{
            registers: Registers::new(),
            memory: Memory::new()
        }
    }
    /// Resets the machine to its power-on state.
    pub fn reset(&mut self) {
        *self = Machine::new();
    }
    /// Fetches the opcode byte at the program counter, looks it up in the
    /// opcode table, and advances the program counter past the instruction
    /// and its operand bytes. Returns `None` for an unrecognized opcode.
    pub fn fetch_next_and_decode(&mut self) -> Option<DecodedInstr> {
        let x: u8 = self.memory.get_byte(self.registers.program_counter);
        match instruction::OPCODES[x as uint] {
            Some((instr, am)) => {
                let extra_bytes = am.extra_bytes();
                let num_bytes = AddressDiff(1) + extra_bytes;
                let data_start = self.registers.program_counter
                               + AddressDiff(1);
                let slice = self.memory.get_slice(data_start, extra_bytes);
                let am_out = am.process(self, slice);
                // Increment program counter
                self.registers.program_counter =
                    self.registers.program_counter + num_bytes;
                Some((instr, am_out))
            }
            _ => None
        }
    }
    /// Dispatches one decoded instruction to the matching handler.
    /// Unimplemented instructions are logged and otherwise ignored.
    pub fn execute_instruction(&mut self, decoded_instr: DecodedInstr) {
        match decoded_instr {
            (instruction::ADC, instruction::UseImmediate(val)) => {
                log!(log::DEBUG, "add with carry immediate: {}", val);
                self.add_with_carry(val as i8);
            },
            (instruction::ADC, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                log!(log::DEBUG, "add with carry. address: {}. value: {}",
                     addr, val);
                self.add_with_carry(val);
            },
            (instruction::DEX, instruction::UseImplied) => {
                self.dec_x();
            }
            (instruction::LDA, instruction::UseImmediate(val)) => {
                log!(log::DEBUG, "load A immediate: {}", val);
                self.load_accumulator(val as i8);
            },
            (instruction::LDA, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                log!(log::DEBUG, "load A. address: {}. value: {}", addr, val);
                self.load_accumulator(val as i8);
            },
            (instruction::LDX, instruction::UseImmediate(val)) => {
                log!(log::DEBUG, "load X immediate: {}", val);
                self.load_x_register(val as i8);
            },
            (instruction::LDX, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                log!(log::DEBUG, "load X. address: {}. value: {}", addr, val);
                self.load_x_register(val as i8);
            },
            (instruction::LDY, instruction::UseImmediate(val)) => {
                log!(log::DEBUG, "load Y immediate: {}", val);
                self.load_y_register(val as i8);
            },
            (instruction::LDY, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                log!(log::DEBUG, "load Y. address: {}. value: {}", addr, val);
                self.load_y_register(val as i8);
            },
            (instruction::NOP, _) => {
                log!(log::DEBUG, "nop instr");
            },
            (_, _) => {
                log!(log::DEBUG, "attempting to execute unimplemented \
                                  instruction");
            },
        };
    }
    /// Runs fetch/decode/execute until an undecodable opcode is reached.
    pub fn run(&mut self) {
        loop {
            if let Some(decoded_instr) = self.fetch_next_and_decode() {
                self.execute_instruction(decoded_instr);
            } else {
                break
            }
        }
    }
    /// Stores `value` into `register` and updates the zero and negative
    /// flags to describe it; all other flags are left untouched.
    fn load_register_with_flags(register: &mut i8,
                                status: &mut Status,
                                value: i8) {
        *register = value;
        let is_zero = value == 0;
        let is_negative = value < 0;
        status.set_with_mask(
            PS_ZERO | PS_NEGATIVE,
            Status::new(StatusArgs { zero: is_zero,
                                     negative: is_negative,
                                     ..StatusArgs::none() } ));
    }
    /// Loads `value` into X, updating the zero/negative flags.
    pub fn load_x_register(&mut self, value: i8) {
        Machine::load_register_with_flags(&mut self.registers.index_x,
                                          &mut self.registers.status,
                                          value);
    }
    /// Loads `value` into Y, updating the zero/negative flags.
    pub fn load_y_register(&mut self, value: i8) {
        Machine::load_register_with_flags(&mut self.registers.index_y,
                                          &mut self.registers.status,
                                          value);
    }
    /// Loads `value` into the accumulator, updating the zero/negative flags.
    pub fn load_accumulator(&mut self, value: i8) {
        Machine::load_register_with_flags(&mut self.registers.accumulator,
                                          &mut self.registers.status,
                                          value);
    }
    // TODO akeeton: Implement binary-coded decimal.
    /// ADC: accumulator += value + carry. Sets carry and overflow here and
    /// zero/negative via `load_accumulator`.
    pub fn add_with_carry(&mut self, value: i8) {
        let a_before: i8 = self.registers.accumulator;
        let c_before: i8 = self.registers.status.get_carry();
        let a_after: i8 = a_before + c_before + value;
        debug_assert_eq!(a_after as u8, a_before as u8 + c_before as u8
                                        + value as u8);
        // Unsigned wrap-around indicates a carry out of bit 7.
        let did_carry = (a_after as u8) < (a_before as u8);
        // Signed overflow: both operands share a sign the result lost.
        let did_overflow =
            (a_before < 0 && value < 0 && a_after >= 0)
            || (a_before > 0 && value > 0 && a_after <= 0);
        let mask = PS_CARRY | PS_OVERFLOW;
        self.registers.status.set_with_mask(mask,
            Status::new(StatusArgs { carry: did_carry,
                                     overflow: did_overflow,
                                     ..StatusArgs::none() } ));
        self.load_accumulator(a_after);
        log!(log::DEBUG, "accumulator: {}", self.registers.accumulator);
    }
    /// DEX: decrements X by one. `load_x_register` already maintains the
    /// zero and negative flags, so no manual flag bookkeeping is needed.
    pub fn dec_x(&mut self) {
        let val = self.registers.index_x;
        self.load_x_register(val - 1);
    }
}
// Human-readable dump of the machine (currently just the accumulator).
// NOTE(review): `std::fmt::Show` is pre-1.0 Rust; it was later split into
// `Display`/`Debug`. This file targets an old toolchain.
impl std::fmt::Show for Machine {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "Machine Dump:\n\nAccumulator: {}",
               self.registers.accumulator)
    }
}
#[test]
fn add_with_carry_test() {
    // One helper call per step replaces the five repeated assert_eq! lines;
    // the expected values are byte-for-byte the same as before.
    fn verify(m: &Machine, acc: i8,
              carry: bool, zero: bool, negative: bool, overflow: bool) {
        assert_eq!(m.registers.accumulator, acc);
        assert_eq!(m.registers.status.contains(PS_CARRY), carry);
        assert_eq!(m.registers.status.contains(PS_ZERO), zero);
        assert_eq!(m.registers.status.contains(PS_NEGATIVE), negative);
        assert_eq!(m.registers.status.contains(PS_OVERFLOW), overflow);
    }
    // Simple add, then cancel back to zero (sets carry + zero), then add again.
    let mut m = Machine::new();
    m.add_with_carry(1);
    verify(&m, 1, false, false, false, false);
    m.add_with_carry(-1);
    verify(&m, 0, true, true, false, false);
    m.add_with_carry(1);
    verify(&m, 2, false, false, false, false);
    // Climb to +127, cancel, then walk through the negative range.
    let mut m = Machine::new();
    m.add_with_carry(127);
    verify(&m, 127, false, false, false, false);
    m.add_with_carry(-127);
    verify(&m, 0, true, true, false, false);
    m.registers.status.remove(PS_CARRY);
    m.add_with_carry(-128);
    verify(&m, -128, false, false, true, false);
    m.add_with_carry(127);
    verify(&m, -1, false, false, true, false);
    // Signed overflow: 127 + 1 wraps to -128 and sets the overflow flag.
    let mut m = Machine::new();
    m.add_with_carry(127);
    verify(&m, 127, false, false, false, false);
    m.add_with_carry(1);
    verify(&m, -128, false, false, true, true);
}
#[test]
fn dec_x_test() {
    // Asserts X plus all four status flags; same expectations as the
    // previous repeated assert_eq! lines.
    fn verify(m: &Machine, x: i8,
              carry: bool, zero: bool, negative: bool, overflow: bool) {
        assert_eq!(m.registers.index_x, x);
        assert_eq!(m.registers.status.contains(PS_CARRY), carry);
        assert_eq!(m.registers.status.contains(PS_ZERO), zero);
        assert_eq!(m.registers.status.contains(PS_NEGATIVE), negative);
        assert_eq!(m.registers.status.contains(PS_OVERFLOW), overflow);
    }
    let mut m = Machine::new();
    // Decrementing from zero goes negative.
    m.dec_x();
    verify(&m, -1, false, false, true, false);
    m.dec_x();
    verify(&m, -2, false, false, true, false);
    // Reload X and count back down toward zero.
    m.load_x_register(5);
    m.dec_x();
    verify(&m, 4, false, false, false, false);
    for _ in 0..4 {
        m.dec_x();
    }
    verify(&m, 0, false, true, false, false);
    // One more step flips negative on and zero off again.
    m.dec_x();
    verify(&m, -1, false, false, true, false);
}
|
use rand::thread_rng;
use std::io;
use std::fmt;
use opcode::{Opcode, OpcodeError, SetRegMode};
// Address at which loaded programs begin (the conventional CHIP-8 entry point).
const PROGRAM_START: u16 = 0x200;
// Total addressable memory in bytes.
const MEMORY_SIZE: usize = 4096;
// Number of general-purpose registers (V0 - V15).
const REGISTER_COUNT: usize = 16;
/// Errors that can occur while executing a program.
pub enum RuntimeError {
    // `Return` was executed with no caller on the stack.
    EmptyCallStack,
    // A register index outside V0 - V15 was referenced.
    InvalidRegister(u8),
    // A memory address outside the address space was referenced.
    AddressOutOfBounds(u16),
    // The two-byte opcode could not be decoded.
    OpcodeErr(OpcodeError),
}
/// Complete state of a CHIP-8 virtual machine.
pub struct Chip8 {
    memory: [u8; MEMORY_SIZE],
    regs: [u8; REGISTER_COUNT], // registers V0 - V15
    addressReg: u16, // register I
    pc: u16,
    // Stores the program counters of sub routine calls, used to return
    // after a sub routine ends.
    stack: Vec<u16>,
    // 64x32 monochrome framebuffer, indexed [row][column].
    screen: [[bool; 64]; 32],
}
impl Chip8 {
    /// Creates a machine with zeroed memory and registers; the program
    /// counter starts at `PROGRAM_START`, where ROMs are loaded.
    pub fn new() -> Chip8 {
        Chip8 {
            // Use the named constants instead of repeating 4096 / 16.
            memory: [0; MEMORY_SIZE],
            regs: [0; REGISTER_COUNT],
            addressReg: 0,
            pc: PROGRAM_START,
            stack: Vec::new(),
            screen: [[false; 64]; 32],
        }
    }
    /// Reads a ROM image into memory starting at `PROGRAM_START`.
    ///
    /// Returns any I/O error raised while reading `program`. Panics (index
    /// out of bounds) if the image does not fit above `PROGRAM_START`.
    pub fn load_program<R: io::Read>(&mut self, mut program: R) -> io::Result<()> {
        let mut bytes = Vec::new();
        try!(program.read_to_end(&mut bytes));
        for (offset, byte) in bytes.iter().enumerate() {
            self.memory[offset + PROGRAM_START as usize] = *byte;
        }
        Ok(())
    }
    /// Fetches, decodes and executes the two-byte opcode at the program
    /// counter.
    pub fn cycle(&mut self) -> Result<(), RuntimeError> {
        use self::RuntimeError::*;
        let pc_index = self.pc as usize;
        // Opcodes are stored big-endian: high byte first.
        let opcode_bytes = (self.memory[pc_index] as u16) << 8 | (self.memory[pc_index + 1] as u16);
        let opcode = match Opcode::from_u16(opcode_bytes) {
            Ok(opcode) => opcode,
            Err(err) => return Err(OpcodeErr(err)),
        };
        // Debug `println!` of every opcode removed: it spammed stdout on
        // each cycle.
        try!(self.execute_opcode(opcode));
        Ok(())
    }
    /// Turns every pixel of the 64x32 display off.
    pub fn clear_screen(&mut self) {
        for row in self.screen.iter_mut() {
            for col in row.iter_mut() {
                *col = false;
            }
        }
    }
    // Wrapping is performed in this function, no need to perform it outside
    // NOTE(review): unfinished stub — it computes the wrapped coordinates
    // but never touches the screen (it does not even take `self`).
    pub fn toggle_pixel(x: usize, y: usize) {
        let x = x % 64;
        let y = y % 32;
    }
    /// Executes one decoded opcode, updating registers, stack, screen and
    /// program counter.
    pub fn execute_opcode(&mut self, opcode: Opcode) -> Result<(), RuntimeError> {
        use self::RuntimeError::*;
        use opcode::Opcode::*;
        match opcode {
            ClearScreen => self.clear_screen(),
            Return => {
                self.pc = match self.stack.pop() {
                    Some(addr) => addr,
                    None => return Err(EmptyCallStack),
                };
            },
            JumpTo { addr, plus_v0 } => {
                self.pc = addr;
                if plus_v0 {
                    self.pc += self.regs[0] as u16;
                }
            },
            Call(addr) => {
                self.stack.push(self.pc);
                self.pc = addr;
            },
            SkipIfRegEqualConst { not_equal, reg, value } => {
                let mut should_jump = self.regs[reg as usize] == value;
                if not_equal {
                    should_jump = !should_jump; // Effectively computes self.regs[reg] != value
                }
                if should_jump {
                    // Skip the following instruction (2 bytes each).
                    self.pc += 4;
                    return Ok(());
                }
            },
            SkipIfRegsEqual { not_equal, regs: (v_x, v_y) } => {
                let mut should_jump = self.regs[v_x as usize] == self.regs[v_y as usize];
                if not_equal {
                    should_jump = !should_jump;
                }
                if should_jump {
                    self.pc += 4;
                    return Ok(());
                }
            },
            SetRegToConst { add, reg, value } => {
                if add {
                    // 7XNN wraps modulo 256 and never touches VF; plain
                    // `+=` would panic on overflow in debug builds.
                    self.regs[reg as usize] = self.regs[reg as usize].wrapping_add(value);
                } else {
                    self.regs[reg as usize] = value;
                }
            },
            SetRegToReg { regs: (v_x, v_y), mode } => {
                let v_x = v_x as usize;
                let v_y = v_y as usize;
                match mode {
                    SetRegMode::Copy => self.regs[v_x] = self.regs[v_y],
                    SetRegMode::Or => self.regs[v_x] |= self.regs[v_y],
                    SetRegMode::And => self.regs[v_x] &= self.regs[v_y],
                    SetRegMode::Xor => self.regs[v_x] ^= self.regs[v_y],
                    SetRegMode::Add => {
                        // 8XY4: VF is the carry flag — set AND cleared —
                        // and the result wraps modulo 256. (The old code
                        // subtracted 255 instead of 256 and never cleared
                        // VF when there was no carry.)
                        let sum = self.regs[v_x] as usize + self.regs[v_y] as usize;
                        self.regs[0xF] = (sum > 0xFF) as u8;
                        self.regs[v_x] = (sum & 0xFF) as u8;
                    },
                    SetRegMode::Subtract | SetRegMode::InverseSubtract => {
                        let (lhs, rhs) = if mode == SetRegMode::Subtract {
                            (self.regs[v_x], self.regs[v_y])
                        } else { // Must be InverseSubtract
                            (self.regs[v_y], self.regs[v_x])
                        };
                        // 8XY5/8XY7: per the CHIP-8 spec VF is the
                        // NOT-borrow flag (1 when lhs >= rhs) and the
                        // result wraps modulo 256 (the old code added 255
                        // on borrow and set VF with inverted polarity).
                        self.regs[0xF] = (lhs >= rhs) as u8;
                        self.regs[v_x] = lhs.wrapping_sub(rhs);
                    },
                    // v_y is ignored for the shift opcodes: the modern
                    // CHIP-8 convention shifts Vx in place.
                    SetRegMode::ShiftLeft => {
                        // VF receives the bit shifted out (the MSB). The
                        // old `x & 0xF0 >> 4` parsed as `x & (0xF0 >> 4)`
                        // because `>>` binds tighter than `&`.
                        self.regs[0xF] = (self.regs[v_x] & 0x80) >> 7;
                        self.regs[v_x] <<= 1;
                    },
                    SetRegMode::ShiftRight => {
                        // VF receives the bit shifted out (the LSB); the
                        // old mask 0x0F grabbed the whole low nibble.
                        self.regs[0xF] = self.regs[v_x] & 0x01;
                        self.regs[v_x] >>= 1;
                    }
                }
            },
            _ => unreachable!(),
        }
        // Advance past the two-byte opcode just executed.
        self.pc += 2;
        Ok(())
    }
}
// Debug dump of the interpreter state: pc, I register, call stack and V0-VF.
impl fmt::Debug for Chip8 {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        try!(writeln!(fmt, "Program Counter: 0x{:X}", self.pc));
        try!(writeln!(fmt, "Address Register: 0x{:X}", self.addressReg));
        try!(writeln!(fmt, "Stack: {:?}", self.stack));
        write!(fmt, "Register Contents: {:?}", self.regs)
    }
}
Remove print
use rand::thread_rng;
use std::io;
use std::fmt;
use opcode::{Opcode, OpcodeError, SetRegMode};
// Address at which loaded programs begin (the conventional CHIP-8 entry point).
const PROGRAM_START: u16 = 0x200;
// Total addressable memory in bytes.
const MEMORY_SIZE: usize = 4096;
// Number of general-purpose registers (V0 - V15).
const REGISTER_COUNT: usize = 16;
/// Errors that can occur while executing a program.
pub enum RuntimeError {
    // `Return` was executed with no caller on the stack.
    EmptyCallStack,
    // A register index outside V0 - V15 was referenced.
    InvalidRegister(u8),
    // A memory address outside the address space was referenced.
    AddressOutOfBounds(u16),
    // The two-byte opcode could not be decoded.
    OpcodeErr(OpcodeError),
}
/// Complete state of a CHIP-8 virtual machine.
pub struct Chip8 {
    memory: [u8; MEMORY_SIZE],
    regs: [u8; REGISTER_COUNT], // registers V0 - V15
    addressReg: u16, // register I
    pc: u16,
    // Stores the program counters of sub routine calls, used to return
    // after a sub routine ends.
    stack: Vec<u16>,
    // 64x32 monochrome framebuffer, indexed [row][column].
    screen: [[bool; 64]; 32],
}
impl Chip8 {
    /// Creates a machine with zeroed memory and registers, an empty call
    /// stack, a cleared screen, and the program counter at PROGRAM_START.
    pub fn new() -> Chip8 {
        Chip8 {
            // Use the named constants instead of repeating the magic numbers.
            memory: [0; MEMORY_SIZE],
            regs: [0; REGISTER_COUNT],
            addressReg: 0,
            pc: PROGRAM_START,
            stack: Vec::new(),
            screen: [[false; 64]; 32],
        }
    }

    /// Reads `program` to its end and copies the bytes into memory starting
    /// at PROGRAM_START.
    ///
    /// Note: a program larger than `MEMORY_SIZE - PROGRAM_START` bytes will
    /// panic on the out-of-bounds write rather than return an error.
    pub fn load_program<R: io::Read>(&mut self, mut program: R) -> io::Result<()> {
        let mut bytes = Vec::new();
        try!(program.read_to_end(&mut bytes));
        let start = PROGRAM_START as usize;
        for (offset, byte) in bytes.iter().enumerate() {
            self.memory[start + offset] = *byte;
        }
        Ok(())
    }

    /// Fetches, decodes, and executes a single instruction.
    pub fn cycle(&mut self) -> Result<(), RuntimeError> {
        use self::RuntimeError::*;
        let pc_index = self.pc as usize;
        // Opcodes are 16 bits, stored big-endian in memory.
        let opcode_bytes = (self.memory[pc_index] as u16) << 8 | (self.memory[pc_index + 1] as u16);
        let opcode = match Opcode::from_u16(opcode_bytes) {
            Ok(opcode) => opcode,
            Err(err) => return Err(OpcodeErr(err)),
        };
        try!(self.execute_opcode(opcode));
        Ok(())
    }

    /// Turns every pixel of the screen off.
    pub fn clear_screen(&mut self) {
        for row in self.screen.iter_mut() {
            for col in row.iter_mut() {
                *col = false;
            }
        }
    }

    // Wrapping is performed in this function, no need to perform it outside
    // NOTE(review): this function wraps the coordinates but never touches the
    // screen -- and without a `self` receiver it cannot. It looks unfinished;
    // it presumably should take `&mut self` and flip `self.screen[y][x]`.
    // Left as-is because adding a receiver would change the call syntax.
    pub fn toggle_pixel(x: usize, y: usize) {
        let x = x % 64;
        let y = y % 32;
    }

    /// Executes a single decoded opcode, updating registers, memory, and the
    /// program counter.
    pub fn execute_opcode(&mut self, opcode: Opcode) -> Result<(), RuntimeError> {
        use self::RuntimeError::*;
        use opcode::Opcode::*;
        match opcode {
            ClearScreen => self.clear_screen(),
            Return => {
                self.pc = match self.stack.pop() {
                    Some(addr) => addr,
                    None => return Err(EmptyCallStack),
                };
            },
            JumpTo { addr, plus_v0 } => {
                self.pc = addr;
                if plus_v0 {
                    self.pc += self.regs[0] as u16;
                }
            },
            Call(addr) => {
                self.stack.push(self.pc);
                self.pc = addr;
            },
            SkipIfRegEqualConst { not_equal, reg, value } => {
                let mut should_jump = self.regs[reg as usize] == value;
                if not_equal {
                    should_jump = !should_jump; // Effectively computes self.regs[reg] != value
                }
                if should_jump {
                    // Skip over the next (2-byte) instruction.
                    self.pc += 4;
                    return Ok(());
                }
            },
            SkipIfRegsEqual { not_equal, regs: (v_x, v_y) } => {
                let mut should_jump = self.regs[v_x as usize] == self.regs[v_y as usize];
                if not_equal {
                    should_jump = !should_jump;
                }
                if should_jump {
                    self.pc += 4;
                    return Ok(());
                }
            },
            SetRegToConst { add, reg, value } => {
                let reg = reg as usize;
                if add {
                    // 7XNN adds without touching the carry flag; wrapping
                    // arithmetic avoids an overflow panic in debug builds.
                    self.regs[reg] = self.regs[reg].wrapping_add(value);
                } else {
                    self.regs[reg] = value;
                }
            },
            SetRegToReg { regs: (v_x, v_y), mode } => {
                let v_x = v_x as usize;
                let v_y = v_y as usize;
                match mode {
                    SetRegMode::Copy => self.regs[v_x] = self.regs[v_y],
                    SetRegMode::Or => self.regs[v_x] |= self.regs[v_y],
                    SetRegMode::And => self.regs[v_x] &= self.regs[v_y],
                    SetRegMode::Xor => self.regs[v_x] ^= self.regs[v_y],
                    SetRegMode::Add => {
                        let sum = self.regs[v_x] as usize + self.regs[v_y] as usize;
                        // `as u8` truncates modulo 256 (the old `-= 255` gave
                        // an off-by-one wrapped result).
                        self.regs[v_x] = sum as u8;
                        // VF is the carry flag: 1 on overflow past 255, else 0.
                        // Written last so the flag survives even when v_x == 0xF.
                        self.regs[0xF] = if sum > 255 { 1 } else { 0 };
                    },
                    SetRegMode::Subtract | SetRegMode::InverseSubtract => {
                        let (lhs, rhs) = if mode == SetRegMode::Subtract {
                            (self.regs[v_x], self.regs[v_y])
                        } else { // Must be InverseSubtract
                            (self.regs[v_y], self.regs[v_x])
                        };
                        // wrapping_sub wraps modulo 256 (the old `+= 255` was
                        // off by one).
                        self.regs[v_x] = lhs.wrapping_sub(rhs);
                        // Per the CHIP-8 spec VF is the NOT-borrow flag:
                        // 1 when lhs >= rhs, 0 when a borrow occurred.
                        self.regs[0xF] = if lhs >= rhs { 1 } else { 0 };
                    },
                    // v_y is ignored for the shift opcodes, not sure why
                    SetRegMode::ShiftLeft => {
                        // VF receives the bit shifted out: the most significant
                        // bit. (The old `& 0xF0 >> 4` parsed as `& 0x0F` because
                        // `>>` binds tighter than `&`.)
                        let msb = (self.regs[v_x] & 0x80) >> 7;
                        self.regs[v_x] <<= 1;
                        self.regs[0xF] = msb;
                    },
                    SetRegMode::ShiftRight => {
                        // VF receives the bit shifted out: the least significant
                        // bit (0x01, not the old 0x0F nibble mask).
                        let lsb = self.regs[v_x] & 0x01;
                        self.regs[v_x] >>= 1;
                        self.regs[0xF] = lsb;
                    }
                }
            },
            // Remaining opcodes are expected to be handled before reaching here.
            _ => unreachable!(),
        }
        // Every instruction is two bytes; advance to the next one.
        self.pc += 2;
        Ok(())
    }
}
impl fmt::Debug for Chip8 {
    /// Dumps the interpreter state for debugging: program counter, address
    /// register, call stack, and the data registers. Memory and screen
    /// contents are not included.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        try!(writeln!(fmt, "Program Counter: 0x{:X}", self.pc));
        try!(writeln!(fmt, "Address Register: 0x{:X}", self.addressReg));
        try!(writeln!(fmt, "Stack: {:?}", self.stack));
        // `write!` (not `writeln!`) so the dump does not end with a newline.
        write!(fmt, "Register Contents: {:?}", self.regs)
    }
}
|
//! Global machine state as well as implementation of the interpreter engine
//! `Machine` trait.
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::HashMap;
use std::num::NonZeroU64;
use std::rc::Rc;
use rand::rngs::StdRng;
use rustc::mir;
use rustc::ty::{
self,
layout::{LayoutOf, Size},
Ty,
};
use rustc_ast::attr;
use rustc_span::{source_map::Span, symbol::sym};
use crate::*;
// Some global facts about the emulated machine.
pub const PAGE_SIZE: u64 = 4 * 1024; // FIXME: adjust to target architecture
pub const STACK_ADDR: u64 = 32 * PAGE_SIZE; // not really about the "stack", but where we start assigning integer addresses to allocations
pub const STACK_SIZE: u64 = 16 * PAGE_SIZE; // whatever
// The emulated machine reports a single CPU.
pub const NUM_CPUS: u64 = 1;
/// Extra data stored with each stack frame
#[derive(Debug)]
pub struct FrameData<'tcx> {
    /// Extra data for Stacked Borrows.
    /// (Identifies this function call in the Stacked Borrows machinery;
    /// allocated in `stack_push` below.)
    pub call_id: stacked_borrows::CallId,
    /// If this is Some(), then this is a special "catch unwind" frame (the frame of the closure
    /// called by `__rustc_maybe_catch_panic`). When this frame is popped during unwinding a panic,
    /// we stop unwinding, use the `CatchUnwindData` to
    /// store the panic payload, and continue execution in the parent frame.
    pub catch_panic: Option<CatchUnwindData<'tcx>>,
}
/// Extra memory kinds
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Memory for env vars and args, errno, extern statics and other parts of the machine-managed environment.
    Machine,
    /// Rust statics.
    Static,
}
/// Implement `From` rather than `Into`: the standard library's blanket impl
/// derives `Into` from it, so existing `MiriMemoryKind::…​.into()` call sites
/// keep working, and implementing `From` is the idiomatic direction.
impl From<MiriMemoryKind> for MemoryKind<MiriMemoryKind> {
    #[inline(always)]
    fn from(kind: MiriMemoryKind) -> MemoryKind<MiriMemoryKind> {
        MemoryKind::Machine(kind)
    }
}
/// Extra per-allocation data
#[derive(Debug, Clone)]
pub struct AllocExtra {
    /// Stacked Borrows state is only added if it is enabled.
    pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
}
/// Extra global memory data
#[derive(Clone, Debug)]
pub struct MemoryExtra {
    /// Global Stacked Borrows state; `None` when the feature is disabled.
    pub stacked_borrows: Option<stacked_borrows::MemoryExtra>,
    pub intptrcast: intptrcast::MemoryExtra,
    /// Mapping extern static names to their canonical allocation.
    pub(crate) extern_statics: HashMap<&'static str, AllocId>,
    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by ptr_to_int, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,
}
impl MemoryExtra {
    /// Creates the global memory state. Stacked Borrows bookkeeping is only
    /// allocated when `stacked_borrows` is enabled.
    pub fn new(rng: StdRng, stacked_borrows: bool, tracked_pointer_tag: Option<PtrId>) -> Self {
        let stacked_borrows = if stacked_borrows {
            Some(Rc::new(RefCell::new(stacked_borrows::GlobalState::new(tracked_pointer_tag))))
        } else {
            None
        };
        MemoryExtra {
            stacked_borrows,
            intptrcast: Default::default(),
            extern_statics: HashMap::default(),
            rng: RefCell::new(rng),
        }
    }
    /// Sets up the "extern statics" for this machine.
    pub fn init_extern_statics<'mir, 'tcx>(
        this: &mut MiriEvalContext<'mir, 'tcx>,
    ) -> InterpResult<'tcx> {
        match this.tcx.sess.target.target.target_os.as_str() {
            "linux" => {
                // "__cxa_thread_atexit_impl"
                // This should be all-zero, pointer-sized.
                let layout = this.layout_of(this.tcx.types.usize)?;
                let place = this.allocate(layout, MiriMemoryKind::Machine.into());
                this.write_scalar(Scalar::from_machine_usize(0, &*this.tcx), place.into())?;
                this.memory
                    .extra
                    .extern_statics
                    .insert("__cxa_thread_atexit_impl", place.ptr.assert_ptr().alloc_id)
                    // Inserting the same name twice would be a bug; assert `None`.
                    .unwrap_none();
            }
            _ => {} // No "extern statics" supported on this platform
        }
        Ok(())
    }
}
/// The machine itself.
pub struct Evaluator<'tcx> {
    /// Environment variables set by `setenv`.
    /// Miri does not expose env vars from the host to the emulated program.
    pub(crate) env_vars: EnvVars,
    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because macOS.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<Scalar<Tag>>,
    pub(crate) argv: Option<Scalar<Tag>>,
    pub(crate) cmd_line: Option<Scalar<Tag>>,
    /// Last OS error location in memory. It is a 32-bit integer.
    pub(crate) last_error: Option<MPlaceTy<'tcx, Tag>>,
    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,
    /// If enabled, the `env_vars` field is populated with the host env vars during initialization
    /// and random number generation is delegated to the host.
    pub(crate) communicate: bool,
    /// Whether to enforce the validity invariant.
    pub(crate) validate: bool,
    pub(crate) file_handler: FileHandler,
    pub(crate) dir_handler: DirHandler,
    /// The temporary used for storing the argument of
    /// the call to `miri_start_panic` (the panic payload) when unwinding.
    pub(crate) panic_payload: Option<ImmTy<'tcx, Tag>>,
}
impl<'tcx> Evaluator<'tcx> {
    /// Creates a machine with everything empty/unset; fields that need the
    /// memory or the eval context (argc/argv, env vars, …) are filled in later.
    pub(crate) fn new(communicate: bool, validate: bool) -> Self {
        Evaluator {
            // `env_vars` could be initialized properly here if `Memory` were available before
            // calling this method.
            env_vars: EnvVars::default(),
            argc: None,
            argv: None,
            cmd_line: None,
            last_error: None,
            tls: TlsData::default(),
            communicate,
            validate,
            file_handler: Default::default(),
            dir_handler: Default::default(),
            panic_payload: None,
        }
    }
}
/// A rustc InterpCx for Miri.
pub type MiriEvalContext<'mir, 'tcx> = InterpCx<'mir, 'tcx, Evaluator<'tcx>>;
/// A little trait that's useful to be inherited by extension traits.
pub trait MiriEvalContextExt<'mir, 'tcx> {
    fn eval_context_ref<'a>(&'a self) -> &'a MiriEvalContext<'mir, 'tcx>;
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriEvalContext<'mir, 'tcx>;
}
impl<'mir, 'tcx> MiriEvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {
    // Both accessors are identity functions: an eval context is trivially
    // viewable as itself.
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriEvalContext<'mir, 'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriEvalContext<'mir, 'tcx> {
        self
    }
}
/// Machine hook implementations.
impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
    type MemoryKinds = MiriMemoryKind;
    type FrameExtra = FrameData<'tcx>;
    type MemoryExtra = MemoryExtra;
    type AllocExtra = AllocExtra;
    type PointerTag = Tag;
    type ExtraFnVal = Dlsym;
    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation<Tag, Self::AllocExtra>)>;
    // Statics get their own memory kind so they can be told apart from heap memory.
    const STATIC_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Static);
    const CHECK_ALIGN: bool = true;
    // Validity enforcement is a per-machine toggle (see `Evaluator::validate`).
    #[inline(always)]
    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
        ecx.machine.validate
    }
    // The next few hooks simply forward to Miri's extension-trait
    // implementations on the eval context.
    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        _span: Span,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
        ecx.find_mir_or_eval_fn(instance, args, ret, unwind)
    }
    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        fn_val: Dlsym,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        _unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        ecx.call_dlsym(fn_val, args, ret)
    }
    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut rustc_mir::interpret::InterpCx<'mir, 'tcx, Self>,
        span: Span,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        ecx.call_intrinsic(span, instance, args, ret, unwind)
    }
    #[inline(always)]
    fn assert_panic(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        span: Span,
        msg: &mir::AssertMessage<'tcx>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(span, msg, unwind)
    }
    #[inline(always)]
    fn binary_ptr_op(
        ecx: &rustc_mir::interpret::InterpCx<'mir, 'tcx, Self>,
        bin_op: mir::BinOp,
        left: ImmTy<'tcx, Tag>,
        right: ImmTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, (Scalar<Tag>, bool, Ty<'tcx>)> {
        ecx.binary_ptr_op(bin_op, left, right)
    }
    // Implements `box` allocation by calling the `exchange_malloc` lang item
    // with the size and alignment of the pointee type.
    fn box_alloc(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        dest: PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        trace!("box_alloc for {:?}", dest.layout.ty);
        let layout = ecx.layout_of(dest.layout.ty.builtin_deref(false).unwrap().ty)?;
        // First argument: `size`.
        // (`0` is allowed here -- this is expected to be handled by the lang item).
        let size = Scalar::from_uint(layout.size.bytes(), ecx.pointer_size());
        // Second argument: `align`.
        let align = Scalar::from_uint(layout.align.abi.bytes(), ecx.pointer_size());
        // Call the `exchange_malloc` lang item.
        let malloc = ecx.tcx.lang_items().exchange_malloc_fn().unwrap();
        let malloc = ty::Instance::mono(ecx.tcx.tcx, malloc);
        ecx.call_function(
            malloc,
            &[size.into(), align.into()],
            Some(dest),
            // Don't do anything when we are done. The `statement()` function will increment
            // the old stack frame's stmt counter to the next statement, which means that when
            // `exchange_malloc` returns, we go on evaluating exactly where we want to be.
            StackPopCleanup::None { cleanup: true },
        )?;
        Ok(())
    }
    // Redirects accesses to "extern statics" to the one canonical allocation
    // registered in `MemoryExtra::extern_statics`, looked up by link name.
    fn canonical_alloc_id(mem: &Memory<'mir, 'tcx, Self>, id: AllocId) -> AllocId {
        let tcx = mem.tcx;
        // Figure out if this is an extern static, and if yes, which one.
        let def_id = match tcx.alloc_map.lock().get(id) {
            Some(GlobalAlloc::Static(def_id)) if tcx.is_foreign_item(def_id) => def_id,
            _ => {
                // No need to canonicalize anything.
                return id;
            }
        };
        let attrs = tcx.get_attrs(def_id);
        // A `#[link_name]` attribute overrides the item name as the symbol name.
        let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) {
            Some(name) => name.as_str(),
            None => tcx.item_name(def_id).as_str(),
        };
        // Check if we know this one.
        if let Some(canonical_id) = mem.extra.extern_statics.get(&*link_name) {
            trace!("canonical_alloc_id: {:?} ({}) -> {:?}", id, link_name, canonical_id);
            *canonical_id
        } else {
            // Return original id; `Memory::get_static_alloc` will throw an error.
            id
        }
    }
    // Equips a fresh allocation with our extra data: Stacked Borrows stacks
    // (when enabled) and the allocation's base tag.
    fn init_allocation_extra<'b>(
        memory_extra: &MemoryExtra,
        id: AllocId,
        alloc: Cow<'b, Allocation>,
        kind: Option<MemoryKind<Self::MemoryKinds>>,
    ) -> (Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>, Self::PointerTag) {
        let kind = kind.expect("we set our STATIC_KIND so this cannot be None");
        let alloc = alloc.into_owned();
        let (stacks, base_tag) =
            if let Some(stacked_borrows) = memory_extra.stacked_borrows.as_ref() {
                let (stacks, base_tag) =
                    Stacks::new_allocation(id, alloc.size, Rc::clone(stacked_borrows), kind);
                (Some(stacks), base_tag)
            } else {
                // No stacks, no tag.
                (None, Tag::Untagged)
            };
        let mut stacked_borrows = memory_extra.stacked_borrows.as_ref().map(|sb| sb.borrow_mut());
        // Retag any pointers the allocation already contains.
        let alloc: Allocation<Tag, Self::AllocExtra> = alloc.with_tags_and_extra(
            |alloc| {
                if let Some(stacked_borrows) = stacked_borrows.as_mut() {
                    // Only statics may already contain pointers at this point
                    assert_eq!(kind, MiriMemoryKind::Static.into());
                    stacked_borrows.static_base_ptr(alloc)
                } else {
                    Tag::Untagged
                }
            },
            AllocExtra { stacked_borrows: stacks },
        );
        (Cow::Owned(alloc), base_tag)
    }
    // Tags a pointer to a static; without Stacked Borrows pointers stay untagged.
    #[inline(always)]
    fn tag_static_base_pointer(memory_extra: &MemoryExtra, id: AllocId) -> Self::PointerTag {
        if let Some(stacked_borrows) = memory_extra.stacked_borrows.as_ref() {
            stacked_borrows.borrow_mut().static_base_ptr(id)
        } else {
            Tag::Untagged
        }
    }
    #[inline(always)]
    fn retag(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        kind: mir::RetagKind,
        place: PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        if ecx.memory.extra.stacked_borrows.is_none() {
            // No tracking.
            Ok(())
        } else {
            ecx.retag(kind, place)
        }
    }
    // Allocates the per-frame data; a dummy call id of 1 is used when Stacked
    // Borrows is disabled.
    #[inline(always)]
    fn stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx, FrameData<'tcx>> {
        let stacked_borrows = ecx.memory.extra.stacked_borrows.as_ref();
        let call_id = stacked_borrows.map_or(NonZeroU64::new(1).unwrap(), |stacked_borrows| {
            stacked_borrows.borrow_mut().new_call()
        });
        Ok(FrameData { call_id, catch_panic: None })
    }
    #[inline(always)]
    fn stack_pop(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        extra: FrameData<'tcx>,
        unwinding: bool,
    ) -> InterpResult<'tcx, StackPopInfo> {
        ecx.handle_stack_pop(extra, unwinding)
    }
    // Pointer <-> integer casts are delegated to the `intptrcast` module.
    #[inline(always)]
    fn int_to_ptr(
        memory: &Memory<'mir, 'tcx, Self>,
        int: u64,
    ) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
        intptrcast::GlobalState::int_to_ptr(int, memory)
    }
    #[inline(always)]
    fn ptr_to_int(
        memory: &Memory<'mir, 'tcx, Self>,
        ptr: Pointer<Self::PointerTag>,
    ) -> InterpResult<'tcx, u64> {
        intptrcast::GlobalState::ptr_to_int(ptr, memory)
    }
}
impl AllocationExtra<Tag> for AllocExtra {
    /// Forwards read accesses to Stacked Borrows when it is enabled;
    /// otherwise every read is permitted.
    #[inline(always)]
    fn memory_read<'tcx>(
        alloc: &Allocation<Tag, AllocExtra>,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        alloc.extra.stacked_borrows.as_ref().map_or(Ok(()), |sb| sb.memory_read(ptr, size))
    }
    /// Forwards write accesses to Stacked Borrows when it is enabled;
    /// otherwise every write is permitted.
    #[inline(always)]
    fn memory_written<'tcx>(
        alloc: &mut Allocation<Tag, AllocExtra>,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        alloc.extra.stacked_borrows.as_mut().map_or(Ok(()), |sb| sb.memory_written(ptr, size))
    }
    /// Forwards deallocation to Stacked Borrows when it is enabled;
    /// otherwise deallocation is always allowed.
    #[inline(always)]
    fn memory_deallocated<'tcx>(
        alloc: &mut Allocation<Tag, AllocExtra>,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        alloc.extra.stacked_borrows.as_mut().map_or(Ok(()), |sb| sb.memory_deallocated(ptr, size))
    }
}
impl MayLeak for MiriMemoryKind {
    /// Machine-managed memory and statics may outlive the program without
    /// being reported; the heap-style kinds must be freed by the program.
    #[inline(always)]
    fn may_leak(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            Machine | Static => true,
            Rust | C | WinHeap => false,
        }
    }
}
Switch the extern_statics map keys from &'static str to interned Symbols
//! Global machine state as well as implementation of the interpreter engine
//! `Machine` trait.
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::HashMap;
use std::num::NonZeroU64;
use std::rc::Rc;
use rand::rngs::StdRng;
use rustc::mir;
use rustc::ty::{
self,
layout::{LayoutOf, Size},
Ty,
};
use rustc_ast::attr;
use rustc_span::{source_map::Span, symbol::{sym, Symbol}};
use crate::*;
// Some global facts about the emulated machine.
pub const PAGE_SIZE: u64 = 4 * 1024; // FIXME: adjust to target architecture
pub const STACK_ADDR: u64 = 32 * PAGE_SIZE; // not really about the "stack", but where we start assigning integer addresses to allocations
pub const STACK_SIZE: u64 = 16 * PAGE_SIZE; // whatever
// The emulated machine reports a single CPU.
pub const NUM_CPUS: u64 = 1;
/// Extra data stored with each stack frame
#[derive(Debug)]
pub struct FrameData<'tcx> {
    /// Extra data for Stacked Borrows.
    /// (Identifies this function call in the Stacked Borrows machinery;
    /// allocated in `stack_push` below.)
    pub call_id: stacked_borrows::CallId,
    /// If this is Some(), then this is a special "catch unwind" frame (the frame of the closure
    /// called by `__rustc_maybe_catch_panic`). When this frame is popped during unwinding a panic,
    /// we stop unwinding, use the `CatchUnwindData` to
    /// store the panic payload, and continue execution in the parent frame.
    pub catch_panic: Option<CatchUnwindData<'tcx>>,
}
/// Extra memory kinds
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Memory for env vars and args, errno, extern statics and other parts of the machine-managed environment.
    Machine,
    /// Rust statics.
    Static,
}
/// Implement `From` rather than `Into`: the standard library's blanket impl
/// derives `Into` from it, so existing `MiriMemoryKind::…​.into()` call sites
/// keep working, and implementing `From` is the idiomatic direction.
impl From<MiriMemoryKind> for MemoryKind<MiriMemoryKind> {
    #[inline(always)]
    fn from(kind: MiriMemoryKind) -> MemoryKind<MiriMemoryKind> {
        MemoryKind::Machine(kind)
    }
}
/// Extra per-allocation data
#[derive(Debug, Clone)]
pub struct AllocExtra {
    /// Stacked Borrows state is only added if it is enabled.
    pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
}
/// Extra global memory data
#[derive(Clone, Debug)]
pub struct MemoryExtra {
    /// Global Stacked Borrows state; `None` when the feature is disabled.
    pub stacked_borrows: Option<stacked_borrows::MemoryExtra>,
    pub intptrcast: intptrcast::MemoryExtra,
    /// Mapping extern static names to their canonical allocation.
    /// (Keys are interned `Symbol`s, avoiding string comparisons on lookup.)
    pub(crate) extern_statics: HashMap<Symbol, AllocId>,
    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by ptr_to_int, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,
}
impl MemoryExtra {
    /// Creates the global memory state. Stacked Borrows bookkeeping is only
    /// allocated when `stacked_borrows` is enabled.
    pub fn new(rng: StdRng, stacked_borrows: bool, tracked_pointer_tag: Option<PtrId>) -> Self {
        let stacked_borrows = if stacked_borrows {
            Some(Rc::new(RefCell::new(stacked_borrows::GlobalState::new(tracked_pointer_tag))))
        } else {
            None
        };
        MemoryExtra {
            stacked_borrows,
            intptrcast: Default::default(),
            extern_statics: HashMap::default(),
            rng: RefCell::new(rng),
        }
    }
    /// Sets up the "extern statics" for this machine.
    pub fn init_extern_statics<'mir, 'tcx>(
        this: &mut MiriEvalContext<'mir, 'tcx>,
    ) -> InterpResult<'tcx> {
        match this.tcx.sess.target.target.target_os.as_str() {
            "linux" => {
                // "__cxa_thread_atexit_impl"
                // This should be all-zero, pointer-sized.
                let layout = this.layout_of(this.tcx.types.usize)?;
                let place = this.allocate(layout, MiriMemoryKind::Machine.into());
                this.write_scalar(Scalar::from_machine_usize(0, &*this.tcx), place.into())?;
                this.memory
                    .extra
                    .extern_statics
                    // Intern the name so lookups can compare `Symbol`s.
                    .insert(Symbol::intern("__cxa_thread_atexit_impl"), place.ptr.assert_ptr().alloc_id)
                    // Inserting the same name twice would be a bug; assert `None`.
                    .unwrap_none();
            }
            _ => {} // No "extern statics" supported on this platform
        }
        Ok(())
    }
}
/// The machine itself.
pub struct Evaluator<'tcx> {
    /// Environment variables set by `setenv`.
    /// Miri does not expose env vars from the host to the emulated program.
    pub(crate) env_vars: EnvVars,
    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because macOS.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<Scalar<Tag>>,
    pub(crate) argv: Option<Scalar<Tag>>,
    pub(crate) cmd_line: Option<Scalar<Tag>>,
    /// Last OS error location in memory. It is a 32-bit integer.
    pub(crate) last_error: Option<MPlaceTy<'tcx, Tag>>,
    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,
    /// If enabled, the `env_vars` field is populated with the host env vars during initialization
    /// and random number generation is delegated to the host.
    pub(crate) communicate: bool,
    /// Whether to enforce the validity invariant.
    pub(crate) validate: bool,
    pub(crate) file_handler: FileHandler,
    pub(crate) dir_handler: DirHandler,
    /// The temporary used for storing the argument of
    /// the call to `miri_start_panic` (the panic payload) when unwinding.
    pub(crate) panic_payload: Option<ImmTy<'tcx, Tag>>,
}
impl<'tcx> Evaluator<'tcx> {
    /// Creates a machine with everything empty/unset; fields that need the
    /// memory or the eval context (argc/argv, env vars, …) are filled in later.
    pub(crate) fn new(communicate: bool, validate: bool) -> Self {
        Evaluator {
            // `env_vars` could be initialized properly here if `Memory` were available before
            // calling this method.
            env_vars: EnvVars::default(),
            argc: None,
            argv: None,
            cmd_line: None,
            last_error: None,
            tls: TlsData::default(),
            communicate,
            validate,
            file_handler: Default::default(),
            dir_handler: Default::default(),
            panic_payload: None,
        }
    }
}
/// A rustc InterpCx for Miri.
pub type MiriEvalContext<'mir, 'tcx> = InterpCx<'mir, 'tcx, Evaluator<'tcx>>;
/// A little trait that's useful to be inherited by extension traits.
pub trait MiriEvalContextExt<'mir, 'tcx> {
    fn eval_context_ref<'a>(&'a self) -> &'a MiriEvalContext<'mir, 'tcx>;
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriEvalContext<'mir, 'tcx>;
}
impl<'mir, 'tcx> MiriEvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {
    // Both accessors are identity functions: an eval context is trivially
    // viewable as itself.
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriEvalContext<'mir, 'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriEvalContext<'mir, 'tcx> {
        self
    }
}
/// Machine hook implementations.
impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
    type MemoryKinds = MiriMemoryKind;
    type FrameExtra = FrameData<'tcx>;
    type MemoryExtra = MemoryExtra;
    type AllocExtra = AllocExtra;
    type PointerTag = Tag;
    type ExtraFnVal = Dlsym;
    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation<Tag, Self::AllocExtra>)>;
    // Statics get their own memory kind so they can be told apart from heap memory.
    const STATIC_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Static);
    const CHECK_ALIGN: bool = true;
    // Validity enforcement is a per-machine toggle (see `Evaluator::validate`).
    #[inline(always)]
    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
        ecx.machine.validate
    }
    // The next few hooks simply forward to Miri's extension-trait
    // implementations on the eval context.
    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        _span: Span,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
        ecx.find_mir_or_eval_fn(instance, args, ret, unwind)
    }
    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        fn_val: Dlsym,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        _unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        ecx.call_dlsym(fn_val, args, ret)
    }
    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut rustc_mir::interpret::InterpCx<'mir, 'tcx, Self>,
        span: Span,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        ecx.call_intrinsic(span, instance, args, ret, unwind)
    }
    #[inline(always)]
    fn assert_panic(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        span: Span,
        msg: &mir::AssertMessage<'tcx>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(span, msg, unwind)
    }
    #[inline(always)]
    fn binary_ptr_op(
        ecx: &rustc_mir::interpret::InterpCx<'mir, 'tcx, Self>,
        bin_op: mir::BinOp,
        left: ImmTy<'tcx, Tag>,
        right: ImmTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, (Scalar<Tag>, bool, Ty<'tcx>)> {
        ecx.binary_ptr_op(bin_op, left, right)
    }
    // Implements `box` allocation by calling the `exchange_malloc` lang item
    // with the size and alignment of the pointee type.
    fn box_alloc(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        dest: PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        trace!("box_alloc for {:?}", dest.layout.ty);
        let layout = ecx.layout_of(dest.layout.ty.builtin_deref(false).unwrap().ty)?;
        // First argument: `size`.
        // (`0` is allowed here -- this is expected to be handled by the lang item).
        let size = Scalar::from_uint(layout.size.bytes(), ecx.pointer_size());
        // Second argument: `align`.
        let align = Scalar::from_uint(layout.align.abi.bytes(), ecx.pointer_size());
        // Call the `exchange_malloc` lang item.
        let malloc = ecx.tcx.lang_items().exchange_malloc_fn().unwrap();
        let malloc = ty::Instance::mono(ecx.tcx.tcx, malloc);
        ecx.call_function(
            malloc,
            &[size.into(), align.into()],
            Some(dest),
            // Don't do anything when we are done. The `statement()` function will increment
            // the old stack frame's stmt counter to the next statement, which means that when
            // `exchange_malloc` returns, we go on evaluating exactly where we want to be.
            StackPopCleanup::None { cleanup: true },
        )?;
        Ok(())
    }
    // Redirects accesses to "extern statics" to the one canonical allocation
    // registered in `MemoryExtra::extern_statics`, looked up by link name.
    fn canonical_alloc_id(mem: &Memory<'mir, 'tcx, Self>, id: AllocId) -> AllocId {
        let tcx = mem.tcx;
        // Figure out if this is an extern static, and if yes, which one.
        let def_id = match tcx.alloc_map.lock().get(id) {
            Some(GlobalAlloc::Static(def_id)) if tcx.is_foreign_item(def_id) => def_id,
            _ => {
                // No need to canonicalize anything.
                return id;
            }
        };
        let attrs = tcx.get_attrs(def_id);
        // A `#[link_name]` attribute overrides the item name as the symbol name;
        // both are already interned `Symbol`s here.
        let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) {
            Some(name) => name,
            None => tcx.item_name(def_id),
        };
        // Check if we know this one.
        if let Some(canonical_id) = mem.extra.extern_statics.get(&link_name) {
            trace!("canonical_alloc_id: {:?} ({}) -> {:?}", id, link_name, canonical_id);
            *canonical_id
        } else {
            // Return original id; `Memory::get_static_alloc` will throw an error.
            id
        }
    }
    // Equips a fresh allocation with our extra data: Stacked Borrows stacks
    // (when enabled) and the allocation's base tag.
    fn init_allocation_extra<'b>(
        memory_extra: &MemoryExtra,
        id: AllocId,
        alloc: Cow<'b, Allocation>,
        kind: Option<MemoryKind<Self::MemoryKinds>>,
    ) -> (Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>, Self::PointerTag) {
        let kind = kind.expect("we set our STATIC_KIND so this cannot be None");
        let alloc = alloc.into_owned();
        let (stacks, base_tag) =
            if let Some(stacked_borrows) = memory_extra.stacked_borrows.as_ref() {
                let (stacks, base_tag) =
                    Stacks::new_allocation(id, alloc.size, Rc::clone(stacked_borrows), kind);
                (Some(stacks), base_tag)
            } else {
                // No stacks, no tag.
                (None, Tag::Untagged)
            };
        let mut stacked_borrows = memory_extra.stacked_borrows.as_ref().map(|sb| sb.borrow_mut());
        // Retag any pointers the allocation already contains.
        let alloc: Allocation<Tag, Self::AllocExtra> = alloc.with_tags_and_extra(
            |alloc| {
                if let Some(stacked_borrows) = stacked_borrows.as_mut() {
                    // Only statics may already contain pointers at this point
                    assert_eq!(kind, MiriMemoryKind::Static.into());
                    stacked_borrows.static_base_ptr(alloc)
                } else {
                    Tag::Untagged
                }
            },
            AllocExtra { stacked_borrows: stacks },
        );
        (Cow::Owned(alloc), base_tag)
    }
    // Tags a pointer to a static; without Stacked Borrows pointers stay untagged.
    #[inline(always)]
    fn tag_static_base_pointer(memory_extra: &MemoryExtra, id: AllocId) -> Self::PointerTag {
        if let Some(stacked_borrows) = memory_extra.stacked_borrows.as_ref() {
            stacked_borrows.borrow_mut().static_base_ptr(id)
        } else {
            Tag::Untagged
        }
    }
    #[inline(always)]
    fn retag(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        kind: mir::RetagKind,
        place: PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        if ecx.memory.extra.stacked_borrows.is_none() {
            // No tracking.
            Ok(())
        } else {
            ecx.retag(kind, place)
        }
    }
    // Allocates the per-frame data; a dummy call id of 1 is used when Stacked
    // Borrows is disabled.
    #[inline(always)]
    fn stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx, FrameData<'tcx>> {
        let stacked_borrows = ecx.memory.extra.stacked_borrows.as_ref();
        let call_id = stacked_borrows.map_or(NonZeroU64::new(1).unwrap(), |stacked_borrows| {
            stacked_borrows.borrow_mut().new_call()
        });
        Ok(FrameData { call_id, catch_panic: None })
    }
    #[inline(always)]
    fn stack_pop(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        extra: FrameData<'tcx>,
        unwinding: bool,
    ) -> InterpResult<'tcx, StackPopInfo> {
        ecx.handle_stack_pop(extra, unwinding)
    }
    // Pointer <-> integer casts are delegated to the `intptrcast` module.
    #[inline(always)]
    fn int_to_ptr(
        memory: &Memory<'mir, 'tcx, Self>,
        int: u64,
    ) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
        intptrcast::GlobalState::int_to_ptr(int, memory)
    }
    #[inline(always)]
    fn ptr_to_int(
        memory: &Memory<'mir, 'tcx, Self>,
        ptr: Pointer<Self::PointerTag>,
    ) -> InterpResult<'tcx, u64> {
        intptrcast::GlobalState::ptr_to_int(ptr, memory)
    }
}
impl AllocationExtra<Tag> for AllocExtra {
    /// Forwards read accesses to Stacked Borrows when it is enabled;
    /// otherwise every read is permitted.
    #[inline(always)]
    fn memory_read<'tcx>(
        alloc: &Allocation<Tag, AllocExtra>,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        alloc.extra.stacked_borrows.as_ref().map_or(Ok(()), |sb| sb.memory_read(ptr, size))
    }
    /// Forwards write accesses to Stacked Borrows when it is enabled;
    /// otherwise every write is permitted.
    #[inline(always)]
    fn memory_written<'tcx>(
        alloc: &mut Allocation<Tag, AllocExtra>,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        alloc.extra.stacked_borrows.as_mut().map_or(Ok(()), |sb| sb.memory_written(ptr, size))
    }
    /// Forwards deallocation to Stacked Borrows when it is enabled;
    /// otherwise deallocation is always allowed.
    #[inline(always)]
    fn memory_deallocated<'tcx>(
        alloc: &mut Allocation<Tag, AllocExtra>,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        alloc.extra.stacked_borrows.as_mut().map_or(Ok(()), |sb| sb.memory_deallocated(ptr, size))
    }
}
impl MayLeak for MiriMemoryKind {
    /// Machine-managed memory and statics may outlive the program without
    /// being reported; the heap-style kinds must be freed by the program.
    #[inline(always)]
    fn may_leak(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            Machine | Static => true,
            Rust | C | WinHeap => false,
        }
    }
}
|
// Copyright (c) 2016 Fedor Gogolev <knsd@knsd.net>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::io::{Read, BufRead, Error as IoError, ErrorKind};
use std::string::{FromUtf8Error};
use std::collections::{HashMap};
use std::cell::{RefCell};
use std::rc::{Rc};
use num::{Zero};
use num::bigint::{BigInt, ToBigInt, Sign};
use byteorder::{ReadBytesExt, LittleEndian, BigEndian, Error as ByteorderError};
use from_ascii::{FromAscii, ParseIntError, ParseFloatError};
use string::{unescape, Error as UnescapeError};
use value::{Value};
use opcodes::*;
// All failure modes of the unpickling machine. The enum, its Display/Error
// impls, and the `From` conversions are generated by the `quick_error` macro.
// (Plain `//` comments are stripped before macro expansion, so they are safe
// to interleave with the variants.)
quick_error! {
    #[derive(Debug)]
    pub enum Error {
        // A mark-dependent operation executed while no mark was set.
        EmptyMarker
        // The mark points beyond the current stack top.
        StackTooSmall
        // A value was required but the stack was empty.
        EmptyStack
        InvalidValueOnStack
        InvalidGetValue
        InvalidPutValue
        // Low-level read failure reported by `byteorder`.
        Read(err: ByteorderError) {
            from()
        }
        Io(err: IoError) {
            from()
        }
        // An opcode byte this machine does not recognize.
        UnknownOpcode(opcode: u8) {}
        InvalidInt {
            from(ParseIntError)
        }
        InvalidLong
        InvalidFloat {
            from(ParseFloatError)
        }
        InvalidString
        // The bytes were not valid UTF-8.
        UnicodeError {
            from(FromUtf8Error)
        }
        UnescapeError(err: UnescapeError) {
            from()
        }
        // A protocol version byte this machine does not support.
        InvalidProto(proto: u8)
        NegativeLength {}
        #[doc(hidden)]
        __Nonexhaustive
    }
}
/// The pickle INT opcode can encode booleans (as the ASCII strings "00"/"01")
/// as well as ordinary integers; this type distinguishes the two cases.
#[derive(Debug, PartialEq)]
pub enum BooleanOrInt {
    Boolean(bool),
    Int(i64),
}
// Shorthand for the pervasive `Rc<RefCell<...>>` wrapping of values.
macro_rules! rc {
    ($term: expr) => (Rc::new(RefCell::new($term)))
}
/// Fills `buf` completely from `rd`, retrying on `Interrupted`.
/// Fails with an `Other` error if EOF is reached before `buf` is full.
fn read_exact<R>(rd: &mut R, mut buf: &mut [u8]) -> Result<(), IoError> where R: Read {
    while !buf.is_empty() {
        let filled = match rd.read(buf) {
            // EOF while bytes are still missing: the buffer cannot be filled.
            Ok(0) => return Err(IoError::new(ErrorKind::Other,
                                             "failed to fill whole buffer")),
            Ok(n) => n,
            // Interrupted reads are transient; try again without advancing.
            Err(ref e) if e.kind() == ErrorKind::Interrupted => 0,
            Err(e) => return Err(e),
        };
        // Advance past the bytes that were just read.
        let rest = buf;
        buf = &mut rest[filled..];
    }
    Ok(())
}
fn read_until_newline<R>(rd: &mut R) -> Result<Vec<u8>, Error> where R: Read + BufRead {
let mut buf = Vec::new();
try!(rd.read_until('\n' as u8, &mut buf));
// Skip last symbol — \n
match buf.split_last() {
Some((&b'\n', init)) => Ok(init.to_vec()),
_ => Err(Error::InvalidString),
}
}
/// Parses the newline-terminated payload of the text INT opcode.
///
/// "00" and "01" are pickle's text encoding of False/True; any other
/// payload must parse as a decimal `i64`.
fn read_decimal_int<R>(rd: &mut R) -> Result<BooleanOrInt, Error> where R: Read + BufRead {
    let s = try!(read_until_newline(rd));
    let val = match &s[..] {
        b"00" => BooleanOrInt::Boolean(false),
        b"01" => BooleanOrInt::Boolean(true),
        _ => BooleanOrInt::Int(try!(i64::from_ascii(&s)))
    };
    Ok(val)
}
/// Parses the newline-terminated payload of the text LONG opcode as a
/// decimal bignum, tolerating Python 2's optional trailing 'L' suffix.
/// An empty payload is `InvalidLong`.
fn read_decimal_long<R>(rd: &mut R) -> Result<BigInt, Error> where R: Read + BufRead {
    let s = try!(read_until_newline(rd));
    // Strip the 'L' suffix if present; keep the full slice otherwise.
    let init = match s.split_last() {
        None => return Err(Error::InvalidLong),
        Some((&b'L', init)) => init,
        Some(_) => &s[..],
    };
    match BigInt::parse_bytes(&init, 10) {
        Some(i) => Ok(i),
        None => Err(Error::InvalidLong)
    }
}
/// Reads a `length`-byte little-endian two's-complement integer (the
/// LONG1/LONG4 payload format) into a `BigInt`.  A zero-byte payload is
/// rejected as `InvalidLong`.
fn read_long<R>(rd: &mut R, length: usize) -> Result<BigInt, Error> where R: Read + BufRead {
    let mut buf = vec![0; length];
    try!(read_exact(rd, buf.as_mut()));
    let mut n = BigInt::from_bytes_le(Sign::Plus, &buf);
    let last = match buf.last_mut() {
        None => return Err(Error::InvalidLong),
        Some(last) => last,
    };
    // Sign bit set in the most significant byte: the value is negative in
    // two's complement, so subtract 2^(8*length) to recover it.
    if *last > 127 {
        n = n - (1.to_bigint().unwrap() << (length * 8)) // TODO: remove unwrap()
    }
    Ok(n)
}
/// Parses the payload of the text STRING opcode: a newline-terminated,
/// quote-delimited, backslash-escaped byte string.
fn read_bracketed_string<R>(rd: &mut R) -> Result<Vec<u8>, Error> where R: Read + BufRead {
    let s = try!(read_until_newline(rd));
    // Skip the first and last symbols — the surrounding quote characters.
    // Anything shorter than two bytes cannot even hold the quotes.
    if s.len() < 2 {
        return Err(Error::InvalidString)
    }
    Ok(try!(unescape(&s[1..s.len() - 1], false)))
}
/// A stack machine that interprets pickle opcodes one at a time.
pub struct Machine {
    // Work stack the opcodes push values on and pop values off.
    stack: Vec<Value>,
    // Memoization table backing the GET/PUT opcode families.
    memo: HashMap<usize, Value>,
    // Stack position recorded by the most recent MARK opcode.
    // NOTE(review): a single slot — a nested MARK overwrites the outer
    // one; confirm nested collections are not expected here.
    marker: Option<usize>,
}
impl Machine {
    /// Creates a machine with an empty stack, an empty memo table and no
    /// mark recorded.
    pub fn new() -> Self {
        Machine {
            stack: Vec::new(),
            memo: HashMap::new(),
            marker: None,
        }
    }

    /// Removes and returns every value pushed since the last MARK opcode.
    fn split_off(&mut self) -> Result<Vec<Value>, Error> {
        let at = match self.marker {
            None => return Err(Error::EmptyMarker),
            Some(marker) => marker,
        };
        if at > self.stack.len() {
            return Err(Error::StackTooSmall);
        }
        Ok(self.stack.split_off(at))
    }

    /// Pops the value on top of the stack.
    fn pop(&mut self) -> Result<Value, Error> {
        match self.stack.pop() {
            None => Err(Error::EmptyStack),
            Some(value) => Ok(value),
        }
    }

    /// Pushes a clone of memo slot `i` onto the stack (GET family).
    fn handle_get(&mut self, i: usize) -> Result<(), Error> {
        let value = match self.memo.get(&i) {
            None => return Err(Error::InvalidGetValue),
            Some(ref v) => (*v).clone(),
        };
        self.stack.push(value);
        Ok(())
    }

    /// Stores a clone of the stack top into memo slot `i` (PUT family).
    /// The value itself stays on the stack.
    fn handle_put(&mut self, i: usize) -> Result<(), Error> {
        let value = match self.stack.last() {
            None => return Err(Error::EmptyStack),
            Some(ref v) => (*v).clone(),
        };
        self.memo.insert(i, value);
        Ok(())
    }

    /// Reads and executes one opcode from `rd`.
    ///
    /// Returns `Ok(true)` when the STOP opcode is reached and `Ok(false)`
    /// otherwise, so callers drive the machine in a loop.
    pub fn execute<R>(&mut self, rd: &mut R) -> Result<bool, Error> where R: Read + BufRead {
        // Lengths and memo indices arrive as signed integers; reject
        // negatives before they are cast to usize, where they would wrap
        // into huge allocation sizes.
        macro_rules! ensure_not_negative {
            ($n: expr) => ({
                if $n < Zero::zero() {
                    return Err(Error::NegativeLength)
                }
            })
        }
        match try!(rd.read_u8()) {
            // Only protocol 2 pickles are accepted.
            PROTO => {
                let version = try!(rd.read_u8());
                if version != 2 {
                    return Err(Error::InvalidProto(version))
                }
            },
            STOP => return Ok(true),
            INT => {
                self.stack.push(match try!(read_decimal_int(rd)) {
                    BooleanOrInt::Boolean(v) => Value::Bool(v),
                    BooleanOrInt::Int(v) => Value::Long(BigInt::from(v)), // FIXME: or int?
                })
            },
            BININT => self.stack.push(Value::Int(try!(rd.read_i32::<LittleEndian>()) as isize)),
            BININT1 => self.stack.push(Value::Int(try!(rd.read_u8()) as isize)),
            BININT2 => self.stack.push(Value::Int(try!(rd.read_u16::<LittleEndian>()) as isize)),
            LONG => self.stack.push(Value::Long(BigInt::from(try!(read_decimal_long(rd))))),
            LONG1 => {
                let length = try!(rd.read_u8());
                self.stack.push(Value::Long(BigInt::from(try!(read_long(rd, length as usize)))))
            }
            LONG4 => {
                let length = try!(rd.read_i32::<LittleEndian>());
                // Guard the signed->usize cast, as the other length-
                // prefixed opcodes (BINSTRING/BINUNICODE) already do.
                ensure_not_negative!(length);
                self.stack.push(Value::Long(BigInt::from(try!(read_long(rd, length as usize)))))
            }
            STRING => self.stack.push(Value::String(try!(read_bracketed_string(rd)))),
            BINSTRING => {
                let length = try!(rd.read_i32::<LittleEndian>());
                ensure_not_negative!(length);
                let mut buf = vec![0; length as usize];
                try!(read_exact(rd, &mut buf));
                self.stack.push(Value::String(buf))
            },
            SHORT_BINSTRING => {
                let length = try!(rd.read_u8());
                let mut buf = vec![0; length as usize];
                try!(read_exact(rd, &mut buf));
                self.stack.push(Value::String(buf))
            },
            NONE => self.stack.push(Value::None),
            NEWTRUE => self.stack.push(Value::Bool(true)),
            NEWFALSE => self.stack.push(Value::Bool(false)),
            UNICODE => {
                let buf = try!(unescape(&try!(read_until_newline(rd)), true));
                self.stack.push(Value::Unicode(try!(String::from_utf8(buf))))
            },
            BINUNICODE => {
                let length = try!(rd.read_i32::<LittleEndian>());
                ensure_not_negative!(length);
                let mut buf = vec![0; length as usize];
                try!(read_exact(rd, buf.as_mut()));
                self.stack.push(Value::Unicode(try!(String::from_utf8(buf))))
            },
            FLOAT => {
                let s = try!(read_until_newline(rd));
                self.stack.push(Value::Float(try!(f64::from_ascii(&s))))
            },
            BINFLOAT => {
                self.stack.push(Value::Float(try!(rd.read_f64::<BigEndian>())))
            },
            EMPTY_LIST => {
                self.stack.push(Value::List(rc!(Vec::new())))
            },
            APPEND => {
                let v = try!(self.pop());
                match self.stack.last_mut() {
                    None => return Err(Error::EmptyStack),
                    Some(&mut Value::List(ref mut list)) => (*list.borrow_mut()).push(v),
                    _ => return Err(Error::InvalidValueOnStack),
                }
            },
            APPENDS => {
                let values = try!(self.split_off());
                match self.stack.last_mut() {
                    None => return Err(Error::EmptyStack),
                    Some(&mut Value::List(ref mut list)) => (*list.borrow_mut()).extend(values),
                    _ => return Err(Error::InvalidValueOnStack),
                }
            },
            LIST => {
                let values = try!(self.split_off());
                self.stack.push(Value::List(rc!(values)));
            },
            EMPTY_TUPLE => self.stack.push(Value::Tuple(rc!(Vec::new()))),
            TUPLE => {
                let values = try!(self.split_off());
                self.stack.push(Value::Tuple(rc!(values)));
            },
            TUPLE1 => {
                let v1 = try!(self.pop());
                self.stack.push(Value::Tuple(rc!(vec![v1])))
            },
            TUPLE2 => {
                let v1 = try!(self.pop());
                let v2 = try!(self.pop());
                self.stack.push(Value::Tuple(rc!(vec![v1, v2])))
            },
            TUPLE3 => {
                let v1 = try!(self.pop());
                let v2 = try!(self.pop());
                let v3 = try!(self.pop());
                self.stack.push(Value::Tuple(rc!(vec![v1, v2, v3])))
            }
            EMPTY_DICT => self.stack.push(Value::Dict(rc!(Vec::new()))),
            DICT => {
                let values = try!(self.split_off());
                let mut dict = Vec::new();
                // Consume the marked values as consecutive (key, value)
                // pairs.  The previous `values.remove(2 * i)` loop both
                // mispaired entries (indices shift after each removal) and
                // panicked out of bounds past the first pair.
                let mut items = values.into_iter();
                while let Some(key) = items.next() {
                    match items.next() {
                        // Odd number of values: a key with no value.
                        None => return Err(Error::InvalidValueOnStack),
                        Some(value) => dict.push((key, value)),
                    }
                }
                self.stack.push(Value::Dict(rc!(dict)));
            },
            SETITEM => {
                let value = try!(self.pop());
                let key = try!(self.pop());
                match self.stack.last_mut() {
                    None => return Err(Error::EmptyStack),
                    Some(&mut Value::Dict(ref mut dict)) => (*dict.borrow_mut()).push((key, value)),
                    _ => return Err(Error::InvalidValueOnStack),
                }
            },
            SETITEMS => {
                let values = try!(self.split_off());
                match self.stack.last_mut() {
                    None => return Err(Error::EmptyStack),
                    Some(&mut Value::Dict(ref mut dict_ref)) => {
                        // Same pairwise consumption as DICT; see above.
                        let mut items = values.into_iter();
                        while let Some(key) = items.next() {
                            match items.next() {
                                None => return Err(Error::InvalidValueOnStack),
                                Some(value) => (*dict_ref.borrow_mut()).push((key, value)),
                            }
                        }
                    },
                    _ => return Err(Error::InvalidValueOnStack),
                }
            },
            POP => {
                try!(self.pop());
            },
            DUP => {
                let value = match self.stack.last() {
                    None => return Err(Error::EmptyStack),
                    Some(ref v) => (*v).clone(),
                };
                self.stack.push(value)
            },
            MARK => {
                self.marker = Some(self.stack.len())
            },
            POP_MARK => {
                try!(self.split_off());
            },
            GET => {
                // Text-mode index: "00"/"01" parse as booleans, so fold
                // them back into their numeric values.
                let n = match try!(read_decimal_int(rd)) {
                    BooleanOrInt::Int(n) => n,
                    BooleanOrInt::Boolean(false) => 0,
                    BooleanOrInt::Boolean(true) => 1,
                };
                ensure_not_negative!(n);
                try!(self.handle_get(n as usize))
            }
            BINGET => {
                try!(self.handle_get(try!(rd.read_u8()) as usize))
            }
            LONG_BINGET => {
                let n = try!(rd.read_i32::<LittleEndian>());
                ensure_not_negative!(n);
                try!(self.handle_get(n as usize))
            }
            PUT => {
                let n = match try!(read_decimal_int(rd)) {
                    BooleanOrInt::Int(n) => n,
                    BooleanOrInt::Boolean(false) => 0,
                    BooleanOrInt::Boolean(true) => 1,
                };
                ensure_not_negative!(n);
                try!(self.handle_put(n as usize))
            }
            BINPUT => {
                try!(self.handle_put(try!(rd.read_u8()) as usize))
            }
            LONG_BINPUT => {
                let n = try!(rd.read_i32::<LittleEndian>());
                ensure_not_negative!(n);
                try!(self.handle_put(n as usize))
            }
            c => return Err(Error::UnknownOpcode(c)),
        }
        Ok(false)
    }
}
/// Runs the machine over `rd` until the STOP opcode, then returns the
/// value left on top of the stack.
pub fn unpickle<R>(rd: &mut R) -> Result<Value, Error> where R: Read + BufRead {
    let mut machine = Machine::new();
    loop {
        if try!(machine.execute(rd)) {
            break
        }
    }
    Ok(try!(machine.pop()))
}
#[cfg(test)]
mod tests {
    use std::io::{Cursor};
    use num::{FromPrimitive};
    use super::{Error, unpickle};
    use super::super::value::{Value};
    // Unpickles `$buffer`, matches the success value against `$pat`, then
    // runs `$result` (usually an assertion on the names bound by `$pat`).
    macro_rules! t {
        ($buffer: expr, $pat:pat, $result:expr) => ({
            match unpickle(&mut Cursor::new(&$buffer[..])) {
                Ok($pat) => $result,
                other => {
                    println!("ERROR {:?}", other);
                    assert!(false)
                },
            }
        })
    }
    // Unpickles `$buffer` and expects the error to match `$pat`.
    macro_rules! e {
        ($buffer: expr, $pat:pat) => ({
            match unpickle(&mut Cursor::new(&$buffer[..])) {
                Err($pat) => (),
                other => {
                    println!("ERROR {:?}", other);
                    assert!(false)
                },
            }
        })
    }
    // Builds a `BigInt` from an `isize` literal via `FromPrimitive`.
    macro_rules! n {
        ($x: expr) => ({FromPrimitive::from_isize($x).unwrap()})
    }
    #[test]
    fn test_int() {
        t!(b"I1\n.", Value::Long(n), assert_eq!(n, n!(1)));
        t!(b"K\x01.", Value::Int(n), assert_eq!(n, 1));
        t!(b"\x80\x02K\x01.", Value::Int(n), assert_eq!(n, 1));
    }
    #[test]
    fn test_const() {
        t!(b"I00\n.", Value::Bool(false), ());
        t!(b"I01\n.", Value::Bool(true), ());
        t!(b"\x80\x02\x89.", Value::Bool(false), ());
        t!(b"\x80\x02\x88.", Value::Bool(true), ());
        t!(b"N.", Value::None, ());
        t!(b"\x80\x02N.", Value::None, ());
    }
    #[test]
    fn test_string() {
        t!(b"S''\np1\n.", Value::String(s), assert_eq!(s, b""));
        t!(b"S'foo'\np1\n.", Value::String(s), assert_eq!(s, b"foo"));
        t!(b"U\x03fooq\x01.", Value::String(s), assert_eq!(s, b"foo"));
        t!(b"\x80\x02U\x03fooq\x01.", Value::String(s), assert_eq!(s, b"foo"));
        t!(b"S'\\n'\np1\n.", Value::String(s), assert_eq!(s, b"\n"));
    }
    #[test]
    fn test_unicode() {
        t!(b"Vfoo\np1\n.", Value::Unicode(s), assert_eq!(s, "foo"));
        t!(b"X\x03\x00\x00\x00fooq\x01.", Value::Unicode(s), assert_eq!(s, "foo"));
        t!(b"\x80\x02X\x03\x00\x00\x00fooq\x01.", Value::Unicode(s), assert_eq!(s, "foo"));
    }
    // Errors
    #[test]
    fn test_unknown_opcode() {
        e!(b"\xff", Error::UnknownOpcode(255))
    }
    #[test]
    fn test_invalid_int() {
        e!(b"I1000000000000000000000000000000\n.", Error::InvalidInt); // TODO: Should be long?
        // Int
        e!(b"I\n.", Error::InvalidInt);
        e!(b"I0.0\n.", Error::InvalidInt);
        e!(b"I0.1\n.", Error::InvalidInt);
        e!(b"Ia\n.", Error::InvalidInt);
        e!(b"I\n\n.", Error::InvalidInt);
        // Get
        e!(b"g\n.", Error::InvalidInt);
        e!(b"g0.0\n.", Error::InvalidInt);
        e!(b"g0.1\n.", Error::InvalidInt);
        e!(b"ga\n.", Error::InvalidInt);
        e!(b"g\n\n.", Error::InvalidInt);
        // Put
        e!(b"p\n.", Error::InvalidInt);
        e!(b"p0.0\n.", Error::InvalidInt);
        e!(b"p0.1\n.", Error::InvalidInt);
        e!(b"pa\n.", Error::InvalidInt);
        e!(b"p\n\n.", Error::InvalidInt);
    }
    #[test]
    fn test_invalid_long() {
        // LONG
        e!(b"L\n.", Error::InvalidLong);
        e!(b"L0.0\n.", Error::InvalidLong);
        e!(b"L0.1\n.", Error::InvalidLong);
        e!(b"La\n.", Error::InvalidLong);
        e!(b"L\n\n.", Error::InvalidLong);
        // LONG1
        e!(b"\x8a\x00.", Error::InvalidLong);
        // LONG4
        e!(b"\x8b\x00\x00\x00\x00.", Error::InvalidLong);
    }
    #[test]
    fn test_invalid_float() {
        e!(b"F\n.", Error::InvalidFloat);
        e!(b"Ffoo\n.", Error::InvalidFloat);
        e!(b"F1.O\n.", Error::InvalidFloat);
    }
    #[test]
    fn test_invalid_string() {
        // STRING
        e!("S", Error::InvalidString);
        e!("S'\n", Error::InvalidString);
        // UNICODE
        e!("V", Error::InvalidString);
        // INT
        e!(b"I", Error::InvalidString);
        // LONG
        e!(b"L", Error::InvalidString);
        // FLOAT
        e!(b"F", Error::InvalidString);
    }
    #[test]
    fn test_unicode_error() {
        // UNICODE
        e!(b"V\xe2\x28\xa1\n", Error::UnicodeError);
        // BINUNICODE
        e!(b"X\x03\x00\x00\x00\xe2\x28\xa1", Error::UnicodeError);
    }
    #[test]
    fn test_invalid_proto() {
        e!(b"\x80\x00", Error::InvalidProto(0));
        e!(b"\x80\x01", Error::InvalidProto(1));
        e!(b"\x80\x64", Error::InvalidProto(100));
    }
}
// Use stable read_exact function
// Copyright (c) 2016 Fedor Gogolev <knsd@knsd.net>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::io::{Read, BufRead, Error as IoError};
use std::string::{FromUtf8Error};
use std::collections::{HashMap};
use std::cell::{RefCell};
use std::rc::{Rc};
use num::{Zero};
use num::bigint::{BigInt, ToBigInt, Sign};
use byteorder::{ReadBytesExt, LittleEndian, BigEndian, Error as ByteorderError};
use from_ascii::{FromAscii, ParseIntError, ParseFloatError};
use string::{unescape, Error as UnescapeError};
use value::{Value};
use opcodes::*;
quick_error! {
    #[derive(Debug)]
    // Every failure mode of the unpickling machine.  Data-less variants are
    // protocol violations detected by the machine itself; variants with a
    // `from()` clause are converted automatically from lower-level errors
    // by `try!`.
    pub enum Error {
        // A MARK-relative opcode ran with no mark recorded.
        EmptyMarker
        // The recorded mark points past the current stack top.
        StackTooSmall
        EmptyStack
        // The stack top had the wrong variant for the current opcode.
        InvalidValueOnStack
        // GET referenced a memo slot that was never filled by PUT.
        InvalidGetValue
        InvalidPutValue
        Read(err: ByteorderError) {
            from()
        }
        Io(err: IoError) {
            from()
        }
        UnknownOpcode(opcode: u8) {}
        InvalidInt {
            from(ParseIntError)
        }
        InvalidLong
        InvalidFloat {
            from(ParseFloatError)
        }
        InvalidString
        UnicodeError {
            from(FromUtf8Error)
        }
        UnescapeError(err: UnescapeError) {
            from()
        }
        InvalidProto(proto: u8)
        // A length prefix decoded to a negative number.
        NegativeLength {}
        // Reserved so new variants can be added without a breaking change.
        #[doc(hidden)]
        __Nonexhaustive
    }
}
/// Result of parsing a decimal INT payload: the literals "00"/"01" are
/// pickle's text encoding of False/True, anything else is an integer.
#[derive(Debug, PartialEq)]
pub enum BooleanOrInt {
    Boolean(bool),
    Int(i64),
}
// Shorthand for the shared, interiorly-mutable container every collection
// `Value` (list/tuple/dict) is wrapped in.
macro_rules! rc {
    ($term: expr) => (Rc::new(RefCell::new($term)))
}
/// Reads bytes up to and including the next '\n' and returns them with the
/// newline stripped.  Hitting EOF before a newline yields `InvalidString`.
fn read_until_newline<R>(rd: &mut R) -> Result<Vec<u8>, Error> where R: Read + BufRead {
    let mut buf = Vec::new();
    try!(rd.read_until('\n' as u8, &mut buf));
    // Skip last symbol — \n; anything not newline-terminated is rejected.
    match buf.split_last() {
        Some((&b'\n', init)) => Ok(init.to_vec()),
        _ => Err(Error::InvalidString),
    }
}
/// Parses the newline-terminated payload of the text INT opcode.
///
/// "00" and "01" are pickle's text encoding of False/True; any other
/// payload must parse as a decimal `i64`.
fn read_decimal_int<R>(rd: &mut R) -> Result<BooleanOrInt, Error> where R: Read + BufRead {
    let digits = try!(read_until_newline(rd));
    match &digits[..] {
        b"00" => Ok(BooleanOrInt::Boolean(false)),
        b"01" => Ok(BooleanOrInt::Boolean(true)),
        other => Ok(BooleanOrInt::Int(try!(i64::from_ascii(other)))),
    }
}
fn read_decimal_long<R>(rd: &mut R) -> Result<BigInt, Error> where R: Read + BufRead {
let s = try!(read_until_newline(rd));
let init = match s.split_last() {
None => return Err(Error::InvalidLong),
Some((&b'L', init)) => init,
Some(_) => &s[..],
};
match BigInt::parse_bytes(&init, 10) {
Some(i) => Ok(i),
None => Err(Error::InvalidLong)
}
}
/// Reads a `length`-byte little-endian two's-complement integer (the
/// LONG1/LONG4 payload format) into a `BigInt`.  A zero-byte payload is
/// rejected as `InvalidLong`.
fn read_long<R>(rd: &mut R, length: usize) -> Result<BigInt, Error> where R: Read + BufRead {
    let mut buf = vec![0; length];
    try!(rd.read_exact(buf.as_mut()));
    let mut n = BigInt::from_bytes_le(Sign::Plus, &buf);
    let last = match buf.last_mut() {
        None => return Err(Error::InvalidLong),
        Some(last) => last,
    };
    // Sign bit set in the most significant byte: the value is negative in
    // two's complement, so subtract 2^(8*length) to recover it.
    if *last > 127 {
        n = n - (1.to_bigint().unwrap() << (length * 8)) // TODO: remove unwrap()
    }
    Ok(n)
}
fn read_bracketed_string<R>(rd: &mut R) -> Result<Vec<u8>, Error> where R: Read + BufRead {
let s = try!(read_until_newline(rd));
// Skip last and first symbols — '
if s.len() < 2 {
return Err(Error::InvalidString)
}
Ok(try!(unescape(&s[1..s.len() - 1], false)))
}
/// A stack machine that interprets pickle opcodes one at a time.
pub struct Machine {
    // Work stack the opcodes push values on and pop values off.
    stack: Vec<Value>,
    // Memoization table backing the GET/PUT opcode families.
    memo: HashMap<usize, Value>,
    // Stack position recorded by the most recent MARK opcode.
    // NOTE(review): a single slot — a nested MARK overwrites the outer
    // one; confirm nested collections are not expected here.
    marker: Option<usize>,
}
impl Machine {
    /// Creates a machine with an empty stack, an empty memo table and no
    /// mark recorded.
    pub fn new() -> Self {
        Machine {
            stack: Vec::new(),
            memo: HashMap::new(),
            marker: None,
        }
    }

    /// Removes and returns every value pushed since the last MARK opcode.
    fn split_off(&mut self) -> Result<Vec<Value>, Error> {
        let at = match self.marker {
            None => return Err(Error::EmptyMarker),
            Some(marker) => marker,
        };
        if at > self.stack.len() {
            return Err(Error::StackTooSmall);
        }
        Ok(self.stack.split_off(at))
    }

    /// Pops the value on top of the stack.
    fn pop(&mut self) -> Result<Value, Error> {
        match self.stack.pop() {
            None => Err(Error::EmptyStack),
            Some(value) => Ok(value),
        }
    }

    /// Pushes a clone of memo slot `i` onto the stack (GET family).
    fn handle_get(&mut self, i: usize) -> Result<(), Error> {
        let value = match self.memo.get(&i) {
            None => return Err(Error::InvalidGetValue),
            Some(ref v) => (*v).clone(),
        };
        self.stack.push(value);
        Ok(())
    }

    /// Stores a clone of the stack top into memo slot `i` (PUT family).
    /// The value itself stays on the stack.
    fn handle_put(&mut self, i: usize) -> Result<(), Error> {
        let value = match self.stack.last() {
            None => return Err(Error::EmptyStack),
            Some(ref v) => (*v).clone(),
        };
        self.memo.insert(i, value);
        Ok(())
    }

    /// Reads and executes one opcode from `rd`.
    ///
    /// Returns `Ok(true)` when the STOP opcode is reached and `Ok(false)`
    /// otherwise, so callers drive the machine in a loop.
    pub fn execute<R>(&mut self, rd: &mut R) -> Result<bool, Error> where R: Read + BufRead {
        // Lengths and memo indices arrive as signed integers; reject
        // negatives before they are cast to usize, where they would wrap
        // into huge allocation sizes.
        macro_rules! ensure_not_negative {
            ($n: expr) => ({
                if $n < Zero::zero() {
                    return Err(Error::NegativeLength)
                }
            })
        }
        match try!(rd.read_u8()) {
            // Only protocol 2 pickles are accepted.
            PROTO => {
                let version = try!(rd.read_u8());
                if version != 2 {
                    return Err(Error::InvalidProto(version))
                }
            },
            STOP => return Ok(true),
            INT => {
                self.stack.push(match try!(read_decimal_int(rd)) {
                    BooleanOrInt::Boolean(v) => Value::Bool(v),
                    BooleanOrInt::Int(v) => Value::Long(BigInt::from(v)), // FIXME: or int?
                })
            },
            BININT => self.stack.push(Value::Int(try!(rd.read_i32::<LittleEndian>()) as isize)),
            BININT1 => self.stack.push(Value::Int(try!(rd.read_u8()) as isize)),
            BININT2 => self.stack.push(Value::Int(try!(rd.read_u16::<LittleEndian>()) as isize)),
            LONG => self.stack.push(Value::Long(BigInt::from(try!(read_decimal_long(rd))))),
            LONG1 => {
                let length = try!(rd.read_u8());
                self.stack.push(Value::Long(BigInt::from(try!(read_long(rd, length as usize)))))
            }
            LONG4 => {
                let length = try!(rd.read_i32::<LittleEndian>());
                // Guard the signed->usize cast, as the other length-
                // prefixed opcodes (BINSTRING/BINUNICODE) already do.
                ensure_not_negative!(length);
                self.stack.push(Value::Long(BigInt::from(try!(read_long(rd, length as usize)))))
            }
            STRING => self.stack.push(Value::String(try!(read_bracketed_string(rd)))),
            BINSTRING => {
                let length = try!(rd.read_i32::<LittleEndian>());
                ensure_not_negative!(length);
                let mut buf = vec![0; length as usize];
                try!(rd.read_exact(&mut buf));
                self.stack.push(Value::String(buf))
            },
            SHORT_BINSTRING => {
                let length = try!(rd.read_u8());
                let mut buf = vec![0; length as usize];
                try!(rd.read_exact(&mut buf));
                self.stack.push(Value::String(buf))
            },
            NONE => self.stack.push(Value::None),
            NEWTRUE => self.stack.push(Value::Bool(true)),
            NEWFALSE => self.stack.push(Value::Bool(false)),
            UNICODE => {
                let buf = try!(unescape(&try!(read_until_newline(rd)), true));
                self.stack.push(Value::Unicode(try!(String::from_utf8(buf))))
            },
            BINUNICODE => {
                let length = try!(rd.read_i32::<LittleEndian>());
                ensure_not_negative!(length);
                let mut buf = vec![0; length as usize];
                try!(rd.read_exact(buf.as_mut()));
                self.stack.push(Value::Unicode(try!(String::from_utf8(buf))))
            },
            FLOAT => {
                let s = try!(read_until_newline(rd));
                self.stack.push(Value::Float(try!(f64::from_ascii(&s))))
            },
            BINFLOAT => {
                self.stack.push(Value::Float(try!(rd.read_f64::<BigEndian>())))
            },
            EMPTY_LIST => {
                self.stack.push(Value::List(rc!(Vec::new())))
            },
            APPEND => {
                let v = try!(self.pop());
                match self.stack.last_mut() {
                    None => return Err(Error::EmptyStack),
                    Some(&mut Value::List(ref mut list)) => (*list.borrow_mut()).push(v),
                    _ => return Err(Error::InvalidValueOnStack),
                }
            },
            APPENDS => {
                let values = try!(self.split_off());
                match self.stack.last_mut() {
                    None => return Err(Error::EmptyStack),
                    Some(&mut Value::List(ref mut list)) => (*list.borrow_mut()).extend(values),
                    _ => return Err(Error::InvalidValueOnStack),
                }
            },
            LIST => {
                let values = try!(self.split_off());
                self.stack.push(Value::List(rc!(values)));
            },
            EMPTY_TUPLE => self.stack.push(Value::Tuple(rc!(Vec::new()))),
            TUPLE => {
                let values = try!(self.split_off());
                self.stack.push(Value::Tuple(rc!(values)));
            },
            TUPLE1 => {
                let v1 = try!(self.pop());
                self.stack.push(Value::Tuple(rc!(vec![v1])))
            },
            TUPLE2 => {
                let v1 = try!(self.pop());
                let v2 = try!(self.pop());
                self.stack.push(Value::Tuple(rc!(vec![v1, v2])))
            },
            TUPLE3 => {
                let v1 = try!(self.pop());
                let v2 = try!(self.pop());
                let v3 = try!(self.pop());
                self.stack.push(Value::Tuple(rc!(vec![v1, v2, v3])))
            }
            EMPTY_DICT => self.stack.push(Value::Dict(rc!(Vec::new()))),
            DICT => {
                let values = try!(self.split_off());
                let mut dict = Vec::new();
                // Consume the marked values as consecutive (key, value)
                // pairs.  The previous `values.remove(2 * i)` loop both
                // mispaired entries (indices shift after each removal) and
                // panicked out of bounds past the first pair.
                let mut items = values.into_iter();
                while let Some(key) = items.next() {
                    match items.next() {
                        // Odd number of values: a key with no value.
                        None => return Err(Error::InvalidValueOnStack),
                        Some(value) => dict.push((key, value)),
                    }
                }
                self.stack.push(Value::Dict(rc!(dict)));
            },
            SETITEM => {
                let value = try!(self.pop());
                let key = try!(self.pop());
                match self.stack.last_mut() {
                    None => return Err(Error::EmptyStack),
                    Some(&mut Value::Dict(ref mut dict)) => (*dict.borrow_mut()).push((key, value)),
                    _ => return Err(Error::InvalidValueOnStack),
                }
            },
            SETITEMS => {
                let values = try!(self.split_off());
                match self.stack.last_mut() {
                    None => return Err(Error::EmptyStack),
                    Some(&mut Value::Dict(ref mut dict_ref)) => {
                        // Same pairwise consumption as DICT; see above.
                        let mut items = values.into_iter();
                        while let Some(key) = items.next() {
                            match items.next() {
                                None => return Err(Error::InvalidValueOnStack),
                                Some(value) => (*dict_ref.borrow_mut()).push((key, value)),
                            }
                        }
                    },
                    _ => return Err(Error::InvalidValueOnStack),
                }
            },
            POP => {
                try!(self.pop());
            },
            DUP => {
                let value = match self.stack.last() {
                    None => return Err(Error::EmptyStack),
                    Some(ref v) => (*v).clone(),
                };
                self.stack.push(value)
            },
            MARK => {
                self.marker = Some(self.stack.len())
            },
            POP_MARK => {
                try!(self.split_off());
            },
            GET => {
                // Text-mode index: "00"/"01" parse as booleans, so fold
                // them back into their numeric values.
                let n = match try!(read_decimal_int(rd)) {
                    BooleanOrInt::Int(n) => n,
                    BooleanOrInt::Boolean(false) => 0,
                    BooleanOrInt::Boolean(true) => 1,
                };
                ensure_not_negative!(n);
                try!(self.handle_get(n as usize))
            }
            BINGET => {
                try!(self.handle_get(try!(rd.read_u8()) as usize))
            }
            LONG_BINGET => {
                let n = try!(rd.read_i32::<LittleEndian>());
                ensure_not_negative!(n);
                try!(self.handle_get(n as usize))
            }
            PUT => {
                let n = match try!(read_decimal_int(rd)) {
                    BooleanOrInt::Int(n) => n,
                    BooleanOrInt::Boolean(false) => 0,
                    BooleanOrInt::Boolean(true) => 1,
                };
                ensure_not_negative!(n);
                try!(self.handle_put(n as usize))
            }
            BINPUT => {
                try!(self.handle_put(try!(rd.read_u8()) as usize))
            }
            LONG_BINPUT => {
                let n = try!(rd.read_i32::<LittleEndian>());
                ensure_not_negative!(n);
                try!(self.handle_put(n as usize))
            }
            c => return Err(Error::UnknownOpcode(c)),
        }
        Ok(false)
    }
}
/// Runs the machine over `rd` until the STOP opcode, then returns the
/// value left on top of the stack.
pub fn unpickle<R>(rd: &mut R) -> Result<Value, Error> where R: Read + BufRead {
    let mut vm = Machine::new();
    // Step one opcode at a time until STOP reports completion.
    while !try!(vm.execute(rd)) {}
    vm.pop()
}
#[cfg(test)]
mod tests {
    use std::io::{Cursor};
    use num::{FromPrimitive};
    use super::{Error, unpickle};
    use super::super::value::{Value};
    // Unpickles `$buffer`, matches the success value against `$pat`, then
    // runs `$result` (usually an assertion on the names bound by `$pat`).
    macro_rules! t {
        ($buffer: expr, $pat:pat, $result:expr) => ({
            match unpickle(&mut Cursor::new(&$buffer[..])) {
                Ok($pat) => $result,
                other => {
                    println!("ERROR {:?}", other);
                    assert!(false)
                },
            }
        })
    }
    // Unpickles `$buffer` and expects the error to match `$pat`.
    macro_rules! e {
        ($buffer: expr, $pat:pat) => ({
            match unpickle(&mut Cursor::new(&$buffer[..])) {
                Err($pat) => (),
                other => {
                    println!("ERROR {:?}", other);
                    assert!(false)
                },
            }
        })
    }
    // Builds a `BigInt` from an `isize` literal via `FromPrimitive`.
    macro_rules! n {
        ($x: expr) => ({FromPrimitive::from_isize($x).unwrap()})
    }
    #[test]
    fn test_int() {
        t!(b"I1\n.", Value::Long(n), assert_eq!(n, n!(1)));
        t!(b"K\x01.", Value::Int(n), assert_eq!(n, 1));
        t!(b"\x80\x02K\x01.", Value::Int(n), assert_eq!(n, 1));
    }
    #[test]
    fn test_const() {
        t!(b"I00\n.", Value::Bool(false), ());
        t!(b"I01\n.", Value::Bool(true), ());
        t!(b"\x80\x02\x89.", Value::Bool(false), ());
        t!(b"\x80\x02\x88.", Value::Bool(true), ());
        t!(b"N.", Value::None, ());
        t!(b"\x80\x02N.", Value::None, ());
    }
    #[test]
    fn test_string() {
        t!(b"S''\np1\n.", Value::String(s), assert_eq!(s, b""));
        t!(b"S'foo'\np1\n.", Value::String(s), assert_eq!(s, b"foo"));
        t!(b"U\x03fooq\x01.", Value::String(s), assert_eq!(s, b"foo"));
        t!(b"\x80\x02U\x03fooq\x01.", Value::String(s), assert_eq!(s, b"foo"));
        t!(b"S'\\n'\np1\n.", Value::String(s), assert_eq!(s, b"\n"));
    }
    #[test]
    fn test_unicode() {
        t!(b"Vfoo\np1\n.", Value::Unicode(s), assert_eq!(s, "foo"));
        t!(b"X\x03\x00\x00\x00fooq\x01.", Value::Unicode(s), assert_eq!(s, "foo"));
        t!(b"\x80\x02X\x03\x00\x00\x00fooq\x01.", Value::Unicode(s), assert_eq!(s, "foo"));
    }
    // Errors
    #[test]
    fn test_unknown_opcode() {
        e!(b"\xff", Error::UnknownOpcode(255))
    }
    #[test]
    fn test_invalid_int() {
        e!(b"I1000000000000000000000000000000\n.", Error::InvalidInt); // TODO: Should be long?
        // Int
        e!(b"I\n.", Error::InvalidInt);
        e!(b"I0.0\n.", Error::InvalidInt);
        e!(b"I0.1\n.", Error::InvalidInt);
        e!(b"Ia\n.", Error::InvalidInt);
        e!(b"I\n\n.", Error::InvalidInt);
        // Get
        e!(b"g\n.", Error::InvalidInt);
        e!(b"g0.0\n.", Error::InvalidInt);
        e!(b"g0.1\n.", Error::InvalidInt);
        e!(b"ga\n.", Error::InvalidInt);
        e!(b"g\n\n.", Error::InvalidInt);
        // Put
        e!(b"p\n.", Error::InvalidInt);
        e!(b"p0.0\n.", Error::InvalidInt);
        e!(b"p0.1\n.", Error::InvalidInt);
        e!(b"pa\n.", Error::InvalidInt);
        e!(b"p\n\n.", Error::InvalidInt);
    }
    #[test]
    fn test_invalid_long() {
        // LONG
        e!(b"L\n.", Error::InvalidLong);
        e!(b"L0.0\n.", Error::InvalidLong);
        e!(b"L0.1\n.", Error::InvalidLong);
        e!(b"La\n.", Error::InvalidLong);
        e!(b"L\n\n.", Error::InvalidLong);
        // LONG1
        e!(b"\x8a\x00.", Error::InvalidLong);
        // LONG4
        e!(b"\x8b\x00\x00\x00\x00.", Error::InvalidLong);
    }
    #[test]
    fn test_invalid_float() {
        e!(b"F\n.", Error::InvalidFloat);
        e!(b"Ffoo\n.", Error::InvalidFloat);
        e!(b"F1.O\n.", Error::InvalidFloat);
    }
    #[test]
    fn test_invalid_string() {
        // STRING
        e!("S", Error::InvalidString);
        e!("S'\n", Error::InvalidString);
        // UNICODE
        e!("V", Error::InvalidString);
        // INT
        e!(b"I", Error::InvalidString);
        // LONG
        e!(b"L", Error::InvalidString);
        // FLOAT
        e!(b"F", Error::InvalidString);
    }
    #[test]
    fn test_unicode_error() {
        // UNICODE
        e!(b"V\xe2\x28\xa1\n", Error::UnicodeError);
        // BINUNICODE
        e!(b"X\x03\x00\x00\x00\xe2\x28\xa1", Error::UnicodeError);
    }
    #[test]
    fn test_invalid_proto() {
        e!(b"\x80\x00", Error::InvalidProto(0));
        e!(b"\x80\x01", Error::InvalidProto(1));
        e!(b"\x80\x64", Error::InvalidProto(100));
    }
}
|
//! A YAML mapping and its iterator types.
use crate::Value;
use indexmap::IndexMap;
use serde::{Deserialize, Deserializer, Serialize};
use std::fmt;
use std::iter::FromIterator;
use std::ops::{Index, IndexMut};
/// A YAML mapping in which the keys and values are both `serde_yaml::Value`.
// `Hash` and `PartialOrd` were removed from the derive list: `IndexMap`
// implements neither trait, so the derived impls failed to compile as soon
// as they were instantiated.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct Mapping {
    map: IndexMap<Value, Value>,
}
impl Mapping {
    /// Creates an empty YAML map.
    #[inline]
    pub fn new() -> Self {
        Self::default()
    }
    /// Creates an empty YAML map with the given initial capacity.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Mapping {
            map: IndexMap::with_capacity(capacity),
        }
    }
    /// Reserves capacity for at least `additional` more elements to be inserted
    /// into the map. The map may reserve more space to avoid frequent
    /// allocations.
    ///
    /// # Panics
    ///
    /// Panics if the new allocation size overflows `usize`.
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        self.map.reserve(additional);
    }
    /// Shrinks the capacity of the map as much as possible. It will drop down
    /// as much as possible while maintaining the internal rules and possibly
    /// leaving some space in accordance with the resize policy.
    #[inline]
    pub fn shrink_to_fit(&mut self) {
        self.map.shrink_to_fit();
    }
    /// Inserts a key-value pair into the map. If the key already existed, the
    /// old value is returned.
    #[inline]
    pub fn insert(&mut self, k: Value, v: Value) -> Option<Value> {
        self.map.insert(k, v)
    }
    /// Checks if the map contains the given key.
    #[inline]
    pub fn contains_key(&self, k: &Value) -> bool {
        self.map.contains_key(k)
    }
    /// Returns the value corresponding to the key in the map.
    #[inline]
    pub fn get(&self, k: &Value) -> Option<&Value> {
        self.map.get(k)
    }
    /// Returns the mutable reference corresponding to the key in the map.
    #[inline]
    pub fn get_mut(&mut self, k: &Value) -> Option<&mut Value> {
        self.map.get_mut(k)
    }
    /// Removes and returns the value corresponding to the key from the map.
    #[inline]
    pub fn remove(&mut self, k: &Value) -> Option<Value> {
        self.map.remove(k)
    }
    /// Returns the maximum number of key-value pairs the map can hold without
    /// reallocating.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.map.capacity()
    }
    /// Returns the number of key-value pairs in the map.
    #[inline]
    pub fn len(&self) -> usize {
        self.map.len()
    }
    /// Returns whether the map is currently empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
    /// Clears the map of all key-value pairs.
    #[inline]
    pub fn clear(&mut self) {
        self.map.clear();
    }
    /// Returns a double-ended iterator visiting all key-value pairs in order of
    /// insertion. Iterator element type is `(&'a Value, &'a Value)`.
    #[inline]
    pub fn iter(&self) -> Iter {
        Iter {
            iter: self.map.iter(),
        }
    }
    /// Returns a double-ended iterator visiting all key-value pairs in order of
    /// insertion. Iterator element type is `(&'a Value, &'a mut Value)`.
    #[inline]
    pub fn iter_mut(&mut self) -> IterMut {
        IterMut {
            iter: self.map.iter_mut(),
        }
    }
}
impl<'a> Index<&'a Value> for Mapping {
    type Output = Value;
    /// Accesses the value for `index`.
    ///
    /// # Panics
    ///
    /// Panics if the key is not present (delegates to `IndexMap::index`).
    #[inline]
    fn index(&self, index: &'a Value) -> &Value {
        self.map.index(index)
    }
}
impl<'a> IndexMut<&'a Value> for Mapping {
    /// Mutably accesses the value for `index`.
    ///
    /// # Panics
    ///
    /// Panics if the key is not present (delegates to `IndexMap::index_mut`).
    #[inline]
    fn index_mut(&mut self, index: &'a Value) -> &mut Value {
        self.map.index_mut(index)
    }
}
impl Extend<(Value, Value)> for Mapping {
    /// Inserts all `(key, value)` pairs from `iter` into the map.
    #[inline]
    fn extend<I: IntoIterator<Item = (Value, Value)>>(&mut self, iter: I) {
        self.map.extend(iter);
    }
}
impl FromIterator<(Value, Value)> for Mapping {
    /// Builds a mapping from an iterator of `(key, value)` pairs.
    #[inline]
    fn from_iter<I: IntoIterator<Item = (Value, Value)>>(iter: I) -> Self {
        Mapping {
            map: IndexMap::from_iter(iter),
        }
    }
}
// Implements `Iterator` and `ExactSizeIterator` for a wrapper struct by
// forwarding to its inner `iter` field.  `$generics` carries the optional
// generic parameter list (e.g. `<'a>`), repeated verbatim on the impls.
macro_rules! delegate_iterator {
    (($name:ident $($generics:tt)*) => $item:ty) => {
        impl $($generics)* Iterator for $name $($generics)* {
            type Item = $item;
            #[inline]
            fn next(&mut self) -> Option<Self::Item> {
                self.iter.next()
            }
            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                self.iter.size_hint()
            }
        }
        impl $($generics)* ExactSizeIterator for $name $($generics)* {
            #[inline]
            fn len(&self) -> usize {
                self.iter.len()
            }
        }
    }
}
/// Iterator over `&serde_yaml::Mapping`.
pub struct Iter<'a> {
    // Borrowing iterator over the underlying `IndexMap`.
    iter: indexmap::map::Iter<'a, Value, Value>,
}
delegate_iterator!((Iter<'a>) => (&'a Value, &'a Value));
impl<'a> IntoIterator for &'a Mapping {
    type Item = (&'a Value, &'a Value);
    type IntoIter = Iter<'a>;
    /// Iterates over borrowed `(key, value)` pairs in insertion order.
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        Iter {
            iter: self.map.iter(),
        }
    }
}
/// Iterator over `&mut serde_yaml::Mapping`.
pub struct IterMut<'a> {
    // Mutably borrowing iterator over the underlying `IndexMap`.
    iter: indexmap::map::IterMut<'a, Value, Value>,
}
delegate_iterator!((IterMut<'a>) => (&'a Value, &'a mut Value));
impl<'a> IntoIterator for &'a mut Mapping {
    type Item = (&'a Value, &'a mut Value);
    type IntoIter = IterMut<'a>;
    /// Iterates over `(borrowed key, mutable value)` pairs in insertion order.
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        IterMut {
            iter: self.map.iter_mut(),
        }
    }
}
/// Iterator over `serde_yaml::Mapping` by value.
pub struct IntoIter {
    // Owning iterator over the underlying `IndexMap`.
    iter: indexmap::map::IntoIter<Value, Value>,
}
delegate_iterator!((IntoIter) => (Value, Value));
impl IntoIterator for Mapping {
    type Item = (Value, Value);
    type IntoIter = IntoIter;
    /// Consumes the mapping, yielding owned `(key, value)` pairs.
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        IntoIter {
            iter: self.map.into_iter(),
        }
    }
}
impl Serialize for Mapping {
    /// Serializes the mapping as a map, entry by entry.
    #[inline]
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        use serde::ser::SerializeMap;
        // Hint the entry count so serializers can preallocate.
        let mut map_serializer = serializer.serialize_map(Some(self.len()))?;
        for (k, v) in self {
            map_serializer.serialize_entry(k, v)?;
        }
        map_serializer.end()
    }
}
impl<'de> Deserialize<'de> for Mapping {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = Mapping;
            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("a YAML mapping")
            }
            // Unit input is accepted as an empty mapping rather than an
            // error (NOTE(review): presumably hit for empty documents —
            // confirm against the deserializer).
            #[inline]
            fn visit_unit<E>(self) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(Mapping::new())
            }
            #[inline]
            fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
            where
                V: serde::de::MapAccess<'de>,
            {
                // Later duplicate keys overwrite earlier ones via `insert`.
                let mut values = Mapping::new();
                while let Some((k, v)) = visitor.next_entry()? {
                    values.insert(k, v);
                }
                Ok(values)
            }
        }
        deserializer.deserialize_map(Visitor)
    }
}
// Remove nonworking derives from Mapping
//! A YAML mapping and its iterator types.
use crate::Value;
use indexmap::IndexMap;
use serde::{Deserialize, Deserializer, Serialize};
use std::fmt;
use std::iter::FromIterator;
use std::ops::{Index, IndexMut};
/// A YAML mapping in which the keys and values are both `serde_yaml::Value`.
// Only derives that `IndexMap<Value, Value>` can actually satisfy are
// listed here (`Hash`/`PartialOrd` are intentionally absent).
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct Mapping {
    map: IndexMap<Value, Value>,
}
impl Mapping {
    /// Creates an empty YAML map.
    #[inline]
    pub fn new() -> Self {
        Self::default()
    }
    /// Creates an empty YAML map with the given initial capacity.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Mapping {
            map: IndexMap::with_capacity(capacity),
        }
    }
    /// Reserves capacity for at least `additional` more elements to be inserted
    /// into the map. The map may reserve more space to avoid frequent
    /// allocations.
    ///
    /// # Panics
    ///
    /// Panics if the new allocation size overflows `usize`.
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        self.map.reserve(additional);
    }
    /// Shrinks the capacity of the map as much as possible. It will drop down
    /// as much as possible while maintaining the internal rules and possibly
    /// leaving some space in accordance with the resize policy.
    #[inline]
    pub fn shrink_to_fit(&mut self) {
        self.map.shrink_to_fit();
    }
    /// Inserts a key-value pair into the map. If the key already existed, the
    /// old value is returned.
    #[inline]
    pub fn insert(&mut self, k: Value, v: Value) -> Option<Value> {
        self.map.insert(k, v)
    }
    /// Checks if the map contains the given key.
    #[inline]
    pub fn contains_key(&self, k: &Value) -> bool {
        self.map.contains_key(k)
    }
    /// Returns the value corresponding to the key in the map.
    #[inline]
    pub fn get(&self, k: &Value) -> Option<&Value> {
        self.map.get(k)
    }
    /// Returns the mutable reference corresponding to the key in the map.
    #[inline]
    pub fn get_mut(&mut self, k: &Value) -> Option<&mut Value> {
        self.map.get_mut(k)
    }
    /// Removes and returns the value corresponding to the key from the map.
    #[inline]
    pub fn remove(&mut self, k: &Value) -> Option<Value> {
        self.map.remove(k)
    }
    /// Returns the maximum number of key-value pairs the map can hold without
    /// reallocating.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.map.capacity()
    }
    /// Returns the number of key-value pairs in the map.
    #[inline]
    pub fn len(&self) -> usize {
        self.map.len()
    }
    /// Returns whether the map is currently empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
    /// Clears the map of all key-value pairs.
    #[inline]
    pub fn clear(&mut self) {
        self.map.clear();
    }
    /// Returns a double-ended iterator visiting all key-value pairs in order of
    /// insertion. Iterator element type is `(&'a Value, &'a Value)`.
    #[inline]
    pub fn iter(&self) -> Iter {
        Iter {
            iter: self.map.iter(),
        }
    }
    /// Returns a double-ended iterator visiting all key-value pairs in order of
    /// insertion. Iterator element type is `(&'a Value, &'a mut Value)`.
    #[inline]
    pub fn iter_mut(&mut self) -> IterMut {
        IterMut {
            iter: self.map.iter_mut(),
        }
    }
}
impl<'a> Index<&'a Value> for Mapping {
    type Output = Value;
    /// Panics if the key is not present in the mapping.
    #[inline]
    fn index(&self, index: &'a Value) -> &Value {
        &self.map[index]
    }
}
impl<'a> IndexMut<&'a Value> for Mapping {
    /// Panics if the key is not present in the mapping.
    #[inline]
    fn index_mut(&mut self, index: &'a Value) -> &mut Value {
        &mut self.map[index]
    }
}
impl Extend<(Value, Value)> for Mapping {
    /// Inserts every key-value pair yielded by `iter`; an existing key has its
    /// value replaced.
    #[inline]
    fn extend<I: IntoIterator<Item = (Value, Value)>>(&mut self, iter: I) {
        for (key, value) in iter {
            self.map.insert(key, value);
        }
    }
}
impl FromIterator<(Value, Value)> for Mapping {
    /// Builds a mapping from key-value pairs, preserving iteration order.
    #[inline]
    fn from_iter<I: IntoIterator<Item = (Value, Value)>>(iter: I) -> Self {
        Mapping {
            map: iter.into_iter().collect(),
        }
    }
}
// Implements `Iterator` and `ExactSizeIterator` for a wrapper type by
// forwarding `next`, `size_hint`, and `len` to its inner `iter` field.
// Invoked as: delegate_iterator!((Name<'a>) => ItemType);
macro_rules! delegate_iterator {
    (($name:ident $($generics:tt)*) => $item:ty) => {
        impl $($generics)* Iterator for $name $($generics)* {
            type Item = $item;
            #[inline]
            fn next(&mut self) -> Option<Self::Item> {
                self.iter.next()
            }
            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                self.iter.size_hint()
            }
        }
        impl $($generics)* ExactSizeIterator for $name $($generics)* {
            #[inline]
            fn len(&self) -> usize {
                self.iter.len()
            }
        }
    }
}
/// Iterator over `&serde_yaml::Mapping`.
pub struct Iter<'a> {
    iter: indexmap::map::Iter<'a, Value, Value>,
}
delegate_iterator!((Iter<'a>) => (&'a Value, &'a Value));
impl<'a> IntoIterator for &'a Mapping {
    type Item = (&'a Value, &'a Value);
    type IntoIter = Iter<'a>;
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        // Delegate to the inherent accessor instead of building the wrapper
        // by hand.
        self.iter()
    }
}
/// Iterator over `&mut serde_yaml::Mapping`.
pub struct IterMut<'a> {
    iter: indexmap::map::IterMut<'a, Value, Value>,
}
delegate_iterator!((IterMut<'a>) => (&'a Value, &'a mut Value));
impl<'a> IntoIterator for &'a mut Mapping {
    type Item = (&'a Value, &'a mut Value);
    type IntoIter = IterMut<'a>;
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        // Delegate to the inherent accessor instead of building the wrapper
        // by hand.
        self.iter_mut()
    }
}
/// Iterator over `serde_yaml::Mapping` by value.
pub struct IntoIter {
    // Owning iterator over the underlying `IndexMap`.
    iter: indexmap::map::IntoIter<Value, Value>,
}
delegate_iterator!((IntoIter) => (Value, Value));
impl IntoIterator for Mapping {
    type Item = (Value, Value);
    type IntoIter = IntoIter;
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        IntoIter {
            iter: self.map.into_iter(),
        }
    }
}
impl Serialize for Mapping {
    /// Serializes as a serde map, emitting entries in insertion order with an
    /// exact length hint.
    #[inline]
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        use serde::ser::SerializeMap;
        let mut map_serializer = serializer.serialize_map(Some(self.len()))?;
        for (k, v) in self {
            map_serializer.serialize_entry(k, v)?;
        }
        map_serializer.end()
    }
}
impl<'de> Deserialize<'de> for Mapping {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        /// Visitor producing a `Mapping` from either a map or a unit value.
        struct MappingVisitor;
        impl<'de> serde::de::Visitor<'de> for MappingVisitor {
            type Value = Mapping;
            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("a YAML mapping")
            }
            #[inline]
            fn visit_unit<E>(self) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                // A unit/empty value is an empty mapping.
                Ok(Mapping::new())
            }
            #[inline]
            fn visit_map<A>(self, mut access: A) -> Result<Self::Value, A::Error>
            where
                A: serde::de::MapAccess<'de>,
            {
                let mut mapping = Mapping::new();
                while let Some((key, value)) = access.next_entry()? {
                    mapping.insert(key, value);
                }
                Ok(mapping)
            }
        }
        deserializer.deserialize_map(MappingVisitor)
    }
}
|
use chrono::{DateTime, Utc};
/// Formats a metric for sending over the wire.
///
/// Produces `[namespace.]metric[|#tag1,tag2,...]` as raw bytes. The namespace
/// prefix is skipped when the metric opts out via `Metric::uses_namespace`
/// (service checks and events do) or when `in_namespace` is empty.
pub fn format_for_send<M, I, S>(in_metric: &M, in_namespace: &str, tags: I) -> Vec<u8>
    where M: Metric,
          I: IntoIterator<Item=S>,
          S: AsRef<str>,
{
    let metric = in_metric.metric_type_format();
    let namespace = if in_metric.uses_namespace() {
        in_namespace
    } else {
        ""
    };
    // Preallocate for the common no-tag case (also counting the '.' joiner);
    // tags grow the buffer as needed.
    let mut buf = Vec::with_capacity(metric.len() + namespace.len() + 1);
    if !namespace.is_empty() {
        buf.extend_from_slice(namespace.as_bytes());
        buf.extend_from_slice(b".");
    }
    buf.extend_from_slice(metric.as_bytes());
    // The first tag is preceded by the "|#" marker and later ones by commas,
    // so no look-ahead is needed (the old loop peeked via is_some()/unwrap()).
    for (i, tag) in tags.into_iter().enumerate() {
        buf.extend_from_slice(if i == 0 { b"|#" } else { b"," });
        buf.extend_from_slice(tag.as_ref().as_bytes());
    }
    buf
}
/// A metric that can be rendered into the DogStatsD wire format.
pub trait Metric {
    /// Renders the metric body, e.g. `my_count:1|c`.
    fn metric_type_format(&self) -> String;
    /// Whether the global namespace prefix applies; service checks and events
    /// override this to return `false`.
    fn uses_namespace(&self) -> bool {
        true
    }
}
/// A counter increment or decrement for the named stat.
pub enum CountMetric<'a> {
    Incr(&'a str),
    Decr(&'a str),
}
impl<'a> Metric for CountMetric<'a> {
    // my_count:1|c
    // my_count:-1|c
    fn metric_type_format(&self) -> String {
        match *self {
            CountMetric::Incr(stat) => {
                let mut buf = String::with_capacity(3 + stat.len() + 4);
                buf.push_str(stat);
                buf.push_str(":1|c");
                buf
            },
            CountMetric::Decr(stat) => {
                // ":-1|c" is 5 bytes; the old reservation of 4 forced a
                // reallocation on every decrement.
                let mut buf = String::with_capacity(3 + stat.len() + 5);
                buf.push_str(stat);
                buf.push_str(":-1|c");
                buf
            },
        }
    }
}
/// Elapsed-time metric computed from two `chrono` instants.
pub struct TimeMetric<'a> {
    start_time: &'a DateTime<Utc>,
    end_time: &'a DateTime<Utc>,
    stat: &'a str,
}
impl<'a> Metric for TimeMetric<'a> {
    // my_stat:500|ms
    fn metric_type_format(&self) -> String {
        // Millisecond delta; negative if end_time precedes start_time.
        let dur = self.end_time.signed_duration_since(*self.start_time);
        let mut buf = String::with_capacity(3 + self.stat.len() + 11);
        buf.push_str(self.stat);
        buf.push_str(":");
        buf.push_str(&dur.num_milliseconds().to_string());
        buf.push_str("|ms");
        buf
    }
}
impl<'a> TimeMetric<'a> {
    /// Creates a timer metric for `stat` spanning the two instants.
    pub fn new(stat: &'a str, start_time: &'a DateTime<Utc>, end_time: &'a DateTime<Utc>) -> Self {
        TimeMetric {
            start_time: start_time,
            end_time: end_time,
            stat: stat,
        }
    }
}
/// Timing metric with a caller-supplied millisecond value.
pub struct TimingMetric<'a> {
    ms: i64,
    stat: &'a str,
}
impl<'a> Metric for TimingMetric<'a> {
    // my_stat:500|ms
    fn metric_type_format(&self) -> String {
        let ms = self.ms.to_string();
        let mut buf = String::with_capacity(3 + self.stat.len() + ms.len());
        buf.push_str(self.stat);
        buf.push_str(":");
        buf.push_str(&ms);
        buf.push_str("|ms");
        buf
    }
}
impl<'a> TimingMetric<'a> {
    /// Creates a timing metric for `stat` of `ms` milliseconds.
    pub fn new(stat: &'a str, ms: i64) -> Self {
        TimingMetric {
            ms: ms,
            stat: stat,
        }
    }
}
/// Gauge metric: renders `stat:val|g`.
pub struct GaugeMetric<'a> {
    stat: &'a str,
    val: &'a str,
}
impl<'a> Metric for GaugeMetric<'a> {
    // my_gauge:1000|g
    fn metric_type_format(&self) -> String {
        let mut buf = String::with_capacity(3 + self.stat.len() + self.val.len());
        buf.push_str(self.stat);
        buf.push_str(":");
        buf.push_str(self.val);
        buf.push_str("|g");
        buf
    }
}
impl<'a> GaugeMetric<'a> {
    /// Creates a gauge metric for `stat` with value `val`.
    pub fn new(stat: &'a str, val: &'a str) -> Self {
        GaugeMetric {
            stat: stat,
            val: val,
        }
    }
}
/// Histogram metric: renders `stat:val|h`.
pub struct HistogramMetric<'a> {
    stat: &'a str,
    val: &'a str,
}
impl<'a> Metric for HistogramMetric<'a> {
    // my_histogram:1000|h
    fn metric_type_format(&self) -> String {
        let mut buf = String::with_capacity(3 + self.stat.len() + self.val.len());
        buf.push_str(self.stat);
        buf.push_str(":");
        buf.push_str(self.val);
        buf.push_str("|h");
        buf
    }
}
impl<'a> HistogramMetric<'a> {
    /// Creates a histogram metric for `stat` with value `val`.
    pub fn new(stat: &'a str, val: &'a str) -> Self {
        HistogramMetric {
            stat: stat,
            val: val,
        }
    }
}
/// Distribution metric: renders `stat:val|d`.
pub struct DistributionMetric<'a> {
    stat: &'a str,
    val: &'a str,
}
impl<'a> Metric for DistributionMetric<'a> {
    // my_distribution:1000|d
    fn metric_type_format(&self) -> String {
        format!("{}:{}|d", self.stat, self.val)
    }
}
impl<'a> DistributionMetric<'a> {
    /// Creates a distribution metric for `stat` with value `val`.
    pub fn new(stat: &'a str, val: &'a str) -> Self {
        DistributionMetric { stat, val }
    }
}
/// Set metric: renders `stat:val|s`.
pub struct SetMetric<'a> {
    stat: &'a str,
    val: &'a str,
}
impl<'a> Metric for SetMetric<'a> {
    // my_set:45|s
    fn metric_type_format(&self) -> String {
        let mut buf = String::with_capacity(3 + self.stat.len() + self.val.len());
        buf.push_str(self.stat);
        buf.push_str(":");
        buf.push_str(self.val);
        buf.push_str("|s");
        buf
    }
}
impl<'a> SetMetric<'a> {
    /// Creates a set metric for `stat` with member `val`.
    pub fn new(stat: &'a str, val: &'a str) -> Self {
        SetMetric {
            stat: stat,
            val: val,
        }
    }
}
/// Represents the different states a service can be in
#[derive(Clone, Copy, Debug)]
pub enum ServiceStatus {
    /// OK State
    OK,
    /// Warning State
    Warning,
    /// Critical State
    Critical,
    /// Unknown State
    Unknown,
}
impl ServiceStatus {
    /// Wire-format code for the status: OK=0, Warning=1, Critical=2, Unknown=3.
    fn to_int(&self) -> i32 {
        // Variants are declared in wire order, so the C-like discriminant is
        // exactly the protocol code.
        *self as i32
    }
}
/// Struct for adding optional pieces to a service check
// Default is derivable: every field defaults to None, which is exactly what
// the old hand-written impl produced.
#[derive(Clone, Copy, Debug, Default)]
pub struct ServiceCheckOptions {
    /// An optional timestamp to include with the check
    pub timestamp: Option<i32>,
    /// An optional hostname to include with the check
    pub hostname: Option<&'static str>,
    /// An optional message to include with the check
    pub message: Option<&'static str>,
}
impl ServiceCheckOptions {
    /// Number of bytes the optional fields contribute to the rendered service
    /// check: each present field costs its text plus a 3-byte `|x:` separator.
    fn len(&self) -> usize {
        let mut length = 0;
        length += self.timestamp.map_or(0, |ts| ts.to_string().len() + 3);
        length += self.hostname.map_or(0, |host| host.len() + 3);
        length += self.message.map_or(0, |msg| msg.len() + 3);
        length
    }
}
/// A service check: a status for `stat` plus optional timestamp/hostname/message.
pub struct ServiceCheck<'a> {
    stat: &'a str,
    val: ServiceStatus,
    options: ServiceCheckOptions,
}
impl<'a> Metric for ServiceCheck<'a> {
    // Service checks are never prefixed with the global namespace.
    fn uses_namespace(&self) -> bool {
        false
    }
    // _sc|my_service.can_connect|1
    fn metric_type_format(&self) -> String {
        // 6 covers "_sc|", the status separator "|", and the status digit.
        let mut buf = String::with_capacity(6 + self.stat.len() + self.options.len());
        buf.push_str("_sc|");
        buf.push_str(self.stat);
        buf.push_str("|");
        buf.push_str(&format!("{}", self.val.to_int()));
        // Optional fields are appended in wire order: d:, h:, m:.
        if self.options.timestamp.is_some() {
            buf.push_str("|d:");
            buf.push_str(&format!("{}", self.options.timestamp.unwrap()));
        }
        if self.options.hostname.is_some() {
            buf.push_str("|h:");
            buf.push_str(self.options.hostname.unwrap());
        }
        if self.options.message.is_some() {
            buf.push_str("|m:");
            buf.push_str(self.options.message.unwrap());
        }
        buf
    }
}
impl<'a> ServiceCheck<'a> {
    /// Creates a service check for `stat` with the given status and options.
    pub fn new(stat: &'a str, val: ServiceStatus, options: ServiceCheckOptions) -> Self {
        ServiceCheck {
            stat: stat,
            val: val,
            options: options,
        }
    }
}
/// An event with a title and body text.
pub struct Event<'a> {
    title: &'a str,
    text: &'a str,
}
impl<'a> Metric for Event<'a> {
    // Events are never prefixed with the global namespace.
    fn uses_namespace(&self) -> bool {
        false
    }
    // _e{<title length>,<text length>}:<title>|<text>
    fn metric_type_format(&self) -> String {
        let title_len = self.title.len().to_string();
        let text_len = self.text.len().to_string();
        // NOTE(review): the fixed framing "_e{" + "," + "}:" + "|" is 7 bytes,
        // so `+ 6` under-reserves by one and the final push may reallocate.
        let mut buf = String::with_capacity(self.title.len() + self.text.len() + title_len.len() + text_len.len() + 6);
        buf.push_str("_e{");
        buf.push_str(&title_len);
        buf.push_str(",");
        buf.push_str(&text_len);
        buf.push_str("}:");
        buf.push_str(self.title);
        buf.push_str("|");
        buf.push_str(self.text);
        buf
    }
}
impl<'a> Event<'a> {
    /// Creates an event with the given title and body text.
    pub fn new(title: &'a str, text: &'a str) -> Self {
        Event {
            title: title,
            text: text,
        }
    }
}
// Unit tests covering the rendered wire format of every metric type.
#[cfg(test)]
mod tests {
    use chrono::{TimeZone, Utc};
    use super::*;
    #[test]
    fn test_format_for_send_no_tags() {
        assert_eq!(
            &b"namespace.foo:1|c"[..],
            &format_for_send(&CountMetric::Incr("foo"), "namespace", &[] as &[String])[..]
        )
    }
    #[test]
    fn test_format_for_send_no_namespace() {
        assert_eq!(
            &b"foo:1|c|#tag:1,tag:2"[..],
            &format_for_send(&CountMetric::Incr("foo"), "", &["tag:1", "tag:2"])[..]
        )
    }
    #[test]
    fn test_format_for_send_everything() {
        assert_eq!(
            &b"namespace.foo:1|c|#tag:1,tag:2"[..],
            &format_for_send(&CountMetric::Incr("foo"), "namespace", &["tag:1", "tag:2"])[..]
        )
    }
    // Events opt out of namespacing via uses_namespace(), so "namespace" is dropped.
    #[test]
    fn test_format_for_send_everything_omit_namespace() {
        assert_eq!(
            &b"_e{5,4}:title|text|#tag:1,tag:2"[..],
            &format_for_send(&Event::new("title".into(), "text".into()), "namespace", &["tag:1", "tag:2"])[..]
        )
    }
    #[test]
    fn test_count_incr_metric() {
        let metric = CountMetric::Incr("incr".into());
        assert_eq!("incr:1|c", metric.metric_type_format())
    }
    #[test]
    fn test_count_decr_metric() {
        let metric = CountMetric::Decr("decr".into());
        assert_eq!("decr:-1|c", metric.metric_type_format())
    }
    #[test]
    fn test_time_metric() {
        let start_time = Utc.ymd(2016, 4, 24).and_hms_milli(0, 0, 0, 0);
        let end_time = Utc.ymd(2016, 4, 24).and_hms_milli(0, 0, 0, 900);
        let metric = TimeMetric::new("time".into(), &start_time, &end_time);
        assert_eq!("time:900|ms", metric.metric_type_format())
    }
    #[test]
    fn test_timing_metric() {
        let metric = TimingMetric::new("timing".into(), 720);
        assert_eq!("timing:720|ms", metric.metric_type_format())
    }
    #[test]
    fn test_gauge_metric() {
        let metric = GaugeMetric::new("gauge".into(), "12345".into());
        assert_eq!("gauge:12345|g", metric.metric_type_format())
    }
    #[test]
    fn test_histogram_metric() {
        let metric = HistogramMetric::new("histogram".into(), "67890".into());
        assert_eq!("histogram:67890|h", metric.metric_type_format())
    }
    #[test]
    fn test_distribution_metric() {
        let metric = DistributionMetric::new("distribution".into(), "67890".into());
        assert_eq!("distribution:67890|d", metric.metric_type_format())
    }
    #[test]
    fn test_set_metric() {
        let metric = SetMetric::new("set".into(), "13579".into());
        assert_eq!("set:13579|s", metric.metric_type_format())
    }
    #[test]
    fn test_service_check() {
        let metric = ServiceCheck::new("redis.can_connect".into(), ServiceStatus::Warning, ServiceCheckOptions::default());
        assert_eq!("_sc|redis.can_connect|1", metric.metric_type_format())
    }
    #[test]
    fn test_service_check_with_timestamp() {
        let options = ServiceCheckOptions {
            timestamp: Some(1234567890),
            ..Default::default()
        };
        let metric = ServiceCheck::new("redis.can_connect".into(), ServiceStatus::Warning, options);
        assert_eq!("_sc|redis.can_connect|1|d:1234567890", metric.metric_type_format())
    }
    #[test]
    fn test_service_check_with_hostname() {
        let options = ServiceCheckOptions {
            hostname: Some("my_server.localhost"),
            ..Default::default()
        };
        let metric = ServiceCheck::new("redis.can_connect".into(), ServiceStatus::Warning, options);
        assert_eq!("_sc|redis.can_connect|1|h:my_server.localhost", metric.metric_type_format())
    }
    #[test]
    fn test_service_check_with_message() {
        let options = ServiceCheckOptions {
            message: Some("Service is possibly down"),
            ..Default::default()
        };
        let metric = ServiceCheck::new("redis.can_connect".into(), ServiceStatus::Warning, options);
        assert_eq!("_sc|redis.can_connect|1|m:Service is possibly down", metric.metric_type_format())
    }
    #[test]
    fn test_service_check_with_all() {
        let options = ServiceCheckOptions {
            timestamp: Some(1234567890),
            hostname: Some("my_server.localhost"),
            message: Some("Service is possibly down")
        };
        let metric = ServiceCheck::new("redis.can_connect".into(), ServiceStatus::Warning, options);
        assert_eq!(
            "_sc|redis.can_connect|1|d:1234567890|h:my_server.localhost|m:Service is possibly down",
            metric.metric_type_format()
        )
    }
    #[test]
    fn test_event() {
        let metric = Event::new("Event Title".into(), "Event Body - Something Happened".into());
        assert_eq!(
            "_e{11,31}:Event Title|Event Body - Something Happened",
            metric.metric_type_format()
        )
    }
}
// Benchmarks; nightly-only, gated behind the "unstable" feature.
#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use super::*;
    // Metric that renders to an empty string, isolating format_for_send overhead.
    struct NullMetric;
    impl Metric for NullMetric {
        fn metric_type_format(&self) -> String {
            String::new()
        }
    }
    #[bench]
    fn bench_format_for_send(b: &mut Bencher) {
        let metric = NullMetric;
        b.iter(|| {
            format_for_send(&metric, "foo", &["bar", "baz"]);
        })
    }
    #[bench]
    fn bench_set_metric(b: &mut Bencher) {
        let metric = SetMetric {
            stat: "blahblahblah-blahblahblah",
            val: "valuel"
        };
        b.iter(|| {
            metric.metric_type_format()
        })
    }
    #[bench]
    fn bench_set_counter(b: &mut Bencher) {
        let metric = CountMetric::Incr("foo");
        b.iter(|| {
            metric.metric_type_format()
        })
    }
}
Fix pre-allocated string length for CountMetric::Decr
The counter decrement metric actually needs 5 characters to store the
constant part of the string to account for the "-" sign on "-1". This
wasn't breaking anything, but the String was being forced to reallocate
each time.
Signed-off-by: Nick Stevens <75ef9faee755c70589550b513ad881e5a603182c@bitcurry.com>
use chrono::{DateTime, Utc};
/// Formats a metric for sending over the wire.
///
/// Produces `[namespace.]metric[|#tag1,tag2,...]` as raw bytes. The namespace
/// prefix is skipped when the metric opts out via `Metric::uses_namespace`
/// (service checks and events do) or when `in_namespace` is empty.
pub fn format_for_send<M, I, S>(in_metric: &M, in_namespace: &str, tags: I) -> Vec<u8>
    where M: Metric,
          I: IntoIterator<Item=S>,
          S: AsRef<str>,
{
    let metric = in_metric.metric_type_format();
    let namespace = if in_metric.uses_namespace() {
        in_namespace
    } else {
        ""
    };
    // Preallocate for the common no-tag case (also counting the '.' joiner);
    // tags grow the buffer as needed.
    let mut buf = Vec::with_capacity(metric.len() + namespace.len() + 1);
    if !namespace.is_empty() {
        buf.extend_from_slice(namespace.as_bytes());
        buf.extend_from_slice(b".");
    }
    buf.extend_from_slice(metric.as_bytes());
    // The first tag is preceded by the "|#" marker and later ones by commas,
    // so no look-ahead is needed (the old loop peeked via is_some()/unwrap()).
    for (i, tag) in tags.into_iter().enumerate() {
        buf.extend_from_slice(if i == 0 { b"|#" } else { b"," });
        buf.extend_from_slice(tag.as_ref().as_bytes());
    }
    buf
}
/// A metric that can be rendered into the DogStatsD wire format.
pub trait Metric {
    /// Renders the metric body, e.g. `my_count:1|c`.
    fn metric_type_format(&self) -> String;
    /// Whether the global namespace prefix applies; service checks and events
    /// override this to return `false`.
    fn uses_namespace(&self) -> bool {
        true
    }
}
/// A counter increment or decrement for the named stat.
pub enum CountMetric<'a> {
    Incr(&'a str),
    Decr(&'a str),
}
impl<'a> Metric for CountMetric<'a> {
    // my_count:1|c
    // my_count:-1|c
    fn metric_type_format(&self) -> String {
        // Pick the suffix once, then build the string in a single pass.
        let (stat, suffix) = match *self {
            CountMetric::Incr(stat) => (stat, ":1|c"),
            CountMetric::Decr(stat) => (stat, ":-1|c"),
        };
        let mut buf = String::with_capacity(stat.len() + suffix.len());
        buf.push_str(stat);
        buf.push_str(suffix);
        buf
    }
}
/// Elapsed-time metric computed from two `chrono` instants.
pub struct TimeMetric<'a> {
    start_time: &'a DateTime<Utc>,
    end_time: &'a DateTime<Utc>,
    stat: &'a str,
}
impl<'a> Metric for TimeMetric<'a> {
    // my_stat:500|ms
    fn metric_type_format(&self) -> String {
        // Millisecond delta; negative if end_time precedes start_time.
        let elapsed = self.end_time.signed_duration_since(*self.start_time);
        format!("{}:{}|ms", self.stat, elapsed.num_milliseconds())
    }
}
impl<'a> TimeMetric<'a> {
    /// Creates a timer metric for `stat` spanning the two instants.
    pub fn new(stat: &'a str, start_time: &'a DateTime<Utc>, end_time: &'a DateTime<Utc>) -> Self {
        TimeMetric { start_time, end_time, stat }
    }
}
/// Timing metric with a caller-supplied millisecond value.
pub struct TimingMetric<'a> {
    ms: i64,
    stat: &'a str,
}
impl<'a> Metric for TimingMetric<'a> {
    // my_stat:500|ms
    fn metric_type_format(&self) -> String {
        format!("{}:{}|ms", self.stat, self.ms)
    }
}
impl<'a> TimingMetric<'a> {
    /// Creates a timing metric for `stat` of `ms` milliseconds.
    pub fn new(stat: &'a str, ms: i64) -> Self {
        TimingMetric { ms, stat }
    }
}
/// Gauge metric: renders `stat:val|g`.
pub struct GaugeMetric<'a> {
    stat: &'a str,
    val: &'a str,
}
impl<'a> Metric for GaugeMetric<'a> {
    // my_gauge:1000|g
    fn metric_type_format(&self) -> String {
        format!("{}:{}|g", self.stat, self.val)
    }
}
impl<'a> GaugeMetric<'a> {
    /// Creates a gauge metric for `stat` with value `val`.
    pub fn new(stat: &'a str, val: &'a str) -> Self {
        GaugeMetric { stat, val }
    }
}
/// Histogram metric: renders `stat:val|h`.
pub struct HistogramMetric<'a> {
    stat: &'a str,
    val: &'a str,
}
impl<'a> Metric for HistogramMetric<'a> {
    // my_histogram:1000|h
    fn metric_type_format(&self) -> String {
        format!("{}:{}|h", self.stat, self.val)
    }
}
impl<'a> HistogramMetric<'a> {
    /// Creates a histogram metric for `stat` with value `val`.
    pub fn new(stat: &'a str, val: &'a str) -> Self {
        HistogramMetric { stat, val }
    }
}
/// Distribution metric: renders `stat:val|d`.
pub struct DistributionMetric<'a> {
    stat: &'a str,
    val: &'a str,
}
impl<'a> Metric for DistributionMetric<'a> {
    // my_distribution:1000|d
    fn metric_type_format(&self) -> String {
        format!("{}:{}|d", self.stat, self.val)
    }
}
impl<'a> DistributionMetric<'a> {
    /// Creates a distribution metric for `stat` with value `val`.
    pub fn new(stat: &'a str, val: &'a str) -> Self {
        DistributionMetric { stat, val }
    }
}
/// Set metric: renders `stat:val|s`.
pub struct SetMetric<'a> {
    stat: &'a str,
    val: &'a str,
}
impl<'a> Metric for SetMetric<'a> {
    // my_set:45|s
    fn metric_type_format(&self) -> String {
        format!("{}:{}|s", self.stat, self.val)
    }
}
impl<'a> SetMetric<'a> {
    /// Creates a set metric for `stat` with member `val`.
    pub fn new(stat: &'a str, val: &'a str) -> Self {
        SetMetric { stat, val }
    }
}
/// Represents the different states a service can be in
#[derive(Clone, Copy, Debug)]
pub enum ServiceStatus {
    /// OK State
    OK,
    /// Warning State
    Warning,
    /// Critical State
    Critical,
    /// Unknown State
    Unknown,
}
impl ServiceStatus {
    /// Wire-format code for the status: OK=0, Warning=1, Critical=2, Unknown=3.
    fn to_int(&self) -> i32 {
        // Variants are declared in wire order, so the C-like discriminant is
        // exactly the protocol code.
        *self as i32
    }
}
/// Struct for adding optional pieces to a service check
// Default is derivable: every field defaults to None, which is exactly what
// the old hand-written impl produced.
#[derive(Clone, Copy, Debug, Default)]
pub struct ServiceCheckOptions {
    /// An optional timestamp to include with the check
    pub timestamp: Option<i32>,
    /// An optional hostname to include with the check
    pub hostname: Option<&'static str>,
    /// An optional message to include with the check
    pub message: Option<&'static str>,
}
impl ServiceCheckOptions {
    /// Number of bytes the optional fields contribute to the rendered service
    /// check: each present field costs its text plus a 3-byte `|x:` separator.
    fn len(&self) -> usize {
        let mut length = 0;
        length += self.timestamp.map_or(0, |ts| ts.to_string().len() + 3);
        length += self.hostname.map_or(0, |host| host.len() + 3);
        length += self.message.map_or(0, |msg| msg.len() + 3);
        length
    }
}
/// A service check: a status for `stat` plus optional timestamp/hostname/message.
pub struct ServiceCheck<'a> {
    stat: &'a str,
    val: ServiceStatus,
    options: ServiceCheckOptions,
}
impl<'a> Metric for ServiceCheck<'a> {
    /// Service checks are never prefixed with the global namespace.
    fn uses_namespace(&self) -> bool {
        false
    }
    // _sc|my_service.can_connect|1
    fn metric_type_format(&self) -> String {
        // 6 covers "_sc|", the status separator "|", and the status digit.
        let mut buf = String::with_capacity(6 + self.stat.len() + self.options.len());
        buf.push_str("_sc|");
        buf.push_str(self.stat);
        buf.push_str("|");
        buf.push_str(&self.val.to_int().to_string());
        // Optional fields follow in wire order: d: (timestamp), h: (hostname),
        // m: (message). `if let` replaces the is_some()/unwrap() pattern.
        if let Some(timestamp) = self.options.timestamp {
            buf.push_str("|d:");
            buf.push_str(&timestamp.to_string());
        }
        if let Some(hostname) = self.options.hostname {
            buf.push_str("|h:");
            buf.push_str(hostname);
        }
        if let Some(message) = self.options.message {
            buf.push_str("|m:");
            buf.push_str(message);
        }
        buf
    }
}
impl<'a> ServiceCheck<'a> {
    /// Creates a service check for `stat` with the given status and options.
    pub fn new(stat: &'a str, val: ServiceStatus, options: ServiceCheckOptions) -> Self {
        ServiceCheck {
            stat: stat,
            val: val,
            options: options,
        }
    }
}
/// An event with a title and body text.
pub struct Event<'a> {
    title: &'a str,
    text: &'a str,
}
impl<'a> Metric for Event<'a> {
    /// Events are never prefixed with the global namespace.
    fn uses_namespace(&self) -> bool {
        false
    }
    // _e{<title length>,<text length>}:<title>|<text>
    fn metric_type_format(&self) -> String {
        format!(
            "_e{{{},{}}}:{}|{}",
            self.title.len(),
            self.text.len(),
            self.title,
            self.text
        )
    }
}
impl<'a> Event<'a> {
    /// Creates an event with the given title and body text.
    pub fn new(title: &'a str, text: &'a str) -> Self {
        Event { title, text }
    }
}
// Unit tests covering the rendered wire format of every metric type.
#[cfg(test)]
mod tests {
    use chrono::{TimeZone, Utc};
    use super::*;
    #[test]
    fn test_format_for_send_no_tags() {
        assert_eq!(
            &b"namespace.foo:1|c"[..],
            &format_for_send(&CountMetric::Incr("foo"), "namespace", &[] as &[String])[..]
        )
    }
    #[test]
    fn test_format_for_send_no_namespace() {
        assert_eq!(
            &b"foo:1|c|#tag:1,tag:2"[..],
            &format_for_send(&CountMetric::Incr("foo"), "", &["tag:1", "tag:2"])[..]
        )
    }
    #[test]
    fn test_format_for_send_everything() {
        assert_eq!(
            &b"namespace.foo:1|c|#tag:1,tag:2"[..],
            &format_for_send(&CountMetric::Incr("foo"), "namespace", &["tag:1", "tag:2"])[..]
        )
    }
    // Events opt out of namespacing via uses_namespace(), so "namespace" is dropped.
    #[test]
    fn test_format_for_send_everything_omit_namespace() {
        assert_eq!(
            &b"_e{5,4}:title|text|#tag:1,tag:2"[..],
            &format_for_send(&Event::new("title".into(), "text".into()), "namespace", &["tag:1", "tag:2"])[..]
        )
    }
    #[test]
    fn test_count_incr_metric() {
        let metric = CountMetric::Incr("incr".into());
        assert_eq!("incr:1|c", metric.metric_type_format())
    }
    #[test]
    fn test_count_decr_metric() {
        let metric = CountMetric::Decr("decr".into());
        assert_eq!("decr:-1|c", metric.metric_type_format())
    }
    #[test]
    fn test_time_metric() {
        let start_time = Utc.ymd(2016, 4, 24).and_hms_milli(0, 0, 0, 0);
        let end_time = Utc.ymd(2016, 4, 24).and_hms_milli(0, 0, 0, 900);
        let metric = TimeMetric::new("time".into(), &start_time, &end_time);
        assert_eq!("time:900|ms", metric.metric_type_format())
    }
    #[test]
    fn test_timing_metric() {
        let metric = TimingMetric::new("timing".into(), 720);
        assert_eq!("timing:720|ms", metric.metric_type_format())
    }
    #[test]
    fn test_gauge_metric() {
        let metric = GaugeMetric::new("gauge".into(), "12345".into());
        assert_eq!("gauge:12345|g", metric.metric_type_format())
    }
    #[test]
    fn test_histogram_metric() {
        let metric = HistogramMetric::new("histogram".into(), "67890".into());
        assert_eq!("histogram:67890|h", metric.metric_type_format())
    }
    #[test]
    fn test_distribution_metric() {
        let metric = DistributionMetric::new("distribution".into(), "67890".into());
        assert_eq!("distribution:67890|d", metric.metric_type_format())
    }
    #[test]
    fn test_set_metric() {
        let metric = SetMetric::new("set".into(), "13579".into());
        assert_eq!("set:13579|s", metric.metric_type_format())
    }
    #[test]
    fn test_service_check() {
        let metric = ServiceCheck::new("redis.can_connect".into(), ServiceStatus::Warning, ServiceCheckOptions::default());
        assert_eq!("_sc|redis.can_connect|1", metric.metric_type_format())
    }
    #[test]
    fn test_service_check_with_timestamp() {
        let options = ServiceCheckOptions {
            timestamp: Some(1234567890),
            ..Default::default()
        };
        let metric = ServiceCheck::new("redis.can_connect".into(), ServiceStatus::Warning, options);
        assert_eq!("_sc|redis.can_connect|1|d:1234567890", metric.metric_type_format())
    }
    #[test]
    fn test_service_check_with_hostname() {
        let options = ServiceCheckOptions {
            hostname: Some("my_server.localhost"),
            ..Default::default()
        };
        let metric = ServiceCheck::new("redis.can_connect".into(), ServiceStatus::Warning, options);
        assert_eq!("_sc|redis.can_connect|1|h:my_server.localhost", metric.metric_type_format())
    }
    #[test]
    fn test_service_check_with_message() {
        let options = ServiceCheckOptions {
            message: Some("Service is possibly down"),
            ..Default::default()
        };
        let metric = ServiceCheck::new("redis.can_connect".into(), ServiceStatus::Warning, options);
        assert_eq!("_sc|redis.can_connect|1|m:Service is possibly down", metric.metric_type_format())
    }
    #[test]
    fn test_service_check_with_all() {
        let options = ServiceCheckOptions {
            timestamp: Some(1234567890),
            hostname: Some("my_server.localhost"),
            message: Some("Service is possibly down")
        };
        let metric = ServiceCheck::new("redis.can_connect".into(), ServiceStatus::Warning, options);
        assert_eq!(
            "_sc|redis.can_connect|1|d:1234567890|h:my_server.localhost|m:Service is possibly down",
            metric.metric_type_format()
        )
    }
    #[test]
    fn test_event() {
        let metric = Event::new("Event Title".into(), "Event Body - Something Happened".into());
        assert_eq!(
            "_e{11,31}:Event Title|Event Body - Something Happened",
            metric.metric_type_format()
        )
    }
}
// Benchmarks; nightly-only, gated behind the "unstable" feature.
#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use super::*;
    // Metric that renders to an empty string, isolating format_for_send overhead.
    struct NullMetric;
    impl Metric for NullMetric {
        fn metric_type_format(&self) -> String {
            String::new()
        }
    }
    #[bench]
    fn bench_format_for_send(b: &mut Bencher) {
        let metric = NullMetric;
        b.iter(|| {
            format_for_send(&metric, "foo", &["bar", "baz"]);
        })
    }
    #[bench]
    fn bench_set_metric(b: &mut Bencher) {
        let metric = SetMetric {
            stat: "blahblahblah-blahblahblah",
            val: "valuel"
        };
        b.iter(|| {
            metric.metric_type_format()
        })
    }
    #[bench]
    fn bench_set_counter(b: &mut Bencher) {
        let metric = CountMetric::Incr("foo");
        b.iter(|| {
            metric.metric_type_format()
        })
    }
}
|
// Integration with Musashi
extern crate libc;
// Register enum copied from Musashi's m68k_register_t enum
/// Musashi register identifiers; copied from Musashi's `m68k_register_t`.
/// `#[repr(C)]` so the discriminants line up with the C enum when passed to
/// `m68k_get_reg`/`m68k_set_reg`.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq)]
#[allow(dead_code)]
pub enum Register {
    /* Real registers */
    D0, /* Data registers */
    D1,
    D2,
    D3,
    D4,
    D5,
    D6,
    D7,
    A0, /* Address registers */
    A1,
    A2,
    A3,
    A4,
    A5,
    A6,
    A7,
    PC, /* Program Counter */
    SR, /* Status Register */
    SP, /* The current Stack Pointer (located in A7) */
    USP, /* User Stack Pointer */
    ISP, /* Interrupt Stack Pointer */
    MSP, /* Master Stack Pointer */
    SFC, /* Source Function Code */
    DFC, /* Destination Function Code */
    VBR, /* Vector Base Register */
    CACR, /* Cache Control Register */
    CAAR, /* Cache Address Register */
    /* Assumed registers */
    /* These are cheat registers which emulate the 1-longword prefetch
     * present in the 68000 and 68010.
     */
    PrefAddr, /* Last prefetch address */
    PrefData, /* Last prefetch data */
    /* Convenience registers */
    PPC, /* Previous value in the program counter */
    IR, /* Instruction register */
    CpuType /* Type of CPU being run */
}
/// CPU model selector passed to `m68k_set_cpu_type`; `#[repr(C)]` so the
/// discriminants match Musashi's corresponding C enum.
#[repr(C)]
#[derive(Copy, Clone)]
#[allow(dead_code)]
enum CpuType
{
    Invalid,
    M68000,
    M68010,
    M68EC020,
    M68020,
    M68030, /* Supported by disassembler ONLY */
    M68040 /* Supported by disassembler ONLY */
}
// FFI bindings into the statically linked Musashi 68k emulator core.
#[link(name = "musashi", kind = "static")]
extern {
    fn m68k_init();
    fn m68k_set_cpu_type(cputype: CpuType);
    fn m68k_pulse_reset();
    // Runs the core for up to `num_cycles`; returns the cycles actually used.
    fn m68k_execute(num_cycles: i32) -> i32;
    fn m68k_get_reg(context: *mut libc::c_void, regnum: Register) -> u32;
    fn m68k_set_reg(regnum: Register, value: u32);
}
use ram::{Operation, AddressBus, AddressSpace, SUPERVISOR_PROGRAM, SUPERVISOR_DATA, USER_PROGRAM, USER_DATA, ADDRBUS_MASK};
// Backing RAM for the emulated 16 MB address space (plus a small pad),
// pre-filled with 0xaa — presumably so reads of never-written memory stand out.
static mut musashi_memory: [u8; 16*1024*1024+16] = [0xaa; 16*1024*1024+16];
// as statics are not allowed to have destructors, allocate a
// big enough array to hold the small number of operations
// expected from executing a very limited number of opcodes
static mut musashi_ops: [Operation; 128] = [Operation::None; 128];
// Number of valid entries in `musashi_ops`.
static mut musashi_opcount: usize = 0;
// Address space of the current access, derived from the function code
// delivered via `m68k_set_fc`.
static mut musashi_address_space: AddressSpace = SUPERVISOR_PROGRAM;
// Appends a memory operation to the static op log. Operations beyond the
// fixed 128-entry capacity are silently dropped.
unsafe fn register_op(op: Operation) {
    if musashi_opcount < musashi_ops.len() {
        // println!("mem_op {:?}", op);
        musashi_ops[musashi_opcount] = op;
        musashi_opcount += 1;
    }
}
// callbacks from Musashi
/// Musashi byte-read callback: returns the byte at `address` (masked to the
/// emulated address-bus width) and logs the access as a `ReadByte` op.
#[no_mangle]
pub extern fn m68k_read_memory_8(address: u32) -> u32 {
    unsafe {
        // Mask first so both the memory index and the recorded op use the
        // bus-limited address.
        let address = address & ADDRBUS_MASK;
        let addr = address as usize;
        let value = musashi_memory[addr];
        let op = Operation::ReadByte(musashi_address_space, address, value);
        register_op(op);
        value as u32
    }
}
/// Musashi word-read callback: big-endian 16-bit read plus a `ReadWord` op log.
#[no_mangle]
pub extern fn m68k_read_memory_16(address: u32) -> u32 {
    unsafe {
        // Mask first so both the memory index and the recorded op use the
        // bus-limited address.
        let address = address & ADDRBUS_MASK;
        let addr = address as usize;
        // Big-endian assembly: high byte at the lower address.
        let value = (musashi_memory[addr+0] as u16) << 8
                    |(musashi_memory[addr+1] as u16) << 0;
        let op = Operation::ReadWord(musashi_address_space, address, value);
        register_op(op);
        value as u32
    }
}
// Musashi callback: read a big-endian 32-bit long, logging the access.
#[no_mangle]
pub extern fn m68k_read_memory_32(address: u32) -> u32 {
    unsafe {
        // Mask to the 24-bit bus first and log the *masked* address,
        // matching the 8- and 16-bit read handlers. (Previously the raw,
        // unmasked address was recorded in the ReadLong op, which made the
        // logged trace inconsistent across access widths.)
        let address = address & ADDRBUS_MASK;
        let addr = address as usize;
        let value = (musashi_memory[addr] as u32) << 24
                  | (musashi_memory[addr+1] as u32) << 16
                  | (musashi_memory[addr+2] as u32) << 8
                  | (musashi_memory[addr+3] as u32);
        register_op(Operation::ReadLong(musashi_address_space, address, value));
        value
    }
}
// Musashi callback: write one byte. The op is logged with the raw
// (unmasked) address, as before; the store itself is bus-masked.
#[no_mangle]
pub extern fn m68k_write_memory_8(address: u32, value: u32) {
    unsafe {
        register_op(Operation::WriteByte(musashi_address_space, address, value));
        let base = (address & ADDRBUS_MASK) as usize;
        musashi_memory[base] = value as u8;
    }
}
// Musashi callback: write a big-endian 16-bit word, logging the access.
#[no_mangle]
pub extern fn m68k_write_memory_16(address: u32, value: u32) {
    unsafe {
        register_op(Operation::WriteWord(musashi_address_space, address, value));
        let base = (address & ADDRBUS_MASK) as usize;
        musashi_memory[base] = (value >> 8) as u8;
        musashi_memory[base + 1] = value as u8;
    }
}
// Musashi callback: write a big-endian 32-bit long, logging the access.
#[no_mangle]
pub extern fn m68k_write_memory_32(address: u32, value: u32) {
    unsafe {
        register_op(Operation::WriteLong(musashi_address_space, address, value));
        let base = (address & ADDRBUS_MASK) as usize;
        musashi_memory[base] = (value >> 24) as u8;
        musashi_memory[base + 1] = (value >> 16) as u8;
        musashi_memory[base + 2] = (value >> 8) as u8;
        musashi_memory[base + 3] = value as u8;
    }
}
// Musashi callback: a RESET instruction was executed. Not expected in
// these single-instruction differential tests, hence the panic.
#[no_mangle]
pub extern fn cpu_pulse_reset() {panic!("pr")}
// Musashi callback on long branches; intentionally a no-op here.
#[no_mangle]
pub extern fn cpu_long_branch() {}
// Musashi callback: the CPU changed its function code (FC) lines.
// Translates the 3-bit FC value into the AddressSpace tag used when
// logging subsequent memory operations.
#[no_mangle]
pub extern fn m68k_set_fc(fc: u32) {
    unsafe {
        let space = match fc {
            1 => USER_DATA,
            2 => USER_PROGRAM,
            5 => SUPERVISOR_DATA,
            6 => SUPERVISOR_PROGRAM,
            _ => panic!("unknown fc: {}", fc),
        };
        musashi_address_space = space;
    }
}
// Musashi callback: interrupt acknowledge. Interrupts are never raised
// in these tests, hence the panic.
#[allow(unused_variables)]
#[no_mangle]
pub extern fn cpu_irq_ack(level: i32) -> i32 {panic!("ia")}
// Musashi callback invoked before each instruction; intentionally a no-op.
#[no_mangle]
pub extern fn cpu_instr_callback() {}
use std::ptr;
/// Smoke test for the FFI plumbing: initialize Musashi, set D0, and print
/// it back. The unused `mutex` binding holds MUSASHI_LOCK for the whole
/// call, serializing access to Musashi's global state.
#[allow(unused_variables)]
pub fn experimental_communication() {
    let mutex = MUSASHI_LOCK.lock().unwrap();
    unsafe {
        m68k_init();
        m68k_set_cpu_type(CpuType::M68000);
        m68k_set_reg(Register::D0, 123);
        println!("D0: {}", m68k_get_reg(ptr::null_mut(), Register::D0));
    }
}
/// Write `value` into `reg` on a freshly initialized Musashi and read it
/// straight back — used to verify register set/get round-trips over FFI.
/// The unused `mutex` binding holds MUSASHI_LOCK for the whole call.
#[allow(unused_variables)]
pub fn roundtrip_register(reg: Register, value: u32) -> u32 {
    let mutex = MUSASHI_LOCK.lock().unwrap();
    unsafe {
        m68k_init();
        m68k_set_cpu_type(CpuType::M68000);
        m68k_set_reg(reg, value);
        m68k_get_reg(ptr::null_mut(), reg)
    }
}
use cpu::{Core, Cycles};
// D0-D7 followed by A0-A7: index i corresponds to Core::dar[i].
static REGS:[Register; 16] = [Register::D0, Register::D1, Register::D2, Register::D3, Register::D4, Register::D5, Register::D6, Register::D7, Register::A0, Register::A1, Register::A2, Register::A3, Register::A4, Register::A5, Register::A6, Register::A7];
/// Snapshot the valid prefix of the global operation log as a Vec.
fn get_ops() -> Vec<Operation> {
    unsafe {
        // Operation is Copy, so a slice copy is equivalent to the
        // element-by-element push.
        musashi_ops[..musashi_opcount].to_vec()
    }
}
// since we know exactly where writes have occurred, undoing is much
// less work than simply rewriting all 16M
fn undo_musashi_writes() {
    // Replay every logged write with the 0xaa sentinel fill. These calls
    // themselves append new Write ops to the log (we iterate a snapshot),
    // but initialize_musashi resets musashi_opcount afterwards.
    for op in get_ops()
    {
        match op {
            Operation::WriteByte(_, addr, _) => m68k_write_memory_8(addr, 0xaa),
            Operation::WriteWord(_, addr, _) => m68k_write_memory_16(addr, 0xaaaa),
            Operation::WriteLong(_, addr, _) => m68k_write_memory_32(addr, 0xaaaaaaaa),
            _ => (),
        }
    }
}
/// Prepare Musashi to mirror the state of the given r68k core: scrub the
/// previous run's memory writes, pulse reset with the core's SSP/PC in the
/// reset vector, then copy registers, SR, USP and the edge kilobytes of RAM.
///
/// Callers must hold MUSASHI_LOCK (see reset_and_execute1) — Musashi keeps
/// all state in globals.
pub fn initialize_musashi(core: &mut Core) {
    // println!("initialize_musashi {:?}", thread::current());
    unsafe {
        undo_musashi_writes();
        m68k_init();
        m68k_set_cpu_type(CpuType::M68000);
        // 68000 reset vector: initial SSP at address 0, initial PC at 4.
        m68k_write_memory_32(0, core.ssp());
        m68k_write_memory_32(4, core.pc);
        m68k_pulse_reset();
        // Resetting opcount, because m68k_pulse_reset causes irrelevant
        // reads from 0x00000000 to set PC/SP, a jump to PC and
        // resetting of state. But we don't want to test those ops.
        musashi_opcount = 0;
        //m68k_set_reg(Register::PC, core.pc);
        m68k_set_reg(Register::USP, core.usp());
        // if SR clears S_FLAG then SSP <- A7, A7 <- USP
        m68k_set_reg(Register::SR, core.status_register() as u32);
        // Fix: `&reg` had been mangled to the `®` character by an
        // HTML-entity round-trip, which does not compile.
        for (i, &reg) in REGS.iter().enumerate() {
            // Skip A7: it was already established via the reset vector / USP.
            if i != 15 {
                m68k_set_reg(reg, core.dar[i]);
            }
        }
        // just reset first and last KB of memory, as it takes too long to
        // reset all 16MB
        let last_kb = (1 << 24) - 1024;
        for i in 0..1024usize {
            musashi_memory[i] = core.mem.read_byte(SUPERVISOR_PROGRAM, i as u32) as u8;
            musashi_memory[last_kb + i] = core.mem.read_byte(SUPERVISOR_PROGRAM, (last_kb + i) as u32) as u8;
        }
    }
}
/// Execute exactly one instruction on Musashi and copy the resulting CPU
/// state (registers, PC, SR, stack pointers) back into the given r68k core.
/// Returns the cycle count Musashi reports for that instruction.
///
/// Callers must hold MUSASHI_LOCK (see reset_and_execute1).
pub fn execute1(core: &mut Core) -> Cycles {
    // println!("execute1 mushashi {:?}", thread::current());
    unsafe {
        let cycle_count = m68k_execute(1);
        // Fix: `&reg` had been mangled to the `®` character by an
        // HTML-entity round-trip, which does not compile.
        for (i, &reg) in REGS.iter().enumerate() {
            core.dar[i] = m68k_get_reg(ptr::null_mut(), reg);
        }
        core.pc = m68k_get_reg(ptr::null_mut(), Register::PC);
        core.sr_to_flags(m68k_get_reg(ptr::null_mut(), Register::SR) as u16);
        // A7 aliases either the supervisor or the user stack pointer
        // depending on the S flag; mirror the inactive one separately.
        if core.s_flag > 0 {
            core.inactive_usp = m68k_get_reg(ptr::null_mut(), Register::USP);
            core.dar[15] = m68k_get_reg(ptr::null_mut(), Register::ISP);
        } else {
            core.dar[15] = m68k_get_reg(ptr::null_mut(), Register::USP);
            core.inactive_ssp = m68k_get_reg(ptr::null_mut(), Register::ISP);
        }
        Cycles(cycle_count)
    }
}
/// Run a single instruction on Musashi from the given core's state.
/// The unused `mutex` binding holds MUSASHI_LOCK across the whole
/// initialize+execute sequence so other threads cannot interleave.
#[allow(unused_variables)]
pub fn reset_and_execute1(core: &mut Core) -> Cycles {
    let mutex = MUSASHI_LOCK.lock().unwrap();
    initialize_musashi(core);
    execute1(core)
}
// Talking to Musashi isn't thread-safe, and the tests are running
// threaded, which cause intermittent test failures unless serializing
// access using something like a mutex. Musashi functions are called in
// global/static context, and statics are not allowed to have
// destructors
use std::sync::{Arc, Mutex};
// using lazy_static! to work-around "statics are not allowed to have destructors [E0493]""
lazy_static! {
    // Serializes all access to Musashi's global state across test threads.
    static ref MUSASHI_LOCK: Arc<Mutex<i32>> = Arc::new(Mutex::new(0));
    // Serializes the quickcheck hammering, which also relies on the
    // opcode_under_test static.
    static ref QUICKCHECK_LOCK: Arc<Mutex<i32>> = Arc::new(Mutex::new(0));
}
#[cfg(test)]
mod tests {
use super::*;
use ram::SUPERVISOR_PROGRAM;
use super::MUSASHI_LOCK;
use super::QUICKCHECK_LOCK;
use ram::{Operation, AddressBus};
use cpu::Core;
extern crate quickcheck;
use self::quickcheck::*;
/// Newtype around an arbitrary 32-bit value used to seed CPU registers.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Bitpattern(u32);
impl Arbitrary for Bitpattern {
    /// Generate an arbitrary 32-bit pattern.
    /// (Earlier nibble-mask and byte-composition experiments were left here
    /// as commented-out code; removed as dead code.)
    fn arbitrary<G: Gen>(g: &mut G) -> Bitpattern {
        Bitpattern(Arbitrary::arbitrary(g))
    }
    fn shrink(&self) -> Box<Iterator<Item=Self>> {
        match *self {
            Bitpattern(x) => {
                // TODO: should shrink Bitpattern by clearing bits, not
                // setting new ones.
                let tagged = x.shrink().map(Bitpattern);
                Box::new(tagged)
            }
        }
    }
}
impl Arbitrary for Register {
    /// Pick one register at random. A7/SP/PC are deliberately left out of
    /// the pool (hammering them derails execution); SR is included.
    /// Also fixes the `®s` mojibake (HTML-entity mangling of `&regs`).
    fn arbitrary<G: Gen>(g: &mut G) -> Register {
        let regs = [Register::D0, Register::D1, Register::D2, Register::D3, Register::D4, Register::D5, Register::D6, Register::D7, Register::A0, Register::A1, Register::A2, Register::A3, Register::A4, Register::A5, Register::A6,
            Register::SR, // Register::A7, Register::SP, Register::PC
        ];
        match g.choose(&regs) {
            Some(&picked) => picked,
            None => unreachable!(),
        }
    }
}
extern crate rand;
use itertools::{Itertools, assert_equal};
use cpu::ops::handlers::*;
use super::get_ops;
// struct OpSeq {
// mask: u32,
// matching: u32,
// current_op: u32,
// }
// impl OpSeq {
// fn new(mask: u32, matching: u32) -> OpSeq {
// OpSeq { mask: mask, matching: matching, current_op: 0 }
// }
// }
// impl Iterator for OpSeq {
// type Item = u32;
// fn next(&mut self) -> Option<u32> {
// if self.current_op == 0x10000 {
// None
// } else {
// while (self.current_op & self.mask) != self.matching && self.current_op < 0x10000 {
// self.current_op += 1;
// }
// if self.current_op == 0x10000 {
// return None;
// }
// let res = Some(self.current_op);
// self.current_op += 1;
// res
// }
// }
// }
/// All 16-bit opcodes whose bits under `mask` equal `matching`.
/// Starting the scan at `matching` loses nothing: (op & mask) == matching
/// implies op has every bit of `matching` set, hence op >= matching.
fn opcodes(mask: u32, matching: u32) -> Vec<u16> {
    (matching..0x10000u32)
        .filter(|op| (op & mask) == matching)
        .map(|op| op as u16)
        .collect()
}
/// Iterator form of `opcodes`: yields all values in [$matching, 0x10000)
/// whose bits under $mask equal $matching.
macro_rules! opcodes {
    ($mask:expr , $matching:expr) => {($matching..0x10000).filter(|opcode| (opcode & $mask) == $matching)}
}
/// Sanity-check the opcodes! iterator against the ABCD register-register
/// encoding: 8 x-registers times 8 y-registers = 64 distinct opcodes.
#[test]
fn opcodes_from_mask_and_matching(){
    let opseq: Vec<u32> = opcodes!(MASK_OUT_X_Y, OP_ABCD_8_RR).collect();
    assert_eq!(64, opseq.len());
    assert_eq!(64, opseq.iter().unique().count());
    // len == 64 guarantees min/max exist; the guards keep the original
    // defensive shape.
    if let Some(&min) = opseq.iter().min() {
        assert_eq!(0b1100000100000000, min);
    }
    if let Some(&max) = opseq.iter().max() {
        assert_eq!(0b1100111100000111, max);
    }
    for code in opseq.iter() {
        // Every generated opcode carries the full matching bit pattern.
        assert_eq!(OP_ABCD_8_RR, code & OP_ABCD_8_RR);
    }
}
// Opcode currently being hammered. A static because quickcheck properties
// are plain fn pointers and cannot capture state; guarded by QUICKCHECK_LOCK.
static mut opcode_under_test: u16 = 0;
// NOTE(review): (2<<24)-2 == 0x1FFFFFE is a 25-bit mask; (1<<24)-2 looks
// intended. Harmless in practice since the bus callbacks re-mask with
// ADDRBUS_MASK, but worth confirming.
fn hammer_cores_even_addresses(rs: Vec<(Register, Bitpattern)>) -> bool {
    let mem_mask = (2<<24)-2; // keep even
    hammer_cores_with(mem_mask, rs)
}
fn hammer_cores(rs: Vec<(Register, Bitpattern)>) -> bool {
    let mem_mask = (2<<24)-1; // allow odd
    hammer_cores_with(mem_mask, rs)
}
/// Core of the differential test: build identical Musashi and r68k cores
/// whose state is seeded from the quickcheck-generated (register, pattern)
/// pairs, execute the opcode_under_test once on each, and assert that the
/// resulting states, memory operations and cycle counts agree.
fn hammer_cores_with(mem_mask: u32, rs: Vec<(Register, Bitpattern)>) -> bool {
    let pc = 0x40;
    // The opcode under test, big-endian, placed at pc.
    let mem = unsafe {
        [((opcode_under_test >> 8) & 0xff) as u8, (opcode_under_test & 0xff) as u8]
    };
    let mut musashi = Core::new_mem(pc, &mem);
    const STACK_MASK:u32 = (1024-16); // keep even
    musashi.inactive_ssp = 0x128;
    musashi.inactive_usp = 0x256;
    // Defaults: data registers zeroed, address registers parked at 0x128.
    for r in 0..8 {
        musashi.dar[r] = 0;
        musashi.dar[8+r] = 0x128;
    }
    for r in rs {
        match r {
            (Register::D0, Bitpattern(bp)) => musashi.dar[0] = bp,
            (Register::D1, Bitpattern(bp)) => musashi.dar[1] = bp,
            (Register::D2, Bitpattern(bp)) => musashi.dar[2] = bp,
            (Register::D3, Bitpattern(bp)) => musashi.dar[3] = bp,
            (Register::D4, Bitpattern(bp)) => musashi.dar[4] = bp,
            (Register::D5, Bitpattern(bp)) => musashi.dar[5] = bp,
            (Register::D6, Bitpattern(bp)) => musashi.dar[6] = bp,
            (Register::D7, Bitpattern(bp)) => musashi.dar[7] = bp,
            // must ensure Addresses are within musashi memory space!
            (Register::A0, Bitpattern(bp)) => musashi.dar[0+8] = bp & mem_mask,
            (Register::A1, Bitpattern(bp)) => musashi.dar[1+8] = bp & mem_mask,
            (Register::A2, Bitpattern(bp)) => musashi.dar[2+8] = bp & mem_mask,
            (Register::A3, Bitpattern(bp)) => musashi.dar[3+8] = bp & mem_mask,
            (Register::A4, Bitpattern(bp)) => musashi.dar[4+8] = bp & mem_mask,
            (Register::A5, Bitpattern(bp)) => musashi.dar[5+8] = bp & mem_mask,
            (Register::A6, Bitpattern(bp)) => musashi.dar[6+8] = bp & mem_mask,
            // NOTE(review): `+` binds tighter than `&` in Rust, so this is
            // bp & (STACK_MASK + 8) == bp & 1016, i.e. 8-byte-aligned stack
            // addresses. If `(bp & STACK_MASK) + 8` was intended, fix the
            // parens here and on the USP arm — confirm intent.
            (Register::A7, Bitpattern(bp)) => musashi.dar[7+8] = bp & STACK_MASK + 8,
            (Register::USP, Bitpattern(bp)) => musashi.inactive_usp = bp & STACK_MASK + 8,
            (Register::SR, Bitpattern(bp)) => musashi.sr_to_flags(bp as u16),
            _ => {
                panic!("No idea how to set {:?}", r.0)
            },
        }
    }
    let mut r68k = musashi.clone(); // so very self-aware!
    let musashi_cycles = reset_and_execute1(&mut musashi);
    let r68k_cycles = r68k.execute1();
    let res = assert_cores_equal(&musashi, &r68k);
    assert_eq!(musashi_cycles, r68k_cycles);
    res
}
/// Variant of qc! for byte-sized operations: hammers with `hammer_cores`,
/// which allows odd addresses (byte accesses don't require alignment).
macro_rules! qc8 {
    ($opcode:ident, $fn_name:ident) => (qc!($opcode, MASK_OUT_X_Y, $fn_name, hammer_cores););
    ($opcode:ident, $opmask:ident, $fn_name:ident) => (qc!($opcode, $opmask, $fn_name, hammer_cores););
}
/// Generates one #[ignore]d test fn per (opcode, mask) pair. The test
/// iterates every concrete opcode matching the mask and runs 10 quickcheck
/// rounds of the given hammer function against it. Word/long variants use
/// `hammer_cores_even_addresses` to avoid address-error traps.
macro_rules! qc {
    ($opcode:ident, $fn_name:ident) => (qc!($opcode, MASK_OUT_X_Y, $fn_name, hammer_cores_even_addresses););
    ($opcode:ident, $opmask:ident, $fn_name:ident) => (qc!($opcode, $opmask, $fn_name, hammer_cores_even_addresses););
    ($opcode:ident, $opmask:ident, $fn_name:ident, $hammer:ident) => (
    #[test]
    #[ignore]
    #[allow(unused_variables)]
    fn $fn_name() {
        // Musashi isn't thread safe, and the construct with opcode_under_test
        // isn't either. :( Hold QUICKCHECK_LOCK for the whole run.
        let mutex = QUICKCHECK_LOCK.lock().unwrap();
        for opcode in opcodes($opmask, $opcode)
        {
            println!("Will hammer {:b}", opcode);
            unsafe {
                // this is because I don't know how to make
                // hammer_cores take the opcode as a parameter and
                // we cannot simply use a closure either; see
                // https://github.com/BurntSushi/quickcheck/issues/56
                opcode_under_test = opcode;
            }
            QuickCheck::new()
                .gen(StdGen::new(rand::thread_rng(), 256))
                .tests(10)
                .quickcheck($hammer as fn(Vec<(Register, Bitpattern)>) -> bool);
        }
    })
}
// One quickcheck-based differential test per instruction and addressing
// mode, comparing r68k against Musashi. All are #[ignore]d by default;
// run explicitly via the qc!/qc8! generated test names.
qc8!(OP_ABCD_8_RR, qc_abcd_rr);
qc8!(OP_ABCD_8_MM, qc_abcd_mm);
qc8!(OP_ADD_8_ER_DN, qc_add_8_er_dn);
qc8!(OP_ADD_8_ER_PI, qc_add_8_er_pi);
qc8!(OP_ADD_8_ER_PD, qc_add_8_er_pd);
qc8!(OP_ADD_8_ER_AI, qc_add_8_er_ai);
qc8!(OP_ADD_8_ER_DI, qc_add_8_er_di);
qc8!(OP_ADD_8_ER_IX, qc_add_8_er_ix);
qc8!(OP_ADD_8_ER_AW, MASK_OUT_X, qc_add_8_er_aw);
qc8!(OP_ADD_8_ER_AL, MASK_OUT_X, qc_add_8_er_al);
qc8!(OP_ADD_8_ER_PCDI, MASK_OUT_X, qc_add_8_er_pcdi);
qc8!(OP_ADD_8_ER_PCIX, MASK_OUT_X, qc_add_8_er_pcix);
qc8!(OP_ADD_8_ER_IMM, MASK_OUT_X, qc_add_8_er_imm);
qc8!(OP_ADD_8_RE_PI, qc_add_8_re_pi);
qc8!(OP_ADD_8_RE_PD, qc_add_8_re_pd);
qc8!(OP_ADD_8_RE_AI, qc_add_8_re_ai);
qc8!(OP_ADD_8_RE_DI, qc_add_8_re_di);
qc8!(OP_ADD_8_RE_IX, qc_add_8_re_ix);
qc8!(OP_ADD_8_RE_AW, MASK_OUT_X, qc_add_8_re_aw);
qc8!(OP_ADD_8_RE_AL, MASK_OUT_X, qc_add_8_re_al);
qc!(OP_ADD_16_ER_DN, qc_add_16_er_dn);
qc!(OP_ADD_16_ER_AN, qc_add_16_er_an);
qc!(OP_ADD_16_ER_PI, qc_add_16_er_pi);
qc!(OP_ADD_16_ER_PD, qc_add_16_er_pd);
qc!(OP_ADD_16_ER_AI, qc_add_16_er_ai);
qc!(OP_ADD_16_ER_DI, qc_add_16_er_di);
qc!(OP_ADD_16_ER_IX, qc_add_16_er_ix);
qc!(OP_ADD_16_ER_AW, MASK_OUT_X, qc_add_16_er_aw);
qc!(OP_ADD_16_ER_AL, MASK_OUT_X, qc_add_16_er_al);
qc!(OP_ADD_16_ER_PCDI, MASK_OUT_X, qc_add_16_er_pcdi);
qc!(OP_ADD_16_ER_PCIX, MASK_OUT_X, qc_add_16_er_pcix);
qc!(OP_ADD_16_ER_IMM, MASK_OUT_X, qc_add_16_er_imm);
qc!(OP_ADD_16_RE_PI, qc_add_16_re_pi);
qc!(OP_ADD_16_RE_PD, qc_add_16_re_pd);
qc!(OP_ADD_16_RE_AI, qc_add_16_re_ai);
qc!(OP_ADD_16_RE_DI, qc_add_16_re_di);
qc!(OP_ADD_16_RE_IX, qc_add_16_re_ix);
qc!(OP_ADD_16_RE_AW, MASK_OUT_X, qc_add_16_re_aw);
qc!(OP_ADD_16_RE_AL, MASK_OUT_X, qc_add_16_re_al);
qc!(OP_ADD_32_ER_DN, qc_add_32_er_dn);
qc!(OP_ADD_32_ER_AN, qc_add_32_er_an);
qc!(OP_ADD_32_ER_PI, qc_add_32_er_pi);
qc!(OP_ADD_32_ER_PD, qc_add_32_er_pd);
qc!(OP_ADD_32_ER_AI, qc_add_32_er_ai);
qc!(OP_ADD_32_ER_DI, qc_add_32_er_di);
qc!(OP_ADD_32_ER_IX, qc_add_32_er_ix);
qc!(OP_ADD_32_ER_AW, MASK_OUT_X, qc_add_32_er_aw);
qc!(OP_ADD_32_ER_AL, MASK_OUT_X, qc_add_32_er_al);
qc!(OP_ADD_32_ER_PCDI, MASK_OUT_X, qc_add_32_er_pcdi);
qc!(OP_ADD_32_ER_PCIX, MASK_OUT_X, qc_add_32_er_pcix);
qc!(OP_ADD_32_ER_IMM, MASK_OUT_X, qc_add_32_er_imm);
qc!(OP_ADD_32_RE_PI, qc_add_32_re_pi);
qc!(OP_ADD_32_RE_PD, qc_add_32_re_pd);
qc!(OP_ADD_32_RE_AI, qc_add_32_re_ai);
qc!(OP_ADD_32_RE_DI, qc_add_32_re_di);
qc!(OP_ADD_32_RE_IX, qc_add_32_re_ix);
qc!(OP_ADD_32_RE_AW, MASK_OUT_X, qc_add_32_re_aw);
qc!(OP_ADD_32_RE_AL, MASK_OUT_X, qc_add_32_re_al);
qc!(OP_ADDA_16_DN, qc_adda_16_dn);
qc!(OP_ADDA_16_AN, qc_adda_16_an);
qc!(OP_ADDA_16_PI, qc_adda_16_pi);
qc!(OP_ADDA_16_PD, qc_adda_16_pd);
qc!(OP_ADDA_16_AI, qc_adda_16_ai);
qc!(OP_ADDA_16_DI, qc_adda_16_di);
qc!(OP_ADDA_16_IX, qc_adda_16_ix);
qc!(OP_ADDA_16_AW, MASK_OUT_X, qc_adda_16_aw);
qc!(OP_ADDA_16_AL, MASK_OUT_X, qc_adda_16_al);
qc!(OP_ADDA_16_PCDI, MASK_OUT_X, qc_adda_16_pcdi);
qc!(OP_ADDA_16_PCIX, MASK_OUT_X, qc_adda_16_pcix);
qc!(OP_ADDA_16_IMM, MASK_OUT_X, qc_adda_16_imm);
qc!(OP_ADDA_32_DN, qc_adda_32_dn);
qc!(OP_ADDA_32_AN, qc_adda_32_an);
qc!(OP_ADDA_32_PI, qc_adda_32_pi);
qc!(OP_ADDA_32_PD, qc_adda_32_pd);
qc!(OP_ADDA_32_AI, qc_adda_32_ai);
qc!(OP_ADDA_32_DI, qc_adda_32_di);
qc!(OP_ADDA_32_IX, qc_adda_32_ix);
qc!(OP_ADDA_32_AW, MASK_OUT_X, qc_adda_32_aw);
qc!(OP_ADDA_32_AL, MASK_OUT_X, qc_adda_32_al);
qc!(OP_ADDA_32_PCDI, MASK_OUT_X, qc_adda_32_pcdi);
qc!(OP_ADDA_32_PCIX, MASK_OUT_X, qc_adda_32_pcix);
qc!(OP_ADDA_32_IMM, MASK_OUT_X, qc_adda_32_imm);
qc8!(OP_ADDI_8_DN, MASK_OUT_Y, qc_addi_8_dn);
qc8!(OP_ADDI_8_PI, MASK_OUT_Y, qc_addi_8_pi);
qc8!(OP_ADDI_8_PD, MASK_OUT_Y, qc_addi_8_pd);
qc8!(OP_ADDI_8_AI, MASK_OUT_Y, qc_addi_8_ai);
qc8!(OP_ADDI_8_DI, MASK_OUT_Y, qc_addi_8_di);
qc8!(OP_ADDI_8_IX, MASK_OUT_Y, qc_addi_8_ix);
qc8!(OP_ADDI_8_AW, MASK_EXACT, qc_addi_8_aw);
qc8!(OP_ADDI_8_AL, MASK_EXACT, qc_addi_8_al);
qc!(OP_ADDI_16_DN, MASK_OUT_Y, qc_addi_16_dn);
qc!(OP_ADDI_16_PI, MASK_OUT_Y, qc_addi_16_pi);
qc!(OP_ADDI_16_PD, MASK_OUT_Y, qc_addi_16_pd);
qc!(OP_ADDI_16_AI, MASK_OUT_Y, qc_addi_16_ai);
qc!(OP_ADDI_16_DI, MASK_OUT_Y, qc_addi_16_di);
qc!(OP_ADDI_16_IX, MASK_OUT_Y, qc_addi_16_ix);
qc!(OP_ADDI_16_AW, MASK_EXACT, qc_addi_16_aw);
qc!(OP_ADDI_16_AL, MASK_EXACT, qc_addi_16_al);
qc!(OP_ADDI_32_DN, MASK_OUT_Y, qc_addi_32_dn);
qc!(OP_ADDI_32_PI, MASK_OUT_Y, qc_addi_32_pi);
qc!(OP_ADDI_32_PD, MASK_OUT_Y, qc_addi_32_pd);
qc!(OP_ADDI_32_AI, MASK_OUT_Y, qc_addi_32_ai);
qc!(OP_ADDI_32_DI, MASK_OUT_Y, qc_addi_32_di);
qc!(OP_ADDI_32_IX, MASK_OUT_Y, qc_addi_32_ix);
qc!(OP_ADDI_32_AW, MASK_EXACT, qc_addi_32_aw);
qc!(OP_ADDI_32_AL, MASK_EXACT, qc_addi_32_al);
qc8!(OP_ADDQ_8_DN, qc_addq_8_dn);
qc8!(OP_ADDQ_8_PI, qc_addq_8_pi);
qc8!(OP_ADDQ_8_PD, qc_addq_8_pd);
qc8!(OP_ADDQ_8_AI, qc_addq_8_ai);
qc8!(OP_ADDQ_8_DI, qc_addq_8_di);
qc8!(OP_ADDQ_8_IX, qc_addq_8_ix);
qc8!(OP_ADDQ_8_AW, MASK_OUT_X, qc_addq_8_aw);
qc8!(OP_ADDQ_8_AL, MASK_OUT_X, qc_addq_8_al);
qc!(OP_ADDQ_16_DN, qc_addq_16_dn);
qc!(OP_ADDQ_16_AN, qc_addq_16_an);
qc!(OP_ADDQ_16_PI, qc_addq_16_pi);
qc!(OP_ADDQ_16_PD, qc_addq_16_pd);
qc!(OP_ADDQ_16_AI, qc_addq_16_ai);
qc!(OP_ADDQ_16_DI, qc_addq_16_di);
qc!(OP_ADDQ_16_IX, qc_addq_16_ix);
qc!(OP_ADDQ_16_AW, MASK_OUT_X, qc_addq_16_aw);
qc!(OP_ADDQ_16_AL, MASK_OUT_X, qc_addq_16_al);
qc!(OP_ADDQ_32_DN, qc_addq_32_dn);
qc!(OP_ADDQ_32_AN, qc_addq_32_an);
qc!(OP_ADDQ_32_PI, qc_addq_32_pi);
qc!(OP_ADDQ_32_PD, qc_addq_32_pd);
qc!(OP_ADDQ_32_AI, qc_addq_32_ai);
qc!(OP_ADDQ_32_DI, qc_addq_32_di);
qc!(OP_ADDQ_32_IX, qc_addq_32_ix);
qc!(OP_ADDQ_32_AW, MASK_OUT_X, qc_addq_32_aw);
qc!(OP_ADDQ_32_AL, MASK_OUT_X, qc_addq_32_al);
qc8!(OP_ADDX_8_RR, qc_addx_8_rr);
qc8!(OP_ADDX_8_MM, qc_addx_8_mm);
qc!(OP_ADDX_16_RR, qc_addx_16_rr);
qc!(OP_ADDX_16_MM, qc_addx_16_mm);
qc!(OP_ADDX_32_RR, qc_addx_32_rr);
qc!(OP_ADDX_32_MM, qc_addx_32_mm);
qc8!(OP_AND_8_ER_DN, qc_and_8_er_dn);
qc8!(OP_AND_8_ER_PI, qc_and_8_er_pi);
qc8!(OP_AND_8_ER_PD, qc_and_8_er_pd);
qc8!(OP_AND_8_ER_AI, qc_and_8_er_ai);
qc8!(OP_AND_8_ER_DI, qc_and_8_er_di);
qc8!(OP_AND_8_ER_IX, qc_and_8_er_ix);
qc8!(OP_AND_8_ER_AW, MASK_OUT_X, qc_and_8_er_aw);
qc8!(OP_AND_8_ER_AL, MASK_OUT_X, qc_and_8_er_al);
qc8!(OP_AND_8_ER_PCDI, MASK_OUT_X, qc_and_8_er_pcdi);
qc8!(OP_AND_8_ER_PCIX, MASK_OUT_X, qc_and_8_er_pcix);
qc8!(OP_AND_8_ER_IMM, MASK_OUT_X, qc_and_8_er_imm);
qc8!(OP_AND_8_RE_PI, qc_and_8_re_pi);
qc8!(OP_AND_8_RE_PD, qc_and_8_re_pd);
qc8!(OP_AND_8_RE_AI, qc_and_8_re_ai);
qc8!(OP_AND_8_RE_DI, qc_and_8_re_di);
qc8!(OP_AND_8_RE_IX, qc_and_8_re_ix);
qc8!(OP_AND_8_RE_AW, MASK_OUT_X, qc_and_8_re_aw);
qc8!(OP_AND_8_RE_AL, MASK_OUT_X, qc_and_8_re_al);
qc!(OP_AND_16_ER_DN, qc_and_16_er_dn);
qc!(OP_AND_16_ER_PI, qc_and_16_er_pi);
qc!(OP_AND_16_ER_PD, qc_and_16_er_pd);
qc!(OP_AND_16_ER_AI, qc_and_16_er_ai);
qc!(OP_AND_16_ER_DI, qc_and_16_er_di);
qc!(OP_AND_16_ER_IX, qc_and_16_er_ix);
qc!(OP_AND_16_ER_AW, MASK_OUT_X, qc_and_16_er_aw);
qc!(OP_AND_16_ER_AL, MASK_OUT_X, qc_and_16_er_al);
qc!(OP_AND_16_ER_PCDI, MASK_OUT_X, qc_and_16_er_pcdi);
qc!(OP_AND_16_ER_PCIX, MASK_OUT_X, qc_and_16_er_pcix);
qc!(OP_AND_16_ER_IMM, MASK_OUT_X, qc_and_16_er_imm);
qc!(OP_AND_16_RE_PI, qc_and_16_re_pi);
qc!(OP_AND_16_RE_PD, qc_and_16_re_pd);
qc!(OP_AND_16_RE_AI, qc_and_16_re_ai);
qc!(OP_AND_16_RE_DI, qc_and_16_re_di);
qc!(OP_AND_16_RE_IX, qc_and_16_re_ix);
qc!(OP_AND_16_RE_AW, MASK_OUT_X, qc_and_16_re_aw);
qc!(OP_AND_16_RE_AL, MASK_OUT_X, qc_and_16_re_al);
qc!(OP_AND_32_ER_DN, qc_and_32_er_dn);
qc!(OP_AND_32_ER_PI, qc_and_32_er_pi);
qc!(OP_AND_32_ER_PD, qc_and_32_er_pd);
qc!(OP_AND_32_ER_AI, qc_and_32_er_ai);
qc!(OP_AND_32_ER_DI, qc_and_32_er_di);
qc!(OP_AND_32_ER_IX, qc_and_32_er_ix);
qc!(OP_AND_32_ER_AW, MASK_OUT_X, qc_and_32_er_aw);
qc!(OP_AND_32_ER_AL, MASK_OUT_X, qc_and_32_er_al);
qc!(OP_AND_32_ER_PCDI, MASK_OUT_X, qc_and_32_er_pcdi);
qc!(OP_AND_32_ER_PCIX, MASK_OUT_X, qc_and_32_er_pcix);
qc!(OP_AND_32_ER_IMM, MASK_OUT_X, qc_and_32_er_imm);
qc!(OP_AND_32_RE_PI, qc_and_32_re_pi);
qc!(OP_AND_32_RE_PD, qc_and_32_re_pd);
qc!(OP_AND_32_RE_AI, qc_and_32_re_ai);
qc!(OP_AND_32_RE_DI, qc_and_32_re_di);
qc!(OP_AND_32_RE_IX, qc_and_32_re_ix);
qc!(OP_AND_32_RE_AW, MASK_OUT_X, qc_and_32_re_aw);
qc!(OP_AND_32_RE_AL, MASK_OUT_X, qc_and_32_re_al);
qc8!(OP_ANDI_8_DN, MASK_OUT_Y, qc_andi_8_dn);
qc8!(OP_ANDI_8_PI, MASK_OUT_Y, qc_andi_8_pi);
qc8!(OP_ANDI_8_PD, MASK_OUT_Y, qc_andi_8_pd);
qc8!(OP_ANDI_8_AI, MASK_OUT_Y, qc_andi_8_ai);
qc8!(OP_ANDI_8_DI, MASK_OUT_Y, qc_andi_8_di);
qc8!(OP_ANDI_8_IX, MASK_OUT_Y, qc_andi_8_ix);
qc8!(OP_ANDI_8_AW, MASK_EXACT, qc_andi_8_aw);
qc8!(OP_ANDI_8_AL, MASK_EXACT, qc_andi_8_al);
qc!(OP_ANDI_16_DN, MASK_OUT_Y, qc_andi_16_dn);
qc!(OP_ANDI_16_PI, MASK_OUT_Y, qc_andi_16_pi);
qc!(OP_ANDI_16_PD, MASK_OUT_Y, qc_andi_16_pd);
qc!(OP_ANDI_16_AI, MASK_OUT_Y, qc_andi_16_ai);
qc!(OP_ANDI_16_DI, MASK_OUT_Y, qc_andi_16_di);
qc!(OP_ANDI_16_IX, MASK_OUT_Y, qc_andi_16_ix);
qc!(OP_ANDI_16_AW, MASK_EXACT, qc_andi_16_aw);
qc!(OP_ANDI_16_AL, MASK_EXACT, qc_andi_16_al);
qc!(OP_ANDI_32_DN, MASK_OUT_Y, qc_andi_32_dn);
qc!(OP_ANDI_32_PI, MASK_OUT_Y, qc_andi_32_pi);
qc!(OP_ANDI_32_PD, MASK_OUT_Y, qc_andi_32_pd);
qc!(OP_ANDI_32_AI, MASK_OUT_Y, qc_andi_32_ai);
qc!(OP_ANDI_32_DI, MASK_OUT_Y, qc_andi_32_di);
qc!(OP_ANDI_32_IX, MASK_OUT_Y, qc_andi_32_ix);
qc!(OP_ANDI_32_AW, MASK_EXACT, qc_andi_32_aw);
qc!(OP_ANDI_32_AL, MASK_EXACT, qc_andi_32_al);
qc!(OP_ANDI_16_TOC, MASK_EXACT, qc_andi_16_toc);
qc!(OP_ANDI_16_TOS, MASK_EXACT, qc_andi_16_tos);
qc8!(OP_ASR_8_S, MASK_OUT_X_Y, qc_asr_8_s);
qc!(OP_ASR_16_S, MASK_OUT_X_Y, qc_asr_16_s);
qc!(OP_ASR_32_S, MASK_OUT_X_Y, qc_asr_32_s);
qc8!(OP_ASR_8_R, MASK_OUT_X_Y, qc_asr_8_r);
qc!(OP_ASR_16_R, MASK_OUT_X_Y, qc_asr_16_r);
qc!(OP_ASR_32_R, MASK_OUT_X_Y, qc_asr_32_r);
qc8!(OP_ASL_8_S, MASK_OUT_X_Y, qc_asl_8_s);
qc!(OP_ASL_16_S, MASK_OUT_X_Y, qc_asl_16_s);
qc!(OP_ASL_32_S, MASK_OUT_X_Y, qc_asl_32_s);
qc8!(OP_ASL_8_R, MASK_OUT_X_Y, qc_asl_8_r);
qc!(OP_ASL_16_R, MASK_OUT_X_Y, qc_asl_16_r);
qc!(OP_ASL_32_R, MASK_OUT_X_Y, qc_asl_32_r);
qc!(OP_ASL_16_AI, MASK_OUT_Y, qc_asl_16_ai);
qc!(OP_ASL_16_PI, MASK_OUT_Y, qc_asl_16_pi);
qc!(OP_ASL_16_PD, MASK_OUT_Y, qc_asl_16_pd);
qc!(OP_ASL_16_DI, MASK_OUT_Y, qc_asl_16_di);
qc!(OP_ASL_16_IX, MASK_OUT_Y, qc_asl_16_ix);
qc!(OP_ASL_16_AW, MASK_EXACT, qc_asl_16_aw);
qc!(OP_ASL_16_AL, MASK_EXACT, qc_asl_16_al);
qc!(OP_ASR_16_AI, MASK_OUT_Y, qc_asr_16_ai);
qc!(OP_ASR_16_PI, MASK_OUT_Y, qc_asr_16_pi);
qc!(OP_ASR_16_PD, MASK_OUT_Y, qc_asr_16_pd);
qc!(OP_ASR_16_DI, MASK_OUT_Y, qc_asr_16_di);
qc!(OP_ASR_16_IX, MASK_OUT_Y, qc_asr_16_ix);
qc!(OP_ASR_16_AW, MASK_EXACT, qc_asr_16_aw);
qc!(OP_ASR_16_AL, MASK_EXACT, qc_asr_16_al);
const MASK_LOBYTE_QUICKER: u32 = MASK_LOBYTE + 0xe0;
qc8!(OP_BHI_8, MASK_LOBYTE_QUICKER, qc_bhi_8);
qc8!(OP_BLS_8, MASK_LOBYTE_QUICKER, qc_bls_8);
qc8!(OP_BCC_8, MASK_LOBYTE_QUICKER, qc_bcc_8);
qc8!(OP_BCS_8, MASK_LOBYTE_QUICKER, qc_bcs_8);
qc8!(OP_BNE_8, MASK_LOBYTE_QUICKER, qc_bne_8);
qc8!(OP_BEQ_8, MASK_LOBYTE_QUICKER, qc_beq_8);
qc8!(OP_BVC_8, MASK_LOBYTE_QUICKER, qc_bvc_8);
qc8!(OP_BVS_8, MASK_LOBYTE_QUICKER, qc_bvs_8);
qc8!(OP_BPL_8, MASK_LOBYTE_QUICKER, qc_bpl_8);
qc8!(OP_BMI_8, MASK_LOBYTE_QUICKER, qc_bmi_8);
qc8!(OP_BGE_8, MASK_LOBYTE_QUICKER, qc_bge_8);
qc8!(OP_BLT_8, MASK_LOBYTE_QUICKER, qc_blt_8);
qc8!(OP_BGT_8, MASK_LOBYTE_QUICKER, qc_bgt_8);
qc8!(OP_BLE_8, MASK_LOBYTE_QUICKER, qc_ble_8);
qc8!(OP_BRA_8, MASK_LOBYTE_QUICKER, qc_bra_8);
qc8!(OP_BSR_8, MASK_LOBYTE_QUICKER, qc_bsr_8);
qc!(OP_BHI_16, MASK_EXACT, qc_bhi_16);
qc!(OP_BLS_16, MASK_EXACT, qc_bls_16);
qc!(OP_BCC_16, MASK_EXACT, qc_bcc_16);
qc!(OP_BCS_16, MASK_EXACT, qc_bcs_16);
qc!(OP_BNE_16, MASK_EXACT, qc_bne_16);
qc!(OP_BEQ_16, MASK_EXACT, qc_beq_16);
qc!(OP_BVC_16, MASK_EXACT, qc_bvc_16);
qc!(OP_BVS_16, MASK_EXACT, qc_bvs_16);
qc!(OP_BPL_16, MASK_EXACT, qc_bpl_16);
qc!(OP_BMI_16, MASK_EXACT, qc_bmi_16);
qc!(OP_BGE_16, MASK_EXACT, qc_bge_16);
qc!(OP_BLT_16, MASK_EXACT, qc_blt_16);
qc!(OP_BGT_16, MASK_EXACT, qc_bgt_16);
qc!(OP_BLE_16, MASK_EXACT, qc_ble_16);
qc!(OP_BRA_16, MASK_EXACT, qc_bra_16);
qc!(OP_BSR_16, MASK_EXACT, qc_bsr_16);
qc!(OP_BCHG_32_R_DN, MASK_OUT_X_Y, qc_bchg_32_r_dn);
qc!(OP_BCHG_32_S_DN, MASK_OUT_Y, qc_bchg_32_s_dn);
qc8!(OP_BCHG_8_R_AI, MASK_OUT_X_Y, qc_bchg_8_r_ai);
qc8!(OP_BCHG_8_R_PI, MASK_OUT_X_Y, qc_bchg_8_r_pi);
qc8!(OP_BCHG_8_R_PD, MASK_OUT_X_Y, qc_bchg_8_r_pd);
qc8!(OP_BCHG_8_R_DI, MASK_OUT_X_Y, qc_bchg_8_r_di);
qc8!(OP_BCHG_8_R_IX, MASK_OUT_X_Y, qc_bchg_8_r_ix);
qc8!(OP_BCHG_8_R_AW, MASK_OUT_X, qc_bchg_8_r_aw);
qc8!(OP_BCHG_8_R_AL, MASK_OUT_X, qc_bchg_8_r_al);
qc8!(OP_BCHG_8_S_AI, MASK_OUT_Y, qc_bchg_8_s_ai);
qc8!(OP_BCHG_8_S_PI, MASK_OUT_Y, qc_bchg_8_s_pi);
qc8!(OP_BCHG_8_S_PD, MASK_OUT_Y, qc_bchg_8_s_pd);
qc8!(OP_BCHG_8_S_DI, MASK_OUT_Y, qc_bchg_8_s_di);
qc8!(OP_BCHG_8_S_IX, MASK_OUT_Y, qc_bchg_8_s_ix);
qc8!(OP_BCHG_8_S_AW, MASK_EXACT, qc_bchg_8_s_aw);
qc8!(OP_BCHG_8_S_AL, MASK_EXACT, qc_bchg_8_s_al);
qc!(OP_BCLR_32_R_DN, MASK_OUT_X_Y, qc_bclr_32_r_dn);
qc!(OP_BCLR_32_S_DN, MASK_OUT_Y, qc_bclr_32_s_dn);
qc8!(OP_BCLR_8_R_AI, MASK_OUT_X_Y, qc_bclr_8_r_ai);
qc8!(OP_BCLR_8_R_PI, MASK_OUT_X_Y, qc_bclr_8_r_pi);
qc8!(OP_BCLR_8_R_PD, MASK_OUT_X_Y, qc_bclr_8_r_pd);
qc8!(OP_BCLR_8_R_DI, MASK_OUT_X_Y, qc_bclr_8_r_di);
qc8!(OP_BCLR_8_R_IX, MASK_OUT_X_Y, qc_bclr_8_r_ix);
qc8!(OP_BCLR_8_R_AW, MASK_OUT_X, qc_bclr_8_r_aw);
qc8!(OP_BCLR_8_R_AL, MASK_OUT_X, qc_bclr_8_r_al);
qc8!(OP_BCLR_8_S_AI, MASK_OUT_Y, qc_bclr_8_s_ai);
qc8!(OP_BCLR_8_S_PI, MASK_OUT_Y, qc_bclr_8_s_pi);
qc8!(OP_BCLR_8_S_PD, MASK_OUT_Y, qc_bclr_8_s_pd);
qc8!(OP_BCLR_8_S_DI, MASK_OUT_Y, qc_bclr_8_s_di);
qc8!(OP_BCLR_8_S_IX, MASK_OUT_Y, qc_bclr_8_s_ix);
qc8!(OP_BCLR_8_S_AW, MASK_EXACT, qc_bclr_8_s_aw);
qc8!(OP_BCLR_8_S_AL, MASK_EXACT, qc_bclr_8_s_al);
qc!(OP_BSET_32_R_DN, MASK_OUT_X_Y, qc_bset_32_r_dn);
qc!(OP_BSET_32_S_DN, MASK_OUT_Y, qc_bset_32_s_dn);
qc8!(OP_BSET_8_R_AI, MASK_OUT_X_Y, qc_bset_8_r_ai);
qc8!(OP_BSET_8_R_PI, MASK_OUT_X_Y, qc_bset_8_r_pi);
qc8!(OP_BSET_8_R_PD, MASK_OUT_X_Y, qc_bset_8_r_pd);
qc8!(OP_BSET_8_R_DI, MASK_OUT_X_Y, qc_bset_8_r_di);
qc8!(OP_BSET_8_R_IX, MASK_OUT_X_Y, qc_bset_8_r_ix);
qc8!(OP_BSET_8_R_AW, MASK_OUT_X, qc_bset_8_r_aw);
qc8!(OP_BSET_8_R_AL, MASK_OUT_X, qc_bset_8_r_al);
qc8!(OP_BSET_8_S_AI, MASK_OUT_Y, qc_bset_8_s_ai);
qc8!(OP_BSET_8_S_PI, MASK_OUT_Y, qc_bset_8_s_pi);
qc8!(OP_BSET_8_S_PD, MASK_OUT_Y, qc_bset_8_s_pd);
qc8!(OP_BSET_8_S_DI, MASK_OUT_Y, qc_bset_8_s_di);
qc8!(OP_BSET_8_S_IX, MASK_OUT_Y, qc_bset_8_s_ix);
qc8!(OP_BSET_8_S_AW, MASK_EXACT, qc_bset_8_s_aw);
qc8!(OP_BSET_8_S_AL, MASK_EXACT, qc_bset_8_s_al);
qc!(OP_BTST_32_R_DN, MASK_OUT_X_Y, qc_btst_32_r_dn);
qc!(OP_BTST_32_S_DN, MASK_OUT_Y, qc_btst_32_s_dn);
qc8!(OP_BTST_8_R_AI, MASK_OUT_X_Y, qc_btst_8_r_ai);
qc8!(OP_BTST_8_R_PI, MASK_OUT_X_Y, qc_btst_8_r_pi);
qc8!(OP_BTST_8_R_PD, MASK_OUT_X_Y, qc_btst_8_r_pd);
qc8!(OP_BTST_8_R_DI, MASK_OUT_X_Y, qc_btst_8_r_di);
qc8!(OP_BTST_8_R_IX, MASK_OUT_X_Y, qc_btst_8_r_ix);
qc8!(OP_BTST_8_R_AW, MASK_OUT_X, qc_btst_8_r_aw);
qc8!(OP_BTST_8_R_AL, MASK_OUT_X, qc_btst_8_r_al);
qc8!(OP_BTST_8_S_AI, MASK_OUT_Y, qc_btst_8_s_ai);
qc8!(OP_BTST_8_S_PI, MASK_OUT_Y, qc_btst_8_s_pi);
qc8!(OP_BTST_8_S_PD, MASK_OUT_Y, qc_btst_8_s_pd);
qc8!(OP_BTST_8_S_DI, MASK_OUT_Y, qc_btst_8_s_di);
qc8!(OP_BTST_8_S_IX, MASK_OUT_Y, qc_btst_8_s_ix);
qc8!(OP_BTST_8_S_AW, MASK_EXACT, qc_btst_8_s_aw);
qc8!(OP_BTST_8_S_AL, MASK_EXACT, qc_btst_8_s_al);
qc!(OP_CHK_16_AI, MASK_OUT_X_Y, qc_chk_16_ai);
qc!(OP_CHK_16_AL, MASK_OUT_X, qc_chk_16_al);
qc!(OP_CHK_16_AW, MASK_OUT_X, qc_chk_16_aw);
qc!(OP_CHK_16_DN, MASK_OUT_X_Y, qc_chk_16_dn);
qc!(OP_CHK_16_DI, MASK_OUT_X_Y, qc_chk_16_di);
qc!(OP_CHK_16_IMM, MASK_OUT_X, qc_chk_16_imm);
qc!(OP_CHK_16_IX, MASK_OUT_X_Y, qc_chk_16_ix);
qc!(OP_CHK_16_PCDI, MASK_OUT_X, qc_chk_16_pcdi);
qc!(OP_CHK_16_PCIX, MASK_OUT_X, qc_chk_16_pcix);
qc!(OP_CHK_16_PD, MASK_OUT_X_Y, qc_chk_16_pd);
qc!(OP_CHK_16_PI, MASK_OUT_X_Y, qc_chk_16_pi);
qc8!(OP_CLR_8_DN, MASK_OUT_Y, qc_clr_8_dn);
qc8!(OP_CLR_8_AI, MASK_OUT_Y, qc_clr_8_ai);
qc8!(OP_CLR_8_PI, MASK_OUT_Y, qc_clr_8_pi);
qc8!(OP_CLR_8_PD, MASK_OUT_Y, qc_clr_8_pd);
qc8!(OP_CLR_8_DI, MASK_OUT_Y, qc_clr_8_di);
qc8!(OP_CLR_8_IX, MASK_OUT_Y, qc_clr_8_ix);
qc8!(OP_CLR_8_AW, MASK_EXACT, qc_clr_8_aw);
qc8!(OP_CLR_8_AL, MASK_EXACT, qc_clr_8_al);
qc!(OP_CLR_16_DN, MASK_OUT_Y, qc_clr_16_dn);
qc!(OP_CLR_16_AI, MASK_OUT_Y, qc_clr_16_ai);
qc!(OP_CLR_16_PI, MASK_OUT_Y, qc_clr_16_pi);
qc!(OP_CLR_16_PD, MASK_OUT_Y, qc_clr_16_pd);
qc!(OP_CLR_16_DI, MASK_OUT_Y, qc_clr_16_di);
qc!(OP_CLR_16_IX, MASK_OUT_Y, qc_clr_16_ix);
qc!(OP_CLR_16_AW, MASK_EXACT, qc_clr_16_aw);
qc!(OP_CLR_16_AL, MASK_EXACT, qc_clr_16_al);
qc!(OP_CLR_32_DN, MASK_OUT_Y, qc_clr_32_dn);
qc!(OP_CLR_32_AI, MASK_OUT_Y, qc_clr_32_ai);
qc!(OP_CLR_32_PI, MASK_OUT_Y, qc_clr_32_pi);
qc!(OP_CLR_32_PD, MASK_OUT_Y, qc_clr_32_pd);
qc!(OP_CLR_32_DI, MASK_OUT_Y, qc_clr_32_di);
qc!(OP_CLR_32_IX, MASK_OUT_Y, qc_clr_32_ix);
qc!(OP_CLR_32_AW, MASK_EXACT, qc_clr_32_aw);
qc!(OP_CLR_32_AL, MASK_EXACT, qc_clr_32_al);
qc8!(OP_CMP_8_DN, MASK_OUT_X_Y, qc_cmp_8_dn);
qc8!(OP_CMP_8_AI, MASK_OUT_X_Y, qc_cmp_8_ai);
qc8!(OP_CMP_8_PI, MASK_OUT_X_Y, qc_cmp_8_pi);
qc8!(OP_CMP_8_PD, MASK_OUT_X_Y, qc_cmp_8_pd);
qc8!(OP_CMP_8_DI, MASK_OUT_X_Y, qc_cmp_8_di);
qc8!(OP_CMP_8_IX, MASK_OUT_X_Y, qc_cmp_8_ix);
qc8!(OP_CMP_8_AW, MASK_OUT_X, qc_cmp_8_aw);
qc8!(OP_CMP_8_AL, MASK_OUT_X, qc_cmp_8_al);
qc8!(OP_CMP_8_PCDI, MASK_OUT_Y, qc_cmp_8_pcdi);
qc8!(OP_CMP_8_PCIX, MASK_OUT_Y, qc_cmp_8_pcix);
qc8!(OP_CMP_8_IMM, MASK_OUT_X, qc_cmp_8_imm);
qc!(OP_CMP_16_DN, MASK_OUT_X_Y, qc_cmp_16_dn);
qc!(OP_CMP_16_AN, MASK_OUT_X_Y, qc_cmp_16_an);
qc!(OP_CMP_16_AI, MASK_OUT_X_Y, qc_cmp_16_ai);
qc!(OP_CMP_16_PI, MASK_OUT_X_Y, qc_cmp_16_pi);
qc!(OP_CMP_16_PD, MASK_OUT_X_Y, qc_cmp_16_pd);
qc!(OP_CMP_16_DI, MASK_OUT_X_Y, qc_cmp_16_di);
qc!(OP_CMP_16_IX, MASK_OUT_X_Y, qc_cmp_16_ix);
qc!(OP_CMP_16_AW, MASK_OUT_X, qc_cmp_16_aw);
qc!(OP_CMP_16_AL, MASK_OUT_X, qc_cmp_16_al);
qc!(OP_CMP_16_PCDI, MASK_OUT_X, qc_cmp_16_pcdi);
qc!(OP_CMP_16_PCIX, MASK_OUT_X, qc_cmp_16_pcix);
qc!(OP_CMP_16_IMM, MASK_OUT_X, qc_cmp_16_imm);
qc!(OP_CMP_32_DN, MASK_OUT_X_Y, qc_cmp_32_dn);
qc!(OP_CMP_32_AN, MASK_OUT_X_Y, qc_cmp_32_an);
qc!(OP_CMP_32_AI, MASK_OUT_X_Y, qc_cmp_32_ai);
qc!(OP_CMP_32_PI, MASK_OUT_X_Y, qc_cmp_32_pi);
qc!(OP_CMP_32_PD, MASK_OUT_X_Y, qc_cmp_32_pd);
qc!(OP_CMP_32_DI, MASK_OUT_X_Y, qc_cmp_32_di);
qc!(OP_CMP_32_IX, MASK_OUT_X_Y, qc_cmp_32_ix);
qc!(OP_CMP_32_AW, MASK_OUT_X, qc_cmp_32_aw);
qc!(OP_CMP_32_AL, MASK_OUT_X, qc_cmp_32_al);
qc!(OP_CMP_32_PCDI, MASK_OUT_X, qc_cmp_32_pcdi);
qc!(OP_CMP_32_PCIX, MASK_OUT_X, qc_cmp_32_pcix);
qc!(OP_CMP_32_IMM, MASK_OUT_X, qc_cmp_32_imm);
qc!(OP_CMPA_16_DN, qc_cmpa_16_dn);
qc!(OP_CMPA_16_AN, qc_cmpa_16_an);
qc!(OP_CMPA_16_PI, qc_cmpa_16_pi);
qc!(OP_CMPA_16_PD, qc_cmpa_16_pd);
qc!(OP_CMPA_16_AI, qc_cmpa_16_ai);
qc!(OP_CMPA_16_DI, qc_cmpa_16_di);
// QuickCheck-style opcode hammering. Each qc!/qc8! invocation (macros defined
// earlier in this file) registers a randomised comparison test for one
// opcode/addressing-mode combination. NOTE(review): judging from usage, the
// mask argument selects which bits of the opcode word are randomised
// (MASK_OUT_X / MASK_OUT_Y / MASK_OUT_X_Y free the register fields,
// MASK_EXACT pins the exact word; the two-argument form presumably uses a
// default mask) — confirm against the macro definition above. qc8! appears
// to be the byte-sized-operation variant.
qc!(OP_CMPA_16_IX, qc_cmpa_16_ix);
qc!(OP_CMPA_16_AW, MASK_OUT_X, qc_cmpa_16_aw);
qc!(OP_CMPA_16_AL, MASK_OUT_X, qc_cmpa_16_al);
qc!(OP_CMPA_16_PCDI, MASK_OUT_X, qc_cmpa_16_pcdi);
qc!(OP_CMPA_16_PCIX, MASK_OUT_X, qc_cmpa_16_pcix);
qc!(OP_CMPA_16_IMM, MASK_OUT_X, qc_cmpa_16_imm);
qc!(OP_CMPA_32_DN, qc_cmpa_32_dn);
qc!(OP_CMPA_32_AN, qc_cmpa_32_an);
qc!(OP_CMPA_32_PI, qc_cmpa_32_pi);
qc!(OP_CMPA_32_PD, qc_cmpa_32_pd);
qc!(OP_CMPA_32_AI, qc_cmpa_32_ai);
qc!(OP_CMPA_32_DI, qc_cmpa_32_di);
qc!(OP_CMPA_32_IX, qc_cmpa_32_ix);
qc!(OP_CMPA_32_AW, MASK_OUT_X, qc_cmpa_32_aw);
qc!(OP_CMPA_32_AL, MASK_OUT_X, qc_cmpa_32_al);
qc!(OP_CMPA_32_PCDI, MASK_OUT_X, qc_cmpa_32_pcdi);
qc!(OP_CMPA_32_PCIX, MASK_OUT_X, qc_cmpa_32_pcix);
qc!(OP_CMPA_32_IMM, MASK_OUT_X, qc_cmpa_32_imm);
// Put qc for CMPI here
qc8!(OP_CMPI_8_DN, MASK_OUT_Y, qc_cmpi_8_dn);
qc8!(OP_CMPI_8_AI, MASK_OUT_Y, qc_cmpi_8_ai);
qc8!(OP_CMPI_8_PI, MASK_OUT_Y, qc_cmpi_8_pi);
qc8!(OP_CMPI_8_PD, MASK_OUT_Y, qc_cmpi_8_pd);
qc8!(OP_CMPI_8_DI, MASK_OUT_Y, qc_cmpi_8_di);
qc8!(OP_CMPI_8_IX, MASK_OUT_Y, qc_cmpi_8_ix);
qc8!(OP_CMPI_8_AW, MASK_EXACT, qc_cmpi_8_aw);
qc8!(OP_CMPI_8_AL, MASK_EXACT, qc_cmpi_8_al);
qc!(OP_CMPI_16_DN, MASK_OUT_Y, qc_cmpi_16_dn);
qc!(OP_CMPI_16_AI, MASK_OUT_Y, qc_cmpi_16_ai);
qc!(OP_CMPI_16_PI, MASK_OUT_Y, qc_cmpi_16_pi);
qc!(OP_CMPI_16_PD, MASK_OUT_Y, qc_cmpi_16_pd);
qc!(OP_CMPI_16_DI, MASK_OUT_Y, qc_cmpi_16_di);
qc!(OP_CMPI_16_IX, MASK_OUT_Y, qc_cmpi_16_ix);
qc!(OP_CMPI_16_AW, MASK_EXACT, qc_cmpi_16_aw);
qc!(OP_CMPI_16_AL, MASK_EXACT, qc_cmpi_16_al);
qc!(OP_CMPI_32_DN, MASK_OUT_Y, qc_cmpi_32_dn);
qc!(OP_CMPI_32_AI, MASK_OUT_Y, qc_cmpi_32_ai);
qc!(OP_CMPI_32_PI, MASK_OUT_Y, qc_cmpi_32_pi);
qc!(OP_CMPI_32_PD, MASK_OUT_Y, qc_cmpi_32_pd);
qc!(OP_CMPI_32_DI, MASK_OUT_Y, qc_cmpi_32_di);
qc!(OP_CMPI_32_IX, MASK_OUT_Y, qc_cmpi_32_ix);
qc!(OP_CMPI_32_AW, MASK_EXACT, qc_cmpi_32_aw);
qc!(OP_CMPI_32_AL, MASK_EXACT, qc_cmpi_32_al);
// Put qc for CMPM here
qc8!(OP_CMPM_8, MASK_OUT_X_Y, qc_cmpm_8);
qc!(OP_CMPM_16, MASK_OUT_X_Y, qc_cmpm_16);
qc!(OP_CMPM_32, MASK_OUT_X_Y, qc_cmpm_32);
// Put qc for DBcc here
qc!(OP_DBT_16, MASK_OUT_Y, qc_dbt_16);
qc!(OP_DBF_16, MASK_OUT_Y, qc_dbf_16);
qc!(OP_DBHI_16, MASK_OUT_Y, qc_dbhi_16);
qc!(OP_DBLS_16, MASK_OUT_Y, qc_dbls_16);
qc!(OP_DBCC_16, MASK_OUT_Y, qc_dbcc_16);
qc!(OP_DBCS_16, MASK_OUT_Y, qc_dbcs_16);
qc!(OP_DBNE_16, MASK_OUT_Y, qc_dbne_16);
qc!(OP_DBEQ_16, MASK_OUT_Y, qc_dbeq_16);
qc!(OP_DBVC_16, MASK_OUT_Y, qc_dbvc_16);
qc!(OP_DBVS_16, MASK_OUT_Y, qc_dbvs_16);
qc!(OP_DBPL_16, MASK_OUT_Y, qc_dbpl_16);
qc!(OP_DBMI_16, MASK_OUT_Y, qc_dbmi_16);
qc!(OP_DBGE_16, MASK_OUT_Y, qc_dbge_16);
qc!(OP_DBLT_16, MASK_OUT_Y, qc_dblt_16);
qc!(OP_DBGT_16, MASK_OUT_Y, qc_dbgt_16);
qc!(OP_DBLE_16, MASK_OUT_Y, qc_dble_16);
// Put qc for DIVS here
qc!(OP_DIVS_16_AI, MASK_OUT_X_Y, qc_divs_16_ai);
qc!(OP_DIVS_16_AL, MASK_OUT_X, qc_divs_16_al);
qc!(OP_DIVS_16_AW, MASK_OUT_X, qc_divs_16_aw);
qc!(OP_DIVS_16_DN, MASK_OUT_X_Y, qc_divs_16_dn);
qc!(OP_DIVS_16_DI, MASK_OUT_X_Y, qc_divs_16_di);
qc!(OP_DIVS_16_IMM, MASK_OUT_X, qc_divs_16_imm);
qc!(OP_DIVS_16_IX, MASK_OUT_X_Y, qc_divs_16_ix);
qc!(OP_DIVS_16_PCDI, MASK_OUT_X, qc_divs_16_pcdi);
qc!(OP_DIVS_16_PCIX, MASK_OUT_X, qc_divs_16_pcix);
qc!(OP_DIVS_16_PD, MASK_OUT_X_Y, qc_divs_16_pd);
qc!(OP_DIVS_16_PI, MASK_OUT_X_Y, qc_divs_16_pi);
// Put qc for DIVU here
qc!(OP_DIVU_16_AI, MASK_OUT_X_Y, qc_divu_16_ai);
qc!(OP_DIVU_16_AL, MASK_OUT_X, qc_divu_16_al);
qc!(OP_DIVU_16_AW, MASK_OUT_X, qc_divu_16_aw);
qc!(OP_DIVU_16_DN, MASK_OUT_X_Y, qc_divu_16_dn);
qc!(OP_DIVU_16_DI, MASK_OUT_X_Y, qc_divu_16_di);
qc!(OP_DIVU_16_IMM, MASK_OUT_X, qc_divu_16_imm);
qc!(OP_DIVU_16_IX, MASK_OUT_X_Y, qc_divu_16_ix);
qc!(OP_DIVU_16_PCDI, MASK_OUT_X, qc_divu_16_pcdi);
qc!(OP_DIVU_16_PCIX, MASK_OUT_X, qc_divu_16_pcix);
qc!(OP_DIVU_16_PD, MASK_OUT_X_Y, qc_divu_16_pd);
qc!(OP_DIVU_16_PI, MASK_OUT_X_Y, qc_divu_16_pi);
// Put qc for EOR, EORI, EORI to CCR and EORI to SR here
qc8!(OP_EOR_8_DN, MASK_OUT_X_Y, qc_eor_8_dn);
qc8!(OP_EOR_8_AI, MASK_OUT_X_Y, qc_eor_8_ai);
qc8!(OP_EOR_8_PI, MASK_OUT_X_Y, qc_eor_8_pi);
qc8!(OP_EOR_8_PD, MASK_OUT_X_Y, qc_eor_8_pd);
qc8!(OP_EOR_8_DI, MASK_OUT_X_Y, qc_eor_8_di);
qc8!(OP_EOR_8_IX, MASK_OUT_X_Y, qc_eor_8_ix);
qc8!(OP_EOR_8_AW, MASK_OUT_X, qc_eor_8_aw);
qc8!(OP_EOR_8_AL, MASK_OUT_X, qc_eor_8_al);
qc!(OP_EOR_16_DN, MASK_OUT_X_Y, qc_eor_16_dn);
qc!(OP_EOR_16_AI, MASK_OUT_X_Y, qc_eor_16_ai);
qc!(OP_EOR_16_PI, MASK_OUT_X_Y, qc_eor_16_pi);
qc!(OP_EOR_16_PD, MASK_OUT_X_Y, qc_eor_16_pd);
qc!(OP_EOR_16_DI, MASK_OUT_X_Y, qc_eor_16_di);
qc!(OP_EOR_16_IX, MASK_OUT_X_Y, qc_eor_16_ix);
qc!(OP_EOR_16_AW, MASK_OUT_X, qc_eor_16_aw);
qc!(OP_EOR_16_AL, MASK_OUT_X, qc_eor_16_al);
qc!(OP_EOR_32_DN, MASK_OUT_X_Y, qc_eor_32_dn);
qc!(OP_EOR_32_AI, MASK_OUT_X_Y, qc_eor_32_ai);
qc!(OP_EOR_32_PI, MASK_OUT_X_Y, qc_eor_32_pi);
qc!(OP_EOR_32_PD, MASK_OUT_X_Y, qc_eor_32_pd);
qc!(OP_EOR_32_DI, MASK_OUT_X_Y, qc_eor_32_di);
qc!(OP_EOR_32_IX, MASK_OUT_X_Y, qc_eor_32_ix);
qc!(OP_EOR_32_AW, MASK_OUT_X, qc_eor_32_aw);
qc!(OP_EOR_32_AL, MASK_OUT_X, qc_eor_32_al);
qc8!(OP_EORI_8_DN, MASK_OUT_Y, qc_eori_8_dn);
qc8!(OP_EORI_8_AI, MASK_OUT_Y, qc_eori_8_ai);
qc8!(OP_EORI_8_PI, MASK_OUT_Y, qc_eori_8_pi);
qc8!(OP_EORI_8_PD, MASK_OUT_Y, qc_eori_8_pd);
qc8!(OP_EORI_8_DI, MASK_OUT_Y, qc_eori_8_di);
qc8!(OP_EORI_8_IX, MASK_OUT_Y, qc_eori_8_ix);
qc8!(OP_EORI_8_AW, MASK_EXACT, qc_eori_8_aw);
qc8!(OP_EORI_8_AL, MASK_EXACT, qc_eori_8_al);
qc!(OP_EORI_16_DN, MASK_OUT_Y, qc_eori_16_dn);
qc!(OP_EORI_16_AI, MASK_OUT_Y, qc_eori_16_ai);
qc!(OP_EORI_16_PI, MASK_OUT_Y, qc_eori_16_pi);
qc!(OP_EORI_16_PD, MASK_OUT_Y, qc_eori_16_pd);
qc!(OP_EORI_16_DI, MASK_OUT_Y, qc_eori_16_di);
qc!(OP_EORI_16_IX, MASK_OUT_Y, qc_eori_16_ix);
qc!(OP_EORI_16_AW, MASK_EXACT, qc_eori_16_aw);
qc!(OP_EORI_16_AL, MASK_EXACT, qc_eori_16_al);
qc!(OP_EORI_32_DN, MASK_OUT_Y, qc_eori_32_dn);
qc!(OP_EORI_32_AI, MASK_OUT_Y, qc_eori_32_ai);
qc!(OP_EORI_32_PI, MASK_OUT_Y, qc_eori_32_pi);
qc!(OP_EORI_32_PD, MASK_OUT_Y, qc_eori_32_pd);
qc!(OP_EORI_32_DI, MASK_OUT_Y, qc_eori_32_di);
qc!(OP_EORI_32_IX, MASK_OUT_Y, qc_eori_32_ix);
qc!(OP_EORI_32_AW, MASK_EXACT, qc_eori_32_aw);
qc!(OP_EORI_32_AL, MASK_EXACT, qc_eori_32_al);
// Fix: every other live qc! invocation in this file passes (opcode, mask,
// test-fn). These two still used the obsolete (mask, opcode, ...) order —
// the same order as the commented-out block further down — which, because
// both arguments are plain identifiers, compiled silently but hammered
// MASK_EXACT as if it were the opcode word. Restore the current order.
qc!(OP_EORI_16_TOC, MASK_EXACT, qc_eori_16_toc);
qc!(OP_EORI_16_TOS, MASK_EXACT, qc_eori_16_tos);
// Put qc for EXG here
qc!(OP_EXG_32_DD, MASK_OUT_X_Y, qc_exg_32_dd);
qc!(OP_EXG_32_AA, MASK_OUT_X_Y, qc_exg_32_aa);
qc!(OP_EXG_32_DA, MASK_OUT_X_Y, qc_exg_32_da);
// Put qc for EXT here
qc!(OP_EXT_BW, MASK_OUT_Y, qc_ext_bw);
qc!(OP_EXT_WL, MASK_OUT_Y, qc_ext_wl);
// Put qc for ILLEGAL here
qc!(OP_ILLEGAL, MASK_EXACT, qc_illegal);
// Put qc for JMP here
qc!(OP_JMP_32_AI, MASK_OUT_Y, qc_jmp_32_ai);
qc!(OP_JMP_32_AL, MASK_EXACT, qc_jmp_32_al);
qc!(OP_JMP_32_AW, MASK_EXACT, qc_jmp_32_aw);
qc!(OP_JMP_32_DI, MASK_OUT_Y, qc_jmp_32_di);
qc!(OP_JMP_32_IX, MASK_OUT_Y, qc_jmp_32_ix);
qc!(OP_JMP_32_PCDI, MASK_EXACT, qc_jmp_32_pcdi);
qc!(OP_JMP_32_PCIX, MASK_EXACT, qc_jmp_32_pcix);
// Put qc for JSR here
qc!(OP_JSR_32_AI, MASK_OUT_Y, qc_jsr_32_ai);
qc!(OP_JSR_32_AL, MASK_EXACT, qc_jsr_32_al);
qc!(OP_JSR_32_AW, MASK_EXACT, qc_jsr_32_aw);
qc!(OP_JSR_32_DI, MASK_OUT_Y, qc_jsr_32_di);
qc!(OP_JSR_32_IX, MASK_OUT_Y, qc_jsr_32_ix);
qc!(OP_JSR_32_PCDI, MASK_EXACT, qc_jsr_32_pcdi);
qc!(OP_JSR_32_PCIX, MASK_EXACT, qc_jsr_32_pcix);
// Put qc for LEA here
qc!(OP_LEA_32_AI, MASK_OUT_Y, qc_lea_32_ai);
qc!(OP_LEA_32_AL, MASK_EXACT, qc_lea_32_al);
qc!(OP_LEA_32_AW, MASK_EXACT, qc_lea_32_aw);
qc!(OP_LEA_32_DI, MASK_OUT_Y, qc_lea_32_di);
qc!(OP_LEA_32_IX, MASK_OUT_Y, qc_lea_32_ix);
qc!(OP_LEA_32_PCDI, MASK_EXACT, qc_lea_32_pcdi);
qc!(OP_LEA_32_PCIX, MASK_EXACT, qc_lea_32_pcix);
// Put qc for LINK here
qc!(OP_LINK_16, MASK_OUT_Y, qc_link_16);
// Put qc for LSL, LSR here
// _S = shift count in opcode, _R = shift count in register; the 16-bit
// memory forms (AI..AL) shift one bit in place.
qc8!(OP_LSR_8_S, MASK_OUT_X_Y, qc_lsr_8_s);
qc!(OP_LSR_16_S, MASK_OUT_X_Y, qc_lsr_16_s);
qc!(OP_LSR_32_S, MASK_OUT_X_Y, qc_lsr_32_s);
qc8!(OP_LSR_8_R, MASK_OUT_X_Y, qc_lsr_8_r);
qc!(OP_LSR_16_R, MASK_OUT_X_Y, qc_lsr_16_r);
qc!(OP_LSR_32_R, MASK_OUT_X_Y, qc_lsr_32_r);
qc8!(OP_LSL_8_S, MASK_OUT_X_Y, qc_lsl_8_s);
qc!(OP_LSL_16_S, MASK_OUT_X_Y, qc_lsl_16_s);
qc!(OP_LSL_32_S, MASK_OUT_X_Y, qc_lsl_32_s);
qc8!(OP_LSL_8_R, MASK_OUT_X_Y, qc_lsl_8_r);
qc!(OP_LSL_16_R, MASK_OUT_X_Y, qc_lsl_16_r);
qc!(OP_LSL_32_R, MASK_OUT_X_Y, qc_lsl_32_r);
qc!(OP_LSL_16_AI, MASK_OUT_Y, qc_lsl_16_ai);
qc!(OP_LSL_16_PI, MASK_OUT_Y, qc_lsl_16_pi);
qc!(OP_LSL_16_PD, MASK_OUT_Y, qc_lsl_16_pd);
qc!(OP_LSL_16_DI, MASK_OUT_Y, qc_lsl_16_di);
qc!(OP_LSL_16_IX, MASK_OUT_Y, qc_lsl_16_ix);
qc!(OP_LSL_16_AW, MASK_EXACT, qc_lsl_16_aw);
qc!(OP_LSL_16_AL, MASK_EXACT, qc_lsl_16_al);
qc!(OP_LSR_16_AI, MASK_OUT_Y, qc_lsr_16_ai);
qc!(OP_LSR_16_PI, MASK_OUT_Y, qc_lsr_16_pi);
qc!(OP_LSR_16_PD, MASK_OUT_Y, qc_lsr_16_pd);
qc!(OP_LSR_16_DI, MASK_OUT_Y, qc_lsr_16_di);
qc!(OP_LSR_16_IX, MASK_OUT_Y, qc_lsr_16_ix);
qc!(OP_LSR_16_AW, MASK_EXACT, qc_lsr_16_aw);
qc!(OP_LSR_16_AL, MASK_EXACT, qc_lsr_16_al);
// Instructions below remain to be hammered; placeholders kept in
// alphabetical order.
// Put qc for MOVE here
// Put qc for MOVEA here
// Put qc for MOVE to CCR here
// Put qc for MOVE from SR here
// Put qc for MOVE to SR here
// Put qc for MOVE USP here
// Put qc for MOVEM here
// Put qc for MOVEP here
// Put qc for MOVEQ here
// Put qc for MULS here
// Put qc for MULU here
// Put qc for NBCD here
// Put qc for NEG here
// Put qc for NEGX here
// Put qc for NOP here
// Put qc for NOT here
// Put qc for OR here
// Put qc for ORI here
// Put qc for ORI to CCR here
// Put qc for ORI to SR here
// Put qc for PEA here
// Put qc for RESET here
// Put qc for ROL, ROR here
qc8!(OP_ROR_8_S, MASK_OUT_X_Y, qc_ror_8_s);
qc!(OP_ROR_16_S, MASK_OUT_X_Y, qc_ror_16_s);
qc!(OP_ROR_32_S, MASK_OUT_X_Y, qc_ror_32_s);
qc8!(OP_ROR_8_R, MASK_OUT_X_Y, qc_ror_8_r);
qc!(OP_ROR_16_R, MASK_OUT_X_Y, qc_ror_16_r);
qc!(OP_ROR_32_R, MASK_OUT_X_Y, qc_ror_32_r);
qc8!(OP_ROL_8_S, MASK_OUT_X_Y, qc_rol_8_s);
qc!(OP_ROL_16_S, MASK_OUT_X_Y, qc_rol_16_s);
qc!(OP_ROL_32_S, MASK_OUT_X_Y, qc_rol_32_s);
qc8!(OP_ROL_8_R, MASK_OUT_X_Y, qc_rol_8_r);
qc!(OP_ROL_16_R, MASK_OUT_X_Y, qc_rol_16_r);
qc!(OP_ROL_32_R, MASK_OUT_X_Y, qc_rol_32_r);
qc!(OP_ROL_16_AI, MASK_OUT_Y, qc_rol_16_ai);
qc!(OP_ROL_16_PI, MASK_OUT_Y, qc_rol_16_pi);
qc!(OP_ROL_16_PD, MASK_OUT_Y, qc_rol_16_pd);
qc!(OP_ROL_16_DI, MASK_OUT_Y, qc_rol_16_di);
qc!(OP_ROL_16_IX, MASK_OUT_Y, qc_rol_16_ix);
qc!(OP_ROL_16_AW, MASK_EXACT, qc_rol_16_aw);
qc!(OP_ROL_16_AL, MASK_EXACT, qc_rol_16_al);
qc!(OP_ROR_16_AI, MASK_OUT_Y, qc_ror_16_ai);
qc!(OP_ROR_16_PI, MASK_OUT_Y, qc_ror_16_pi);
qc!(OP_ROR_16_PD, MASK_OUT_Y, qc_ror_16_pd);
qc!(OP_ROR_16_DI, MASK_OUT_Y, qc_ror_16_di);
qc!(OP_ROR_16_IX, MASK_OUT_Y, qc_ror_16_ix);
qc!(OP_ROR_16_AW, MASK_EXACT, qc_ror_16_aw);
qc!(OP_ROR_16_AL, MASK_EXACT, qc_ror_16_al);
// Put qc for ROXL, ROXR here
qc8!(OP_ROXR_8_S, MASK_OUT_X_Y, qc_roxr_8_s);
qc!(OP_ROXR_16_S, MASK_OUT_X_Y, qc_roxr_16_s);
qc!(OP_ROXR_32_S, MASK_OUT_X_Y, qc_roxr_32_s);
qc8!(OP_ROXR_8_R, MASK_OUT_X_Y, qc_roxr_8_r);
qc!(OP_ROXR_16_R, MASK_OUT_X_Y, qc_roxr_16_r);
qc!(OP_ROXR_32_R, MASK_OUT_X_Y, qc_roxr_32_r);
qc8!(OP_ROXL_8_S, MASK_OUT_X_Y, qc_roxl_8_s);
qc!(OP_ROXL_16_S, MASK_OUT_X_Y, qc_roxl_16_s);
qc!(OP_ROXL_32_S, MASK_OUT_X_Y, qc_roxl_32_s);
qc8!(OP_ROXL_8_R, MASK_OUT_X_Y, qc_roxl_8_r);
qc!(OP_ROXL_16_R, MASK_OUT_X_Y, qc_roxl_16_r);
qc!(OP_ROXL_32_R, MASK_OUT_X_Y, qc_roxl_32_r);
qc!(OP_ROXL_16_AI, MASK_OUT_Y, qc_roxl_16_ai);
qc!(OP_ROXL_16_PI, MASK_OUT_Y, qc_roxl_16_pi);
qc!(OP_ROXL_16_PD, MASK_OUT_Y, qc_roxl_16_pd);
qc!(OP_ROXL_16_DI, MASK_OUT_Y, qc_roxl_16_di);
qc!(OP_ROXL_16_IX, MASK_OUT_Y, qc_roxl_16_ix);
qc!(OP_ROXL_16_AW, MASK_EXACT, qc_roxl_16_aw);
qc!(OP_ROXL_16_AL, MASK_EXACT, qc_roxl_16_al);
qc!(OP_ROXR_16_AI, MASK_OUT_Y, qc_roxr_16_ai);
qc!(OP_ROXR_16_PI, MASK_OUT_Y, qc_roxr_16_pi);
qc!(OP_ROXR_16_PD, MASK_OUT_Y, qc_roxr_16_pd);
qc!(OP_ROXR_16_DI, MASK_OUT_Y, qc_roxr_16_di);
qc!(OP_ROXR_16_IX, MASK_OUT_Y, qc_roxr_16_ix);
qc!(OP_ROXR_16_AW, MASK_EXACT, qc_roxr_16_aw);
qc!(OP_ROXR_16_AL, MASK_EXACT, qc_roxr_16_al);
// Put qc for RTE here
// Put qc for RTR here
// Put qc for RTS here
// SBCD and the Scc family
qc8!(OP_SBCD_8_RR, qc_sbcd_rr);
qc8!(OP_SBCD_8_MM, qc_sbcd_mm);
qc!(OP_SCC_8_AI, MASK_OUT_Y, qc_scc_8_ai);
qc!(OP_SCC_8_AL, MASK_EXACT, qc_scc_8_al);
qc!(OP_SCC_8_AW, MASK_EXACT, qc_scc_8_aw);
qc!(OP_SCC_8_DN, MASK_OUT_Y, qc_scc_8_dn);
qc!(OP_SCC_8_DI, MASK_OUT_Y, qc_scc_8_di);
qc!(OP_SCC_8_IX, MASK_OUT_Y, qc_scc_8_ix);
qc!(OP_SCC_8_PD, MASK_OUT_Y, qc_scc_8_pd);
qc!(OP_SCC_8_PI, MASK_OUT_Y, qc_scc_8_pi);
qc!(OP_SCS_8_AI, MASK_OUT_Y, qc_scs_8_ai);
qc!(OP_SCS_8_AL, MASK_EXACT, qc_scs_8_al);
qc!(OP_SCS_8_AW, MASK_EXACT, qc_scs_8_aw);
qc!(OP_SCS_8_DN, MASK_OUT_Y, qc_scs_8_dn);
qc!(OP_SCS_8_DI, MASK_OUT_Y, qc_scs_8_di);
qc!(OP_SCS_8_IX, MASK_OUT_Y, qc_scs_8_ix);
qc!(OP_SCS_8_PD, MASK_OUT_Y, qc_scs_8_pd);
qc!(OP_SCS_8_PI, MASK_OUT_Y, qc_scs_8_pi);
qc!(OP_SEQ_8_AI, MASK_OUT_Y, qc_seq_8_ai);
qc!(OP_SEQ_8_AL, MASK_EXACT, qc_seq_8_al);
qc!(OP_SEQ_8_AW, MASK_EXACT, qc_seq_8_aw);
qc!(OP_SEQ_8_DN, MASK_OUT_Y, qc_seq_8_dn);
qc!(OP_SEQ_8_DI, MASK_OUT_Y, qc_seq_8_di);
qc!(OP_SEQ_8_IX, MASK_OUT_Y, qc_seq_8_ix);
qc!(OP_SEQ_8_PD, MASK_OUT_Y, qc_seq_8_pd);
qc!(OP_SEQ_8_PI, MASK_OUT_Y, qc_seq_8_pi);
qc!(OP_SF_8_AI, MASK_OUT_Y, qc_sf_8_ai);
qc!(OP_SF_8_AL, MASK_EXACT, qc_sf_8_al);
qc!(OP_SF_8_AW, MASK_EXACT, qc_sf_8_aw);
qc!(OP_SF_8_DN, MASK_OUT_Y, qc_sf_8_dn);
qc!(OP_SF_8_DI, MASK_OUT_Y, qc_sf_8_di);
qc!(OP_SF_8_IX, MASK_OUT_Y, qc_sf_8_ix);
qc!(OP_SF_8_PD, MASK_OUT_Y, qc_sf_8_pd);
qc!(OP_SF_8_PI, MASK_OUT_Y, qc_sf_8_pi);
qc!(OP_SGE_8_AI, MASK_OUT_Y, qc_sge_8_ai);
qc!(OP_SGE_8_AL, MASK_EXACT, qc_sge_8_al);
qc!(OP_SGE_8_AW, MASK_EXACT, qc_sge_8_aw);
qc!(OP_SGE_8_DN, MASK_OUT_Y, qc_sge_8_dn);
qc!(OP_SGE_8_DI, MASK_OUT_Y, qc_sge_8_di);
qc!(OP_SGE_8_IX, MASK_OUT_Y, qc_sge_8_ix);
qc!(OP_SGE_8_PD, MASK_OUT_Y, qc_sge_8_pd);
qc!(OP_SGE_8_PI, MASK_OUT_Y, qc_sge_8_pi);
qc!(OP_SGT_8_AI, MASK_OUT_Y, qc_sgt_8_ai);
qc!(OP_SGT_8_AL, MASK_EXACT, qc_sgt_8_al);
qc!(OP_SGT_8_AW, MASK_EXACT, qc_sgt_8_aw);
qc!(OP_SGT_8_DN, MASK_OUT_Y, qc_sgt_8_dn);
qc!(OP_SGT_8_DI, MASK_OUT_Y, qc_sgt_8_di);
qc!(OP_SGT_8_IX, MASK_OUT_Y, qc_sgt_8_ix);
qc!(OP_SGT_8_PD, MASK_OUT_Y, qc_sgt_8_pd);
qc!(OP_SGT_8_PI, MASK_OUT_Y, qc_sgt_8_pi);
qc!(OP_SHI_8_AI, MASK_OUT_Y, qc_shi_8_ai);
qc!(OP_SHI_8_AL, MASK_EXACT, qc_shi_8_al);
qc!(OP_SHI_8_AW, MASK_EXACT, qc_shi_8_aw);
qc!(OP_SHI_8_DN, MASK_OUT_Y, qc_shi_8_dn);
qc!(OP_SHI_8_DI, MASK_OUT_Y, qc_shi_8_di);
qc!(OP_SHI_8_IX, MASK_OUT_Y, qc_shi_8_ix);
qc!(OP_SHI_8_PD, MASK_OUT_Y, qc_shi_8_pd);
qc!(OP_SHI_8_PI, MASK_OUT_Y, qc_shi_8_pi);
qc!(OP_SLE_8_AI, MASK_OUT_Y, qc_sle_8_ai);
qc!(OP_SLE_8_AL, MASK_EXACT, qc_sle_8_al);
qc!(OP_SLE_8_AW, MASK_EXACT, qc_sle_8_aw);
qc!(OP_SLE_8_DN, MASK_OUT_Y, qc_sle_8_dn);
qc!(OP_SLE_8_DI, MASK_OUT_Y, qc_sle_8_di);
qc!(OP_SLE_8_IX, MASK_OUT_Y, qc_sle_8_ix);
qc!(OP_SLE_8_PD, MASK_OUT_Y, qc_sle_8_pd);
qc!(OP_SLE_8_PI, MASK_OUT_Y, qc_sle_8_pi);
qc!(OP_SLS_8_AI, MASK_OUT_Y, qc_sls_8_ai);
qc!(OP_SLS_8_AL, MASK_EXACT, qc_sls_8_al);
qc!(OP_SLS_8_AW, MASK_EXACT, qc_sls_8_aw);
qc!(OP_SLS_8_DN, MASK_OUT_Y, qc_sls_8_dn);
qc!(OP_SLS_8_DI, MASK_OUT_Y, qc_sls_8_di);
qc!(OP_SLS_8_IX, MASK_OUT_Y, qc_sls_8_ix);
qc!(OP_SLS_8_PD, MASK_OUT_Y, qc_sls_8_pd);
qc!(OP_SLS_8_PI, MASK_OUT_Y, qc_sls_8_pi);
qc!(OP_SLT_8_AI, MASK_OUT_Y, qc_slt_8_ai);
qc!(OP_SLT_8_AL, MASK_EXACT, qc_slt_8_al);
qc!(OP_SLT_8_AW, MASK_EXACT, qc_slt_8_aw);
qc!(OP_SLT_8_DN, MASK_OUT_Y, qc_slt_8_dn);
qc!(OP_SLT_8_DI, MASK_OUT_Y, qc_slt_8_di);
qc!(OP_SLT_8_IX, MASK_OUT_Y, qc_slt_8_ix);
qc!(OP_SLT_8_PD, MASK_OUT_Y, qc_slt_8_pd);
qc!(OP_SLT_8_PI, MASK_OUT_Y, qc_slt_8_pi);
qc!(OP_SMI_8_AI, MASK_OUT_Y, qc_smi_8_ai);
qc!(OP_SMI_8_AL, MASK_EXACT, qc_smi_8_al);
qc!(OP_SMI_8_AW, MASK_EXACT, qc_smi_8_aw);
qc!(OP_SMI_8_DN, MASK_OUT_Y, qc_smi_8_dn);
qc!(OP_SMI_8_DI, MASK_OUT_Y, qc_smi_8_di);
qc!(OP_SMI_8_IX, MASK_OUT_Y, qc_smi_8_ix);
qc!(OP_SMI_8_PD, MASK_OUT_Y, qc_smi_8_pd);
qc!(OP_SMI_8_PI, MASK_OUT_Y, qc_smi_8_pi);
qc!(OP_SNE_8_AI, MASK_OUT_Y, qc_sne_8_ai);
qc!(OP_SNE_8_AL, MASK_EXACT, qc_sne_8_al);
qc!(OP_SNE_8_AW, MASK_EXACT, qc_sne_8_aw);
qc!(OP_SNE_8_DN, MASK_OUT_Y, qc_sne_8_dn);
qc!(OP_SNE_8_DI, MASK_OUT_Y, qc_sne_8_di);
qc!(OP_SNE_8_IX, MASK_OUT_Y, qc_sne_8_ix);
qc!(OP_SNE_8_PD, MASK_OUT_Y, qc_sne_8_pd);
qc!(OP_SNE_8_PI, MASK_OUT_Y, qc_sne_8_pi);
qc!(OP_SPL_8_AI, MASK_OUT_Y, qc_spl_8_ai);
qc!(OP_SPL_8_AL, MASK_EXACT, qc_spl_8_al);
qc!(OP_SPL_8_AW, MASK_EXACT, qc_spl_8_aw);
qc!(OP_SPL_8_DN, MASK_OUT_Y, qc_spl_8_dn);
qc!(OP_SPL_8_DI, MASK_OUT_Y, qc_spl_8_di);
qc!(OP_SPL_8_IX, MASK_OUT_Y, qc_spl_8_ix);
qc!(OP_SPL_8_PD, MASK_OUT_Y, qc_spl_8_pd);
qc!(OP_SPL_8_PI, MASK_OUT_Y, qc_spl_8_pi);
qc!(OP_ST_8_AI, MASK_OUT_Y, qc_st_8_ai);
qc!(OP_ST_8_AL, MASK_EXACT, qc_st_8_al);
qc!(OP_ST_8_AW, MASK_EXACT, qc_st_8_aw);
qc!(OP_ST_8_DN, MASK_OUT_Y, qc_st_8_dn);
qc!(OP_ST_8_DI, MASK_OUT_Y, qc_st_8_di);
qc!(OP_ST_8_IX, MASK_OUT_Y, qc_st_8_ix);
qc!(OP_ST_8_PD, MASK_OUT_Y, qc_st_8_pd);
qc!(OP_ST_8_PI, MASK_OUT_Y, qc_st_8_pi);
qc!(OP_SVC_8_AI, MASK_OUT_Y, qc_svc_8_ai);
qc!(OP_SVC_8_AL, MASK_EXACT, qc_svc_8_al);
qc!(OP_SVC_8_AW, MASK_EXACT, qc_svc_8_aw);
qc!(OP_SVC_8_DN, MASK_OUT_Y, qc_svc_8_dn);
qc!(OP_SVC_8_DI, MASK_OUT_Y, qc_svc_8_di);
qc!(OP_SVC_8_IX, MASK_OUT_Y, qc_svc_8_ix);
qc!(OP_SVC_8_PD, MASK_OUT_Y, qc_svc_8_pd);
qc!(OP_SVC_8_PI, MASK_OUT_Y, qc_svc_8_pi);
qc!(OP_SVS_8_AI, MASK_OUT_Y, qc_svs_8_ai);
qc!(OP_SVS_8_AL, MASK_EXACT, qc_svs_8_al);
qc!(OP_SVS_8_AW, MASK_EXACT, qc_svs_8_aw);
qc!(OP_SVS_8_DN, MASK_OUT_Y, qc_svs_8_dn);
qc!(OP_SVS_8_DI, MASK_OUT_Y, qc_svs_8_di);
qc!(OP_SVS_8_IX, MASK_OUT_Y, qc_svs_8_ix);
qc!(OP_SVS_8_PD, MASK_OUT_Y, qc_svs_8_pd);
qc!(OP_SVS_8_PI, MASK_OUT_Y, qc_svs_8_pi);
// NOTE(review): the block below uses the obsolete (mask, opcode, fn)
// argument order and is superseded by the live SHI lines above; candidate
// for deletion.
/*
qc!(MASK_OUT_Y, OP_SHI_8_AI, qc_shi_8_ai);
qc!(MASK_EXACT, OP_SHI_8_AL, qc_shi_8_al);
qc!(MASK_EXACT, OP_SHI_8_AW, qc_shi_8_aw);
qc!(MASK_OUT_Y, OP_SHI_8_DN, qc_shi_8_dn);
qc!(MASK_OUT_Y, OP_SHI_8_DI, qc_shi_8_di);
qc!(MASK_OUT_Y, OP_SHI_8_IX, qc_shi_8_ix);
qc!(MASK_OUT_Y, OP_SHI_8_PD, qc_shi_8_pd);
qc!(MASK_OUT_Y, OP_SHI_8_PI, qc_shi_8_pi);
*/
// Put qc for STOP here
// Put qc for SUB here
qc8!(OP_SUB_8_ER_DN, qc_sub_8_er_dn);
qc8!(OP_SUB_8_ER_PI, qc_sub_8_er_pi);
qc8!(OP_SUB_8_ER_PD, qc_sub_8_er_pd);
qc8!(OP_SUB_8_ER_AI, qc_sub_8_er_ai);
qc8!(OP_SUB_8_ER_DI, qc_sub_8_er_di);
qc8!(OP_SUB_8_ER_IX, qc_sub_8_er_ix);
qc8!(OP_SUB_8_ER_AW, MASK_OUT_X, qc_sub_8_er_aw);
qc8!(OP_SUB_8_ER_AL, MASK_OUT_X, qc_sub_8_er_al);
qc8!(OP_SUB_8_ER_PCDI, MASK_OUT_X, qc_sub_8_er_pcdi);
qc8!(OP_SUB_8_ER_PCIX, MASK_OUT_X, qc_sub_8_er_pcix);
qc8!(OP_SUB_8_ER_IMM, MASK_OUT_X, qc_sub_8_er_imm);
qc8!(OP_SUB_8_RE_PI, qc_sub_8_re_pi);
qc8!(OP_SUB_8_RE_PD, qc_sub_8_re_pd);
qc8!(OP_SUB_8_RE_AI, qc_sub_8_re_ai);
qc8!(OP_SUB_8_RE_DI, qc_sub_8_re_di);
qc8!(OP_SUB_8_RE_IX, qc_sub_8_re_ix);
qc8!(OP_SUB_8_RE_AW, MASK_OUT_X, qc_sub_8_re_aw);
qc8!(OP_SUB_8_RE_AL, MASK_OUT_X, qc_sub_8_re_al);
qc!(OP_SUB_16_ER_DN, qc_sub_16_er_dn);
qc!(OP_SUB_16_ER_AN, qc_sub_16_er_an);
qc!(OP_SUB_16_ER_PI, qc_sub_16_er_pi);
qc!(OP_SUB_16_ER_PD, qc_sub_16_er_pd);
qc!(OP_SUB_16_ER_AI, qc_sub_16_er_ai);
qc!(OP_SUB_16_ER_DI, qc_sub_16_er_di);
qc!(OP_SUB_16_ER_IX, qc_sub_16_er_ix);
qc!(OP_SUB_16_ER_AW, MASK_OUT_X, qc_sub_16_er_aw);
qc!(OP_SUB_16_ER_AL, MASK_OUT_X, qc_sub_16_er_al);
qc!(OP_SUB_16_ER_PCDI, MASK_OUT_X, qc_sub_16_er_pcdi);
qc!(OP_SUB_16_ER_PCIX, MASK_OUT_X, qc_sub_16_er_pcix);
qc!(OP_SUB_16_ER_IMM, MASK_OUT_X, qc_sub_16_er_imm);
qc!(OP_SUB_16_RE_PI, qc_sub_16_re_pi);
qc!(OP_SUB_16_RE_PD, qc_sub_16_re_pd);
qc!(OP_SUB_16_RE_AI, qc_sub_16_re_ai);
qc!(OP_SUB_16_RE_DI, qc_sub_16_re_di);
qc!(OP_SUB_16_RE_IX, qc_sub_16_re_ix);
qc!(OP_SUB_16_RE_AW, MASK_OUT_X, qc_sub_16_re_aw);
qc!(OP_SUB_16_RE_AL, MASK_OUT_X, qc_sub_16_re_al);
qc!(OP_SUB_32_ER_DN, qc_sub_32_er_dn);
qc!(OP_SUB_32_ER_AN, qc_sub_32_er_an);
qc!(OP_SUB_32_ER_PI, qc_sub_32_er_pi);
qc!(OP_SUB_32_ER_PD, qc_sub_32_er_pd);
qc!(OP_SUB_32_ER_AI, qc_sub_32_er_ai);
qc!(OP_SUB_32_ER_DI, qc_sub_32_er_di);
qc!(OP_SUB_32_ER_IX, qc_sub_32_er_ix);
qc!(OP_SUB_32_ER_AW, MASK_OUT_X, qc_sub_32_er_aw);
qc!(OP_SUB_32_ER_AL, MASK_OUT_X, qc_sub_32_er_al);
qc!(OP_SUB_32_ER_PCDI, MASK_OUT_X, qc_sub_32_er_pcdi);
qc!(OP_SUB_32_ER_PCIX, MASK_OUT_X, qc_sub_32_er_pcix);
qc!(OP_SUB_32_ER_IMM, MASK_OUT_X, qc_sub_32_er_imm);
qc!(OP_SUB_32_RE_PI, qc_sub_32_re_pi);
qc!(OP_SUB_32_RE_PD, qc_sub_32_re_pd);
qc!(OP_SUB_32_RE_AI, qc_sub_32_re_ai);
qc!(OP_SUB_32_RE_DI, qc_sub_32_re_di);
qc!(OP_SUB_32_RE_IX, qc_sub_32_re_ix);
qc!(OP_SUB_32_RE_AW, MASK_OUT_X, qc_sub_32_re_aw);
qc!(OP_SUB_32_RE_AL, MASK_OUT_X, qc_sub_32_re_al);
// Put qc for SUBA here
qc!(OP_SUBA_16_DN, qc_suba_16_dn);
qc!(OP_SUBA_16_AN, qc_suba_16_an);
qc!(OP_SUBA_16_PI, qc_suba_16_pi);
qc!(OP_SUBA_16_PD, qc_suba_16_pd);
qc!(OP_SUBA_16_AI, qc_suba_16_ai);
qc!(OP_SUBA_16_DI, qc_suba_16_di);
qc!(OP_SUBA_16_IX, qc_suba_16_ix);
qc!(OP_SUBA_16_AW, MASK_OUT_X, qc_suba_16_aw);
qc!(OP_SUBA_16_AL, MASK_OUT_X, qc_suba_16_al);
qc!(OP_SUBA_16_PCDI, MASK_OUT_X, qc_suba_16_pcdi);
qc!(OP_SUBA_16_PCIX, MASK_OUT_X, qc_suba_16_pcix);
qc!(OP_SUBA_16_IMM, MASK_OUT_X, qc_suba_16_imm);
qc!(OP_SUBA_32_DN, qc_suba_32_dn);
qc!(OP_SUBA_32_AN, qc_suba_32_an);
qc!(OP_SUBA_32_PI, qc_suba_32_pi);
qc!(OP_SUBA_32_PD, qc_suba_32_pd);
qc!(OP_SUBA_32_AI, qc_suba_32_ai);
qc!(OP_SUBA_32_DI, qc_suba_32_di);
qc!(OP_SUBA_32_IX, qc_suba_32_ix);
qc!(OP_SUBA_32_AW, MASK_OUT_X, qc_suba_32_aw);
qc!(OP_SUBA_32_AL, MASK_OUT_X, qc_suba_32_al);
qc!(OP_SUBA_32_PCDI, MASK_OUT_X, qc_suba_32_pcdi);
qc!(OP_SUBA_32_PCIX, MASK_OUT_X, qc_suba_32_pcix);
qc!(OP_SUBA_32_IMM, MASK_OUT_X, qc_suba_32_imm);
// Put qc for SUBI here
qc8!(OP_SUBI_8_DN, MASK_OUT_Y, qc_subi_8_dn);
qc8!(OP_SUBI_8_PI, MASK_OUT_Y, qc_subi_8_pi);
qc8!(OP_SUBI_8_PD, MASK_OUT_Y, qc_subi_8_pd);
qc8!(OP_SUBI_8_AI, MASK_OUT_Y, qc_subi_8_ai);
qc8!(OP_SUBI_8_DI, MASK_OUT_Y, qc_subi_8_di);
qc8!(OP_SUBI_8_IX, MASK_OUT_Y, qc_subi_8_ix);
qc8!(OP_SUBI_8_AW, MASK_EXACT, qc_subi_8_aw);
qc8!(OP_SUBI_8_AL, MASK_EXACT, qc_subi_8_al);
qc!(OP_SUBI_16_DN, MASK_OUT_Y, qc_subi_16_dn);
qc!(OP_SUBI_16_PI, MASK_OUT_Y, qc_subi_16_pi);
qc!(OP_SUBI_16_PD, MASK_OUT_Y, qc_subi_16_pd);
qc!(OP_SUBI_16_AI, MASK_OUT_Y, qc_subi_16_ai);
qc!(OP_SUBI_16_DI, MASK_OUT_Y, qc_subi_16_di);
qc!(OP_SUBI_16_IX, MASK_OUT_Y, qc_subi_16_ix);
qc!(OP_SUBI_16_AW, MASK_EXACT, qc_subi_16_aw);
qc!(OP_SUBI_16_AL, MASK_EXACT, qc_subi_16_al);
qc!(OP_SUBI_32_DN, MASK_OUT_Y, qc_subi_32_dn);
qc!(OP_SUBI_32_PI, MASK_OUT_Y, qc_subi_32_pi);
qc!(OP_SUBI_32_PD, MASK_OUT_Y, qc_subi_32_pd);
qc!(OP_SUBI_32_AI, MASK_OUT_Y, qc_subi_32_ai);
qc!(OP_SUBI_32_DI, MASK_OUT_Y, qc_subi_32_di);
qc!(OP_SUBI_32_IX, MASK_OUT_Y, qc_subi_32_ix);
qc!(OP_SUBI_32_AW, MASK_EXACT, qc_subi_32_aw);
qc!(OP_SUBI_32_AL, MASK_EXACT, qc_subi_32_al);
// Put qc for SUBQ here
qc8!(OP_SUBQ_8_DN, qc_subq_8_dn);
qc8!(OP_SUBQ_8_PI, qc_subq_8_pi);
qc8!(OP_SUBQ_8_PD, qc_subq_8_pd);
qc8!(OP_SUBQ_8_AI, qc_subq_8_ai);
qc8!(OP_SUBQ_8_DI, qc_subq_8_di);
qc8!(OP_SUBQ_8_IX, qc_subq_8_ix);
qc8!(OP_SUBQ_8_AW, MASK_OUT_X, qc_subq_8_aw);
qc8!(OP_SUBQ_8_AL, MASK_OUT_X, qc_subq_8_al);
qc!(OP_SUBQ_16_DN, qc_subq_16_dn);
qc!(OP_SUBQ_16_AN, qc_subq_16_an);
qc!(OP_SUBQ_16_PI, qc_subq_16_pi);
qc!(OP_SUBQ_16_PD, qc_subq_16_pd);
qc!(OP_SUBQ_16_AI, qc_subq_16_ai);
qc!(OP_SUBQ_16_DI, qc_subq_16_di);
qc!(OP_SUBQ_16_IX, qc_subq_16_ix);
qc!(OP_SUBQ_16_AW, MASK_OUT_X, qc_subq_16_aw);
qc!(OP_SUBQ_16_AL, MASK_OUT_X, qc_subq_16_al);
qc!(OP_SUBQ_32_DN, qc_subq_32_dn);
qc!(OP_SUBQ_32_AN, qc_subq_32_an);
qc!(OP_SUBQ_32_PI, qc_subq_32_pi);
qc!(OP_SUBQ_32_PD, qc_subq_32_pd);
qc!(OP_SUBQ_32_AI, qc_subq_32_ai);
qc!(OP_SUBQ_32_DI, qc_subq_32_di);
qc!(OP_SUBQ_32_IX, qc_subq_32_ix);
qc!(OP_SUBQ_32_AW, MASK_OUT_X, qc_subq_32_aw);
qc!(OP_SUBQ_32_AL, MASK_OUT_X, qc_subq_32_al);
// Put qc for SUBX here
qc8!(OP_SUBX_8_RR, qc_subx_8_rr);
qc8!(OP_SUBX_8_MM, qc_subx_8_mm);
qc!(OP_SUBX_16_RR, qc_subx_16_rr);
qc!(OP_SUBX_16_MM, qc_subx_16_mm);
qc!(OP_SUBX_32_RR, qc_subx_32_rr);
qc!(OP_SUBX_32_MM, qc_subx_32_mm);
// Put qc for SWAP here
qc!(OP_SWAP_32_DN, qc_swap_32_dn);
//
// Put qc for TAS here
// Put qc for TRAP here
// Put qc for TRAPV here
// Put qc for TST here
// Put qc for UNLK here
// Compare one piece of state between the Musashi-backed core and the r68k
// core. On mismatch, prints a diagnostic and does `return false` from the
// ENCLOSING function (assert_cores_equal) rather than panicking, so the
// caller decides how to react. The `match` on a tuple of references mirrors
// how std's assert_eq! binds both operands exactly once.
macro_rules! core_eq {
// Indexed field, e.g. `core_eq!(a, b.dar[i])`; values printed in hex.
($left:ident , $right:ident . $field:ident [ $index:expr ]) => ({
match (&($left.$field[$index]), &($right.$field[$index])) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
println!("core incoherence: `{}[{}]` differs \
({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), $index, stringify!($left), left_val, stringify!($right), right_val);
return false;
}
}
}
});
// Zero-arg method call with trailing `?` marker: compares the results using
// Debug formatting, for values that are not hex-formattable (e.g. flags).
($left:ident , $right:ident . $field:ident () ?) => ({
match (&($left.$field()), &($right.$field())) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
println!("core incoherence: `{}()` differs \
({}: `{:?}`, {}: `{:?}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val);
return false;
}
}
}
});
// Zero-arg method call; results printed in hex.
($left:ident , $right:ident . $field:ident ()) => ({
match (&($left.$field()), &($right.$field())) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
println!("core incoherence: `{}()` differs \
({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val);
return false;
}
}
}
});
// Plain field; printed in hex.
($left:ident , $right:ident . $field:ident) => ({
match (&($left.$field), &($right.$field)) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
println!("core incoherence: `{}` differs \
({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val);
return false;
}
}
}
})
}
// Deep-compare the two cores after both executed the same instruction(s).
// Memory traffic is checked with a hard assert; register/flag mismatches
// print a diagnostic and make this return false (see core_eq! above), so
// callers must check the returned bool to actually fail a test.
fn assert_cores_equal(musashi: &Core, r68k: &Core) -> bool {
// check memory accesses match up
assert_equal(get_ops(), r68k.mem.logger.ops());
core_eq!(musashi, r68k.pc);
// Flags compare via Debug (the `?` arm); the remaining items in hex.
core_eq!(musashi, r68k.flags() ?);
core_eq!(musashi, r68k.status_register());
core_eq!(musashi, r68k.ssp());
core_eq!(musashi, r68k.usp());
// All 16 data/address registers.
for i in (0..16).rev() {
core_eq!(musashi, r68k.dar[i]);
}
true
}
#[test]
// Writing a value into D0 through Musashi and reading it back must be lossless.
fn roundtrip_d0() {
    let value = 256;
    assert_eq!(value, roundtrip_register(Register::D0, value));
}
#[test]
// Execute a single ABCD (BCD add) on the Musashi-backed core and verify both
// the register results and the logged memory traffic.
fn roundtrip_abcd_rr() {
let pc = 0x40;
// 0xc101: ABCD D0, D1
let mut cpu = Core::new_mem(pc, &[0xc1, 0x01, 0x00, 0x00]);
cpu.dar[0] = 0x17;
cpu.dar[1] = 0x27;
// Sentinel in an uninvolved register; must survive the instruction.
cpu.dar[5] = 0x55555;
reset_and_execute1(&mut cpu);
// 17 + 27 is 44 (BCD: 0x17 + 0x27 = 0x44)
assert_eq!(0x44, cpu.dar[0]);
assert_eq!(0x27, cpu.dar[1]);
assert_eq!(0x55555, cpu.dar[5]);
let ops = get_ops();
// Only the instruction fetch itself should have touched memory.
assert_eq!(1, ops.len());
assert_eq!(Operation::ReadLong(SUPERVISOR_PROGRAM, pc, 0xc1010000), ops[0]);
}
#[test]
// Run the same ABCD instruction on Musashi and on r68k and require the two
// resulting cores to be identical.
fn compare_abcd_rr() {
let pc = 0x40;
// 0xc300: ABCD D1, D0
let mut musashi = Core::new_mem(pc, &[0xc3, 0x00]);
musashi.dar[0] = 0x16;
musashi.dar[1] = 0x26;
let mut r68k = musashi.clone(); // so very self-aware!
reset_and_execute1(&mut musashi);
r68k.execute1();
// BCD: 0x16 + 0x26 = 0x42
assert_eq!(0x42, r68k.dar[1]);
// Fix: assert_cores_equal reports register incoherence by returning false
// (see core_eq!); the result used to be discarded here, so such a mismatch
// could never fail the test.
assert!(assert_cores_equal(&musashi, &r68k));
}
#[test]
#[allow(unused_variables)]
// Two ABCDs back to back: verifies the Musashi wrapper can step one
// instruction at a time while staying in lock-step with r68k.
fn run_abcd_rr_twice() {
// Serialise access to Musashi's global state.
let mutex = MUSASHI_LOCK.lock().unwrap();
let pc = 0x40;
// 0xc300: ABCD D1, D0
// 0xc302: ABCD D1, D2
let mut musashi = Core::new_mem(pc, &[0xc3, 0x00, 0xc3, 0x02]);
musashi.dar[0] = 0x16;
musashi.dar[1] = 0x26;
musashi.dar[2] = 0x31;
let mut r68k = musashi.clone(); // so very self-aware!
initialize_musashi(&mut musashi);
// execute ABCD D1, D0 (BCD: 0x16 + 0x26 = 0x42)
execute1(&mut musashi);
r68k.execute1();
assert_eq!(0x42, musashi.dar[1]);
assert_eq!(0x42, r68k.dar[1]);
// then execute a second instruction (ABCD D1, D2) on the core
// (BCD: 0x42 + 0x31 = 0x73)
execute1(&mut musashi);
r68k.execute1();
assert_eq!(0x73, musashi.dar[1]);
assert_eq!(0x73, r68k.dar[1]);
// Fix: the bool from assert_cores_equal was previously discarded, so
// register incoherence could not fail the test.
assert!(assert_cores_equal(&musashi, &r68k));
}
#[test]
#[allow(unused_variables)]
// Both cores must take the address-error exception (vector 3) the same way.
fn compare_address_error_actions() {
// Serialise access to Musashi's global state.
let mutex = MUSASHI_LOCK.lock().unwrap();
// using an odd absolute address should force an address error
// opcodes d278,0107 is ADD.W $0107, D1
let mut musashi = Core::new_mem(0x40, &[0xd2, 0x78, 0x01, 0x07]);
let vec3 = 0x200;
// Point the address-error vector (slot 3) at a handler containing another
// ADD.W so the exception path has something to execute.
musashi.mem.write_long(SUPERVISOR_PROGRAM, 3*4, vec3);
musashi.mem.write_long(SUPERVISOR_PROGRAM, vec3, 0xd2780108);
musashi.dar[15] = 0x100;
let mut r68k = musashi.clone(); // so very self-aware!
initialize_musashi(&mut musashi);
execute1(&mut musashi);
//execute1(&mut musashi);
r68k.execute1();
r68k.execute1();
// Fix: assert on the comparison result; it was previously discarded, so
// register incoherence could not fail the test.
assert!(assert_cores_equal(&musashi, &r68k));
}
#[test]
#[allow(unused_variables)]
// Both cores must take the illegal-instruction exception (vector 4) the same way.
fn compare_illegal_instruction_actions() {
// Serialise access to Musashi's global state.
let mutex = MUSASHI_LOCK.lock().unwrap();
// d208 is ADD.B A0,D0, which is illegal
// Fix: the second byte was written as the decimal literal `08` (same value
// as 0x08, but misleading next to the hex comment and bug-prone); spell it
// in hex like every other opcode byte in this file.
let mut musashi = Core::new_mem(0x40, &[0xd2, 0x08]);
let vec4 = 0x200;
// Point the illegal-instruction vector (slot 4) at a handler.
musashi.mem.write_long(SUPERVISOR_PROGRAM, 4*4, vec4);
musashi.mem.write_long(SUPERVISOR_PROGRAM, vec4, 0xd2780108);
musashi.dar[15] = 0x100;
let mut r68k = musashi.clone(); // so very self-aware!
initialize_musashi(&mut musashi);
execute1(&mut musashi);
//execute1(&mut musashi);
r68k.execute1();
//r68k.execute1();
// Fix: assert on the comparison result; it was previously discarded, so
// register incoherence could not fail the test.
assert!(assert_cores_equal(&musashi, &r68k));
}
use std::ptr;
use super::m68k_get_reg;
#[test]
#[allow(unused_variables)]
// With the supervisor (S) bit set, A7 must be handed to Musashi as ISP and
// the saved inactive_usp as USP.
fn stackpointers_are_correct_when_starting_in_supervisor_mode() {
// Serialise access to Musashi's global state.
let mutex = MUSASHI_LOCK.lock().unwrap();
let pc = 0x40;
// 0xc300: ABCD D1, D0
// 0xc302: ABCD D1, D2
let mut musashi = Core::new_mem(pc, &[0xc3, 0x00, 0xc3, 0x02]);
// Bit 13 of SR is the supervisor (S) flag.
musashi.sr_to_flags((1<<13));
musashi.inactive_usp = 0x200; // User SP
musashi.dar[15] = 0x100; // Supa SP
initialize_musashi(&mut musashi);
unsafe {
assert!((1<<13) & m68k_get_reg(ptr::null_mut(), Register::SR) > 0);
assert_eq!(0x100, m68k_get_reg(ptr::null_mut(), Register::ISP));
assert_eq!(0x200, m68k_get_reg(ptr::null_mut(), Register::USP));
}
}
#[test]
#[allow(unused_variables)]
// With the supervisor (S) bit clear, A7 must be handed to Musashi as USP and
// the saved inactive_ssp as ISP.
fn stackpointers_are_correct_when_starting_in_user_mode() {
// Serialise access to Musashi's global state.
let mutex = MUSASHI_LOCK.lock().unwrap();
let pc = 0x40;
// 0xc300: ABCD D1, D0
// 0xc302: ABCD D1, D2
let mut musashi = Core::new_mem(pc, &[0xc3, 0x00, 0xc3, 0x02]);
// SR = 0: user mode (S flag, bit 13, cleared).
musashi.sr_to_flags(0);
musashi.dar[15] = 0x200; // User SP
musashi.inactive_ssp = 0x100; // Supa SP
initialize_musashi(&mut musashi);
unsafe {
assert!((1<<13) & m68k_get_reg(ptr::null_mut(), Register::SR) == 0);
assert_eq!(0x100, m68k_get_reg(ptr::null_mut(), Register::ISP));
assert_eq!(0x200, m68k_get_reg(ptr::null_mut(), Register::USP));
}
}
}
// Report hammering on the full 16 bits of the opcode
// Integration with Musashi
extern crate libc;
// Register enum copied from Musashi's m68k_register_t enum
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq)]
#[allow(dead_code)]
// NOTE: variant order must track Musashi's m68k_register_t exactly — with
// repr(C) the positional discriminants are what cross the FFI boundary in
// m68k_get_reg/m68k_set_reg.
pub enum Register {
/* Real registers */
D0, /* Data registers */
D1,
D2,
D3,
D4,
D5,
D6,
D7,
A0, /* Address registers */
A1,
A2,
A3,
A4,
A5,
A6,
A7,
PC, /* Program Counter */
SR, /* Status Register */
SP, /* The current Stack Pointer (located in A7) */
USP, /* User Stack Pointer */
ISP, /* Interrupt Stack Pointer */
MSP, /* Master Stack Pointer */
SFC, /* Source Function Code */
DFC, /* Destination Function Code */
VBR, /* Vector Base Register */
CACR, /* Cache Control Register */
CAAR, /* Cache Address Register */
/* Assumed registers */
/* These are cheat registers which emulate the 1-longword prefetch
* present in the 68000 and 68010.
*/
PrefAddr, /* Last prefetch address */
PrefData, /* Last prefetch data */
/* Convenience registers */
PPC, /* Previous value in the program counter */
IR, /* Instruction register */
CpuType /* Type of CPU being run */
}
#[repr(C)]
#[derive(Copy, Clone)]
#[allow(dead_code)]
// Mirrors Musashi's m68k_cpu_type_t; passed to m68k_set_cpu_type. Variant
// order (and thus the repr(C) discriminants) must match the C enum.
enum CpuType
{
Invalid,
M68000,
M68010,
M68EC020,
M68020,
M68030, /* Supported by disassembler ONLY */
M68040 /* Supported by disassembler ONLY */
}
// Raw bindings into the statically linked Musashi C core.
#[link(name = "musashi", kind = "static")]
extern {
fn m68k_init();
fn m68k_set_cpu_type(cputype: CpuType);
// Resets the emulated CPU (Musashi's RESET-line pulse).
fn m68k_pulse_reset();
// Runs the core for the requested cycle budget; returns cycles consumed.
fn m68k_execute(num_cycles: i32) -> i32;
// `context` is unused by Musashi's single-context build; callers pass null.
fn m68k_get_reg(context: *mut libc::c_void, regnum: Register) -> u32;
fn m68k_set_reg(regnum: Register, value: u32);
}
use ram::{Operation, AddressBus, AddressSpace, SUPERVISOR_PROGRAM, SUPERVISOR_DATA, USER_PROGRAM, USER_DATA, ADDRBUS_MASK};
// 16 MB address space (plus a little slack so a 32-bit access at the top of
// the masked range cannot index out of bounds), pre-filled with an 0xaa
// junk pattern so uninitialised reads are recognisable.
static mut musashi_memory: [u8; 16*1024*1024+16] = [0xaa; 16*1024*1024+16];
// as statics are not allowed to have destructors, allocate a
// big enough array to hold the small number of operations
// expected from executing a very limited number of opcodes
static mut musashi_ops: [Operation; 128] = [Operation::None; 128];
// Number of valid entries in musashi_ops.
static mut musashi_opcount: usize = 0;
// Address space implied by the most recent m68k_set_fc callback.
static mut musashi_address_space: AddressSpace = SUPERVISOR_PROGRAM;
// Append `op` to the global Musashi operation log. Once the fixed-size log
// is full, further operations are silently dropped — intentional, since the
// tests only ever execute a handful of opcodes (see musashi_ops above).
unsafe fn register_op(op: Operation) {
    if let Some(slot) = musashi_ops.get_mut(musashi_opcount) {
        // println!("mem_op {:?}", op);
        *slot = op;
        musashi_opcount += 1;
    }
}
// callbacks from Musashi
#[no_mangle]
// Musashi callback: read one byte from the shared test memory, logging the
// access (with the address-bus-masked address) for later comparison.
pub extern fn m68k_read_memory_8(address: u32) -> u32 {
    unsafe {
        let masked = address & ADDRBUS_MASK;
        let byte = musashi_memory[masked as usize];
        register_op(Operation::ReadByte(musashi_address_space, masked, byte));
        byte as u32
    }
}
#[no_mangle]
// Musashi callback: read a big-endian 16-bit word from the shared test
// memory, logging the access (with the masked address).
pub extern fn m68k_read_memory_16(address: u32) -> u32 {
    unsafe {
        let masked = address & ADDRBUS_MASK;
        let base = masked as usize;
        // Big-endian assembly: high byte first.
        let hi = musashi_memory[base] as u16;
        let lo = musashi_memory[base + 1] as u16;
        let value = (hi << 8) | lo;
        register_op(Operation::ReadWord(musashi_address_space, masked, value));
        value as u32
    }
}
#[no_mangle]
// Musashi callback: read a big-endian 32-bit long from the shared test
// memory and log the access.
pub extern fn m68k_read_memory_32(address: u32) -> u32 {
unsafe {
let addr = (address & ADDRBUS_MASK) as usize;
// Big-endian assembly: most significant byte first.
let value = ((musashi_memory[addr+0] as u32) << 24
|(musashi_memory[addr+1] as u32) << 16
|(musashi_memory[addr+2] as u32) << 8
|(musashi_memory[addr+3] as u32) << 0) as u32;
// NOTE(review): unlike m68k_read_memory_8/16, the logged Operation carries
// the UNMASKED address here — confirm this matches what r68k's logger
// records, since assert_cores_equal compares the two op lists verbatim.
let op = Operation::ReadLong(musashi_address_space, address, value);
register_op(op);
value
}
}
#[no_mangle]
// Musashi callback: write one byte to the shared test memory, logging the
// access first. Note: the logged Operation carries the unmasked address, as
// elsewhere on the write path.
pub extern fn m68k_write_memory_8(address: u32, value: u32) {
    unsafe {
        register_op(Operation::WriteByte(musashi_address_space, address, value));
        let idx = (address & ADDRBUS_MASK) as usize;
        musashi_memory[idx] = value as u8;
    }
}
#[no_mangle]
// Musashi callback: write a big-endian 16-bit word to the shared test
// memory, logging the access first (with the unmasked address).
pub extern fn m68k_write_memory_16(address: u32, value: u32) {
    unsafe {
        register_op(Operation::WriteWord(musashi_address_space, address, value));
        let base = (address & ADDRBUS_MASK) as usize;
        // Big-endian store: high byte first.
        musashi_memory[base] = (value >> 8) as u8;
        musashi_memory[base + 1] = value as u8;
    }
}
#[no_mangle]
// Musashi callback: write a big-endian 32-bit long to the shared test
// memory, logging the access first (with the unmasked address).
pub extern fn m68k_write_memory_32(address: u32, value: u32) {
    unsafe {
        register_op(Operation::WriteLong(musashi_address_space, address, value));
        let base = (address & ADDRBUS_MASK) as usize;
        // Big-endian store: most significant byte first.
        musashi_memory[base] = (value >> 24) as u8;
        musashi_memory[base + 1] = (value >> 16) as u8;
        musashi_memory[base + 2] = (value >> 8) as u8;
        musashi_memory[base + 3] = value as u8;
    }
}
// Musashi callback on CPU reset; not expected during these tests, so fail fast.
#[no_mangle]
pub extern fn cpu_pulse_reset() {panic!("pr")}
// Musashi callback on long branches; intentionally a no-op.
#[no_mangle]
pub extern fn cpu_long_branch() {}
// Musashi function-code callback: translate the 68k FC pins into the
// address space that subsequent memory operations are logged under.
#[no_mangle]
pub extern fn m68k_set_fc(fc: u32) {
    let space = match fc {
        1 => USER_DATA,
        2 => USER_PROGRAM,
        5 => SUPERVISOR_DATA,
        6 => SUPERVISOR_PROGRAM,
        _ => panic!("unknown fc: {}", fc),
    };
    unsafe {
        musashi_address_space = space;
    }
}
// Musashi interrupt-acknowledge callback; no test raises interrupts,
// so reaching this is a bug.
#[allow(unused_variables)]
#[no_mangle]
pub extern fn cpu_irq_ack(level: i32) -> i32 {panic!("ia")}
// Musashi per-instruction callback; intentionally a no-op.
#[no_mangle]
pub extern fn cpu_instr_callback() {}
use std::ptr;
#[allow(unused_variables)]
// Smoke test for the Musashi FFI: set D0 via the C API and print it back.
// The guard is bound to a name (not `_`) so the lock on the non-thread-safe
// Musashi library is held for the whole call.
pub fn experimental_communication() {
    let mutex = MUSASHI_LOCK.lock().unwrap();
    unsafe {
        m68k_init();
        m68k_set_cpu_type(CpuType::M68000);
        m68k_set_reg(Register::D0, 123);
        println!("D0: {}", m68k_get_reg(ptr::null_mut(), Register::D0));
    }
}
#[allow(unused_variables)]
// Write `value` into `reg` through Musashi and read it straight back,
// holding the global lock since Musashi is not thread-safe.
pub fn roundtrip_register(reg: Register, value: u32) -> u32 {
    let mutex = MUSASHI_LOCK.lock().unwrap();
    unsafe {
        m68k_init();
        m68k_set_cpu_type(CpuType::M68000);
        m68k_set_reg(reg, value);
        m68k_get_reg(ptr::null_mut(), reg)
    }
}
use cpu::{Core, Cycles};
// D0-D7 followed by A0-A7, in the same index order as Core::dar.
static REGS:[Register; 16] = [Register::D0, Register::D1, Register::D2, Register::D3, Register::D4, Register::D5, Register::D6, Register::D7, Register::A0, Register::A1, Register::A2, Register::A3, Register::A4, Register::A5, Register::A6, Register::A7];
// Snapshot the global Musashi operation log into an owned Vec.
fn get_ops() -> Vec<Operation> {
    let mut snapshot = Vec::new();
    unsafe {
        let count = musashi_opcount;
        snapshot.reserve(count);
        for idx in 0..count {
            snapshot.push(musashi_ops[idx]);
        }
    }
    snapshot
}
// since we know exactly where writes have occurred, undoing is much
// less work than simply rewriting all 16M
fn undo_musashi_writes() {
    for op in get_ops()
    {
        // Re-fill every written location with a 0xaa byte pattern
        // (presumably the memory's initial fill value — TODO confirm);
        // reads need no undo.
        match op {
            Operation::WriteByte(_, addr, _) => m68k_write_memory_8(addr, 0xaa),
            Operation::WriteWord(_, addr, _) => m68k_write_memory_16(addr, 0xaaaa),
            Operation::WriteLong(_, addr, _) => m68k_write_memory_32(addr, 0xaaaaaaaa),
            _ => (),
        }
    }
}
/// Prepare Musashi so its state mirrors `core` before single-stepping:
/// undo previous memory writes, point the reset vectors at the core's
/// SSP/PC, pulse reset, copy registers/SR/USP over, and clear the op log.
///
/// Fix: `for (i, &reg)` had been mangled into `for (i, ®)` by an
/// HTML-entity corruption (`&reg;` rendered as ®), which does not compile.
pub fn initialize_musashi(core: &mut Core) {
    // println!("initialize_musashi {:?}", thread::current());
    unsafe {
        undo_musashi_writes();
        m68k_init();
        m68k_set_cpu_type(CpuType::M68000);
        // 68k reset vectors: initial SSP at address 0, initial PC at 4.
        m68k_write_memory_32(0, core.ssp());
        m68k_write_memory_32(4, core.pc);
        m68k_pulse_reset();
        // Resetting opcount, because m68k_pulse_reset causes irrelevant
        // reads from 0x00000000 to set PC/SP, a jump to PC and
        // resetting of state. But we don't want to test those ops.
        musashi_opcount = 0;
        //m68k_set_reg(Register::PC, core.pc);
        m68k_set_reg(Register::USP, core.usp());
        // if SR clears S_FLAG then SSP <- A7, A7 <- USP
        m68k_set_reg(Register::SR, core.status_register() as u32);
        for (i, &reg) in REGS.iter().enumerate() {
            // Skip A7 (index 15): the active stack pointer was already
            // established by the reset vector and USP/SR handling above.
            if i != 15 {
                m68k_set_reg(reg, core.dar[i]);
            }
        }
        // just reset first and last KB of memory, as it takes too long to
        // reset all 16MB
        let last_kb = (1 << 24) - 1024;
        for i in 0..1024usize {
            musashi_memory[i] = core.mem.read_byte(SUPERVISOR_PROGRAM, i as u32) as u8;
            musashi_memory[last_kb + i] = core.mem.read_byte(SUPERVISOR_PROGRAM, (last_kb + i) as u32) as u8;
        }
    }
}
pub fn execute1(core: &mut Core) -> Cycles {
// println!("execute1 mushashi {:?}", thread::current());
unsafe {
let cycle_count = m68k_execute(1);
for (i, ®) in REGS.iter().enumerate() {
core.dar[i] = m68k_get_reg(ptr::null_mut(), reg);
}
core.pc = m68k_get_reg(ptr::null_mut(), Register::PC);
core.sr_to_flags(m68k_get_reg(ptr::null_mut(), Register::SR) as u16);
if core.s_flag > 0 {
core.inactive_usp = m68k_get_reg(ptr::null_mut(), Register::USP);
core.dar[15] = m68k_get_reg(ptr::null_mut(), Register::ISP);
} else {
core.dar[15] = m68k_get_reg(ptr::null_mut(), Register::USP);
core.inactive_ssp = m68k_get_reg(ptr::null_mut(), Register::ISP);
}
Cycles(cycle_count)
}
}
#[allow(unused_variables)]
// Serialize on MUSASHI_LOCK (Musashi is not thread-safe), sync Musashi
// with `core`, then run a single instruction and return its cycle count.
pub fn reset_and_execute1(core: &mut Core) -> Cycles {
    let mutex = MUSASHI_LOCK.lock().unwrap();
    initialize_musashi(core);
    execute1(core)
}
// Talking to Musashi isn't thread-safe, and the tests run in multiple
// threads, which causes intermittent test failures unless access is
// serialized with something like a mutex. Musashi functions are called in
// global/static context, and statics are not allowed to have
// destructors.
use std::sync::{Arc, Mutex};
// using lazy_static! to work around "statics are not allowed to have destructors [E0493]"
lazy_static! {
    // Serializes all access to the non-reentrant Musashi C library.
    static ref MUSASHI_LOCK: Arc<Mutex<i32>> = Arc::new(Mutex::new(0));
    // Serializes the quickcheck tests, which share `opcode_under_test`.
    static ref QUICKCHECK_LOCK: Arc<Mutex<i32>> = Arc::new(Mutex::new(0));
}
#[cfg(test)]
mod tests {
use super::*;
use ram::SUPERVISOR_PROGRAM;
use super::MUSASHI_LOCK;
use super::QUICKCHECK_LOCK;
use ram::{Operation, AddressBus};
use cpu::Core;
extern crate quickcheck;
use self::quickcheck::*;
// A raw 32-bit value quickcheck generates as register contents.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Bitpattern(u32);
impl Arbitrary for Bitpattern {
    fn arbitrary<G: Gen>(g: &mut G) -> Bitpattern {
        // Earlier experiments biased generation towards nibble masks and
        // byte-assembled values; a plain arbitrary u32 is used instead.
        Bitpattern(Arbitrary::arbitrary(g))
    }
    fn shrink(&self) -> Box<Iterator<Item=Self>> {
        // Delegate to u32 shrinking (ideally this would only clear bits,
        // never set new ones) and re-wrap each candidate.
        let Bitpattern(value) = *self;
        Box::new(value.shrink().map(Bitpattern))
    }
}
impl Arbitrary for Register {
    /// Pick a random register for quickcheck to assign a Bitpattern to.
    /// A7/SP/PC are deliberately left out of the candidate list.
    ///
    /// Fix: `&reg`/`&regs` had been mangled into `®`/`®s` by an
    /// HTML-entity corruption (`&reg;` rendered as ®) — did not compile.
    fn arbitrary<G: Gen>(g: &mut G) -> Register {
        let regs = [Register::D0, Register::D1, Register::D2, Register::D3, Register::D4, Register::D5, Register::D6, Register::D7, Register::A0, Register::A1, Register::A2, Register::A3, Register::A4, Register::A5, Register::A6,
            Register::SR, // Register::A7, Register::SP, Register::PC
        ];
        if let Some(&reg) = g.choose(&regs) {
            reg
        } else {
            unreachable!();
        }
    }
}
extern crate rand;
use itertools::{Itertools, assert_equal};
use cpu::ops::handlers::*;
use super::get_ops;
// struct OpSeq {
// mask: u32,
// matching: u32,
// current_op: u32,
// }
// impl OpSeq {
// fn new(mask: u32, matching: u32) -> OpSeq {
// OpSeq { mask: mask, matching: matching, current_op: 0 }
// }
// }
// impl Iterator for OpSeq {
// type Item = u32;
// fn next(&mut self) -> Option<u32> {
// if self.current_op == 0x10000 {
// None
// } else {
// while (self.current_op & self.mask) != self.matching && self.current_op < 0x10000 {
// self.current_op += 1;
// }
// if self.current_op == 0x10000 {
// return None;
// }
// let res = Some(self.current_op);
// self.current_op += 1;
// res
// }
// }
// }
// All 16-bit opcode values >= `matching` whose bits under `mask`
// equal `matching`.
fn opcodes(mask: u32, matching: u32) -> Vec<u16> {
    let mut found = Vec::new();
    for opcode in matching..0x10000u32 {
        if opcode & mask == matching {
            found.push(opcode as u16);
        }
    }
    found
}
// Iterator form of `opcodes`: lazily yields all values in
// matching..0x10000 whose masked bits equal `matching`.
macro_rules! opcodes {
    ($mask:expr , $matching:expr) => {($matching..0x10000).filter(|opcode| (opcode & $mask) == $matching)}
}
#[test]
fn opcodes_from_mask_and_matching(){
    // ABCD Dx,Dy leaves both 3-bit register fields free: 8 * 8 = 64 opcodes.
    let mut opseq = Vec::new();
    opseq.extend(opcodes!(MASK_OUT_X_Y, OP_ABCD_8_RR));
    assert_eq!(64, opseq.len());
    // All generated opcodes must be distinct.
    let ops = opseq.iter().unique();
    assert_eq!(64, ops.count());
    if let Some(&min) = opseq.iter().min() {
        assert_eq!(0b1100000100000000, min);
    }
    if let Some(&max) = opseq.iter().max() {
        assert_eq!(0b1100111100000111, max);
    }
    for code in opseq.iter() {
        // Every generated opcode must contain the ABCD base bit pattern.
        assert_eq!(OP_ABCD_8_RR, code & OP_ABCD_8_RR);
    }
}
// The opcode the current quickcheck run exercises; written by the
// qc!/qc8!-generated tests and read by the hammer functions
// (access serialized via QUICKCHECK_LOCK).
static mut opcode_under_test: u16 = 0;
// Variant used for word/long operations: address registers are masked
// to even values.
fn hammer_cores_even_addresses(rs: Vec<(Register, Bitpattern)>) -> bool {
    // NOTE(review): (2<<24)-2 is a 25-bit mask; (1<<24)-2 would match a
    // 24-bit address bus — confirm which is intended.
    let mem_mask = (2<<24)-2; // keep even
    hammer_cores_with(mem_mask, rs)
}
// Variant used for byte operations: odd addresses are allowed.
fn hammer_cores(rs: Vec<(Register, Bitpattern)>) -> bool {
    // NOTE(review): (2<<24)-1 is a 25-bit mask; (1<<24)-1 would match a
    // 24-bit address bus — confirm which is intended.
    let mem_mask = (2<<24)-1; // allow odd
    hammer_cores_with(mem_mask, rs)
}
// Core of every quickcheck property: build two identical cores (one run
// through Musashi, one through r68k), apply the generated register
// assignments, execute `opcode_under_test` once on each, then compare
// resulting state and cycle counts.
fn hammer_cores_with(mem_mask: u32, rs: Vec<(Register, Bitpattern)>) -> bool {
    let pc = 0x40;
    // The opcode under test, stored big-endian at PC.
    let mem = unsafe {
        [((opcode_under_test >> 8) & 0xff) as u8, (opcode_under_test & 0xff) as u8]
    };
    let mut musashi = Core::new_mem(pc, &mem);
    // NOTE(review): `bp & STACK_MASK + 8` below parses as
    // `bp & (STACK_MASK + 8)` because + binds tighter than & in Rust;
    // confirm `(bp & STACK_MASK) + 8` wasn't the intent.
    const STACK_MASK:u32 = (1024-16); // keep even
    musashi.inactive_ssp = 0x128;
    musashi.inactive_usp = 0x256;
    // Deterministic baseline: D regs zeroed, A regs at a safe address.
    for r in 0..8 {
        musashi.dar[r] = 0;
        musashi.dar[8+r] = 0x128;
    }
    for r in rs {
        match r {
            (Register::D0, Bitpattern(bp)) => musashi.dar[0] = bp,
            (Register::D1, Bitpattern(bp)) => musashi.dar[1] = bp,
            (Register::D2, Bitpattern(bp)) => musashi.dar[2] = bp,
            (Register::D3, Bitpattern(bp)) => musashi.dar[3] = bp,
            (Register::D4, Bitpattern(bp)) => musashi.dar[4] = bp,
            (Register::D5, Bitpattern(bp)) => musashi.dar[5] = bp,
            (Register::D6, Bitpattern(bp)) => musashi.dar[6] = bp,
            (Register::D7, Bitpattern(bp)) => musashi.dar[7] = bp,
            // must ensure Addresses are within musashi memory space!
            (Register::A0, Bitpattern(bp)) => musashi.dar[0+8] = bp & mem_mask,
            (Register::A1, Bitpattern(bp)) => musashi.dar[1+8] = bp & mem_mask,
            (Register::A2, Bitpattern(bp)) => musashi.dar[2+8] = bp & mem_mask,
            (Register::A3, Bitpattern(bp)) => musashi.dar[3+8] = bp & mem_mask,
            (Register::A4, Bitpattern(bp)) => musashi.dar[4+8] = bp & mem_mask,
            (Register::A5, Bitpattern(bp)) => musashi.dar[5+8] = bp & mem_mask,
            (Register::A6, Bitpattern(bp)) => musashi.dar[6+8] = bp & mem_mask,
            (Register::A7, Bitpattern(bp)) => musashi.dar[7+8] = bp & STACK_MASK + 8,
            (Register::USP, Bitpattern(bp)) => musashi.inactive_usp = bp & STACK_MASK + 8,
            (Register::SR, Bitpattern(bp)) => musashi.sr_to_flags(bp as u16),
            _ => {
                panic!("No idea how to set {:?}", r.0)
            },
        }
    }
    let mut r68k = musashi.clone(); // so very self-aware!
    let musashi_cycles = reset_and_execute1(&mut musashi);
    let r68k_cycles = r68k.execute1();
    let res = assert_cores_equal(&musashi, &r68k);
    assert_eq!(musashi_cycles, r68k_cycles);
    res
}
// Byte-sized variant: byte accesses may use odd addresses, so the
// odd-address-allowing `hammer_cores` is the default hammer.
macro_rules! qc8 {
    ($opcode:ident, $fn_name:ident) => (qc!($opcode, MASK_OUT_X_Y, $fn_name, hammer_cores););
    ($opcode:ident, $opmask:ident, $fn_name:ident) => (qc!($opcode, $opmask, $fn_name, hammer_cores););
}
// Generates one #[test] that, for every opcode matching `$opmask`,
// quickchecks `$hammer` with random (register, bitpattern) assignments.
macro_rules! qc {
    ($opcode:ident, $fn_name:ident) => (qc!($opcode, MASK_OUT_X_Y, $fn_name, hammer_cores_even_addresses););
    ($opcode:ident, $opmask:ident, $fn_name:ident) => (qc!($opcode, $opmask, $fn_name, hammer_cores_even_addresses););
    ($opcode:ident, $opmask:ident, $fn_name:ident, $hammer:ident) => (
        #[test]
        #[ignore]
        #[allow(unused_variables)]
        fn $fn_name() {
            // Musashi isn't thread safe, and the construct with opcode_under_test
            // isn't either. :(
            let mutex = QUICKCHECK_LOCK.lock().unwrap();
            for opcode in opcodes($opmask, $opcode)
            {
                println!("Will hammer {:016b}", opcode);
                unsafe {
                    // this is because I don't know how to make
                    // hammer_cores take the opcode as a parameter and
                    // we cannot simply use a closure either; see
                    // https://github.com/BurntSushi/quickcheck/issues/56
                    opcode_under_test = opcode;
                }
                QuickCheck::new()
                    .gen(StdGen::new(rand::thread_rng(), 256))
                    .tests(10)
                    .quickcheck($hammer as fn(Vec<(Register, Bitpattern)>) -> bool);
            }
        })
}
// ABCD and ADD: one generated quickcheck test per addressing-mode variant.
qc8!(OP_ABCD_8_RR, qc_abcd_rr);
qc8!(OP_ABCD_8_MM, qc_abcd_mm);
qc8!(OP_ADD_8_ER_DN, qc_add_8_er_dn);
qc8!(OP_ADD_8_ER_PI, qc_add_8_er_pi);
qc8!(OP_ADD_8_ER_PD, qc_add_8_er_pd);
qc8!(OP_ADD_8_ER_AI, qc_add_8_er_ai);
qc8!(OP_ADD_8_ER_DI, qc_add_8_er_di);
qc8!(OP_ADD_8_ER_IX, qc_add_8_er_ix);
qc8!(OP_ADD_8_ER_AW, MASK_OUT_X, qc_add_8_er_aw);
qc8!(OP_ADD_8_ER_AL, MASK_OUT_X, qc_add_8_er_al);
qc8!(OP_ADD_8_ER_PCDI, MASK_OUT_X, qc_add_8_er_pcdi);
qc8!(OP_ADD_8_ER_PCIX, MASK_OUT_X, qc_add_8_er_pcix);
qc8!(OP_ADD_8_ER_IMM, MASK_OUT_X, qc_add_8_er_imm);
qc8!(OP_ADD_8_RE_PI, qc_add_8_re_pi);
qc8!(OP_ADD_8_RE_PD, qc_add_8_re_pd);
qc8!(OP_ADD_8_RE_AI, qc_add_8_re_ai);
qc8!(OP_ADD_8_RE_DI, qc_add_8_re_di);
qc8!(OP_ADD_8_RE_IX, qc_add_8_re_ix);
qc8!(OP_ADD_8_RE_AW, MASK_OUT_X, qc_add_8_re_aw);
qc8!(OP_ADD_8_RE_AL, MASK_OUT_X, qc_add_8_re_al);
qc!(OP_ADD_16_ER_DN, qc_add_16_er_dn);
qc!(OP_ADD_16_ER_AN, qc_add_16_er_an);
qc!(OP_ADD_16_ER_PI, qc_add_16_er_pi);
qc!(OP_ADD_16_ER_PD, qc_add_16_er_pd);
qc!(OP_ADD_16_ER_AI, qc_add_16_er_ai);
qc!(OP_ADD_16_ER_DI, qc_add_16_er_di);
qc!(OP_ADD_16_ER_IX, qc_add_16_er_ix);
qc!(OP_ADD_16_ER_AW, MASK_OUT_X, qc_add_16_er_aw);
qc!(OP_ADD_16_ER_AL, MASK_OUT_X, qc_add_16_er_al);
qc!(OP_ADD_16_ER_PCDI, MASK_OUT_X, qc_add_16_er_pcdi);
qc!(OP_ADD_16_ER_PCIX, MASK_OUT_X, qc_add_16_er_pcix);
qc!(OP_ADD_16_ER_IMM, MASK_OUT_X, qc_add_16_er_imm);
qc!(OP_ADD_16_RE_PI, qc_add_16_re_pi);
qc!(OP_ADD_16_RE_PD, qc_add_16_re_pd);
qc!(OP_ADD_16_RE_AI, qc_add_16_re_ai);
qc!(OP_ADD_16_RE_DI, qc_add_16_re_di);
qc!(OP_ADD_16_RE_IX, qc_add_16_re_ix);
qc!(OP_ADD_16_RE_AW, MASK_OUT_X, qc_add_16_re_aw);
qc!(OP_ADD_16_RE_AL, MASK_OUT_X, qc_add_16_re_al);
qc!(OP_ADD_32_ER_DN, qc_add_32_er_dn);
qc!(OP_ADD_32_ER_AN, qc_add_32_er_an);
qc!(OP_ADD_32_ER_PI, qc_add_32_er_pi);
qc!(OP_ADD_32_ER_PD, qc_add_32_er_pd);
qc!(OP_ADD_32_ER_AI, qc_add_32_er_ai);
qc!(OP_ADD_32_ER_DI, qc_add_32_er_di);
qc!(OP_ADD_32_ER_IX, qc_add_32_er_ix);
qc!(OP_ADD_32_ER_AW, MASK_OUT_X, qc_add_32_er_aw);
qc!(OP_ADD_32_ER_AL, MASK_OUT_X, qc_add_32_er_al);
qc!(OP_ADD_32_ER_PCDI, MASK_OUT_X, qc_add_32_er_pcdi);
qc!(OP_ADD_32_ER_PCIX, MASK_OUT_X, qc_add_32_er_pcix);
qc!(OP_ADD_32_ER_IMM, MASK_OUT_X, qc_add_32_er_imm);
qc!(OP_ADD_32_RE_PI, qc_add_32_re_pi);
qc!(OP_ADD_32_RE_PD, qc_add_32_re_pd);
qc!(OP_ADD_32_RE_AI, qc_add_32_re_ai);
qc!(OP_ADD_32_RE_DI, qc_add_32_re_di);
qc!(OP_ADD_32_RE_IX, qc_add_32_re_ix);
qc!(OP_ADD_32_RE_AW, MASK_OUT_X, qc_add_32_re_aw);
qc!(OP_ADD_32_RE_AL, MASK_OUT_X, qc_add_32_re_al);
// ADDA and ADDI: one generated quickcheck test per addressing-mode variant.
qc!(OP_ADDA_16_DN, qc_adda_16_dn);
qc!(OP_ADDA_16_AN, qc_adda_16_an);
qc!(OP_ADDA_16_PI, qc_adda_16_pi);
qc!(OP_ADDA_16_PD, qc_adda_16_pd);
qc!(OP_ADDA_16_AI, qc_adda_16_ai);
qc!(OP_ADDA_16_DI, qc_adda_16_di);
qc!(OP_ADDA_16_IX, qc_adda_16_ix);
qc!(OP_ADDA_16_AW, MASK_OUT_X, qc_adda_16_aw);
qc!(OP_ADDA_16_AL, MASK_OUT_X, qc_adda_16_al);
qc!(OP_ADDA_16_PCDI, MASK_OUT_X, qc_adda_16_pcdi);
qc!(OP_ADDA_16_PCIX, MASK_OUT_X, qc_adda_16_pcix);
qc!(OP_ADDA_16_IMM, MASK_OUT_X, qc_adda_16_imm);
qc!(OP_ADDA_32_DN, qc_adda_32_dn);
qc!(OP_ADDA_32_AN, qc_adda_32_an);
qc!(OP_ADDA_32_PI, qc_adda_32_pi);
qc!(OP_ADDA_32_PD, qc_adda_32_pd);
qc!(OP_ADDA_32_AI, qc_adda_32_ai);
qc!(OP_ADDA_32_DI, qc_adda_32_di);
qc!(OP_ADDA_32_IX, qc_adda_32_ix);
qc!(OP_ADDA_32_AW, MASK_OUT_X, qc_adda_32_aw);
qc!(OP_ADDA_32_AL, MASK_OUT_X, qc_adda_32_al);
qc!(OP_ADDA_32_PCDI, MASK_OUT_X, qc_adda_32_pcdi);
qc!(OP_ADDA_32_PCIX, MASK_OUT_X, qc_adda_32_pcix);
qc!(OP_ADDA_32_IMM, MASK_OUT_X, qc_adda_32_imm);
qc8!(OP_ADDI_8_DN, MASK_OUT_Y, qc_addi_8_dn);
qc8!(OP_ADDI_8_PI, MASK_OUT_Y, qc_addi_8_pi);
qc8!(OP_ADDI_8_PD, MASK_OUT_Y, qc_addi_8_pd);
qc8!(OP_ADDI_8_AI, MASK_OUT_Y, qc_addi_8_ai);
qc8!(OP_ADDI_8_DI, MASK_OUT_Y, qc_addi_8_di);
qc8!(OP_ADDI_8_IX, MASK_OUT_Y, qc_addi_8_ix);
qc8!(OP_ADDI_8_AW, MASK_EXACT, qc_addi_8_aw);
qc8!(OP_ADDI_8_AL, MASK_EXACT, qc_addi_8_al);
qc!(OP_ADDI_16_DN, MASK_OUT_Y, qc_addi_16_dn);
qc!(OP_ADDI_16_PI, MASK_OUT_Y, qc_addi_16_pi);
qc!(OP_ADDI_16_PD, MASK_OUT_Y, qc_addi_16_pd);
qc!(OP_ADDI_16_AI, MASK_OUT_Y, qc_addi_16_ai);
qc!(OP_ADDI_16_DI, MASK_OUT_Y, qc_addi_16_di);
qc!(OP_ADDI_16_IX, MASK_OUT_Y, qc_addi_16_ix);
qc!(OP_ADDI_16_AW, MASK_EXACT, qc_addi_16_aw);
qc!(OP_ADDI_16_AL, MASK_EXACT, qc_addi_16_al);
qc!(OP_ADDI_32_DN, MASK_OUT_Y, qc_addi_32_dn);
qc!(OP_ADDI_32_PI, MASK_OUT_Y, qc_addi_32_pi);
qc!(OP_ADDI_32_PD, MASK_OUT_Y, qc_addi_32_pd);
qc!(OP_ADDI_32_AI, MASK_OUT_Y, qc_addi_32_ai);
qc!(OP_ADDI_32_DI, MASK_OUT_Y, qc_addi_32_di);
qc!(OP_ADDI_32_IX, MASK_OUT_Y, qc_addi_32_ix);
qc!(OP_ADDI_32_AW, MASK_EXACT, qc_addi_32_aw);
qc!(OP_ADDI_32_AL, MASK_EXACT, qc_addi_32_al);
// ADDQ and ADDX: one generated quickcheck test per addressing-mode variant.
qc8!(OP_ADDQ_8_DN, qc_addq_8_dn);
qc8!(OP_ADDQ_8_PI, qc_addq_8_pi);
qc8!(OP_ADDQ_8_PD, qc_addq_8_pd);
qc8!(OP_ADDQ_8_AI, qc_addq_8_ai);
qc8!(OP_ADDQ_8_DI, qc_addq_8_di);
qc8!(OP_ADDQ_8_IX, qc_addq_8_ix);
qc8!(OP_ADDQ_8_AW, MASK_OUT_X, qc_addq_8_aw);
qc8!(OP_ADDQ_8_AL, MASK_OUT_X, qc_addq_8_al);
qc!(OP_ADDQ_16_DN, qc_addq_16_dn);
qc!(OP_ADDQ_16_AN, qc_addq_16_an);
qc!(OP_ADDQ_16_PI, qc_addq_16_pi);
qc!(OP_ADDQ_16_PD, qc_addq_16_pd);
qc!(OP_ADDQ_16_AI, qc_addq_16_ai);
qc!(OP_ADDQ_16_DI, qc_addq_16_di);
qc!(OP_ADDQ_16_IX, qc_addq_16_ix);
qc!(OP_ADDQ_16_AW, MASK_OUT_X, qc_addq_16_aw);
qc!(OP_ADDQ_16_AL, MASK_OUT_X, qc_addq_16_al);
qc!(OP_ADDQ_32_DN, qc_addq_32_dn);
qc!(OP_ADDQ_32_AN, qc_addq_32_an);
qc!(OP_ADDQ_32_PI, qc_addq_32_pi);
qc!(OP_ADDQ_32_PD, qc_addq_32_pd);
qc!(OP_ADDQ_32_AI, qc_addq_32_ai);
qc!(OP_ADDQ_32_DI, qc_addq_32_di);
qc!(OP_ADDQ_32_IX, qc_addq_32_ix);
qc!(OP_ADDQ_32_AW, MASK_OUT_X, qc_addq_32_aw);
qc!(OP_ADDQ_32_AL, MASK_OUT_X, qc_addq_32_al);
qc8!(OP_ADDX_8_RR, qc_addx_8_rr);
qc8!(OP_ADDX_8_MM, qc_addx_8_mm);
qc!(OP_ADDX_16_RR, qc_addx_16_rr);
qc!(OP_ADDX_16_MM, qc_addx_16_mm);
qc!(OP_ADDX_32_RR, qc_addx_32_rr);
qc!(OP_ADDX_32_MM, qc_addx_32_mm);
// AND and ANDI (incl. ANDI to CCR/SR): one generated quickcheck test
// per addressing-mode variant.
qc8!(OP_AND_8_ER_DN, qc_and_8_er_dn);
qc8!(OP_AND_8_ER_PI, qc_and_8_er_pi);
qc8!(OP_AND_8_ER_PD, qc_and_8_er_pd);
qc8!(OP_AND_8_ER_AI, qc_and_8_er_ai);
qc8!(OP_AND_8_ER_DI, qc_and_8_er_di);
qc8!(OP_AND_8_ER_IX, qc_and_8_er_ix);
qc8!(OP_AND_8_ER_AW, MASK_OUT_X, qc_and_8_er_aw);
qc8!(OP_AND_8_ER_AL, MASK_OUT_X, qc_and_8_er_al);
qc8!(OP_AND_8_ER_PCDI, MASK_OUT_X, qc_and_8_er_pcdi);
qc8!(OP_AND_8_ER_PCIX, MASK_OUT_X, qc_and_8_er_pcix);
qc8!(OP_AND_8_ER_IMM, MASK_OUT_X, qc_and_8_er_imm);
qc8!(OP_AND_8_RE_PI, qc_and_8_re_pi);
qc8!(OP_AND_8_RE_PD, qc_and_8_re_pd);
qc8!(OP_AND_8_RE_AI, qc_and_8_re_ai);
qc8!(OP_AND_8_RE_DI, qc_and_8_re_di);
qc8!(OP_AND_8_RE_IX, qc_and_8_re_ix);
qc8!(OP_AND_8_RE_AW, MASK_OUT_X, qc_and_8_re_aw);
qc8!(OP_AND_8_RE_AL, MASK_OUT_X, qc_and_8_re_al);
qc!(OP_AND_16_ER_DN, qc_and_16_er_dn);
qc!(OP_AND_16_ER_PI, qc_and_16_er_pi);
qc!(OP_AND_16_ER_PD, qc_and_16_er_pd);
qc!(OP_AND_16_ER_AI, qc_and_16_er_ai);
qc!(OP_AND_16_ER_DI, qc_and_16_er_di);
qc!(OP_AND_16_ER_IX, qc_and_16_er_ix);
qc!(OP_AND_16_ER_AW, MASK_OUT_X, qc_and_16_er_aw);
qc!(OP_AND_16_ER_AL, MASK_OUT_X, qc_and_16_er_al);
qc!(OP_AND_16_ER_PCDI, MASK_OUT_X, qc_and_16_er_pcdi);
qc!(OP_AND_16_ER_PCIX, MASK_OUT_X, qc_and_16_er_pcix);
qc!(OP_AND_16_ER_IMM, MASK_OUT_X, qc_and_16_er_imm);
qc!(OP_AND_16_RE_PI, qc_and_16_re_pi);
qc!(OP_AND_16_RE_PD, qc_and_16_re_pd);
qc!(OP_AND_16_RE_AI, qc_and_16_re_ai);
qc!(OP_AND_16_RE_DI, qc_and_16_re_di);
qc!(OP_AND_16_RE_IX, qc_and_16_re_ix);
qc!(OP_AND_16_RE_AW, MASK_OUT_X, qc_and_16_re_aw);
qc!(OP_AND_16_RE_AL, MASK_OUT_X, qc_and_16_re_al);
qc!(OP_AND_32_ER_DN, qc_and_32_er_dn);
qc!(OP_AND_32_ER_PI, qc_and_32_er_pi);
qc!(OP_AND_32_ER_PD, qc_and_32_er_pd);
qc!(OP_AND_32_ER_AI, qc_and_32_er_ai);
qc!(OP_AND_32_ER_DI, qc_and_32_er_di);
qc!(OP_AND_32_ER_IX, qc_and_32_er_ix);
qc!(OP_AND_32_ER_AW, MASK_OUT_X, qc_and_32_er_aw);
qc!(OP_AND_32_ER_AL, MASK_OUT_X, qc_and_32_er_al);
qc!(OP_AND_32_ER_PCDI, MASK_OUT_X, qc_and_32_er_pcdi);
qc!(OP_AND_32_ER_PCIX, MASK_OUT_X, qc_and_32_er_pcix);
qc!(OP_AND_32_ER_IMM, MASK_OUT_X, qc_and_32_er_imm);
qc!(OP_AND_32_RE_PI, qc_and_32_re_pi);
qc!(OP_AND_32_RE_PD, qc_and_32_re_pd);
qc!(OP_AND_32_RE_AI, qc_and_32_re_ai);
qc!(OP_AND_32_RE_DI, qc_and_32_re_di);
qc!(OP_AND_32_RE_IX, qc_and_32_re_ix);
qc!(OP_AND_32_RE_AW, MASK_OUT_X, qc_and_32_re_aw);
qc!(OP_AND_32_RE_AL, MASK_OUT_X, qc_and_32_re_al);
qc8!(OP_ANDI_8_DN, MASK_OUT_Y, qc_andi_8_dn);
qc8!(OP_ANDI_8_PI, MASK_OUT_Y, qc_andi_8_pi);
qc8!(OP_ANDI_8_PD, MASK_OUT_Y, qc_andi_8_pd);
qc8!(OP_ANDI_8_AI, MASK_OUT_Y, qc_andi_8_ai);
qc8!(OP_ANDI_8_DI, MASK_OUT_Y, qc_andi_8_di);
qc8!(OP_ANDI_8_IX, MASK_OUT_Y, qc_andi_8_ix);
qc8!(OP_ANDI_8_AW, MASK_EXACT, qc_andi_8_aw);
qc8!(OP_ANDI_8_AL, MASK_EXACT, qc_andi_8_al);
qc!(OP_ANDI_16_DN, MASK_OUT_Y, qc_andi_16_dn);
qc!(OP_ANDI_16_PI, MASK_OUT_Y, qc_andi_16_pi);
qc!(OP_ANDI_16_PD, MASK_OUT_Y, qc_andi_16_pd);
qc!(OP_ANDI_16_AI, MASK_OUT_Y, qc_andi_16_ai);
qc!(OP_ANDI_16_DI, MASK_OUT_Y, qc_andi_16_di);
qc!(OP_ANDI_16_IX, MASK_OUT_Y, qc_andi_16_ix);
qc!(OP_ANDI_16_AW, MASK_EXACT, qc_andi_16_aw);
qc!(OP_ANDI_16_AL, MASK_EXACT, qc_andi_16_al);
qc!(OP_ANDI_32_DN, MASK_OUT_Y, qc_andi_32_dn);
qc!(OP_ANDI_32_PI, MASK_OUT_Y, qc_andi_32_pi);
qc!(OP_ANDI_32_PD, MASK_OUT_Y, qc_andi_32_pd);
qc!(OP_ANDI_32_AI, MASK_OUT_Y, qc_andi_32_ai);
qc!(OP_ANDI_32_DI, MASK_OUT_Y, qc_andi_32_di);
qc!(OP_ANDI_32_IX, MASK_OUT_Y, qc_andi_32_ix);
qc!(OP_ANDI_32_AW, MASK_EXACT, qc_andi_32_aw);
qc!(OP_ANDI_32_AL, MASK_EXACT, qc_andi_32_al);
qc!(OP_ANDI_16_TOC, MASK_EXACT, qc_andi_16_toc);
qc!(OP_ANDI_16_TOS, MASK_EXACT, qc_andi_16_tos);
// ASL/ASR shifts: register, static-count, and memory variants.
qc8!(OP_ASR_8_S, MASK_OUT_X_Y, qc_asr_8_s);
qc!(OP_ASR_16_S, MASK_OUT_X_Y, qc_asr_16_s);
qc!(OP_ASR_32_S, MASK_OUT_X_Y, qc_asr_32_s);
qc8!(OP_ASR_8_R, MASK_OUT_X_Y, qc_asr_8_r);
qc!(OP_ASR_16_R, MASK_OUT_X_Y, qc_asr_16_r);
qc!(OP_ASR_32_R, MASK_OUT_X_Y, qc_asr_32_r);
qc8!(OP_ASL_8_S, MASK_OUT_X_Y, qc_asl_8_s);
qc!(OP_ASL_16_S, MASK_OUT_X_Y, qc_asl_16_s);
qc!(OP_ASL_32_S, MASK_OUT_X_Y, qc_asl_32_s);
qc8!(OP_ASL_8_R, MASK_OUT_X_Y, qc_asl_8_r);
qc!(OP_ASL_16_R, MASK_OUT_X_Y, qc_asl_16_r);
qc!(OP_ASL_32_R, MASK_OUT_X_Y, qc_asl_32_r);
qc!(OP_ASL_16_AI, MASK_OUT_Y, qc_asl_16_ai);
qc!(OP_ASL_16_PI, MASK_OUT_Y, qc_asl_16_pi);
qc!(OP_ASL_16_PD, MASK_OUT_Y, qc_asl_16_pd);
qc!(OP_ASL_16_DI, MASK_OUT_Y, qc_asl_16_di);
qc!(OP_ASL_16_IX, MASK_OUT_Y, qc_asl_16_ix);
qc!(OP_ASL_16_AW, MASK_EXACT, qc_asl_16_aw);
qc!(OP_ASL_16_AL, MASK_EXACT, qc_asl_16_al);
qc!(OP_ASR_16_AI, MASK_OUT_Y, qc_asr_16_ai);
qc!(OP_ASR_16_PI, MASK_OUT_Y, qc_asr_16_pi);
qc!(OP_ASR_16_PD, MASK_OUT_Y, qc_asr_16_pd);
qc!(OP_ASR_16_DI, MASK_OUT_Y, qc_asr_16_di);
qc!(OP_ASR_16_IX, MASK_OUT_Y, qc_asr_16_ix);
qc!(OP_ASR_16_AW, MASK_EXACT, qc_asr_16_aw);
qc!(OP_ASR_16_AL, MASK_EXACT, qc_asr_16_al);
// Conditional branches. The extra 0xe0 bits in the mask fix the upper
// displacement bits, cutting the number of opcodes generated per test.
const MASK_LOBYTE_QUICKER: u32 = MASK_LOBYTE + 0xe0;
qc8!(OP_BHI_8, MASK_LOBYTE_QUICKER, qc_bhi_8);
qc8!(OP_BLS_8, MASK_LOBYTE_QUICKER, qc_bls_8);
qc8!(OP_BCC_8, MASK_LOBYTE_QUICKER, qc_bcc_8);
qc8!(OP_BCS_8, MASK_LOBYTE_QUICKER, qc_bcs_8);
qc8!(OP_BNE_8, MASK_LOBYTE_QUICKER, qc_bne_8);
qc8!(OP_BEQ_8, MASK_LOBYTE_QUICKER, qc_beq_8);
qc8!(OP_BVC_8, MASK_LOBYTE_QUICKER, qc_bvc_8);
qc8!(OP_BVS_8, MASK_LOBYTE_QUICKER, qc_bvs_8);
qc8!(OP_BPL_8, MASK_LOBYTE_QUICKER, qc_bpl_8);
qc8!(OP_BMI_8, MASK_LOBYTE_QUICKER, qc_bmi_8);
qc8!(OP_BGE_8, MASK_LOBYTE_QUICKER, qc_bge_8);
qc8!(OP_BLT_8, MASK_LOBYTE_QUICKER, qc_blt_8);
qc8!(OP_BGT_8, MASK_LOBYTE_QUICKER, qc_bgt_8);
qc8!(OP_BLE_8, MASK_LOBYTE_QUICKER, qc_ble_8);
qc8!(OP_BRA_8, MASK_LOBYTE_QUICKER, qc_bra_8);
qc8!(OP_BSR_8, MASK_LOBYTE_QUICKER, qc_bsr_8);
qc!(OP_BHI_16, MASK_EXACT, qc_bhi_16);
qc!(OP_BLS_16, MASK_EXACT, qc_bls_16);
qc!(OP_BCC_16, MASK_EXACT, qc_bcc_16);
qc!(OP_BCS_16, MASK_EXACT, qc_bcs_16);
qc!(OP_BNE_16, MASK_EXACT, qc_bne_16);
qc!(OP_BEQ_16, MASK_EXACT, qc_beq_16);
qc!(OP_BVC_16, MASK_EXACT, qc_bvc_16);
qc!(OP_BVS_16, MASK_EXACT, qc_bvs_16);
qc!(OP_BPL_16, MASK_EXACT, qc_bpl_16);
qc!(OP_BMI_16, MASK_EXACT, qc_bmi_16);
qc!(OP_BGE_16, MASK_EXACT, qc_bge_16);
qc!(OP_BLT_16, MASK_EXACT, qc_blt_16);
qc!(OP_BGT_16, MASK_EXACT, qc_bgt_16);
qc!(OP_BLE_16, MASK_EXACT, qc_ble_16);
qc!(OP_BRA_16, MASK_EXACT, qc_bra_16);
qc!(OP_BSR_16, MASK_EXACT, qc_bsr_16);
// Bit operations BCHG/BCLR/BSET/BTST: register-bit and static-bit forms.
qc!(OP_BCHG_32_R_DN, MASK_OUT_X_Y, qc_bchg_32_r_dn);
qc!(OP_BCHG_32_S_DN, MASK_OUT_Y, qc_bchg_32_s_dn);
qc8!(OP_BCHG_8_R_AI, MASK_OUT_X_Y, qc_bchg_8_r_ai);
qc8!(OP_BCHG_8_R_PI, MASK_OUT_X_Y, qc_bchg_8_r_pi);
qc8!(OP_BCHG_8_R_PD, MASK_OUT_X_Y, qc_bchg_8_r_pd);
qc8!(OP_BCHG_8_R_DI, MASK_OUT_X_Y, qc_bchg_8_r_di);
qc8!(OP_BCHG_8_R_IX, MASK_OUT_X_Y, qc_bchg_8_r_ix);
qc8!(OP_BCHG_8_R_AW, MASK_OUT_X, qc_bchg_8_r_aw);
qc8!(OP_BCHG_8_R_AL, MASK_OUT_X, qc_bchg_8_r_al);
qc8!(OP_BCHG_8_S_AI, MASK_OUT_Y, qc_bchg_8_s_ai);
qc8!(OP_BCHG_8_S_PI, MASK_OUT_Y, qc_bchg_8_s_pi);
qc8!(OP_BCHG_8_S_PD, MASK_OUT_Y, qc_bchg_8_s_pd);
qc8!(OP_BCHG_8_S_DI, MASK_OUT_Y, qc_bchg_8_s_di);
qc8!(OP_BCHG_8_S_IX, MASK_OUT_Y, qc_bchg_8_s_ix);
qc8!(OP_BCHG_8_S_AW, MASK_EXACT, qc_bchg_8_s_aw);
qc8!(OP_BCHG_8_S_AL, MASK_EXACT, qc_bchg_8_s_al);
qc!(OP_BCLR_32_R_DN, MASK_OUT_X_Y, qc_bclr_32_r_dn);
qc!(OP_BCLR_32_S_DN, MASK_OUT_Y, qc_bclr_32_s_dn);
qc8!(OP_BCLR_8_R_AI, MASK_OUT_X_Y, qc_bclr_8_r_ai);
qc8!(OP_BCLR_8_R_PI, MASK_OUT_X_Y, qc_bclr_8_r_pi);
qc8!(OP_BCLR_8_R_PD, MASK_OUT_X_Y, qc_bclr_8_r_pd);
qc8!(OP_BCLR_8_R_DI, MASK_OUT_X_Y, qc_bclr_8_r_di);
qc8!(OP_BCLR_8_R_IX, MASK_OUT_X_Y, qc_bclr_8_r_ix);
qc8!(OP_BCLR_8_R_AW, MASK_OUT_X, qc_bclr_8_r_aw);
qc8!(OP_BCLR_8_R_AL, MASK_OUT_X, qc_bclr_8_r_al);
qc8!(OP_BCLR_8_S_AI, MASK_OUT_Y, qc_bclr_8_s_ai);
qc8!(OP_BCLR_8_S_PI, MASK_OUT_Y, qc_bclr_8_s_pi);
qc8!(OP_BCLR_8_S_PD, MASK_OUT_Y, qc_bclr_8_s_pd);
qc8!(OP_BCLR_8_S_DI, MASK_OUT_Y, qc_bclr_8_s_di);
qc8!(OP_BCLR_8_S_IX, MASK_OUT_Y, qc_bclr_8_s_ix);
qc8!(OP_BCLR_8_S_AW, MASK_EXACT, qc_bclr_8_s_aw);
qc8!(OP_BCLR_8_S_AL, MASK_EXACT, qc_bclr_8_s_al);
qc!(OP_BSET_32_R_DN, MASK_OUT_X_Y, qc_bset_32_r_dn);
qc!(OP_BSET_32_S_DN, MASK_OUT_Y, qc_bset_32_s_dn);
qc8!(OP_BSET_8_R_AI, MASK_OUT_X_Y, qc_bset_8_r_ai);
qc8!(OP_BSET_8_R_PI, MASK_OUT_X_Y, qc_bset_8_r_pi);
qc8!(OP_BSET_8_R_PD, MASK_OUT_X_Y, qc_bset_8_r_pd);
qc8!(OP_BSET_8_R_DI, MASK_OUT_X_Y, qc_bset_8_r_di);
qc8!(OP_BSET_8_R_IX, MASK_OUT_X_Y, qc_bset_8_r_ix);
qc8!(OP_BSET_8_R_AW, MASK_OUT_X, qc_bset_8_r_aw);
qc8!(OP_BSET_8_R_AL, MASK_OUT_X, qc_bset_8_r_al);
qc8!(OP_BSET_8_S_AI, MASK_OUT_Y, qc_bset_8_s_ai);
qc8!(OP_BSET_8_S_PI, MASK_OUT_Y, qc_bset_8_s_pi);
qc8!(OP_BSET_8_S_PD, MASK_OUT_Y, qc_bset_8_s_pd);
qc8!(OP_BSET_8_S_DI, MASK_OUT_Y, qc_bset_8_s_di);
qc8!(OP_BSET_8_S_IX, MASK_OUT_Y, qc_bset_8_s_ix);
qc8!(OP_BSET_8_S_AW, MASK_EXACT, qc_bset_8_s_aw);
qc8!(OP_BSET_8_S_AL, MASK_EXACT, qc_bset_8_s_al);
qc!(OP_BTST_32_R_DN, MASK_OUT_X_Y, qc_btst_32_r_dn);
qc!(OP_BTST_32_S_DN, MASK_OUT_Y, qc_btst_32_s_dn);
qc8!(OP_BTST_8_R_AI, MASK_OUT_X_Y, qc_btst_8_r_ai);
qc8!(OP_BTST_8_R_PI, MASK_OUT_X_Y, qc_btst_8_r_pi);
qc8!(OP_BTST_8_R_PD, MASK_OUT_X_Y, qc_btst_8_r_pd);
qc8!(OP_BTST_8_R_DI, MASK_OUT_X_Y, qc_btst_8_r_di);
qc8!(OP_BTST_8_R_IX, MASK_OUT_X_Y, qc_btst_8_r_ix);
qc8!(OP_BTST_8_R_AW, MASK_OUT_X, qc_btst_8_r_aw);
qc8!(OP_BTST_8_R_AL, MASK_OUT_X, qc_btst_8_r_al);
qc8!(OP_BTST_8_S_AI, MASK_OUT_Y, qc_btst_8_s_ai);
qc8!(OP_BTST_8_S_PI, MASK_OUT_Y, qc_btst_8_s_pi);
qc8!(OP_BTST_8_S_PD, MASK_OUT_Y, qc_btst_8_s_pd);
qc8!(OP_BTST_8_S_DI, MASK_OUT_Y, qc_btst_8_s_di);
qc8!(OP_BTST_8_S_IX, MASK_OUT_Y, qc_btst_8_s_ix);
qc8!(OP_BTST_8_S_AW, MASK_EXACT, qc_btst_8_s_aw);
qc8!(OP_BTST_8_S_AL, MASK_EXACT, qc_btst_8_s_al);
// CHK and CLR: one generated quickcheck test per addressing-mode variant.
qc!(OP_CHK_16_AI, MASK_OUT_X_Y, qc_chk_16_ai);
qc!(OP_CHK_16_AL, MASK_OUT_X, qc_chk_16_al);
qc!(OP_CHK_16_AW, MASK_OUT_X, qc_chk_16_aw);
qc!(OP_CHK_16_DN, MASK_OUT_X_Y, qc_chk_16_dn);
qc!(OP_CHK_16_DI, MASK_OUT_X_Y, qc_chk_16_di);
qc!(OP_CHK_16_IMM, MASK_OUT_X, qc_chk_16_imm);
qc!(OP_CHK_16_IX, MASK_OUT_X_Y, qc_chk_16_ix);
qc!(OP_CHK_16_PCDI, MASK_OUT_X, qc_chk_16_pcdi);
qc!(OP_CHK_16_PCIX, MASK_OUT_X, qc_chk_16_pcix);
qc!(OP_CHK_16_PD, MASK_OUT_X_Y, qc_chk_16_pd);
qc!(OP_CHK_16_PI, MASK_OUT_X_Y, qc_chk_16_pi);
qc8!(OP_CLR_8_DN, MASK_OUT_Y, qc_clr_8_dn);
qc8!(OP_CLR_8_AI, MASK_OUT_Y, qc_clr_8_ai);
qc8!(OP_CLR_8_PI, MASK_OUT_Y, qc_clr_8_pi);
qc8!(OP_CLR_8_PD, MASK_OUT_Y, qc_clr_8_pd);
qc8!(OP_CLR_8_DI, MASK_OUT_Y, qc_clr_8_di);
qc8!(OP_CLR_8_IX, MASK_OUT_Y, qc_clr_8_ix);
qc8!(OP_CLR_8_AW, MASK_EXACT, qc_clr_8_aw);
qc8!(OP_CLR_8_AL, MASK_EXACT, qc_clr_8_al);
qc!(OP_CLR_16_DN, MASK_OUT_Y, qc_clr_16_dn);
qc!(OP_CLR_16_AI, MASK_OUT_Y, qc_clr_16_ai);
qc!(OP_CLR_16_PI, MASK_OUT_Y, qc_clr_16_pi);
qc!(OP_CLR_16_PD, MASK_OUT_Y, qc_clr_16_pd);
qc!(OP_CLR_16_DI, MASK_OUT_Y, qc_clr_16_di);
qc!(OP_CLR_16_IX, MASK_OUT_Y, qc_clr_16_ix);
qc!(OP_CLR_16_AW, MASK_EXACT, qc_clr_16_aw);
qc!(OP_CLR_16_AL, MASK_EXACT, qc_clr_16_al);
qc!(OP_CLR_32_DN, MASK_OUT_Y, qc_clr_32_dn);
qc!(OP_CLR_32_AI, MASK_OUT_Y, qc_clr_32_ai);
qc!(OP_CLR_32_PI, MASK_OUT_Y, qc_clr_32_pi);
qc!(OP_CLR_32_PD, MASK_OUT_Y, qc_clr_32_pd);
qc!(OP_CLR_32_DI, MASK_OUT_Y, qc_clr_32_di);
qc!(OP_CLR_32_IX, MASK_OUT_Y, qc_clr_32_ix);
qc!(OP_CLR_32_AW, MASK_EXACT, qc_clr_32_aw);
qc!(OP_CLR_32_AL, MASK_EXACT, qc_clr_32_al);
// CMP/CMPA/CMPI/CMPM: one generated quickcheck test per variant.
qc8!(OP_CMP_8_DN, MASK_OUT_X_Y, qc_cmp_8_dn);
qc8!(OP_CMP_8_AI, MASK_OUT_X_Y, qc_cmp_8_ai);
qc8!(OP_CMP_8_PI, MASK_OUT_X_Y, qc_cmp_8_pi);
qc8!(OP_CMP_8_PD, MASK_OUT_X_Y, qc_cmp_8_pd);
qc8!(OP_CMP_8_DI, MASK_OUT_X_Y, qc_cmp_8_di);
qc8!(OP_CMP_8_IX, MASK_OUT_X_Y, qc_cmp_8_ix);
qc8!(OP_CMP_8_AW, MASK_OUT_X, qc_cmp_8_aw);
qc8!(OP_CMP_8_AL, MASK_OUT_X, qc_cmp_8_al);
// NOTE(review): the two PCDI/PCIX lines below use MASK_OUT_Y, while every
// sibling PCDI/PCIX entry uses MASK_OUT_X — confirm this is intentional.
qc8!(OP_CMP_8_PCDI, MASK_OUT_Y, qc_cmp_8_pcdi);
qc8!(OP_CMP_8_PCIX, MASK_OUT_Y, qc_cmp_8_pcix);
qc8!(OP_CMP_8_IMM, MASK_OUT_X, qc_cmp_8_imm);
qc!(OP_CMP_16_DN, MASK_OUT_X_Y, qc_cmp_16_dn);
qc!(OP_CMP_16_AN, MASK_OUT_X_Y, qc_cmp_16_an);
qc!(OP_CMP_16_AI, MASK_OUT_X_Y, qc_cmp_16_ai);
qc!(OP_CMP_16_PI, MASK_OUT_X_Y, qc_cmp_16_pi);
qc!(OP_CMP_16_PD, MASK_OUT_X_Y, qc_cmp_16_pd);
qc!(OP_CMP_16_DI, MASK_OUT_X_Y, qc_cmp_16_di);
qc!(OP_CMP_16_IX, MASK_OUT_X_Y, qc_cmp_16_ix);
qc!(OP_CMP_16_AW, MASK_OUT_X, qc_cmp_16_aw);
qc!(OP_CMP_16_AL, MASK_OUT_X, qc_cmp_16_al);
qc!(OP_CMP_16_PCDI, MASK_OUT_X, qc_cmp_16_pcdi);
qc!(OP_CMP_16_PCIX, MASK_OUT_X, qc_cmp_16_pcix);
qc!(OP_CMP_16_IMM, MASK_OUT_X, qc_cmp_16_imm);
qc!(OP_CMP_32_DN, MASK_OUT_X_Y, qc_cmp_32_dn);
qc!(OP_CMP_32_AN, MASK_OUT_X_Y, qc_cmp_32_an);
qc!(OP_CMP_32_AI, MASK_OUT_X_Y, qc_cmp_32_ai);
qc!(OP_CMP_32_PI, MASK_OUT_X_Y, qc_cmp_32_pi);
qc!(OP_CMP_32_PD, MASK_OUT_X_Y, qc_cmp_32_pd);
qc!(OP_CMP_32_DI, MASK_OUT_X_Y, qc_cmp_32_di);
qc!(OP_CMP_32_IX, MASK_OUT_X_Y, qc_cmp_32_ix);
qc!(OP_CMP_32_AW, MASK_OUT_X, qc_cmp_32_aw);
qc!(OP_CMP_32_AL, MASK_OUT_X, qc_cmp_32_al);
qc!(OP_CMP_32_PCDI, MASK_OUT_X, qc_cmp_32_pcdi);
qc!(OP_CMP_32_PCIX, MASK_OUT_X, qc_cmp_32_pcix);
qc!(OP_CMP_32_IMM, MASK_OUT_X, qc_cmp_32_imm);
qc!(OP_CMPA_16_DN, qc_cmpa_16_dn);
qc!(OP_CMPA_16_AN, qc_cmpa_16_an);
qc!(OP_CMPA_16_PI, qc_cmpa_16_pi);
qc!(OP_CMPA_16_PD, qc_cmpa_16_pd);
qc!(OP_CMPA_16_AI, qc_cmpa_16_ai);
qc!(OP_CMPA_16_DI, qc_cmpa_16_di);
qc!(OP_CMPA_16_IX, qc_cmpa_16_ix);
qc!(OP_CMPA_16_AW, MASK_OUT_X, qc_cmpa_16_aw);
qc!(OP_CMPA_16_AL, MASK_OUT_X, qc_cmpa_16_al);
qc!(OP_CMPA_16_PCDI, MASK_OUT_X, qc_cmpa_16_pcdi);
qc!(OP_CMPA_16_PCIX, MASK_OUT_X, qc_cmpa_16_pcix);
qc!(OP_CMPA_16_IMM, MASK_OUT_X, qc_cmpa_16_imm);
qc!(OP_CMPA_32_DN, qc_cmpa_32_dn);
qc!(OP_CMPA_32_AN, qc_cmpa_32_an);
qc!(OP_CMPA_32_PI, qc_cmpa_32_pi);
qc!(OP_CMPA_32_PD, qc_cmpa_32_pd);
qc!(OP_CMPA_32_AI, qc_cmpa_32_ai);
qc!(OP_CMPA_32_DI, qc_cmpa_32_di);
qc!(OP_CMPA_32_IX, qc_cmpa_32_ix);
qc!(OP_CMPA_32_AW, MASK_OUT_X, qc_cmpa_32_aw);
qc!(OP_CMPA_32_AL, MASK_OUT_X, qc_cmpa_32_al);
qc!(OP_CMPA_32_PCDI, MASK_OUT_X, qc_cmpa_32_pcdi);
qc!(OP_CMPA_32_PCIX, MASK_OUT_X, qc_cmpa_32_pcix);
qc!(OP_CMPA_32_IMM, MASK_OUT_X, qc_cmpa_32_imm);
qc8!(OP_CMPI_8_DN, MASK_OUT_Y, qc_cmpi_8_dn);
qc8!(OP_CMPI_8_AI, MASK_OUT_Y, qc_cmpi_8_ai);
qc8!(OP_CMPI_8_PI, MASK_OUT_Y, qc_cmpi_8_pi);
qc8!(OP_CMPI_8_PD, MASK_OUT_Y, qc_cmpi_8_pd);
qc8!(OP_CMPI_8_DI, MASK_OUT_Y, qc_cmpi_8_di);
qc8!(OP_CMPI_8_IX, MASK_OUT_Y, qc_cmpi_8_ix);
qc8!(OP_CMPI_8_AW, MASK_EXACT, qc_cmpi_8_aw);
qc8!(OP_CMPI_8_AL, MASK_EXACT, qc_cmpi_8_al);
qc!(OP_CMPI_16_DN, MASK_OUT_Y, qc_cmpi_16_dn);
qc!(OP_CMPI_16_AI, MASK_OUT_Y, qc_cmpi_16_ai);
qc!(OP_CMPI_16_PI, MASK_OUT_Y, qc_cmpi_16_pi);
qc!(OP_CMPI_16_PD, MASK_OUT_Y, qc_cmpi_16_pd);
qc!(OP_CMPI_16_DI, MASK_OUT_Y, qc_cmpi_16_di);
qc!(OP_CMPI_16_IX, MASK_OUT_Y, qc_cmpi_16_ix);
qc!(OP_CMPI_16_AW, MASK_EXACT, qc_cmpi_16_aw);
qc!(OP_CMPI_16_AL, MASK_EXACT, qc_cmpi_16_al);
qc!(OP_CMPI_32_DN, MASK_OUT_Y, qc_cmpi_32_dn);
qc!(OP_CMPI_32_AI, MASK_OUT_Y, qc_cmpi_32_ai);
qc!(OP_CMPI_32_PI, MASK_OUT_Y, qc_cmpi_32_pi);
qc!(OP_CMPI_32_PD, MASK_OUT_Y, qc_cmpi_32_pd);
qc!(OP_CMPI_32_DI, MASK_OUT_Y, qc_cmpi_32_di);
qc!(OP_CMPI_32_IX, MASK_OUT_Y, qc_cmpi_32_ix);
qc!(OP_CMPI_32_AW, MASK_EXACT, qc_cmpi_32_aw);
qc!(OP_CMPI_32_AL, MASK_EXACT, qc_cmpi_32_al);
qc8!(OP_CMPM_8, MASK_OUT_X_Y, qc_cmpm_8);
qc!(OP_CMPM_16, MASK_OUT_X_Y, qc_cmpm_16);
qc!(OP_CMPM_32, MASK_OUT_X_Y, qc_cmpm_32);
// Put qc for DBcc here
qc!(OP_DBT_16, MASK_OUT_Y, qc_dbt_16);
qc!(OP_DBF_16, MASK_OUT_Y, qc_dbf_16);
qc!(OP_DBHI_16, MASK_OUT_Y, qc_dbhi_16);
qc!(OP_DBLS_16, MASK_OUT_Y, qc_dbls_16);
qc!(OP_DBCC_16, MASK_OUT_Y, qc_dbcc_16);
qc!(OP_DBCS_16, MASK_OUT_Y, qc_dbcs_16);
qc!(OP_DBNE_16, MASK_OUT_Y, qc_dbne_16);
qc!(OP_DBEQ_16, MASK_OUT_Y, qc_dbeq_16);
qc!(OP_DBVC_16, MASK_OUT_Y, qc_dbvc_16);
qc!(OP_DBVS_16, MASK_OUT_Y, qc_dbvs_16);
qc!(OP_DBPL_16, MASK_OUT_Y, qc_dbpl_16);
qc!(OP_DBMI_16, MASK_OUT_Y, qc_dbmi_16);
qc!(OP_DBGE_16, MASK_OUT_Y, qc_dbge_16);
qc!(OP_DBLT_16, MASK_OUT_Y, qc_dblt_16);
qc!(OP_DBGT_16, MASK_OUT_Y, qc_dbgt_16);
qc!(OP_DBLE_16, MASK_OUT_Y, qc_dble_16);
// Put qc for DIVS here
qc!(OP_DIVS_16_AI, MASK_OUT_X_Y, qc_divs_16_ai);
qc!(OP_DIVS_16_AL, MASK_OUT_X, qc_divs_16_al);
qc!(OP_DIVS_16_AW, MASK_OUT_X, qc_divs_16_aw);
qc!(OP_DIVS_16_DN, MASK_OUT_X_Y, qc_divs_16_dn);
qc!(OP_DIVS_16_DI, MASK_OUT_X_Y, qc_divs_16_di);
qc!(OP_DIVS_16_IMM, MASK_OUT_X, qc_divs_16_imm);
qc!(OP_DIVS_16_IX, MASK_OUT_X_Y, qc_divs_16_ix);
qc!(OP_DIVS_16_PCDI, MASK_OUT_X, qc_divs_16_pcdi);
qc!(OP_DIVS_16_PCIX, MASK_OUT_X, qc_divs_16_pcix);
qc!(OP_DIVS_16_PD, MASK_OUT_X_Y, qc_divs_16_pd);
qc!(OP_DIVS_16_PI, MASK_OUT_X_Y, qc_divs_16_pi);
// Put qc for DIVU here
qc!(OP_DIVU_16_AI, MASK_OUT_X_Y, qc_divu_16_ai);
qc!(OP_DIVU_16_AL, MASK_OUT_X, qc_divu_16_al);
qc!(OP_DIVU_16_AW, MASK_OUT_X, qc_divu_16_aw);
qc!(OP_DIVU_16_DN, MASK_OUT_X_Y, qc_divu_16_dn);
qc!(OP_DIVU_16_DI, MASK_OUT_X_Y, qc_divu_16_di);
qc!(OP_DIVU_16_IMM, MASK_OUT_X, qc_divu_16_imm);
qc!(OP_DIVU_16_IX, MASK_OUT_X_Y, qc_divu_16_ix);
qc!(OP_DIVU_16_PCDI, MASK_OUT_X, qc_divu_16_pcdi);
qc!(OP_DIVU_16_PCIX, MASK_OUT_X, qc_divu_16_pcix);
qc!(OP_DIVU_16_PD, MASK_OUT_X_Y, qc_divu_16_pd);
qc!(OP_DIVU_16_PI, MASK_OUT_X_Y, qc_divu_16_pi);
// Put qc for EOR, EORI, EORI to CCR and EORI to SR here
qc8!(OP_EOR_8_DN, MASK_OUT_X_Y, qc_eor_8_dn);
qc8!(OP_EOR_8_AI, MASK_OUT_X_Y, qc_eor_8_ai);
qc8!(OP_EOR_8_PI, MASK_OUT_X_Y, qc_eor_8_pi);
qc8!(OP_EOR_8_PD, MASK_OUT_X_Y, qc_eor_8_pd);
qc8!(OP_EOR_8_DI, MASK_OUT_X_Y, qc_eor_8_di);
qc8!(OP_EOR_8_IX, MASK_OUT_X_Y, qc_eor_8_ix);
qc8!(OP_EOR_8_AW, MASK_OUT_X, qc_eor_8_aw);
qc8!(OP_EOR_8_AL, MASK_OUT_X, qc_eor_8_al);
qc!(OP_EOR_16_DN, MASK_OUT_X_Y, qc_eor_16_dn);
qc!(OP_EOR_16_AI, MASK_OUT_X_Y, qc_eor_16_ai);
qc!(OP_EOR_16_PI, MASK_OUT_X_Y, qc_eor_16_pi);
qc!(OP_EOR_16_PD, MASK_OUT_X_Y, qc_eor_16_pd);
qc!(OP_EOR_16_DI, MASK_OUT_X_Y, qc_eor_16_di);
qc!(OP_EOR_16_IX, MASK_OUT_X_Y, qc_eor_16_ix);
qc!(OP_EOR_16_AW, MASK_OUT_X, qc_eor_16_aw);
qc!(OP_EOR_16_AL, MASK_OUT_X, qc_eor_16_al);
qc!(OP_EOR_32_DN, MASK_OUT_X_Y, qc_eor_32_dn);
qc!(OP_EOR_32_AI, MASK_OUT_X_Y, qc_eor_32_ai);
qc!(OP_EOR_32_PI, MASK_OUT_X_Y, qc_eor_32_pi);
qc!(OP_EOR_32_PD, MASK_OUT_X_Y, qc_eor_32_pd);
qc!(OP_EOR_32_DI, MASK_OUT_X_Y, qc_eor_32_di);
qc!(OP_EOR_32_IX, MASK_OUT_X_Y, qc_eor_32_ix);
qc!(OP_EOR_32_AW, MASK_OUT_X, qc_eor_32_aw);
qc!(OP_EOR_32_AL, MASK_OUT_X, qc_eor_32_al);
qc8!(OP_EORI_8_DN, MASK_OUT_Y, qc_eori_8_dn);
qc8!(OP_EORI_8_AI, MASK_OUT_Y, qc_eori_8_ai);
qc8!(OP_EORI_8_PI, MASK_OUT_Y, qc_eori_8_pi);
qc8!(OP_EORI_8_PD, MASK_OUT_Y, qc_eori_8_pd);
qc8!(OP_EORI_8_DI, MASK_OUT_Y, qc_eori_8_di);
qc8!(OP_EORI_8_IX, MASK_OUT_Y, qc_eori_8_ix);
qc8!(OP_EORI_8_AW, MASK_EXACT, qc_eori_8_aw);
qc8!(OP_EORI_8_AL, MASK_EXACT, qc_eori_8_al);
qc!(OP_EORI_16_DN, MASK_OUT_Y, qc_eori_16_dn);
qc!(OP_EORI_16_AI, MASK_OUT_Y, qc_eori_16_ai);
qc!(OP_EORI_16_PI, MASK_OUT_Y, qc_eori_16_pi);
qc!(OP_EORI_16_PD, MASK_OUT_Y, qc_eori_16_pd);
qc!(OP_EORI_16_DI, MASK_OUT_Y, qc_eori_16_di);
qc!(OP_EORI_16_IX, MASK_OUT_Y, qc_eori_16_ix);
qc!(OP_EORI_16_AW, MASK_EXACT, qc_eori_16_aw);
qc!(OP_EORI_16_AL, MASK_EXACT, qc_eori_16_al);
qc!(OP_EORI_32_DN, MASK_OUT_Y, qc_eori_32_dn);
qc!(OP_EORI_32_AI, MASK_OUT_Y, qc_eori_32_ai);
qc!(OP_EORI_32_PI, MASK_OUT_Y, qc_eori_32_pi);
qc!(OP_EORI_32_PD, MASK_OUT_Y, qc_eori_32_pd);
qc!(OP_EORI_32_DI, MASK_OUT_Y, qc_eori_32_di);
qc!(OP_EORI_32_IX, MASK_OUT_Y, qc_eori_32_ix);
qc!(OP_EORI_32_AW, MASK_EXACT, qc_eori_32_aw);
qc!(OP_EORI_32_AL, MASK_EXACT, qc_eori_32_al);
// Argument order fixed: the qc!/qc8! convention throughout this table is
// (opcode, mask, test_fn). These two invocations had opcode and mask
// swapped; since both are plain u32 constants the macro still matched,
// but quickcheck was registered against the wrong opcode/mask pairing.
qc!(OP_EORI_16_TOC, MASK_EXACT, qc_eori_16_toc);
qc!(OP_EORI_16_TOS, MASK_EXACT, qc_eori_16_tos);
// Put qc for EXG here
qc!(OP_EXG_32_DD, MASK_OUT_X_Y, qc_exg_32_dd);
qc!(OP_EXG_32_AA, MASK_OUT_X_Y, qc_exg_32_aa);
qc!(OP_EXG_32_DA, MASK_OUT_X_Y, qc_exg_32_da);
// Put qc for EXT here
qc!(OP_EXT_BW, MASK_OUT_Y, qc_ext_bw);
qc!(OP_EXT_WL, MASK_OUT_Y, qc_ext_wl);
// Put qc for ILLEGAL here
qc!(OP_ILLEGAL, MASK_EXACT, qc_illegal);
// Put qc for JMP here
qc!(OP_JMP_32_AI, MASK_OUT_Y, qc_jmp_32_ai);
qc!(OP_JMP_32_AL, MASK_EXACT, qc_jmp_32_al);
qc!(OP_JMP_32_AW, MASK_EXACT, qc_jmp_32_aw);
qc!(OP_JMP_32_DI, MASK_OUT_Y, qc_jmp_32_di);
qc!(OP_JMP_32_IX, MASK_OUT_Y, qc_jmp_32_ix);
qc!(OP_JMP_32_PCDI, MASK_EXACT, qc_jmp_32_pcdi);
qc!(OP_JMP_32_PCIX, MASK_EXACT, qc_jmp_32_pcix);
// Put qc for JSR here
qc!(OP_JSR_32_AI, MASK_OUT_Y, qc_jsr_32_ai);
qc!(OP_JSR_32_AL, MASK_EXACT, qc_jsr_32_al);
qc!(OP_JSR_32_AW, MASK_EXACT, qc_jsr_32_aw);
qc!(OP_JSR_32_DI, MASK_OUT_Y, qc_jsr_32_di);
qc!(OP_JSR_32_IX, MASK_OUT_Y, qc_jsr_32_ix);
qc!(OP_JSR_32_PCDI, MASK_EXACT, qc_jsr_32_pcdi);
qc!(OP_JSR_32_PCIX, MASK_EXACT, qc_jsr_32_pcix);
// Put qc for LEA here
qc!(OP_LEA_32_AI, MASK_OUT_Y, qc_lea_32_ai);
qc!(OP_LEA_32_AL, MASK_EXACT, qc_lea_32_al);
qc!(OP_LEA_32_AW, MASK_EXACT, qc_lea_32_aw);
qc!(OP_LEA_32_DI, MASK_OUT_Y, qc_lea_32_di);
qc!(OP_LEA_32_IX, MASK_OUT_Y, qc_lea_32_ix);
qc!(OP_LEA_32_PCDI, MASK_EXACT, qc_lea_32_pcdi);
qc!(OP_LEA_32_PCIX, MASK_EXACT, qc_lea_32_pcix);
// Put qc for LINK here
qc!(OP_LINK_16, MASK_OUT_Y, qc_link_16);
// Put qc for LSL, LSR here
qc8!(OP_LSR_8_S, MASK_OUT_X_Y, qc_lsr_8_s);
qc!(OP_LSR_16_S, MASK_OUT_X_Y, qc_lsr_16_s);
qc!(OP_LSR_32_S, MASK_OUT_X_Y, qc_lsr_32_s);
qc8!(OP_LSR_8_R, MASK_OUT_X_Y, qc_lsr_8_r);
qc!(OP_LSR_16_R, MASK_OUT_X_Y, qc_lsr_16_r);
qc!(OP_LSR_32_R, MASK_OUT_X_Y, qc_lsr_32_r);
qc8!(OP_LSL_8_S, MASK_OUT_X_Y, qc_lsl_8_s);
qc!(OP_LSL_16_S, MASK_OUT_X_Y, qc_lsl_16_s);
qc!(OP_LSL_32_S, MASK_OUT_X_Y, qc_lsl_32_s);
qc8!(OP_LSL_8_R, MASK_OUT_X_Y, qc_lsl_8_r);
qc!(OP_LSL_16_R, MASK_OUT_X_Y, qc_lsl_16_r);
qc!(OP_LSL_32_R, MASK_OUT_X_Y, qc_lsl_32_r);
qc!(OP_LSL_16_AI, MASK_OUT_Y, qc_lsl_16_ai);
qc!(OP_LSL_16_PI, MASK_OUT_Y, qc_lsl_16_pi);
qc!(OP_LSL_16_PD, MASK_OUT_Y, qc_lsl_16_pd);
qc!(OP_LSL_16_DI, MASK_OUT_Y, qc_lsl_16_di);
qc!(OP_LSL_16_IX, MASK_OUT_Y, qc_lsl_16_ix);
qc!(OP_LSL_16_AW, MASK_EXACT, qc_lsl_16_aw);
qc!(OP_LSL_16_AL, MASK_EXACT, qc_lsl_16_al);
qc!(OP_LSR_16_AI, MASK_OUT_Y, qc_lsr_16_ai);
qc!(OP_LSR_16_PI, MASK_OUT_Y, qc_lsr_16_pi);
qc!(OP_LSR_16_PD, MASK_OUT_Y, qc_lsr_16_pd);
qc!(OP_LSR_16_DI, MASK_OUT_Y, qc_lsr_16_di);
qc!(OP_LSR_16_IX, MASK_OUT_Y, qc_lsr_16_ix);
qc!(OP_LSR_16_AW, MASK_EXACT, qc_lsr_16_aw);
qc!(OP_LSR_16_AL, MASK_EXACT, qc_lsr_16_al);
// Put qc for MOVE here
// Put qc for MOVEA here
// Put qc for MOVE to CCR here
// Put qc for MOVE from SR here
// Put qc for MOVE to SR here
// Put qc for MOVE USP here
// Put qc for MOVEM here
// Put qc for MOVEP here
// Put qc for MOVEQ here
// Put qc for MULS here
// Put qc for MULU here
// Put qc for NBCD here
// Put qc for NEG here
// Put qc for NEGX here
// Put qc for NOP here
// Put qc for NOT here
// Put qc for OR here
// Put qc for ORI here
// Put qc for ORI to CCR here
// Put qc for ORI to SR here
// Put qc for PEA here
// Put qc for RESET here
// Put qc for ROL, ROR here
qc8!(OP_ROR_8_S, MASK_OUT_X_Y, qc_ror_8_s);
qc!(OP_ROR_16_S, MASK_OUT_X_Y, qc_ror_16_s);
qc!(OP_ROR_32_S, MASK_OUT_X_Y, qc_ror_32_s);
qc8!(OP_ROR_8_R, MASK_OUT_X_Y, qc_ror_8_r);
qc!(OP_ROR_16_R, MASK_OUT_X_Y, qc_ror_16_r);
qc!(OP_ROR_32_R, MASK_OUT_X_Y, qc_ror_32_r);
qc8!(OP_ROL_8_S, MASK_OUT_X_Y, qc_rol_8_s);
qc!(OP_ROL_16_S, MASK_OUT_X_Y, qc_rol_16_s);
qc!(OP_ROL_32_S, MASK_OUT_X_Y, qc_rol_32_s);
qc8!(OP_ROL_8_R, MASK_OUT_X_Y, qc_rol_8_r);
qc!(OP_ROL_16_R, MASK_OUT_X_Y, qc_rol_16_r);
qc!(OP_ROL_32_R, MASK_OUT_X_Y, qc_rol_32_r);
qc!(OP_ROL_16_AI, MASK_OUT_Y, qc_rol_16_ai);
qc!(OP_ROL_16_PI, MASK_OUT_Y, qc_rol_16_pi);
qc!(OP_ROL_16_PD, MASK_OUT_Y, qc_rol_16_pd);
qc!(OP_ROL_16_DI, MASK_OUT_Y, qc_rol_16_di);
qc!(OP_ROL_16_IX, MASK_OUT_Y, qc_rol_16_ix);
qc!(OP_ROL_16_AW, MASK_EXACT, qc_rol_16_aw);
qc!(OP_ROL_16_AL, MASK_EXACT, qc_rol_16_al);
qc!(OP_ROR_16_AI, MASK_OUT_Y, qc_ror_16_ai);
qc!(OP_ROR_16_PI, MASK_OUT_Y, qc_ror_16_pi);
qc!(OP_ROR_16_PD, MASK_OUT_Y, qc_ror_16_pd);
qc!(OP_ROR_16_DI, MASK_OUT_Y, qc_ror_16_di);
qc!(OP_ROR_16_IX, MASK_OUT_Y, qc_ror_16_ix);
qc!(OP_ROR_16_AW, MASK_EXACT, qc_ror_16_aw);
qc!(OP_ROR_16_AL, MASK_EXACT, qc_ror_16_al);
// Put qc for ROXL, ROXR here
qc8!(OP_ROXR_8_S, MASK_OUT_X_Y, qc_roxr_8_s);
qc!(OP_ROXR_16_S, MASK_OUT_X_Y, qc_roxr_16_s);
qc!(OP_ROXR_32_S, MASK_OUT_X_Y, qc_roxr_32_s);
qc8!(OP_ROXR_8_R, MASK_OUT_X_Y, qc_roxr_8_r);
qc!(OP_ROXR_16_R, MASK_OUT_X_Y, qc_roxr_16_r);
qc!(OP_ROXR_32_R, MASK_OUT_X_Y, qc_roxr_32_r);
qc8!(OP_ROXL_8_S, MASK_OUT_X_Y, qc_roxl_8_s);
qc!(OP_ROXL_16_S, MASK_OUT_X_Y, qc_roxl_16_s);
qc!(OP_ROXL_32_S, MASK_OUT_X_Y, qc_roxl_32_s);
qc8!(OP_ROXL_8_R, MASK_OUT_X_Y, qc_roxl_8_r);
qc!(OP_ROXL_16_R, MASK_OUT_X_Y, qc_roxl_16_r);
qc!(OP_ROXL_32_R, MASK_OUT_X_Y, qc_roxl_32_r);
qc!(OP_ROXL_16_AI, MASK_OUT_Y, qc_roxl_16_ai);
qc!(OP_ROXL_16_PI, MASK_OUT_Y, qc_roxl_16_pi);
qc!(OP_ROXL_16_PD, MASK_OUT_Y, qc_roxl_16_pd);
qc!(OP_ROXL_16_DI, MASK_OUT_Y, qc_roxl_16_di);
qc!(OP_ROXL_16_IX, MASK_OUT_Y, qc_roxl_16_ix);
qc!(OP_ROXL_16_AW, MASK_EXACT, qc_roxl_16_aw);
qc!(OP_ROXL_16_AL, MASK_EXACT, qc_roxl_16_al);
qc!(OP_ROXR_16_AI, MASK_OUT_Y, qc_roxr_16_ai);
qc!(OP_ROXR_16_PI, MASK_OUT_Y, qc_roxr_16_pi);
qc!(OP_ROXR_16_PD, MASK_OUT_Y, qc_roxr_16_pd);
qc!(OP_ROXR_16_DI, MASK_OUT_Y, qc_roxr_16_di);
qc!(OP_ROXR_16_IX, MASK_OUT_Y, qc_roxr_16_ix);
qc!(OP_ROXR_16_AW, MASK_EXACT, qc_roxr_16_aw);
qc!(OP_ROXR_16_AL, MASK_EXACT, qc_roxr_16_al);
// Put qc for RTE here
// Put qc for RTR here
// Put qc for RTS here
qc8!(OP_SBCD_8_RR, qc_sbcd_rr);
qc8!(OP_SBCD_8_MM, qc_sbcd_mm);
qc!(OP_SCC_8_AI, MASK_OUT_Y, qc_scc_8_ai);
qc!(OP_SCC_8_AL, MASK_EXACT, qc_scc_8_al);
qc!(OP_SCC_8_AW, MASK_EXACT, qc_scc_8_aw);
qc!(OP_SCC_8_DN, MASK_OUT_Y, qc_scc_8_dn);
qc!(OP_SCC_8_DI, MASK_OUT_Y, qc_scc_8_di);
qc!(OP_SCC_8_IX, MASK_OUT_Y, qc_scc_8_ix);
qc!(OP_SCC_8_PD, MASK_OUT_Y, qc_scc_8_pd);
qc!(OP_SCC_8_PI, MASK_OUT_Y, qc_scc_8_pi);
qc!(OP_SCS_8_AI, MASK_OUT_Y, qc_scs_8_ai);
qc!(OP_SCS_8_AL, MASK_EXACT, qc_scs_8_al);
qc!(OP_SCS_8_AW, MASK_EXACT, qc_scs_8_aw);
qc!(OP_SCS_8_DN, MASK_OUT_Y, qc_scs_8_dn);
qc!(OP_SCS_8_DI, MASK_OUT_Y, qc_scs_8_di);
qc!(OP_SCS_8_IX, MASK_OUT_Y, qc_scs_8_ix);
qc!(OP_SCS_8_PD, MASK_OUT_Y, qc_scs_8_pd);
qc!(OP_SCS_8_PI, MASK_OUT_Y, qc_scs_8_pi);
qc!(OP_SEQ_8_AI, MASK_OUT_Y, qc_seq_8_ai);
qc!(OP_SEQ_8_AL, MASK_EXACT, qc_seq_8_al);
qc!(OP_SEQ_8_AW, MASK_EXACT, qc_seq_8_aw);
qc!(OP_SEQ_8_DN, MASK_OUT_Y, qc_seq_8_dn);
qc!(OP_SEQ_8_DI, MASK_OUT_Y, qc_seq_8_di);
qc!(OP_SEQ_8_IX, MASK_OUT_Y, qc_seq_8_ix);
qc!(OP_SEQ_8_PD, MASK_OUT_Y, qc_seq_8_pd);
qc!(OP_SEQ_8_PI, MASK_OUT_Y, qc_seq_8_pi);
qc!(OP_SF_8_AI, MASK_OUT_Y, qc_sf_8_ai);
qc!(OP_SF_8_AL, MASK_EXACT, qc_sf_8_al);
qc!(OP_SF_8_AW, MASK_EXACT, qc_sf_8_aw);
qc!(OP_SF_8_DN, MASK_OUT_Y, qc_sf_8_dn);
qc!(OP_SF_8_DI, MASK_OUT_Y, qc_sf_8_di);
qc!(OP_SF_8_IX, MASK_OUT_Y, qc_sf_8_ix);
qc!(OP_SF_8_PD, MASK_OUT_Y, qc_sf_8_pd);
qc!(OP_SF_8_PI, MASK_OUT_Y, qc_sf_8_pi);
qc!(OP_SGE_8_AI, MASK_OUT_Y, qc_sge_8_ai);
qc!(OP_SGE_8_AL, MASK_EXACT, qc_sge_8_al);
qc!(OP_SGE_8_AW, MASK_EXACT, qc_sge_8_aw);
qc!(OP_SGE_8_DN, MASK_OUT_Y, qc_sge_8_dn);
qc!(OP_SGE_8_DI, MASK_OUT_Y, qc_sge_8_di);
qc!(OP_SGE_8_IX, MASK_OUT_Y, qc_sge_8_ix);
qc!(OP_SGE_8_PD, MASK_OUT_Y, qc_sge_8_pd);
qc!(OP_SGE_8_PI, MASK_OUT_Y, qc_sge_8_pi);
qc!(OP_SGT_8_AI, MASK_OUT_Y, qc_sgt_8_ai);
qc!(OP_SGT_8_AL, MASK_EXACT, qc_sgt_8_al);
qc!(OP_SGT_8_AW, MASK_EXACT, qc_sgt_8_aw);
qc!(OP_SGT_8_DN, MASK_OUT_Y, qc_sgt_8_dn);
qc!(OP_SGT_8_DI, MASK_OUT_Y, qc_sgt_8_di);
qc!(OP_SGT_8_IX, MASK_OUT_Y, qc_sgt_8_ix);
qc!(OP_SGT_8_PD, MASK_OUT_Y, qc_sgt_8_pd);
qc!(OP_SGT_8_PI, MASK_OUT_Y, qc_sgt_8_pi);
qc!(OP_SHI_8_AI, MASK_OUT_Y, qc_shi_8_ai);
qc!(OP_SHI_8_AL, MASK_EXACT, qc_shi_8_al);
qc!(OP_SHI_8_AW, MASK_EXACT, qc_shi_8_aw);
qc!(OP_SHI_8_DN, MASK_OUT_Y, qc_shi_8_dn);
qc!(OP_SHI_8_DI, MASK_OUT_Y, qc_shi_8_di);
qc!(OP_SHI_8_IX, MASK_OUT_Y, qc_shi_8_ix);
qc!(OP_SHI_8_PD, MASK_OUT_Y, qc_shi_8_pd);
qc!(OP_SHI_8_PI, MASK_OUT_Y, qc_shi_8_pi);
qc!(OP_SLE_8_AI, MASK_OUT_Y, qc_sle_8_ai);
qc!(OP_SLE_8_AL, MASK_EXACT, qc_sle_8_al);
qc!(OP_SLE_8_AW, MASK_EXACT, qc_sle_8_aw);
qc!(OP_SLE_8_DN, MASK_OUT_Y, qc_sle_8_dn);
qc!(OP_SLE_8_DI, MASK_OUT_Y, qc_sle_8_di);
qc!(OP_SLE_8_IX, MASK_OUT_Y, qc_sle_8_ix);
qc!(OP_SLE_8_PD, MASK_OUT_Y, qc_sle_8_pd);
qc!(OP_SLE_8_PI, MASK_OUT_Y, qc_sle_8_pi);
qc!(OP_SLS_8_AI, MASK_OUT_Y, qc_sls_8_ai);
qc!(OP_SLS_8_AL, MASK_EXACT, qc_sls_8_al);
qc!(OP_SLS_8_AW, MASK_EXACT, qc_sls_8_aw);
qc!(OP_SLS_8_DN, MASK_OUT_Y, qc_sls_8_dn);
qc!(OP_SLS_8_DI, MASK_OUT_Y, qc_sls_8_di);
qc!(OP_SLS_8_IX, MASK_OUT_Y, qc_sls_8_ix);
qc!(OP_SLS_8_PD, MASK_OUT_Y, qc_sls_8_pd);
qc!(OP_SLS_8_PI, MASK_OUT_Y, qc_sls_8_pi);
qc!(OP_SLT_8_AI, MASK_OUT_Y, qc_slt_8_ai);
qc!(OP_SLT_8_AL, MASK_EXACT, qc_slt_8_al);
qc!(OP_SLT_8_AW, MASK_EXACT, qc_slt_8_aw);
qc!(OP_SLT_8_DN, MASK_OUT_Y, qc_slt_8_dn);
qc!(OP_SLT_8_DI, MASK_OUT_Y, qc_slt_8_di);
qc!(OP_SLT_8_IX, MASK_OUT_Y, qc_slt_8_ix);
qc!(OP_SLT_8_PD, MASK_OUT_Y, qc_slt_8_pd);
qc!(OP_SLT_8_PI, MASK_OUT_Y, qc_slt_8_pi);
qc!(OP_SMI_8_AI, MASK_OUT_Y, qc_smi_8_ai);
qc!(OP_SMI_8_AL, MASK_EXACT, qc_smi_8_al);
qc!(OP_SMI_8_AW, MASK_EXACT, qc_smi_8_aw);
qc!(OP_SMI_8_DN, MASK_OUT_Y, qc_smi_8_dn);
qc!(OP_SMI_8_DI, MASK_OUT_Y, qc_smi_8_di);
qc!(OP_SMI_8_IX, MASK_OUT_Y, qc_smi_8_ix);
qc!(OP_SMI_8_PD, MASK_OUT_Y, qc_smi_8_pd);
qc!(OP_SMI_8_PI, MASK_OUT_Y, qc_smi_8_pi);
qc!(OP_SNE_8_AI, MASK_OUT_Y, qc_sne_8_ai);
qc!(OP_SNE_8_AL, MASK_EXACT, qc_sne_8_al);
qc!(OP_SNE_8_AW, MASK_EXACT, qc_sne_8_aw);
qc!(OP_SNE_8_DN, MASK_OUT_Y, qc_sne_8_dn);
qc!(OP_SNE_8_DI, MASK_OUT_Y, qc_sne_8_di);
qc!(OP_SNE_8_IX, MASK_OUT_Y, qc_sne_8_ix);
qc!(OP_SNE_8_PD, MASK_OUT_Y, qc_sne_8_pd);
qc!(OP_SNE_8_PI, MASK_OUT_Y, qc_sne_8_pi);
qc!(OP_SPL_8_AI, MASK_OUT_Y, qc_spl_8_ai);
qc!(OP_SPL_8_AL, MASK_EXACT, qc_spl_8_al);
qc!(OP_SPL_8_AW, MASK_EXACT, qc_spl_8_aw);
qc!(OP_SPL_8_DN, MASK_OUT_Y, qc_spl_8_dn);
qc!(OP_SPL_8_DI, MASK_OUT_Y, qc_spl_8_di);
qc!(OP_SPL_8_IX, MASK_OUT_Y, qc_spl_8_ix);
qc!(OP_SPL_8_PD, MASK_OUT_Y, qc_spl_8_pd);
qc!(OP_SPL_8_PI, MASK_OUT_Y, qc_spl_8_pi);
qc!(OP_ST_8_AI, MASK_OUT_Y, qc_st_8_ai);
qc!(OP_ST_8_AL, MASK_EXACT, qc_st_8_al);
qc!(OP_ST_8_AW, MASK_EXACT, qc_st_8_aw);
qc!(OP_ST_8_DN, MASK_OUT_Y, qc_st_8_dn);
qc!(OP_ST_8_DI, MASK_OUT_Y, qc_st_8_di);
qc!(OP_ST_8_IX, MASK_OUT_Y, qc_st_8_ix);
qc!(OP_ST_8_PD, MASK_OUT_Y, qc_st_8_pd);
qc!(OP_ST_8_PI, MASK_OUT_Y, qc_st_8_pi);
qc!(OP_SVC_8_AI, MASK_OUT_Y, qc_svc_8_ai);
qc!(OP_SVC_8_AL, MASK_EXACT, qc_svc_8_al);
qc!(OP_SVC_8_AW, MASK_EXACT, qc_svc_8_aw);
qc!(OP_SVC_8_DN, MASK_OUT_Y, qc_svc_8_dn);
qc!(OP_SVC_8_DI, MASK_OUT_Y, qc_svc_8_di);
qc!(OP_SVC_8_IX, MASK_OUT_Y, qc_svc_8_ix);
qc!(OP_SVC_8_PD, MASK_OUT_Y, qc_svc_8_pd);
qc!(OP_SVC_8_PI, MASK_OUT_Y, qc_svc_8_pi);
qc!(OP_SVS_8_AI, MASK_OUT_Y, qc_svs_8_ai);
qc!(OP_SVS_8_AL, MASK_EXACT, qc_svs_8_al);
qc!(OP_SVS_8_AW, MASK_EXACT, qc_svs_8_aw);
qc!(OP_SVS_8_DN, MASK_OUT_Y, qc_svs_8_dn);
qc!(OP_SVS_8_DI, MASK_OUT_Y, qc_svs_8_di);
qc!(OP_SVS_8_IX, MASK_OUT_Y, qc_svs_8_ix);
qc!(OP_SVS_8_PD, MASK_OUT_Y, qc_svs_8_pd);
qc!(OP_SVS_8_PI, MASK_OUT_Y, qc_svs_8_pi);
/*
qc!(MASK_OUT_Y, OP_SHI_8_AI, qc_shi_8_ai);
qc!(MASK_EXACT, OP_SHI_8_AL, qc_shi_8_al);
qc!(MASK_EXACT, OP_SHI_8_AW, qc_shi_8_aw);
qc!(MASK_OUT_Y, OP_SHI_8_DN, qc_shi_8_dn);
qc!(MASK_OUT_Y, OP_SHI_8_DI, qc_shi_8_di);
qc!(MASK_OUT_Y, OP_SHI_8_IX, qc_shi_8_ix);
qc!(MASK_OUT_Y, OP_SHI_8_PD, qc_shi_8_pd);
qc!(MASK_OUT_Y, OP_SHI_8_PI, qc_shi_8_pi);
*/
// Put qc for STOP here
// Put qc for SUB here
qc8!(OP_SUB_8_ER_DN, qc_sub_8_er_dn);
qc8!(OP_SUB_8_ER_PI, qc_sub_8_er_pi);
qc8!(OP_SUB_8_ER_PD, qc_sub_8_er_pd);
qc8!(OP_SUB_8_ER_AI, qc_sub_8_er_ai);
qc8!(OP_SUB_8_ER_DI, qc_sub_8_er_di);
qc8!(OP_SUB_8_ER_IX, qc_sub_8_er_ix);
qc8!(OP_SUB_8_ER_AW, MASK_OUT_X, qc_sub_8_er_aw);
qc8!(OP_SUB_8_ER_AL, MASK_OUT_X, qc_sub_8_er_al);
qc8!(OP_SUB_8_ER_PCDI, MASK_OUT_X, qc_sub_8_er_pcdi);
qc8!(OP_SUB_8_ER_PCIX, MASK_OUT_X, qc_sub_8_er_pcix);
qc8!(OP_SUB_8_ER_IMM, MASK_OUT_X, qc_sub_8_er_imm);
qc8!(OP_SUB_8_RE_PI, qc_sub_8_re_pi);
qc8!(OP_SUB_8_RE_PD, qc_sub_8_re_pd);
qc8!(OP_SUB_8_RE_AI, qc_sub_8_re_ai);
qc8!(OP_SUB_8_RE_DI, qc_sub_8_re_di);
qc8!(OP_SUB_8_RE_IX, qc_sub_8_re_ix);
qc8!(OP_SUB_8_RE_AW, MASK_OUT_X, qc_sub_8_re_aw);
qc8!(OP_SUB_8_RE_AL, MASK_OUT_X, qc_sub_8_re_al);
qc!(OP_SUB_16_ER_DN, qc_sub_16_er_dn);
qc!(OP_SUB_16_ER_AN, qc_sub_16_er_an);
qc!(OP_SUB_16_ER_PI, qc_sub_16_er_pi);
qc!(OP_SUB_16_ER_PD, qc_sub_16_er_pd);
qc!(OP_SUB_16_ER_AI, qc_sub_16_er_ai);
qc!(OP_SUB_16_ER_DI, qc_sub_16_er_di);
qc!(OP_SUB_16_ER_IX, qc_sub_16_er_ix);
qc!(OP_SUB_16_ER_AW, MASK_OUT_X, qc_sub_16_er_aw);
qc!(OP_SUB_16_ER_AL, MASK_OUT_X, qc_sub_16_er_al);
qc!(OP_SUB_16_ER_PCDI, MASK_OUT_X, qc_sub_16_er_pcdi);
qc!(OP_SUB_16_ER_PCIX, MASK_OUT_X, qc_sub_16_er_pcix);
qc!(OP_SUB_16_ER_IMM, MASK_OUT_X, qc_sub_16_er_imm);
qc!(OP_SUB_16_RE_PI, qc_sub_16_re_pi);
qc!(OP_SUB_16_RE_PD, qc_sub_16_re_pd);
qc!(OP_SUB_16_RE_AI, qc_sub_16_re_ai);
qc!(OP_SUB_16_RE_DI, qc_sub_16_re_di);
qc!(OP_SUB_16_RE_IX, qc_sub_16_re_ix);
qc!(OP_SUB_16_RE_AW, MASK_OUT_X, qc_sub_16_re_aw);
qc!(OP_SUB_16_RE_AL, MASK_OUT_X, qc_sub_16_re_al);
qc!(OP_SUB_32_ER_DN, qc_sub_32_er_dn);
qc!(OP_SUB_32_ER_AN, qc_sub_32_er_an);
qc!(OP_SUB_32_ER_PI, qc_sub_32_er_pi);
qc!(OP_SUB_32_ER_PD, qc_sub_32_er_pd);
qc!(OP_SUB_32_ER_AI, qc_sub_32_er_ai);
qc!(OP_SUB_32_ER_DI, qc_sub_32_er_di);
qc!(OP_SUB_32_ER_IX, qc_sub_32_er_ix);
qc!(OP_SUB_32_ER_AW, MASK_OUT_X, qc_sub_32_er_aw);
qc!(OP_SUB_32_ER_AL, MASK_OUT_X, qc_sub_32_er_al);
qc!(OP_SUB_32_ER_PCDI, MASK_OUT_X, qc_sub_32_er_pcdi);
qc!(OP_SUB_32_ER_PCIX, MASK_OUT_X, qc_sub_32_er_pcix);
qc!(OP_SUB_32_ER_IMM, MASK_OUT_X, qc_sub_32_er_imm);
qc!(OP_SUB_32_RE_PI, qc_sub_32_re_pi);
qc!(OP_SUB_32_RE_PD, qc_sub_32_re_pd);
qc!(OP_SUB_32_RE_AI, qc_sub_32_re_ai);
qc!(OP_SUB_32_RE_DI, qc_sub_32_re_di);
qc!(OP_SUB_32_RE_IX, qc_sub_32_re_ix);
qc!(OP_SUB_32_RE_AW, MASK_OUT_X, qc_sub_32_re_aw);
qc!(OP_SUB_32_RE_AL, MASK_OUT_X, qc_sub_32_re_al);
qc!(OP_SUBA_16_DN, qc_suba_16_dn);
qc!(OP_SUBA_16_AN, qc_suba_16_an);
qc!(OP_SUBA_16_PI, qc_suba_16_pi);
qc!(OP_SUBA_16_PD, qc_suba_16_pd);
qc!(OP_SUBA_16_AI, qc_suba_16_ai);
qc!(OP_SUBA_16_DI, qc_suba_16_di);
qc!(OP_SUBA_16_IX, qc_suba_16_ix);
qc!(OP_SUBA_16_AW, MASK_OUT_X, qc_suba_16_aw);
qc!(OP_SUBA_16_AL, MASK_OUT_X, qc_suba_16_al);
qc!(OP_SUBA_16_PCDI, MASK_OUT_X, qc_suba_16_pcdi);
qc!(OP_SUBA_16_PCIX, MASK_OUT_X, qc_suba_16_pcix);
qc!(OP_SUBA_16_IMM, MASK_OUT_X, qc_suba_16_imm);
qc!(OP_SUBA_32_DN, qc_suba_32_dn);
qc!(OP_SUBA_32_AN, qc_suba_32_an);
qc!(OP_SUBA_32_PI, qc_suba_32_pi);
qc!(OP_SUBA_32_PD, qc_suba_32_pd);
qc!(OP_SUBA_32_AI, qc_suba_32_ai);
qc!(OP_SUBA_32_DI, qc_suba_32_di);
qc!(OP_SUBA_32_IX, qc_suba_32_ix);
qc!(OP_SUBA_32_AW, MASK_OUT_X, qc_suba_32_aw);
qc!(OP_SUBA_32_AL, MASK_OUT_X, qc_suba_32_al);
qc!(OP_SUBA_32_PCDI, MASK_OUT_X, qc_suba_32_pcdi);
qc!(OP_SUBA_32_PCIX, MASK_OUT_X, qc_suba_32_pcix);
qc!(OP_SUBA_32_IMM, MASK_OUT_X, qc_suba_32_imm);
qc8!(OP_SUBI_8_DN, MASK_OUT_Y, qc_subi_8_dn);
qc8!(OP_SUBI_8_PI, MASK_OUT_Y, qc_subi_8_pi);
qc8!(OP_SUBI_8_PD, MASK_OUT_Y, qc_subi_8_pd);
qc8!(OP_SUBI_8_AI, MASK_OUT_Y, qc_subi_8_ai);
qc8!(OP_SUBI_8_DI, MASK_OUT_Y, qc_subi_8_di);
qc8!(OP_SUBI_8_IX, MASK_OUT_Y, qc_subi_8_ix);
qc8!(OP_SUBI_8_AW, MASK_EXACT, qc_subi_8_aw);
qc8!(OP_SUBI_8_AL, MASK_EXACT, qc_subi_8_al);
qc!(OP_SUBI_16_DN, MASK_OUT_Y, qc_subi_16_dn);
qc!(OP_SUBI_16_PI, MASK_OUT_Y, qc_subi_16_pi);
qc!(OP_SUBI_16_PD, MASK_OUT_Y, qc_subi_16_pd);
qc!(OP_SUBI_16_AI, MASK_OUT_Y, qc_subi_16_ai);
qc!(OP_SUBI_16_DI, MASK_OUT_Y, qc_subi_16_di);
qc!(OP_SUBI_16_IX, MASK_OUT_Y, qc_subi_16_ix);
qc!(OP_SUBI_16_AW, MASK_EXACT, qc_subi_16_aw);
qc!(OP_SUBI_16_AL, MASK_EXACT, qc_subi_16_al);
qc!(OP_SUBI_32_DN, MASK_OUT_Y, qc_subi_32_dn);
qc!(OP_SUBI_32_PI, MASK_OUT_Y, qc_subi_32_pi);
qc!(OP_SUBI_32_PD, MASK_OUT_Y, qc_subi_32_pd);
qc!(OP_SUBI_32_AI, MASK_OUT_Y, qc_subi_32_ai);
qc!(OP_SUBI_32_DI, MASK_OUT_Y, qc_subi_32_di);
qc!(OP_SUBI_32_IX, MASK_OUT_Y, qc_subi_32_ix);
qc!(OP_SUBI_32_AW, MASK_EXACT, qc_subi_32_aw);
qc!(OP_SUBI_32_AL, MASK_EXACT, qc_subi_32_al);
qc8!(OP_SUBQ_8_DN, qc_subq_8_dn);
qc8!(OP_SUBQ_8_PI, qc_subq_8_pi);
qc8!(OP_SUBQ_8_PD, qc_subq_8_pd);
qc8!(OP_SUBQ_8_AI, qc_subq_8_ai);
qc8!(OP_SUBQ_8_DI, qc_subq_8_di);
qc8!(OP_SUBQ_8_IX, qc_subq_8_ix);
qc8!(OP_SUBQ_8_AW, MASK_OUT_X, qc_subq_8_aw);
qc8!(OP_SUBQ_8_AL, MASK_OUT_X, qc_subq_8_al);
qc!(OP_SUBQ_16_DN, qc_subq_16_dn);
qc!(OP_SUBQ_16_AN, qc_subq_16_an);
qc!(OP_SUBQ_16_PI, qc_subq_16_pi);
qc!(OP_SUBQ_16_PD, qc_subq_16_pd);
qc!(OP_SUBQ_16_AI, qc_subq_16_ai);
qc!(OP_SUBQ_16_DI, qc_subq_16_di);
qc!(OP_SUBQ_16_IX, qc_subq_16_ix);
qc!(OP_SUBQ_16_AW, MASK_OUT_X, qc_subq_16_aw);
qc!(OP_SUBQ_16_AL, MASK_OUT_X, qc_subq_16_al);
qc!(OP_SUBQ_32_DN, qc_subq_32_dn);
qc!(OP_SUBQ_32_AN, qc_subq_32_an);
qc!(OP_SUBQ_32_PI, qc_subq_32_pi);
qc!(OP_SUBQ_32_PD, qc_subq_32_pd);
qc!(OP_SUBQ_32_AI, qc_subq_32_ai);
qc!(OP_SUBQ_32_DI, qc_subq_32_di);
qc!(OP_SUBQ_32_IX, qc_subq_32_ix);
qc!(OP_SUBQ_32_AW, MASK_OUT_X, qc_subq_32_aw);
qc!(OP_SUBQ_32_AL, MASK_OUT_X, qc_subq_32_al);
qc8!(OP_SUBX_8_RR, qc_subx_8_rr);
qc8!(OP_SUBX_8_MM, qc_subx_8_mm);
qc!(OP_SUBX_16_RR, qc_subx_16_rr);
qc!(OP_SUBX_16_MM, qc_subx_16_mm);
qc!(OP_SUBX_32_RR, qc_subx_32_rr);
qc!(OP_SUBX_32_MM, qc_subx_32_mm);
// Put qc for SWAP here
qc!(OP_SWAP_32_DN, qc_swap_32_dn);
//
// Put qc for TAS here
// Put qc for TRAP here
// Put qc for TRAPV here
// Put qc for TST here
// Put qc for UNLK here
// Compares one piece of architectural state between two emulator cores
// (the Musashi reference core and r68k). On mismatch it prints a
// diagnostic and makes the *enclosing function* `return false`, so it
// may only be used inside a `fn(...) -> bool`.
//
// Four arm shapes are accepted:
//   core_eq!(a, b.field[i])   - indexed field, values printed as hex
//   core_eq!(a, b.method() ?) - method result, printed with {:?} (Debug)
//   core_eq!(a, b.method())   - method result, printed as hex
//   core_eq!(a, b.field)      - plain field, printed as hex
macro_rules! core_eq {
    // Indexed field access, e.g. `dar[i]`; hex-formatted output.
    ($left:ident , $right:ident . $field:ident [ $index:expr ]) => ({
        // The match-on-references pattern mirrors how `assert_eq!` binds
        // both sides once without moving them.
        match (&($left.$field[$index]), &($right.$field[$index])) {
            (left_val, right_val) => {
                if !(*left_val == *right_val) {
                    println!("core incoherence: `{}[{}]` differs \
                           ({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), $index, stringify!($left), left_val, stringify!($right), right_val);
                    return false;
                }
            }
        }
    });
    // Method call compared via Debug formatting (the trailing `?` token
    // selects this arm; it is not the try operator).
    ($left:ident , $right:ident . $field:ident () ?) => ({
        match (&($left.$field()), &($right.$field())) {
            (left_val, right_val) => {
                if !(*left_val == *right_val) {
                    println!("core incoherence: `{}()` differs \
                           ({}: `{:?}`, {}: `{:?}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val);
                    return false;
                }
            }
        }
    });
    // Method call compared with hex formatting.
    ($left:ident , $right:ident . $field:ident ()) => ({
        match (&($left.$field()), &($right.$field())) {
            (left_val, right_val) => {
                if !(*left_val == *right_val) {
                    println!("core incoherence: `{}()` differs \
                           ({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val);
                    return false;
                }
            }
        }
    });
    // Plain field access compared with hex formatting.
    ($left:ident , $right:ident . $field:ident) => ({
        match (&($left.$field), &($right.$field)) {
            (left_val, right_val) => {
                if !(*left_val == *right_val) {
                    println!("core incoherence: `{}` differs \
                           ({}: `0x{:x}`, {}: `0x{:x}`)", stringify!($field), stringify!($left), left_val, stringify!($right), right_val);
                    return false;
                }
            }
        }
    })
}
/// Compares the full architectural state of the Musashi core against the
/// r68k core, returning `true` when they agree.
///
/// Each `core_eq!` invocation early-returns `false` on the first mismatch
/// (after printing a diagnostic), so a `true` result means every checked
/// item matched. The memory-access logs are compared with `assert_equal`
/// and will panic (not return `false`) on divergence.
fn assert_cores_equal(musashi: &Core, r68k: &Core) -> bool {
    // check memory accesses match up
    assert_equal(get_ops(), r68k.mem.logger.ops());
    core_eq!(musashi, r68k.pc);
    core_eq!(musashi, r68k.flags() ?); // Debug-formatted comparison
    core_eq!(musashi, r68k.status_register());
    core_eq!(musashi, r68k.ssp());
    core_eq!(musashi, r68k.usp());
    // All 16 data/address registers (D0-D7, A0-A7).
    for i in (0..16).rev() {
        core_eq!(musashi, r68k.dar[i]);
    }
    true
}
#[test]
fn roundtrip_d0() {
    // Writing a value into D0 via the Musashi FFI and reading it back
    // must preserve it unchanged.
    let value = 256;
    assert_eq!(value, roundtrip_register(Register::D0, value));
}
#[test]
fn roundtrip_abcd_rr() {
    // 0xc101 encodes ABCD D0, D1 (register-to-register BCD add).
    let pc = 0x40;
    let mut core = Core::new_mem(pc, &[0xc1, 0x01, 0x00, 0x00]);
    core.dar[0] = 0x17;
    core.dar[1] = 0x27;
    core.dar[5] = 0x55555;
    reset_and_execute1(&mut core);
    // BCD arithmetic: 17 + 27 = 44. The source register and an
    // uninvolved register (D5) must be untouched.
    assert_eq!(0x44, core.dar[0]);
    assert_eq!(0x27, core.dar[1]);
    assert_eq!(0x55555, core.dar[5]);
    // The only memory access should be the single instruction fetch at pc.
    let ops = get_ops();
    assert_eq!(1, ops.len());
    assert_eq!(Operation::ReadLong(SUPERVISOR_PROGRAM, pc, 0xc1010000), ops[0]);
}
#[test]
fn compare_abcd_rr() {
    // 0xc300 encodes ABCD D0, D1. Run it on both cores from identical
    // starting state and verify they end up coherent.
    let pc = 0x40;
    let mut musashi = Core::new_mem(pc, &[0xc3, 0x00]);
    musashi.dar[0] = 0x16;
    musashi.dar[1] = 0x26;
    let mut r68k = musashi.clone(); // identical starting state for both cores
    reset_and_execute1(&mut musashi);
    r68k.execute1();
    // BCD arithmetic: 16 + 26 = 42.
    assert_eq!(0x42, r68k.dar[1]);
    assert_cores_equal(&musashi, &r68k);
}
// Executes two ABCD instructions back-to-back on both cores, verifying
// state stays coherent across consecutive instructions (not just one).
#[test]
#[allow(unused_variables)]
fn run_abcd_rr_twice() {
    // Serialize access to the global (singleton) Musashi core state.
    let mutex = MUSASHI_LOCK.lock().unwrap();
    let pc = 0x40;
    // 0xc300: ABCD D1, D0
    // 0xc302: ABCD D1, D2
    let mut musashi = Core::new_mem(pc, &[0xc3, 0x00, 0xc3, 0x02]);
    musashi.dar[0] = 0x16;
    musashi.dar[1] = 0x26;
    musashi.dar[2] = 0x31;
    let mut r68k = musashi.clone(); // so very self-aware!
    initialize_musashi(&mut musashi);
    // execute ABCD D1, D0
    execute1(&mut musashi);
    r68k.execute1();
    // BCD arithmetic: 16 + 26 = 42.
    assert_eq!(0x42, musashi.dar[1]);
    assert_eq!(0x42, r68k.dar[1]);
    // then execute a second instruction (ABCD D1, D2) on the core
    execute1(&mut musashi);
    r68k.execute1();
    // BCD arithmetic: 42 + 31 = 73.
    assert_eq!(0x73, musashi.dar[1]);
    assert_eq!(0x73, r68k.dar[1]);
    assert_cores_equal(&musashi, &r68k);
}
// Both cores must perform identical bus activity when an address error
// (word access at an odd address) is triggered.
#[test]
#[allow(unused_variables)]
fn compare_address_error_actions() {
    // Serialize access to the global (singleton) Musashi core state.
    let mutex = MUSASHI_LOCK.lock().unwrap();
    // using an odd absolute address should force an address error
    // opcodes d278,0107 is ADD.W $0107, D1
    let mut musashi = Core::new_mem(0x40, &[0xd2, 0x78, 0x01, 0x07]);
    let vec3 = 0x200;
    // Install the address-error exception vector (vector 3, at 3*4).
    musashi.mem.write_long(SUPERVISOR_PROGRAM, 3*4, vec3);
    // Handler body: the same ADD.W but with an even absolute address.
    musashi.mem.write_long(SUPERVISOR_PROGRAM, vec3, 0xd2780108);
    musashi.dar[15] = 0x100; // supervisor SP, used for exception stacking
    let mut r68k = musashi.clone(); // so very self-aware!
    initialize_musashi(&mut musashi);
    execute1(&mut musashi);
    //execute1(&mut musashi);
    // NOTE(review): r68k is stepped twice while Musashi is stepped once;
    // presumably Musashi processes the exception within one call while
    // r68k needs a second step — TODO confirm this asymmetry is intended.
    r68k.execute1();
    r68k.execute1();
    assert_cores_equal(&musashi, &r68k);
}
// Both cores must take the same illegal-instruction exception path.
#[test]
#[allow(unused_variables)]
fn compare_illegal_instruction_actions() {
    // Serialize access to the global (singleton) Musashi core state.
    let mutex = MUSASHI_LOCK.lock().unwrap();
    // d208 is ADD.B A0,D0, which is illegal
    // (the second byte was previously written as decimal `08` — same
    // value, but hex keeps it consistent with the opcode comment and
    // every other opcode byte in this file)
    let mut musashi = Core::new_mem(0x40, &[0xd2, 0x08]);
    let vec4 = 0x200;
    // Install the illegal-instruction exception vector (vector 4, at 4*4).
    musashi.mem.write_long(SUPERVISOR_PROGRAM, 4*4, vec4);
    // Handler body: a valid ADD.W instruction.
    musashi.mem.write_long(SUPERVISOR_PROGRAM, vec4, 0xd2780108);
    musashi.dar[15] = 0x100; // supervisor SP, used for exception stacking
    let mut r68k = musashi.clone(); // so very self-aware!
    initialize_musashi(&mut musashi);
    execute1(&mut musashi);
    //execute1(&mut musashi);
    r68k.execute1();
    //r68k.execute1();
    assert_cores_equal(&musashi, &r68k);
}
use std::ptr;
use super::m68k_get_reg;
// After initializing Musashi from a core that starts in supervisor mode,
// Musashi's ISP must hold the active SP and USP the inactive one.
#[test]
#[allow(unused_variables)]
fn stackpointers_are_correct_when_starting_in_supervisor_mode() {
    // Serialize access to the global (singleton) Musashi core state.
    let mutex = MUSASHI_LOCK.lock().unwrap();
    let pc = 0x40;
    // 0xc300: ABCD D1, D0
    // 0xc302: ABCD D1, D2
    let mut musashi = Core::new_mem(pc, &[0xc3, 0x00, 0xc3, 0x02]);
    // Bit 13 of the status register is the supervisor (S) flag.
    musashi.sr_to_flags((1<<13));
    musashi.inactive_usp = 0x200; // User SP
    musashi.dar[15] = 0x100; // Supa SP
    initialize_musashi(&mut musashi);
    unsafe {
        // Musashi must report supervisor mode set, ISP = active SP,
        // USP = the inactive user SP.
        assert!((1<<13) & m68k_get_reg(ptr::null_mut(), Register::SR) > 0);
        assert_eq!(0x100, m68k_get_reg(ptr::null_mut(), Register::ISP));
        assert_eq!(0x200, m68k_get_reg(ptr::null_mut(), Register::USP));
    }
}
#[test]
#[allow(unused_variables)]
fn stackpointers_are_correct_when_starting_in_user_mode() {
    // Serialize access to the global (singleton) Musashi core state.
    let mutex = MUSASHI_LOCK.lock().unwrap();
    let supervisor_bit = 1 << 13; // S-flag position in the status register
    let pc = 0x40;
    // Program contents (two ABCD ops) are irrelevant; never executed here.
    let mut musashi = Core::new_mem(pc, &[0xc3, 0x00, 0xc3, 0x02]);
    // Clearing SR puts the core in user mode, so dar[15] is the USP.
    musashi.sr_to_flags(0);
    musashi.dar[15] = 0x200; // active (user) stack pointer
    musashi.inactive_ssp = 0x100; // inactive supervisor stack pointer
    initialize_musashi(&mut musashi);
    unsafe {
        // Musashi must report user mode, with ISP/USP mapped accordingly.
        assert!(supervisor_bit & m68k_get_reg(ptr::null_mut(), Register::SR) == 0);
        assert_eq!(0x100, m68k_get_reg(ptr::null_mut(), Register::ISP));
        assert_eq!(0x200, m68k_get_reg(ptr::null_mut(), Register::USP));
    }
}
}
|
use messages::outgoing;
use mio;
use node::Node;
use node;
use routing_table::{InsertOutcome, RoutingTable};
use routing_table;
use std::collections::HashMap;
use std::io::Cursor;
use std::thread;
use transaction::{TransactionId, TransactionIdGenerator};
/// Timer-driven tasks delivered back to the handler when a `mio`
/// timeout fires (i.e. when no response arrived in time).
pub enum ScheduledTask {
    ContinueBootstrap, // retry/advance the bootstrap find-node round
    ContinueHealthCheck // ping the next questionable node
}
/// Messages sent into the event loop from other threads via its channel.
pub enum OneshotTask {
    Incoming(Vec<u8>), // a raw UDP datagram to parse and dispatch
    StartBootstrap // kick off the bootstrap process
}
/// An in-flight query awaiting a response, carrying the mio timeout
/// handle so the retry can be cancelled when the response arrives.
enum TableAction {
    Bootstrap(mio::Timeout),
    HealthCheck(mio::Timeout)
}
/// Coarse lifecycle state of the handler.
enum Status {
    Bootstrapping, // still discovering nodes near our own address
    Idle // bootstrap finished; only periodic health checks run
}
/// Event-loop handler driving the DHT node: answers incoming queries,
/// processes responses, runs bootstrap, and schedules health checks.
pub struct Handler {
    port: u16, // UDP port the listener thread binds
    routing_table: RoutingTable, // known peers, bucketed by distance
    self_node: Box<node::Node + 'static>, // our own node identity
    transaction_ids: TransactionIdGenerator, // fresh ids for outgoing queries
    status: Status, // bootstrapping vs idle
    pending_actions: HashMap<TransactionId, TableAction> // queries awaiting responses
}
impl Handler {
pub fn new<N: node::Node + 'static>(self_node: N, port: u16, routers: Vec<Box<Node>>) -> Handler {
let self_address = self_node.get_address();
let routing_table = routing_table::RoutingTable::new(8, self_address, routers);
Handler {
port: port,
routing_table: routing_table,
self_node: Box::new(self_node),
transaction_ids: TransactionIdGenerator::new(),
status: Status::Idle,
pending_actions: HashMap::new()
}
}
pub fn run(mut self) {
let mut event_loop = mio::EventLoop::new().unwrap();
let loop_channel = event_loop.channel();
create_incoming_udp_channel(self.port, loop_channel.clone());
loop_channel.send(OneshotTask::StartBootstrap).unwrap();
event_loop.run(&mut self).unwrap();
}
fn handle_incoming(&mut self, data: Vec<u8>, event_loop: &mut mio::EventLoop<Handler>) {
use messages::incoming;
use messages::incoming::*;
let mut data = Cursor::new(data);
let message = incoming::parse_from_reader(&mut data).unwrap();
match message {
Message::Query(transaction_id, origin, query) => {
let origin_address = origin.get_address();
match query {
Query::FindNode(target) => {
let response: Vec<u8> = outgoing::create_find_node_response(
transaction_id,
&self.self_node,
self.routing_table.nearest_to(&target, false));
origin.send(response);
self.routing_table.insert(origin).unwrap();
{
if let Some(origin) = self.routing_table.find_node(&origin_address) {
origin.received_query(transaction_id);
}
}
},
Query::Ping => {
let response = outgoing::create_ping_response(
transaction_id,
&self.self_node);
origin.send(response);
self.routing_table.insert(origin).unwrap();
if let Some(origin) = self.routing_table.find_node(&origin_address) {
origin.received_query(transaction_id);
}
}
}
}
Message::Response(transaction_id, origin, response) => {
let origin_address = origin.get_address();
match response {
Response::FindNode(mut nodes) => {
let mut encounted_new_node = false;
let mut tail = vec![origin];
for node in nodes.drain(..).chain(tail.drain(..)) {
match self.routing_table.insert(node) {
Ok(InsertOutcome::Inserted) => {
encounted_new_node = true;
}
_ => { }
}
}
{
let mut origin = self.routing_table
.find_node(&origin_address)
.expect("Got find node response from unknown node");
origin.received_response(transaction_id);
}
match self.pending_actions.remove(&transaction_id) {
Some(TableAction::Bootstrap(timeout)) => {
event_loop.clear_timeout(timeout);
}
Some(TableAction::HealthCheck(timeout)) => {
event_loop.clear_timeout(timeout);
}
None => { }
}
match self.status {
Status::Bootstrapping => {
if encounted_new_node {
// Continue botstrapping
self.continue_bootstrap(event_loop);
} else {
self.status = Status::Idle;
self.continue_health_check(event_loop);
}
}
_ => { }
}
}
Response::Ping => {
let mut origin = self.routing_table.find_node(&origin_address).expect("Got ping response from unknown node");
origin.received_response(transaction_id);
}
}
}
}
}
fn start_bootstrap(&mut self, event_loop: &mut mio::EventLoop<Self>) {
self.status = Status::Bootstrapping;
self.continue_bootstrap(event_loop);
}
fn continue_bootstrap(&mut self, event_loop: &mut mio::EventLoop<Self>) {
let transaction_id = self.find_self();
let timeout = event_loop.timeout_ms(ScheduledTask::ContinueBootstrap, 1000).unwrap();
self.pending_actions.insert(transaction_id, TableAction::Bootstrap(timeout));
}
fn continue_health_check(&mut self, event_loop: &mut mio::EventLoop<Self>) {
let transaction_id = self.health_check();
let timeout = event_loop.timeout_ms(ScheduledTask::ContinueHealthCheck, 1000).unwrap();
self.pending_actions.insert(transaction_id, TableAction::HealthCheck(timeout));
}
/// Sends a find-node query for our own address to the nearest known node
/// (if any) and returns the transaction id used for the query.
///
/// A transaction id is generated even when the routing table is empty and
/// nothing is sent; callers still record it in `pending_actions`.
fn find_self(&mut self) -> TransactionId {
    let transaction_id = self.transaction_ids.generate();
    let query = outgoing::create_find_node_query(
        transaction_id,
        &self.self_node,
        self.self_node.get_address());
    if let Some(node) = self.routing_table.nearest().get_mut(0) {
        // NOTE(review): the extra scope looks like a leftover borrow-scoping
        // workaround; `send` consumes `query` either way.
        {
            node.send(query);
        }
        node.sent_query(transaction_id);
    }
    transaction_id
}
/// Pings at most one questionable node so its liveness can be
/// re-established; returns the transaction id generated for the ping.
/// The id is produced even when there is no questionable node to ping.
fn health_check(&mut self) -> TransactionId {
    let tid = self.transaction_ids.generate();
    if let Some(target) = self.routing_table.questionable_nodes().get_mut(0) {
        let ping = outgoing::create_ping_query(tid, &self.self_node);
        target.send(ping);
        target.sent_query(tid);
    }
    tid
}
}
impl mio::Handler for Handler {
    type Timeout = ScheduledTask;
    type Message = OneshotTask;

    /// Dispatches cross-thread messages (UDP datagrams, bootstrap kick-off)
    /// to the matching handler method.
    fn notify(&mut self, event_loop: &mut mio::EventLoop<Handler>, task: OneshotTask) {
        match task {
            OneshotTask::StartBootstrap => self.start_bootstrap(event_loop),
            OneshotTask::Incoming(data) => self.handle_incoming(data, event_loop),
        }
    }

    /// Dispatches expired retry timers scheduled via `timeout_ms`.
    fn timeout(&mut self, event_loop: &mut mio::EventLoop<Handler>, timeout: ScheduledTask) {
        match timeout {
            ScheduledTask::ContinueHealthCheck => self.continue_health_check(event_loop),
            ScheduledTask::ContinueBootstrap => self.continue_bootstrap(event_loop),
        }
    }
}
/// Spawns a background thread that owns a UDP socket bound to `port` and
/// forwards every received datagram to the event loop as
/// `OneshotTask::Incoming`.
///
/// The spawned thread panics if the socket cannot be bound or a receive
/// fails; that is treated as fatal for the node.
fn create_incoming_udp_channel(port: u16, sender: mio::Sender<OneshotTask>) {
    use std::net::UdpSocket;
    thread::spawn(move || {
        let address = ("0.0.0.0", port);
        let socket = UdpSocket::bind(address).unwrap();
        // Reuse one receive buffer instead of zeroing 4 KiB per datagram.
        let mut buf = [0; 4096];
        loop {
            match socket.recv_from(&mut buf) {
                Ok((size, _src)) => {
                    // `to_vec` copies exactly the received bytes (clearer and
                    // cheaper than iter().cloned().collect()).
                    sender.send(OneshotTask::Incoming(buf[..size].to_vec())).unwrap();
                }
                Err(e) => panic!("Error receiving from server: {}", e)
            }
        }
    });
}
Separate network logic from Handler
use messages::outgoing;
use mio;
use node::Node;
use node;
use routing_table::{InsertOutcome, RoutingTable};
use routing_table;
use std::collections::HashMap;
use std::io::Cursor;
use std::thread;
use transaction::{TransactionId, TransactionIdGenerator};
/// Timer events delivered back to the handler by the mio event loop.
pub enum ScheduledTask {
    // Retry the bootstrap find-node step.
    ContinueBootstrap,
    // Ping another questionable node.
    ContinueHealthCheck
}
/// Cross-thread messages sent into the event loop via its notify channel.
pub enum OneshotTask {
    // A raw datagram received by the UDP reader thread.
    Incoming(Vec<u8>),
    // Kick off the bootstrap process.
    StartBootstrap
}
/// Follow-up state attached to an in-flight transaction; the stored
/// `mio::Timeout` is cleared when the matching response arrives.
enum TableAction {
    Bootstrap(mio::Timeout),
    HealthCheck(mio::Timeout)
}
/// Coarse lifecycle state of the node: actively bootstrapping the routing
/// table, or idle (periodic health checks only).
enum Status {
    Bootstrapping,
    Idle
}
/// Core DHT node state: routing table plus bookkeeping for in-flight
/// transactions. Driven by a mio event loop through `Handler`.
pub struct Network {
    // UDP port the reader thread binds to.
    port: u16,
    routing_table: RoutingTable,
    // Our own identity, boxed since `node::Node` is a trait.
    self_node: Box<node::Node + 'static>,
    transaction_ids: TransactionIdGenerator,
    status: Status,
    // Maps an outstanding transaction to the retry timeout scheduled for it.
    pending_actions: HashMap<TransactionId, TableAction>
}
impl Network {
    /// Creates a `Network` that will listen on `port`, seeded with
    /// `routers` as the initial routing-table contacts. The routing table
    /// uses a fixed bucket size of 8.
    pub fn new<N: node::Node + 'static>(self_node: N, port: u16, routers: Vec<Box<Node>>) -> Network {
        let own_address = self_node.get_address().clone();
        Network {
            port: port,
            routing_table: routing_table::RoutingTable::new(8, own_address, routers),
            self_node: Box::new(self_node),
            transaction_ids: TransactionIdGenerator::new(),
            status: Status::Idle,
            pending_actions: HashMap::new()
        }
    }
let mut event_loop = mio::EventLoop::new().unwrap();
pub fn run(self) {
let loop_channel = event_loop.channel();
create_incoming_udp_channel(self.port, loop_channel.clone());
loop_channel.send(OneshotTask::StartBootstrap).unwrap();
let mut handler = Handler::new(self);
event_loop.run(&mut handler).unwrap();
}
/// Parses one raw datagram and reacts to it: answers incoming queries,
/// records responses in the routing table, and advances the
/// bootstrap/health-check state machine.
///
/// Panics if the datagram cannot be parsed, a routing-table insert fails,
/// or a response arrives from a node the table does not know — malformed
/// or unexpected traffic is currently fatal.
fn handle_incoming(&mut self, data: Vec<u8>, event_loop: &mut mio::EventLoop<Handler>) {
    use messages::incoming;
    use messages::incoming::*;
    let mut data = Cursor::new(data);
    let message = incoming::parse_from_reader(&mut data).unwrap();
    match message {
        Message::Query(transaction_id, origin, query) => {
            let origin_address = origin.get_address();
            match query {
                Query::FindNode(target) => {
                    // Answer with the nodes we know nearest to `target`.
                    let response: Vec<u8> = outgoing::create_find_node_response(
                        transaction_id,
                        &self.self_node,
                        self.routing_table.nearest_to(&target, false));
                    origin.send(response);
                    // `insert` consumes `origin`, so the node must be looked
                    // up again by address to record the received query.
                    self.routing_table.insert(origin).unwrap();
                    {
                        if let Some(origin) = self.routing_table.find_node(&origin_address) {
                            origin.received_query(transaction_id);
                        }
                    }
                },
                Query::Ping => {
                    let response = outgoing::create_ping_response(
                        transaction_id,
                        &self.self_node);
                    origin.send(response);
                    self.routing_table.insert(origin).unwrap();
                    if let Some(origin) = self.routing_table.find_node(&origin_address) {
                        origin.received_query(transaction_id);
                    }
                }
            }
        }
        Message::Response(transaction_id, origin, response) => {
            let origin_address = origin.get_address();
            match response {
                Response::FindNode(mut nodes) => {
                    // Insert every returned node plus the responder itself,
                    // noting whether anything new actually made it in.
                    let mut encounted_new_node = false;
                    let mut tail = vec![origin];
                    for node in nodes.drain(..).chain(tail.drain(..)) {
                        match self.routing_table.insert(node) {
                            Ok(InsertOutcome::Inserted) => {
                                encounted_new_node = true;
                            }
                            _ => { }
                        }
                    }
                    {
                        // Scoped so the mutable borrow of the routing table
                        // ends before `pending_actions` is touched below.
                        let mut origin = self.routing_table
                            .find_node(&origin_address)
                            .expect("Got find node response from unknown node");
                        origin.received_response(transaction_id);
                    }
                    // A response cancels any retry timeout scheduled for
                    // this transaction.
                    match self.pending_actions.remove(&transaction_id) {
                        Some(TableAction::Bootstrap(timeout)) => {
                            event_loop.clear_timeout(timeout);
                        }
                        Some(TableAction::HealthCheck(timeout)) => {
                            event_loop.clear_timeout(timeout);
                        }
                        None => { }
                    }
                    match self.status {
                        Status::Bootstrapping => {
                            if encounted_new_node {
                                // Continue bootstrapping
                                self.continue_bootstrap(event_loop);
                            } else {
                                // No new nodes: bootstrap has converged;
                                // fall back to periodic health checks.
                                self.status = Status::Idle;
                                self.continue_health_check(event_loop);
                            }
                        }
                        _ => { }
                    }
                }
                Response::Ping => {
                    let mut origin = self.routing_table.find_node(&origin_address).expect("Got ping response from unknown node");
                    origin.received_response(transaction_id);
                }
            }
        }
    }
}
/// Switches the node into bootstrap mode and immediately fires the
/// first bootstrap query.
fn start_bootstrap(&mut self, event_loop: &mut mio::EventLoop<Handler>) {
    self.status = Status::Bootstrapping;
    self.continue_bootstrap(event_loop);
}
let transaction_id = self.find_self();
fn continue_bootstrap(&mut self, event_loop: &mut mio::EventLoop<Handler>) {
let timeout = event_loop.timeout_ms(ScheduledTask::ContinueBootstrap, 1000).unwrap();
self.pending_actions.insert(transaction_id, TableAction::Bootstrap(timeout));
}
/// Pings one questionable node and records the transaction together with
/// a 1s retry timeout, so the health check can be repeated on silence.
fn continue_health_check(&mut self, event_loop: &mut mio::EventLoop<Handler>) {
    let tid = self.health_check();
    let retry = event_loop
        .timeout_ms(ScheduledTask::ContinueHealthCheck, 1000)
        .unwrap();
    self.pending_actions.insert(tid, TableAction::HealthCheck(retry));
}
/// Sends a find-node query for our own address to the nearest known node
/// (if any) and returns the transaction id used for the query.
///
/// A transaction id is generated even when the routing table is empty and
/// nothing is sent; callers still record it in `pending_actions`.
fn find_self(&mut self) -> TransactionId {
    let transaction_id = self.transaction_ids.generate();
    let query = outgoing::create_find_node_query(
        transaction_id,
        &self.self_node,
        self.self_node.get_address());
    if let Some(node) = self.routing_table.nearest().get_mut(0) {
        // NOTE(review): the extra scope looks like a leftover borrow-scoping
        // workaround; `send` consumes `query` either way.
        {
            node.send(query);
        }
        node.sent_query(transaction_id);
    }
    transaction_id
}
fn health_check(&mut self) -> TransactionId {
let transaction_id = self.transaction_ids.generate();
if let Some(node) = self.routing_table.questionable_nodes().get_mut(0) {
let query = outgoing::create_ping_query(
transaction_id, &self.self_node);
node.send(query);
node.sent_query(transaction_id);
}
transaction_id
}
/// Thin adapter that lets a `Network` be driven by the mio event loop.
struct Handler {
    network: Network
}
impl Handler {
    /// Wraps `network` so it can be registered with the event loop.
    fn new(network: Network) -> Handler {
        Handler { network: network }
    }
}
impl mio::Handler for Handler {
    type Timeout = ScheduledTask;
    type Message = OneshotTask;

    /// Dispatches cross-thread messages (UDP datagrams, bootstrap kick-off)
    /// to the wrapped `Network`.
    fn notify(&mut self, event_loop: &mut mio::EventLoop<Handler>, task: OneshotTask) {
        match task {
            OneshotTask::Incoming(data) => self.network.handle_incoming(data, event_loop),
            OneshotTask::StartBootstrap => self.network.start_bootstrap(event_loop)
        }
    }

    /// Dispatches expired retry timers scheduled via `timeout_ms`.
    fn timeout(&mut self, event_loop: &mut mio::EventLoop<Handler>, timeout: ScheduledTask) {
        match timeout {
            // BUG FIX: removed a `ScheduledTask::ContinueRefresh` arm that
            // referenced a variant and a `continue_refresh` method which do
            // not exist anywhere in this file.
            ScheduledTask::ContinueBootstrap => self.network.continue_bootstrap(event_loop),
            ScheduledTask::ContinueHealthCheck => self.network.continue_health_check(event_loop)
        }
    }
}
/// Spawns a background thread that owns a UDP socket bound to `port` and
/// forwards every received datagram to the event loop as
/// `OneshotTask::Incoming`.
///
/// The spawned thread panics if the socket cannot be bound or a receive
/// fails; that is treated as fatal for the node.
fn create_incoming_udp_channel(port: u16, sender: mio::Sender<OneshotTask>) {
    use std::net::UdpSocket;
    thread::spawn(move || {
        let address = ("0.0.0.0", port);
        let socket = UdpSocket::bind(address).unwrap();
        // Reuse one receive buffer instead of zeroing 4 KiB per datagram.
        let mut buf = [0; 4096];
        loop {
            match socket.recv_from(&mut buf) {
                Ok((size, _src)) => {
                    // `to_vec` copies exactly the received bytes (clearer and
                    // cheaper than iter().cloned().collect()).
                    sender.send(OneshotTask::Incoming(buf[..size].to_vec())).unwrap();
                }
                Err(e) => panic!("Error receiving from server: {}", e)
            }
        }
    });
}
|
extern crate ndarray;
use ndarray_ext::NdArray;
use std::cell::RefCell;
use std::rc::Rc;
use tensor::{RawTensor, Tensor};
mod dummy_op;
mod index;
mod random_ops;
mod clip;
mod add_n;
mod logsumexp;
mod log_softmax;
mod identity;
mod cmp_ops;
mod math_ops;
mod concat;
mod tile;
mod binary_ops;
mod softmax;
mod sigmoid;
mod elu;
mod relu;
mod slice;
mod sigmoid_cross_entropy;
mod softmax_cross_entropy;
mod sparse_softmax_cross_entropy;
mod gather;
mod matmul;
mod batch_matmul;
mod reverse_axes;
mod transpose;
mod reshape;
mod reduction_ops;
mod squeeze;
mod expand_dims;
#[doc(hidden)]
/// Represents a operation node in a computation graph.
/// `Tensor` wraps trait-object of this.
pub trait Op {
    /// Name of this op
    fn name(&self) -> &str;

    /// Actually runs this op.
    /// num of inputs : N,
    /// num of outputs: 1
    ///
    /// NOTE(review): `train` presumably selects training-time behavior for
    /// ops that differ between training and inference — confirm against
    /// implementors.
    fn compute(&mut self, xs: &[&NdArray], train: bool) -> NdArray;

    /// Returns symbolic gradient for each input node by use of output gradient etc.
    ///
    /// # Arguments
    /// * `gy` - Symbolic representation of the gradient of `compute`'s return value
    /// * `inputs` - Symbolic representation of `compute::xs`
    /// * `output` - Symbolic representation of `compute`'s return value
    ///
    /// NOTE:
    /// The number of return values must be same as `inputs.len()`.
    /// A `None` entry means the corresponding input receives no gradient.
    fn grad(&self, gy: &Tensor, inputs: &[&Tensor], output: &Tensor) -> Vec<Option<Tensor>>;
}
impl Tensor {
    /// Gets a symbolic float32 element from a tensor.
    ///
    /// `idx` can be negative.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate ndarray;
    /// extern crate autograd as ag;
    ///
    /// let ref a = ag::variable(ndarray::arr2(&[[2., 3.], [4., 5.]]));
    /// let ref b = a.get(2);
    /// assert_eq!(b.eval()[0], 4.);
    /// ```
    pub fn get(&self, idx: isize) -> Tensor
    {
        // Indexing itself is deferred to graph evaluation via IndexOp.
        apply_op(index::IndexOp { index: idx }, &[self])
    }
}
#[doc(hidden)]
#[inline]
/// Helper function to generate a symbolic tensor
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[4, 2]);
/// let ref v = ag::zeros(&[2, 3]);
/// let ref b = ag::zeros(&[4, 3]);
/// let ref z = ag::matmul(a, v) + b;
/// let mut vars = [a, v, b, z];
/// // `sort_by_key` don't reverse the order of `a` and `v`
/// vars.sort_by_key(|a| a.borrow().rank);
/// assert!(vars == [a, v, b, z])
/// ```
fn apply_op<T: Op + 'static>(op: T, inputs: &[&Tensor]) -> Tensor
{
    // A node's rank is one more than the deepest input's rank (0 for
    // source nodes with no inputs).
    let rank = inputs
        .iter()
        .map(|t| t.borrow().rank)
        .max()
        .map(|r| r + 1)
        .unwrap_or(0);
    let owned_inputs: Vec<Tensor> = inputs.iter().map(|t| (*t).clone()).collect();
    Tensor(Rc::new(RefCell::new(RawTensor {
        op: Box::new(op),
        inputs: owned_inputs,
        param: None,
        rank: rank,
    })))
}
// ---------------------------------------
// -- Ops to manipulate `Tensor` object --
// ---------------------------------------

/// Creates a placeholder tensor.
///
/// The placeholder tensor is a dynamic input node to the computation graph,
/// which can be filled on evaluation time.
/// To fill the placeholders, use `autograd::Feed`.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref x: ag::Tensor = ag::placeholder();
/// let ref y: ag::Tensor = 3 * x;
///
/// // Fills placeholder `x`.
/// let feed = ag::Feed::new().add(x, ndarray::arr1(&[2.]));
/// assert_eq!(6., y.eval_with_input(feed)[0]);
/// ```
#[inline]
pub fn placeholder() -> Tensor
{
    // A source node: no inputs, no parameter, rank 0.
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Placeholder".to_string() }),
        inputs: Vec::new(),
        param: None,
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
/// Creates a shared variable tensor from rust-ndarray's array object.
///
/// The shared variable behaves like any other tensors, except that
/// it can be optimized with gradient descent methods
/// implemented in `autograd::sgd`.
/// For the usages, see https://github.com/perrier1034/rust-autograd/tree/master/examples
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref x: ag::Tensor = ag::variable(ndarray::arr1(&[2.]));
/// let ref y: ag::Tensor = 3 * x;
///
/// assert_eq!(6., y.eval()[0]);
/// ```
#[inline]
pub fn variable<T: ndarray::Dimension>(array: ndarray::Array<f32, T>) -> Tensor
{
    // A source node whose payload is the (dynamically-dimensioned) array.
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Variable".to_string() }),
        inputs: Vec::new(),
        param: Some(array.into_dyn()),
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
#[inline]
/// Returns gradient tensors wrt variables.
///
/// # Arguments
/// * `objective` - Target of differentiation.
/// * `variables` - Variable tensors with which differentiate `objective`.
/// * `initial_grad` - This is required **if objective is not a scalar**. In most cases,
/// this is initialized with 1s.
///
/// # Returns
/// Symbolic gradient tensors corresponding to `variables` in the same order as `variables`
///
/// # Example1
/// Partial derivatives of `z = 2x^2 + 3y + 1`.
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref x = ag::placeholder();
/// let ref y = ag::variable(ndarray::arr1(&[0.]));
/// let ref z = 2*x*x + 3*y + 1;
///
/// // dz/dy
/// let ref g1 = ag::gradients(z, &[y], None)[0];
/// // dz/dx
/// let ref g2 = ag::gradients(z, &[x], None)[0];
///
/// // ddz/dx (differentiates `z` again)
/// let ref gg = ag::gradients(g2, &[x], None)[0];
///
/// // evaluation of symbolic gradients
/// assert_eq!(3., g1.eval()[0]);
/// assert_eq!(4., gg.eval()[0]);
///
/// // dz/dx requires to fill the placeholder `x`
/// let feed = ag::Feed::new().add(x, ndarray::arr1(&[2.]));
/// assert_eq!(8., g2.eval_with_input(feed)[0]);
///
/// ```
///
/// # Example2
/// The case where objective is not a scalar
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::variable(ag::ndarray_ext::zeros(&[4, 2]));
/// let ref b = ag::zeros(&[2, 3]);
/// let ref c = ag::matmul(a, b);
/// let ref g = ag::gradients(c, &[a], Some(ag::ndarray_ext::ones(&[4, 2])))[0];
/// ```
pub fn gradients(
    objective: &Tensor,
    variables: &[&Tensor],
    initial_grad: Option<&Tensor>,
) -> Vec<Tensor>
{
    // All graph traversal and gradient construction lives in `topology`;
    // this is a thin public entry point.
    ::topology::symbolic_gradients(objective, variables, initial_grad)
}
#[inline]
/// Elementwise sine
pub fn sin(x: &Tensor) -> Tensor
{
apply_op(math_ops::Sin, &[x])
}
#[inline]
/// Elementwise cosine
pub fn cos(x: &Tensor) -> Tensor
{
apply_op(math_ops::Cos, &[x])
}
#[inline]
/// Elementwise tangent
pub fn tan(x: &Tensor) -> Tensor
{
apply_op(math_ops::Tan, &[x])
}
#[inline]
/// Elementwise arcsin
pub fn asin(x: &Tensor) -> Tensor
{
apply_op(math_ops::Asin, &[x])
}
#[inline]
/// Elementwise arccos
pub fn acos(x: &Tensor) -> Tensor
{
apply_op(math_ops::Acos, &[x])
}
#[inline]
/// Elementwise arctan
pub fn atan(x: &Tensor) -> Tensor
{
apply_op(math_ops::Atan, &[x])
}
#[inline]
/// Elementwise hyperbolic sine
pub fn sinh(x: &Tensor) -> Tensor
{
apply_op(math_ops::Sinh, &[x])
}
#[inline]
/// Elementwise hyperbolic cosine
pub fn cosh(x: &Tensor) -> Tensor
{
apply_op(math_ops::Cosh, &[x])
}
#[inline]
/// Elementwise hyperbolic tangent
pub fn tanh(x: &Tensor) -> Tensor
{
apply_op(math_ops::Tanh, &[x])
}
#[inline]
/// Elementwise hyperbolic arcsin
pub fn asinh(x: &Tensor) -> Tensor
{
apply_op(math_ops::Asinh, &[x])
}
#[inline]
/// Elementwise hyperbolic arccos
pub fn acosh(x: &Tensor) -> Tensor
{
apply_op(math_ops::Acosh, &[x])
}
#[inline]
/// Elementwise hyperbolic arctan
pub fn atanh(x: &Tensor) -> Tensor
{
apply_op(math_ops::Atanh, &[x])
}
#[inline]
/// Identity function
pub fn identity(a: &Tensor) -> Tensor
{
apply_op(identity::Identity, &[a])
}
// Thin wrappers over the elementwise binary arithmetic ops.

/// Adds two tensors
#[inline]
pub fn add(a: &Tensor, b: &Tensor) -> Tensor { apply_op(binary_ops::ElementwiseAdd, &[a, b]) }

/// Subtracts `a` from `b`
#[inline]
pub fn sub(a: &Tensor, b: &Tensor) -> Tensor { apply_op(binary_ops::ElementwiseSub, &[a, b]) }

/// Multiplies two tensors
#[inline]
pub fn mul(a: &Tensor, b: &Tensor) -> Tensor { apply_op(binary_ops::ElementwiseMul, &[a, b]) }

/// Divides `a` with `b`
#[inline]
pub fn div(a: &Tensor, b: &Tensor) -> Tensor { apply_op(binary_ops::ElementwiseDiv, &[a, b]) }
/// Elementwise sqrt
#[inline]
pub fn sqrt(x: &Tensor) -> Tensor { apply_op(math_ops::Sqrt, &[x]) }

/// Elementwise pow
#[inline]
pub fn pow(x: &Tensor, a: f32) -> Tensor { apply_op(math_ops::Pow { a: a }, &[x]) }

/// Elementwise log
#[inline]
pub fn log(x: &Tensor, a: f32) -> Tensor { apply_op(math_ops::Log { a: a }, &[x]) }

/// Elementwise exponential
#[inline]
pub fn exp(x: &Tensor) -> Tensor { apply_op(math_ops::Exp, &[x]) }
#[inline]
/// Adds all input tensors inplace
///
/// All the input tensors must have same shapes.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref a = ag::ones(&[2, 2]);
/// let ref b = ag::ones(&[2, 2]);
/// let ref c = ag::ones(&[2, 2]);
/// let ref d = ag::add_n(&[a, b, c]);
/// assert_eq!(d.eval().shape(), &[2, 2]);
/// assert_eq!(d.eval(), ndarray::arr2(&[[3., 3.], [3., 3.]]).into_dyn());
/// ```
pub fn add_n(xs: &[&Tensor]) -> Tensor
{
    let op = add_n::AddN;
    apply_op(op, xs)
}
#[inline]
/// Compares two tensors and returns a binary tensor.
///
/// if `a[i] == b[i]` then `return_value[i]` will be 1 else 0
///
/// # Panics
/// When `a's shape` != `b's shape`.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref a = ag::constant(ndarray::arr1(&[1., 2., 3.]));
/// let ref b = ag::constant(ndarray::arr1(&[3., 2., 1.]));
/// let ref c = ag::equals(a, b);
/// assert_eq!(c.eval(), ndarray::arr1(&[0., 1., 0.]).into_dyn());
/// ```
pub fn equals(a: &Tensor, b: &Tensor) -> Tensor
{
    let op = cmp_ops::Equals;
    apply_op(op, &[a, b])
}
#[inline]
/// Takes argmax along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let input_arr = ndarray::arr2(&[[1., 2.], [3., 4.], [6., 5.]]);
/// let answer = ndarray::arr1(&[1., 1., 0.]).into_dyn();
/// let input = ag::constant(input_arr);
/// let result = ag::argmax(&input, 1, false);
/// assert_eq!(result.eval(), answer);
/// ```
pub fn argmax(x: &Tensor, axis: isize, keep_dim: bool) -> Tensor
{
    apply_op(reduction_ops::ArgMax { axis: axis, keep_dim: keep_dim }, &[x])
}
#[inline]
/// Expands dims.
///
/// Negative axes are acceptable.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::constant(ag::ndarray_ext::standard_normal(&[3]));
/// let ref b = ag::expand_dims(a, &[0, 2]);
/// assert_eq!(b.eval().shape(), &[1, 3, 1]);
/// ```
pub fn expand_dims(x: &Tensor, axes: &[isize]) -> Tensor
{
    // The op expects the axis list in ascending order.
    let mut sorted_axes = axes.to_vec();
    sorted_axes.sort();
    apply_op(expand_dims::ExpandDims { axes: sorted_axes }, &[x])
}
#[inline]
/// Squeezes dims.
///
/// Negative axes are acceptable.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::constant(ag::ndarray_ext::standard_normal(&[1, 3, 1]));
/// let ref b = ag::squeeze(a, &[0, 2]);
/// assert_eq!(b.eval().shape(), &[3]);
/// ```
pub fn squeeze(x: &Tensor, axes: &[isize]) -> Tensor
{
    // The op expects the axis list in ascending order.
    let mut sorted_axes = axes.to_vec();
    sorted_axes.sort();
    apply_op(squeeze::Squeeze { axes: sorted_axes }, &[x])
}
#[inline]
/// Tiles input tensor along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref x = ag::constant(ndarray::arr2(&[[2., 2.], [3., 3.]]));
/// let ref y = ag::tile(x, 0, 2);
/// assert_eq!(
///     y.eval(),
///     ndarray::arr2(&[[2., 2.], [3., 3.], [2., 2.], [3., 3.]]).into_dyn()
/// );
/// ```
pub fn tile(x: &Tensor, axis: isize, num: usize) -> Tensor
{
    apply_op(tile::Tile { axis: axis, num: num }, &[x])
}
#[inline]
/// Limits all elements so as to be within `[min, max]`
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref x = ag::constant(ndarray::arr1(&[2., 4., 6.]));
/// let ref y = ag::clip(x, 3., 5.);
/// assert_eq!(y.eval(), ndarray::arr1(&[3., 4., 5.]).into_dyn());
/// ```
pub fn clip(x: &Tensor, min: f32, max: f32) -> Tensor
{
    apply_op(clip::Clip { min: min, max: max }, &[x])
}
#[inline]
/// Takes max along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let x = ag::constant(ndarray::arr2(&[[2., 4.], [3., 1.]]));
/// let y = ag::reduce_max(&x, 0, false);
/// assert_eq!(y.eval(), ndarray::arr1(&[3., 4.]).into_dyn());
/// ```
pub fn reduce_max(x: &Tensor, axis: isize, keep_dim: bool) -> Tensor
{
    apply_op(reduction_ops::ReduceMax { axis: axis, keep_dim: keep_dim }, &[x])
}

#[inline]
/// Takes min along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let x = ag::constant(ndarray::arr2(&[[2., 4.], [3., 1.]]));
/// let y = ag::reduce_min(&x, 0, false);
/// assert_eq!(y.eval(), ndarray::arr1(&[2., 1.]).into_dyn());
/// ```
pub fn reduce_min(x: &Tensor, axis: isize, keep_dim: bool) -> Tensor
{
    apply_op(reduction_ops::ReduceMin { axis: axis, keep_dim: keep_dim }, &[x])
}

#[inline]
/// Takes mean along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let x = ag::constant(ndarray::arr2(&[[2., 4.], [3., 1.]]));
/// let y = ag::reduce_mean(&x, 1, false);
/// assert_eq!(y.eval(), ndarray::arr1(&[3., 2.]).into_dyn());
/// ```
pub fn reduce_mean(x: &Tensor, axis: isize, keep_dim: bool) -> Tensor
{
    apply_op(reduction_ops::ReduceMean { axis: axis, keep_dim: keep_dim }, &[x])
}

#[inline]
/// Takes sum along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let x = ag::constant(ndarray::arr2(&[[2., 4.], [3., 1.]]));
/// let y = ag::reduce_sum(&x, 1, false);
/// assert_eq!(y.eval(), ndarray::arr1(&[6., 4.]).into_dyn());
/// ```
pub fn reduce_sum(x: &Tensor, axis: isize, keep_dim: bool) -> Tensor
{
    apply_op(reduction_ops::ReduceSum { axis: axis, keep_dim: keep_dim }, &[x])
}
#[inline]
/// Reshapes input tensor.
///
/// Only one dim in `shape` can be `-1`.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let x = ag::zeros(&[3, 2, 2]);
/// let y = ag::reshape(&x, &[3, 4]);
/// assert_eq!(y.eval(), x.eval().into_shape(ndarray::IxDyn(&[3, 4])).unwrap());
/// ```
pub fn reshape(x: &Tensor, shape: &[isize]) -> Tensor
{
    // `None` marks the single inferred (-1) dimension.
    let mut seen_minus_one = false;
    let mut target_shape = Vec::with_capacity(shape.len());
    for &len in shape {
        if len == -1 {
            if seen_minus_one {
                panic!("`shape` has two or more `-1` dim.");
            }
            seen_minus_one = true;
            target_shape.push(None);
        } else if len < -1 {
            panic!("`shape` contains invalid dim size: {}", len);
        } else {
            target_shape.push(Some(len as usize));
        }
    }
    apply_op(reshape::Reshape { target_shape: target_shape }, &[x])
}
#[inline]
/// Returns 1-ranked tensor (vector)
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref x = ag::zeros(&[3, 2, 2]);
/// assert_eq!(ag::flatten(x).eval().shape(), &[12]);
/// ```
pub fn flatten(x: &Tensor) -> Tensor
{
    // A single inferred (`None`) dimension collapses everything into 1-D.
    apply_op(reshape::Reshape { target_shape: vec![None] }, &[x])
}
// Scalar comparisons: each yields a binary tensor (1 where the predicate
// holds, 0 elsewhere).

/// Returns binary tensor.
#[inline]
pub fn greater(x: &Tensor, a: f32) -> Tensor { apply_op(cmp_ops::Greater { a: a }, &[x]) }

/// Returns binary tensor.
#[inline]
pub fn greater_equal(x: &Tensor, a: f32) -> Tensor { apply_op(cmp_ops::GreaterEqual { a: a }, &[x]) }

/// Returns binary tensor.
#[inline]
pub fn lesser(x: &Tensor, a: f32) -> Tensor { apply_op(cmp_ops::Lesser { a: a }, &[x]) }

/// Returns binary tensor.
#[inline]
pub fn lesser_equal(x: &Tensor, a: f32) -> Tensor { apply_op(cmp_ops::LesserEqual { a: a }, &[x]) }
#[inline]
/// Reverses axes of the input tensor.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref a = ag::constant(ag::ndarray_ext::zeros(&[2, 3, 4, 5]));
/// let ref b = ag::reverse_axes(a);
/// assert_eq!(b.eval().shape(), &[5, 4, 3, 2]);
/// ```
pub fn reverse_axes(x: &Tensor) -> Tensor
{
    let op = reverse_axes::ReverseAxes;
    apply_op(op, &[x])
}
/// Elementwise logistic sigmoid function.
#[inline]
pub fn sigmoid(x: &Tensor) -> Tensor { apply_op(sigmoid::Sigmoid, &[x]) }

/// Elementwise exponential linear unit function.
///
/// See https://arxiv.org/abs/1511.07289
#[inline]
pub fn elu(x: &Tensor, alpha: f32) -> Tensor { apply_op(elu::ELU { alpha: alpha }, &[x]) }

/// Elementwise rectified linear unit function.
#[inline]
pub fn relu(x: &Tensor) -> Tensor { apply_op(relu::ReLU, &[x]) }
/// Computes `log(sum(exp(x)))` along specified axis.
#[inline]
pub fn logsumexp(x: &Tensor, axis: isize) -> Tensor
{
    apply_op(logsumexp::LogSumExp { axis: axis }, &[x])
}

/// Log softmax function.
///
/// `axis` can be negative.
///
/// Computes `softmax(x)` along specified axis and
/// takes logarithm of it.
#[inline]
pub fn log_softmax(x: &Tensor, axis: isize) -> Tensor
{
    // TODO: Composing from "node level" LogSumExp.
    apply_op(log_softmax::LogSoftmax { axis: axis }, &[x])
}

/// Takes softmax along specified axis
///
/// `axis` can be negative.
#[inline]
pub fn softmax(x: &Tensor, axis: isize) -> Tensor
{
    apply_op(softmax::Softmax { axis: axis }, &[x])
}
#[inline]
/// Computes `binary_cross_entropy(sigmoid(y), t)`.
///
/// This function is better than that combination in that it can prevent
/// underflow of `log(sigmoid)`.
///
/// # Arguments
/// * `y` - Tensor with arbitrary shape
/// * `t` - Tensor with arbitrary shape
///
/// # Panics
/// When y.shape != t.shape.
///
/// # Returns
/// Loss tensor with same shape as inputs's shapes
pub fn sigmoid_cross_entropy(y: &Tensor, t: &Tensor) -> Tensor
{
    apply_op(sigmoid_cross_entropy::SigmoidCrossEntropy, &[y, t])
}

#[inline]
/// Computes `categorical_cross_entropy(softmax(y), t)`.
///
/// This function is better than that combination in that it can prevent
/// underflow of `log(softmax)`.
///
/// # Arguments
/// * `y` - Tensor with shape (batch_size, num_classes)
/// * `t` - Tensor with shape (batch_size, num_classes)
///
/// # Returns
/// Loss tensor with shape (batch_size, 1)
pub fn softmax_cross_entropy(y: &Tensor, t: &Tensor) -> Tensor
{
    apply_op(softmax_cross_entropy::SoftmaxCrossEntropy, &[y, t])
}

#[inline]
/// A variant of `softmax_cross_entropy`.
///
/// The behavior of this function is same as `softmax_cross_entropy`
/// except that `t` is **not** batch of one-hot distributions but batch of ground truth label ids.
///
/// # Arguments
/// * `y` - Tensor with shape (batch_size, num_classes)
/// * `t` - Tensor with shape (batch_size, 1)
///
/// # Returns
/// Loss tensor with shape (batch_size, 1)
pub fn sparse_softmax_cross_entropy(y: &Tensor, t: &Tensor) -> Tensor
{
    apply_op(sparse_softmax_cross_entropy::SparseSoftmaxCrossEntropy, &[y, t])
}
#[inline]
/// Matrix multiplication.
///
/// Both `a` and `b` must be 2-ranked tensors.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[4, 2]);
/// let ref b = ag::zeros(&[2, 3]);
/// let ref c = ag::matmul(a, b);
/// assert_eq!(c.eval().shape(), &[4, 3]);
/// ```
pub fn matmul(a: &Tensor, b: &Tensor) -> Tensor
{
    // Plain matmul: no transposition of either operand.
    apply_op(matmul::MatMul { transpose_a: false, transpose_b: false }, &[a, b])
}
#[inline]
/// Matrix multiplication.
///
/// Similar specification as `matmul` but, if `transpose_a` is true, `a` is transposed
/// before actual matrix multiplication. It is the same for `transpose_b`.
///
/// The performance is better than explicitly computing like `ag::matmul(ag::transpose)`.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[2, 4]);
/// let ref b = ag::zeros(&[2, 3]);
/// let ref c = ag::matmul_t(a, b, true, false);
/// assert_eq!(c.eval().shape(), &[4, 3]);
/// ```
pub fn matmul_t(a: &Tensor, b: &Tensor, transpose_a: bool, transpose_b: bool) -> Tensor
{
    apply_op(matmul::MatMul { transpose_a: transpose_a, transpose_b: transpose_b }, &[a, b])
}
#[inline]
/// Computes tensor dot product (tensor contraction) along specified axes.
///
/// Negative axes are acceptable.
///
/// # Panics
/// When `axes[0].len()` != `axes[1].len()`
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[3, 4, 5]);
/// let ref b = ag::zeros(&[4, 3, 2]);
/// let ref c = ag::tensordot(a, b, &[3, 4, 5], &[4, 3, 2], [&[1, 0], &[0, 1]]);
/// assert_eq!(c.eval().shape(), &[5, 2]);
///
/// // another example (simple matmul broadcast)
/// let ref a = ag::zeros(&[2, 3, 4]);
/// let ref b = ag::zeros(&[4, 2]);
/// let ref c = ag::tensordot(a, b, &[2, 3, 4], &[4, 2], [&[2], &[0]]);
/// assert_eq!(c.eval().shape(), &[2, 3, 2]);
/// ```
///
/// For detailed description,
/// see https://docs.scipy.org/doc/numpy/reference/generated/numpy.tensordot.html.
pub fn tensordot(
    a: &Tensor,
    b: &Tensor,
    a_shape: &[usize],
    b_shape: &[usize],
    axes: [&[isize]; 2],
) -> Tensor
{
    assert_eq!(axes[0].len(), axes[1].len());
    // Transposes `x` so the contracted axes are adjacent, then reshapes it
    // to a 2-D matrix: (free, contracted) normally, or (contracted, free)
    // when `flip` is set (used for the right-hand operand). Returns the
    // reshaped tensor plus the sizes of the free dims for the final reshape.
    fn preprocess(x: &Tensor, x_shape: &[usize], axes: &[isize], flip: bool)
        -> (Tensor, Vec<isize>)
    {
        // Normalize negative axes to absolute indices.
        let axes = axes.iter()
            .map(|&i| if i >= 0 {
                i as usize
            } else {
                (i + x_shape.len() as isize) as usize
            })
            .collect::<Vec<_>>();
        // "Free" axes are those not being contracted.
        let mut free: Vec<usize> = vec![];
        for i in 0..x_shape.len() {
            if !axes.contains(&i) {
                free.push(i);
            }
        }
        let free_dims = free.clone()
            .into_iter()
            .map(|i| x_shape[i] as isize)
            .collect::<Vec<_>>();
        let prod_free: isize = free_dims.clone().into_iter().product();
        let prod_axes: usize = axes.iter().cloned().map(|a| x_shape[a]).product();
        // Contracted axes go first for the right operand, last for the left,
        // so the subsequent matmul contracts over the shared inner dim.
        let perm = if flip {
            axes.into_iter().chain(free).collect::<Vec<_>>()
        } else {
            free.into_iter().chain(axes).collect::<Vec<_>>()
        };
        let new_shape = if flip {
            [prod_axes as isize, prod_free]
        } else {
            [prod_free, prod_axes as isize]
        };
        let reshaped = reshape(&transpose(x, perm.as_slice()), &new_shape);
        (reshaped, free_dims)
    }
    let (a_reshaped, a_free_dims) = preprocess(a, a_shape, axes[0], false);
    let (b_reshaped, b_free_dims) = preprocess(b, b_shape, axes[1], true);
    let ref dot = matmul(&a_reshaped, &b_reshaped);
    // Restore the free dims of `a` followed by the free dims of `b`.
    let final_shape = a_free_dims
        .into_iter()
        .chain(b_free_dims.into_iter())
        .collect::<Vec<isize>>();
    reshape(dot, final_shape.as_slice())
}
#[inline]
/// Batched matrix multiplication.
///
/// Performs matrix multiplication between corresponding dimensions of `a` and `b`.
/// So the rank of `a` and `b` must be equals.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[2, 3, 4, 2]);
/// let ref b = ag::zeros(&[2, 3, 2, 3]);
/// let ref c = ag::batch_matmul(a, b);
/// assert_eq!(c.eval().shape(), &[2, 3, 4, 3]);
/// ```
///
/// For detailed description, see https://www.tensorflow.org/api_docs/python/tf/matmul
pub fn batch_matmul(a: &Tensor, b: &Tensor) -> Tensor
{
    apply_op(batch_matmul::BatchMatMul { transpose_a: false, transpose_b: false }, &[a, b])
}
#[inline]
/// Permutes dimensions.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[1, 2, 3, 4, 5]);
/// let ref b = ag::transpose(a, &[4, 2, 3, 0, 1]);
/// assert_eq!(b.eval().shape(), &[5, 3, 4, 1, 2]);
/// ```
pub fn transpose(x: &Tensor, perm: &[usize]) -> Tensor
{
    // Pair each source axis with its destination position: (perm[i], i).
    let src_dst: Vec<_> = (0..perm.len()).map(|dst| (perm[dst], dst)).collect();
    apply_op(transpose::Transpose { src_dst_sorted: src_dst }, &[x])
}
#[inline]
/// Slices input tensor with indices.
///
/// # Arguments
/// * `x` - Tensor with arbitrary shape.
/// * `starts` - Start indices for each dimensions
/// * `ends` - End indices for each dimensions. `-1` representing the last index is acceptable.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[4, 4]);
/// let ref b = ag::slice(a, &[0, 0], &[-1, 2]); // numpy equivalent is a[:, 0:2]
/// assert_eq!(b.eval().shape(), &[4, 2]);
/// ```
pub fn slice(x: &Tensor, starts: &[isize], ends: &[isize]) -> Tensor
{
    assert_eq!(starts.len(), ends.len());
    let mut indices = Vec::with_capacity(starts.len());
    for (&s, &e) in starts.iter().zip(ends.iter()) {
        // An end of -1 means "to the last index" (open-ended slice).
        let end = if e == -1 { None } else { Some(e) };
        indices.push(ndarray::Si(s, end, 1));
    }
    apply_op(slice::Slice { indices: indices.into_boxed_slice() }, &[x])
}
#[inline]
/// Concatenates (stacks) input tensors along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[3, 2]);
/// let ref b = ag::zeros(&[3, 2]);
/// let ref c = ag::zeros(&[3, 2]);
/// let ref d = ag::concat(&[a, b, c], 0);
/// assert_eq!(d.eval().shape(), &[9, 2]);
/// ```
pub fn concat(tensors: &[&Tensor], axis: isize) -> Tensor
{
    let op = concat::Concat { axis: axis };
    apply_op(op, tensors)
}
#[inline]
/// Gathers slices.
///
/// Along `axis`, slices subviews from `param` with `indices`, and then gathers those.
/// For example, this can be used for embedding vector lookup.
/// `axis` can be negative.
///
/// See also https://www.tensorflow.org/api_docs/python/tf/gather.
///
/// # Arguments
/// * `param` - Target of slicing.
/// * `indices` - Index tensor with which slices `param`. This can be arbitrary shape.
/// * `axis` - Slices sub tensors along this axis.
///
/// # Returns
/// Tensor with shape `param.shape[..axis] + indices.shape + param.shape[axis+1..]`
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref param = ag::constant(ag::ndarray_ext::zeros(&[5, 4, 8, 2]));
/// let ref indices = ag::constant(ndarray::arr2(&[[5., 4., 3.], [2., 1., 0.]]));
/// let y = ag::gather(param, indices, 2);
/// assert_eq!(y.eval().shape(), &[5, 4, 2, 3, 2])
/// ```
pub fn gather(param: &Tensor, indices: &Tensor, axis: isize) -> Tensor
{
    let op = gather::Gather { axis: axis };
    // Note the input order: the op receives `indices` first, then `param`.
    apply_op(op, &[indices, param])
}
#[inline]
/// Applies recurrent net unit to the input.
///
/// This func processes a time step in the batch of sequences in parallel.
///
/// # Arguments
/// * `x` - Input tensor for this step
/// * `rnn` - RNN struct
/// * `with_new_state` - If true, calls `rnn.reset_state()` before running a step
///
/// # Returns
/// Output of `rnn.step()`
///
/// For the usage, see `lstm_lm()` in `tests/test_tensor_ops_grad.rs` and `nn_impl::rnn`
pub fn rnn_step<T>(x: &Tensor, rnn: &mut T, with_new_state: bool) -> Tensor
where
    T: ::nn_impl::rnn::RNN,
{
    // Optionally drop the carried hidden state so this step starts a new sequence.
    if with_new_state {
        rnn.reset_state();
    }
    rnn.step(x)
}
/// Returns a constant tensor
pub fn zeros(shape: &[usize]) -> Tensor
{
    // A source node: no inputs, rank 0, with a zero-filled array as its value.
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Constant".to_string() }),
        inputs: Vec::new(),
        param: Some(::ndarray_ext::zeros(shape)),
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
/// Returns a constant tensor
pub fn ones(shape: &[usize]) -> Tensor
{
    // A source node: no inputs, rank 0, with a one-filled array as its value.
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Constant".to_string() }),
        inputs: Vec::new(),
        param: Some(::ndarray_ext::ones(shape)),
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
/// Creates a constant tensor.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let arr = ndarray::arr1(&[0., 0., 0.]).into_dyn();
/// assert_eq!(arr.clone(), ag::constant(arr).eval())
/// ```
#[inline]
pub fn constant<T: ndarray::Dimension>(array: ndarray::Array<f32, T>) -> Tensor
{
    // Wraps the caller's array in a source node; any dimensionality is erased
    // via `into_dyn`.
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Constant".to_string() }),
        inputs: Vec::new(),
        param: Some(array.into_dyn()),
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
/// Creates a constant tensor holding a single scalar value.
#[inline]
pub fn scalar(a: f32) -> Tensor
{
    // Stored as a 1-element array rather than a true 0-d array.
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Scalar".to_string() }),
        inputs: Vec::new(),
        param: Some(NdArray::from_elem(ndarray::IxDyn(&[1]), a)),
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
/// Creates a constant tensor with evenly spaced values.
///
/// Values start at `start`, end before `end`, stepping by `step`
/// (delegates to `ndarray::Array1::range`).
#[inline]
pub fn range(start: usize, end: usize, step: usize) -> Tensor
{
    Tensor(Rc::new(RefCell::new(RawTensor {
        // Was mislabeled "Scalar" (copy-paste from `scalar`); this node holds
        // a range constant.
        op: Box::new(dummy_op::DummyOp { name: "Range".to_string() }),
        inputs: vec![],
        param: Some(
            ndarray::Array1::range(start as f32, end as f32, step as f32).into_dyn(),
        ),
        rank: 0,
    })))
}
/// Outputs values sampled from the normal distribution.
///
/// `shape` is the shape of the output tensor; `mean` and `stddev`
/// parameterize the distribution.
pub fn random_normal(shape: &[usize], mean: f64, stddev: f64) -> Tensor
{
    let op = random_ops::RandomNormal {
        shape: shape.to_vec(),
        mean: mean,
        stddev: stddev,
    };
    apply_op(op, &[])
}
/// Outputs values sampled from the uniform distribution.
///
/// Samples lie between `min` and `max` — NOTE(review): bound inclusivity
/// should be confirmed against `random_ops::RandomUniform`.
pub fn random_uniform(shape: &[usize], min: f64, max: f64) -> Tensor
{
    let op = random_ops::RandomUniform {
        shape: shape.to_vec(),
        min: min,
        max: max,
    };
    apply_op(op, &[])
}
/// Outputs values sampled from the standard normal distribution.
///
/// I.e. a normal distribution with mean 0 and standard deviation 1.
pub fn standard_normal(shape: &[usize]) -> Tensor
{
    let op = random_ops::StandardNormal { shape: shape.to_vec() };
    apply_op(op, &[])
}
/// Outputs values sampled from the standard uniform distribution.
pub fn standard_uniform(shape: &[usize]) -> Tensor
{
    let op = random_ops::StandardUniform { shape: shape.to_vec() };
    apply_op(op, &[])
}
/// Outputs values sampled from the bernoulli distribution.
///
/// `p` is presumably the success probability — TODO(review): confirm against
/// `random_ops::Bernoulli`.
pub fn bernoulli(shape: &[usize], p: f64) -> Tensor
{
    let op = random_ops::Bernoulli {
        shape: shape.to_vec(),
        p: p,
    };
    apply_op(op, &[])
}
/// Outputs values sampled from the exponential distribution.
///
/// `lambda` is the rate parameter.
pub fn random_exp(shape: &[usize], lambda: f64) -> Tensor
{
    let op = random_ops::Exponential {
        shape: shape.to_vec(),
        lambda: lambda,
    };
    apply_op(op, &[])
}
/// Outputs values sampled from the gamma distribution.
///
/// `shape_param` and `scale` parameterize the distribution (named
/// `shape_param` to avoid clashing with the tensor `shape` argument).
pub fn gamma(shape: &[usize], shape_param: f64, scale: f64) -> Tensor
{
    let op = random_ops::Gamma {
        shape: shape.to_vec(),
        shape_param: shape_param,
        scale: scale,
    };
    apply_op(op, &[])
}
/// Outputs values sampled from the log-normal distribution.
///
/// `mean` and `stddev` parameterize the underlying normal distribution.
pub fn log_normal(shape: &[usize], mean: f64, stddev: f64) -> Tensor
{
    let op = random_ops::LogNormal {
        shape: shape.to_vec(),
        mean: mean,
        stddev: stddev,
    };
    apply_op(op, &[])
}
// Implement jacobians
extern crate ndarray;
use ndarray_ext::NdArray;
use std::cell::RefCell;
use std::rc::Rc;
use tensor::{RawTensor, Tensor};
mod dummy_op;
mod index;
mod random_ops;
mod clip;
mod add_n;
mod logsumexp;
mod log_softmax;
mod identity;
mod cmp_ops;
mod math_ops;
mod concat;
mod tile;
mod binary_ops;
mod softmax;
mod sigmoid;
mod elu;
mod relu;
mod slice;
mod sigmoid_cross_entropy;
mod softmax_cross_entropy;
mod sparse_softmax_cross_entropy;
mod gather;
mod matmul;
mod batch_matmul;
mod reverse_axes;
mod transpose;
mod reshape;
mod reduction_ops;
mod squeeze;
mod expand_dims;
#[doc(hidden)]
/// Represents a operation node in a computation graph.
/// `Tensor` wraps trait-object of this.
pub trait Op {
    /// Name of this op
    fn name(&self) -> &str;
    /// Actually runs this op.
    /// num of inputs : N,
    /// num of outputs: 1
    ///
    /// `train` lets an op behave differently at training time vs. inference
    /// time — NOTE(review): which ops consult it is not visible here.
    fn compute(&mut self, xs: &[&NdArray], train: bool) -> NdArray;
    /// Returns symbolic gradient for each input node by use of output gradient etc.
    ///
    /// # Arguments
    /// * `gy` - Symbolic representation of the gradient of `compute`'s return value
    /// * `inputs` - Symbolic representation of `compute::xs`
    /// * `output` - Symbolic representation of `compute`'s return value
    ///
    /// NOTE:
    /// The number of return values must be same as `inputs.len()`.
    /// A `None` entry means the corresponding input has no gradient.
    fn grad(&self, gy: &Tensor, inputs: &[&Tensor], output: &Tensor) -> Vec<Option<Tensor>>;
}
impl Tensor {
    /// Gets a symbolic float32 element from a tensor.
    ///
    /// `idx` can be negative.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate ndarray;
    /// extern crate autograd as ag;
    ///
    /// let ref a = ag::variable(ndarray::arr2(&[[2., 3.], [4., 5.]]));
    /// let ref b = a.get(2);
    /// assert_eq!(b.eval()[0], 4.);
    /// ```
    pub fn get(&self, idx: isize) -> Tensor
    {
        let op = index::IndexOp { index: idx };
        apply_op(op, &[self])
    }
}
#[doc(hidden)]
#[inline]
/// Helper function to generate a symbolic tensor
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[4, 2]);
/// let ref v = ag::zeros(&[2, 3]);
/// let ref b = ag::zeros(&[4, 3]);
/// let ref z = ag::matmul(a, v) + b;
/// let mut vars = [a, v, b, z];
/// // `sort_by_key` don't reverse the order of `a` and `v`
/// vars.sort_by_key(|a| a.borrow().rank);
/// assert!(vars == [a, v, b, z])
/// ```
fn apply_op<T: Op + 'static>(op: T, inputs: &[&Tensor]) -> Tensor
{
    // A node's rank is one more than its deepest input (source nodes get 0),
    // so sorting nodes by rank yields a valid topological order.
    let rank = inputs
        .iter()
        .map(|t| t.borrow().rank)
        .max()
        .map_or(0, |r| r + 1);
    let owned_inputs: Vec<Tensor> = inputs.iter().map(|t| (*t).clone()).collect();
    Tensor(Rc::new(RefCell::new(RawTensor {
        op: Box::new(op),
        inputs: owned_inputs,
        param: None,
        rank: rank,
    })))
}
// ---------------------------------------
// -- Ops to manipulate `Tensor` object --
// ---------------------------------------
/// Creates a placeholder tensor.
///
/// The placeholder tensor is a dynamic input node to the computation graph,
/// which can be filled on evaluation time.
/// To fill the placeholders, use `autograd::Feed`.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref x: ag::Tensor = ag::placeholder();
/// let ref y: ag::Tensor = 3 * x;
///
/// // Fills placeholder `x`.
/// let feed = ag::Feed::new().add(x, ndarray::arr1(&[2.]));
/// assert_eq!(6., y.eval_with_input(feed)[0]);
/// ```
#[inline]
pub fn placeholder() -> Tensor
{
    // A source node with no value; the value is supplied via `Feed` at
    // evaluation time.
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Placeholder".to_string() }),
        inputs: Vec::new(),
        param: None,
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
/// Creates a shared variable tensor from rust-ndarray's array object.
///
/// The shared variable behaves like any other tensors, except that
/// it can be optimized with gradient descent methods
/// implemented in `autograd::sgd`.
/// For the usages, see https://github.com/perrier1034/rust-autograd/tree/master/examples
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref x: ag::Tensor = ag::variable(ndarray::arr1(&[2.]));
/// let ref y: ag::Tensor = 3 * x;
///
/// assert_eq!(6., y.eval()[0]);
/// ```
#[inline]
pub fn variable<T: ndarray::Dimension>(array: ndarray::Array<f32, T>) -> Tensor
{
    // A source node whose value is the caller's array; marked "Variable" so
    // optimizers can distinguish it from plain constants.
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Variable".to_string() }),
        inputs: Vec::new(),
        param: Some(array.into_dyn()),
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
#[inline]
/// Returns gradient tensors wrt variables.
///
/// # Arguments
/// * `objective` - Target of differentiation.
/// * `variables` - Variable tensors with which differentiate `objective`.
/// * `initial_grad` - This is required **if objective is not a scalar**. In most cases,
/// this is initialized with 1s.
///
/// # Returns
/// Symbolic gradient tensors corresponding to `variables` in the same order as `variables`
///
/// # Example1
/// Partial derivatives of `z = 2x^2 + 3y + 1`.
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref x = ag::placeholder();
/// let ref y = ag::variable(ndarray::arr1(&[0.]));
/// let ref z = 2*x*x + 3*y + 1;
///
/// // dz/dy
/// let ref g1 = ag::gradients(z, &[y], None)[0];
/// // dz/dx
/// let ref g2 = ag::gradients(z, &[x], None)[0];
///
/// // ddz/dx (differentiates `z` again)
/// let ref gg = ag::gradients(g2, &[x], None)[0];
///
/// // evaluation of symbolic gradients
/// assert_eq!(3., g1.eval()[0]);
/// assert_eq!(4., gg.eval()[0]);
///
/// // dz/dx requires to fill the placeholder `x`
/// let feed = ag::Feed::new().add(x, ndarray::arr1(&[2.]));
/// assert_eq!(8., g2.eval_with_input(feed)[0]);
///
/// ```
///
/// # Example2
/// The case where objective is not a scalar
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::variable(ag::ndarray_ext::zeros(&[4, 2]));
/// let ref b = ag::zeros(&[2, 3]);
/// let ref c = ag::matmul(a, b);
/// let ref g = ag::gradients(c, &[a], Some(ag::ndarray_ext::ones(&[4, 2])))[0];
/// ```
pub fn gradients(
    objective: &Tensor,
    variables: &[&Tensor],
    initial_grad: Option<&Tensor>,
) -> Vec<Tensor>
{
    // All graph-walking work is delegated to `topology::symbolic_gradients`.
    ::topology::symbolic_gradients(objective, variables, initial_grad)
}
#[inline]
/// Computes jacobians for variables.
///
/// # Arguments
/// * `objective` - Target of differentiation.
/// * `variables` - Variable tensors with which differentiate `objective`.
/// * `objective_len` - (flattened) Length of `objective`
///
/// # Returns
/// Jacobians for each variable. Each one is matrix of shape `(objective_len, variable size)`.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::variable(ag::ndarray_ext::standard_normal(&[4, 2]));
/// let ref b = ag::variable(ag::ndarray_ext::standard_normal(&[2, 3]));
/// let ref c = ag::matmul(a, b);
/// let ref j = ag::jacobians(c, &[a, b], 4*3);
///
/// assert_eq!(j[0].eval().shape(), &[4*3, 4*2]);
/// assert_eq!(j[1].eval().shape(), &[4*3, 2*3]);
/// ```
pub fn jacobians(objective: &Tensor, variables: &[&Tensor], objective_len: usize) -> Vec<Tensor>
{
    // For each scalar element j of `objective`, compute gradients w.r.t. all
    // variables; `vec_vec[j][i]` is d(objective[j]) / d(variables[i]).
    let vec_vec = (0..objective_len as isize)
        .map(|i| {
            ::topology::symbolic_gradients(&objective.get(i), variables, None)
        })
        .collect::<Vec<Vec<_>>>();
    // Build one jacobian matrix per variable by stacking the flattened
    // per-element gradients as rows.
    (0..variables.len())
        .map(|i| {
            let rows = (0..objective_len)
                .map(|j| expand_dims(&flatten(&vec_vec[j][i]), &[0]))
                .collect::<Vec<_>>();
            // (objective_len, variable size)
            // (the original chained a no-op `.map(|a| a)` here)
            concat(rows.iter().collect::<Vec<_>>().as_slice(), 0)
        })
        .collect::<Vec<_>>()
}
// ---------------------------------------------------------------------------
// Elementwise trigonometric / hyperbolic ops. Each is a thin wrapper that
// applies the corresponding `math_ops` op to every element of the input.
// ---------------------------------------------------------------------------
#[inline]
/// Elementwise sine
pub fn sin(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Sin, &[x])
}
#[inline]
/// Elementwise cosine
pub fn cos(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Cos, &[x])
}
#[inline]
/// Elementwise tangent
pub fn tan(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Tan, &[x])
}
#[inline]
/// Elementwise arcsin
pub fn asin(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Asin, &[x])
}
#[inline]
/// Elementwise arccos
pub fn acos(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Acos, &[x])
}
#[inline]
/// Elementwise arctan
pub fn atan(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Atan, &[x])
}
#[inline]
/// Elementwise hyperbolic sine
pub fn sinh(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Sinh, &[x])
}
#[inline]
/// Elementwise hyperbolic cosine
pub fn cosh(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Cosh, &[x])
}
#[inline]
/// Elementwise hyperbolic tangent
pub fn tanh(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Tanh, &[x])
}
#[inline]
/// Elementwise hyperbolic arcsin
pub fn asinh(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Asinh, &[x])
}
#[inline]
/// Elementwise hyperbolic arccos
pub fn acosh(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Acosh, &[x])
}
#[inline]
/// Elementwise hyperbolic arctan
pub fn atanh(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Atanh, &[x])
}
#[inline]
/// Identity function
///
/// Returns a node that passes its input through unchanged.
pub fn identity(a: &Tensor) -> Tensor
{
    apply_op(identity::Identity, &[a])
}
#[inline]
/// Adds two tensors
///
/// Elementwise addition (`a + b`).
pub fn add(a: &Tensor, b: &Tensor) -> Tensor
{
    apply_op(binary_ops::ElementwiseAdd, &[a, b])
}
#[inline]
/// Subtracts `b` from `a` elementwise (`a - b`).
///
/// NOTE(review): the previous doc said "subtracts `a` from `b`", which reads
/// as `b - a`; the operand order handed to `ElementwiseSub` is `[a, b]`.
pub fn sub(a: &Tensor, b: &Tensor) -> Tensor
{
    apply_op(binary_ops::ElementwiseSub, &[a, b])
}
#[inline]
/// Multiplies two tensors elementwise (`a * b`)
pub fn mul(a: &Tensor, b: &Tensor) -> Tensor
{
    apply_op(binary_ops::ElementwiseMul, &[a, b])
}
#[inline]
/// Divides `a` by `b` elementwise (`a / b`)
pub fn div(a: &Tensor, b: &Tensor) -> Tensor
{
    apply_op(binary_ops::ElementwiseDiv, &[a, b])
}
#[inline]
/// Elementwise sqrt
pub fn sqrt(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Sqrt, &[x])
}
#[inline]
/// Elementwise pow
///
/// Raises each element of `x` to the constant power `a`.
pub fn pow(x: &Tensor, a: f32) -> Tensor
{
    apply_op(math_ops::Pow { a: a }, &[x])
}
/// Elementwise log
///
/// `a` is presumably the base of the logarithm — TODO(review): confirm
/// against `math_ops::Log`.
#[inline]
pub fn log(x: &Tensor, a: f32) -> Tensor
{
    apply_op(math_ops::Log { a: a }, &[x])
}
/// Elementwise exponential
#[inline]
pub fn exp(x: &Tensor) -> Tensor
{
    apply_op(math_ops::Exp, &[x])
}
#[inline]
/// Adds all input tensors inplace
///
/// All the input tensors must have same shapes.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref a = ag::ones(&[2, 2]);
/// let ref b = ag::ones(&[2, 2]);
/// let ref c = ag::ones(&[2, 2]);
/// let ref d = ag::add_n(&[a, b, c]);
/// assert_eq!(d.eval().shape(), &[2, 2]);
/// assert_eq!(d.eval(), ndarray::arr2(&[[3., 3.], [3., 3.]]).into_dyn());
/// ```
pub fn add_n(xs: &[&Tensor]) -> Tensor
{
    let op = add_n::AddN;
    apply_op(op, xs)
}
#[inline]
/// Compares two tensors and returns a binary tensor.
///
/// if `a[i] == b[i]` then `return_value[i]` will be 1 else 0
///
/// # Panics
/// When `a's shape` != `b's shape`.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref a = ag::constant(ndarray::arr1(&[1., 2., 3.]));
/// let ref b = ag::constant(ndarray::arr1(&[3., 2., 1.]));
/// let ref c = ag::equals(a, b);
/// assert_eq!(c.eval(), ndarray::arr1(&[0., 1., 0.]).into_dyn());
/// ```
pub fn equals(a: &Tensor, b: &Tensor) -> Tensor
{
    // Output is float-valued: 1. where elements match, 0. elsewhere.
    apply_op(cmp_ops::Equals, &[a, b])
}
#[inline]
/// Takes argmax along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let input_arr = ndarray::arr2(&[[1., 2.], [3., 4.], [6., 5.]]);
/// let answer = ndarray::arr1(&[1., 1., 0.]).into_dyn();
/// let input = ag::constant(input_arr);
/// let result = ag::argmax(&input, 1, false);
/// assert_eq!(result.eval(), answer);
/// ```
pub fn argmax(x: &Tensor, axis: isize, keep_dim: bool) -> Tensor
{
    apply_op(
        reduction_ops::ArgMax { axis: axis, keep_dim: keep_dim },
        &[x],
    )
}
#[inline]
/// Expands dims.
///
/// Negative axes are acceptable.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::constant(ag::ndarray_ext::standard_normal(&[3]));
/// let ref b = ag::expand_dims(a, &[0, 2]);
/// assert_eq!(b.eval().shape(), &[1, 3, 1]);
/// ```
pub fn expand_dims(x: &Tensor, axes: &[isize]) -> Tensor
{
    // Axes are sorted (ascending) before being handed to the op.
    let mut sorted_axes = axes.to_vec();
    sorted_axes.sort();
    apply_op(expand_dims::ExpandDims { axes: sorted_axes }, &[x])
}
#[inline]
/// Squeezes dims.
///
/// Negative axes are acceptable.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::constant(ag::ndarray_ext::standard_normal(&[1, 3, 1]));
/// let ref b = ag::squeeze(a, &[0, 2]);
/// assert_eq!(b.eval().shape(), &[3]);
/// ```
pub fn squeeze(x: &Tensor, axes: &[isize]) -> Tensor
{
    // Axes are sorted (ascending) before being handed to the op.
    let mut sorted_axes = axes.to_vec();
    sorted_axes.sort();
    apply_op(squeeze::Squeeze { axes: sorted_axes }, &[x])
}
#[inline]
/// Tiles input tensor along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref x = ag::constant(ndarray::arr2(&[[2., 2.], [3., 3.]]));
/// let ref y = ag::tile(x, 0, 2);
/// assert_eq!(
///     y.eval(),
///     ndarray::arr2(&[[2., 2.], [3., 3.], [2., 2.], [3., 3.]]).into_dyn()
/// );
/// ```
pub fn tile(x: &Tensor, axis: isize, num: usize) -> Tensor
{
    apply_op(tile::Tile { axis: axis, num: num }, &[x])
}
#[inline]
/// Limits all elements so as to be within `[min, max]`
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref x = ag::constant(ndarray::arr1(&[2., 4., 6.]));
/// let ref y = ag::clip(x, 3., 5.);
/// assert_eq!(y.eval(), ndarray::arr1(&[3., 4., 5.]).into_dyn());
/// ```
pub fn clip(x: &Tensor, min: f32, max: f32) -> Tensor
{
    apply_op(clip::Clip { min: min, max: max }, &[x])
}
#[inline]
/// Takes max along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let x = ag::constant(ndarray::arr2(&[[2., 4.], [3., 1.]]));
/// let y = ag::reduce_max(&x, 0, false);
/// assert_eq!(y.eval(), ndarray::arr1(&[3., 4.]).into_dyn());
/// ```
pub fn reduce_max(x: &Tensor, axis: isize, keep_dim: bool) -> Tensor
{
    apply_op(
        reduction_ops::ReduceMax { axis: axis, keep_dim: keep_dim },
        &[x],
    )
}
#[inline]
/// Takes min along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let x = ag::constant(ndarray::arr2(&[[2., 4.], [3., 1.]]));
/// let y = ag::reduce_min(&x, 0, false);
/// assert_eq!(y.eval(), ndarray::arr1(&[2., 1.]).into_dyn());
/// ```
pub fn reduce_min(x: &Tensor, axis: isize, keep_dim: bool) -> Tensor
{
    apply_op(
        reduction_ops::ReduceMin { axis: axis, keep_dim: keep_dim },
        &[x],
    )
}
#[inline]
/// Takes mean along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let x = ag::constant(ndarray::arr2(&[[2., 4.], [3., 1.]]));
/// let y = ag::reduce_mean(&x, 1, false);
/// assert_eq!(y.eval(), ndarray::arr1(&[3., 2.]).into_dyn());
/// ```
pub fn reduce_mean(x: &Tensor, axis: isize, keep_dim: bool) -> Tensor
{
    apply_op(
        reduction_ops::ReduceMean { axis: axis, keep_dim: keep_dim },
        &[x],
    )
}
#[inline]
/// Takes sum along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let x = ag::constant(ndarray::arr2(&[[2., 4.], [3., 1.]]));
/// let y = ag::reduce_sum(&x, 1, false);
/// assert_eq!(y.eval(), ndarray::arr1(&[6., 4.]).into_dyn());
/// ```
pub fn reduce_sum(x: &Tensor, axis: isize, keep_dim: bool) -> Tensor
{
    apply_op(
        reduction_ops::ReduceSum { axis: axis, keep_dim: keep_dim },
        &[x],
    )
}
#[inline]
/// Reshapes input tensor.
///
/// Only one dim in `shape` can be `-1`.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let x = ag::zeros(&[3, 2, 2]);
/// let y = ag::reshape(&x, &[3, 4]);
/// assert_eq!(y.eval(), x.eval().into_shape(ndarray::IxDyn(&[3, 4])).unwrap());
/// ```
pub fn reshape(x: &Tensor, shape: &[isize]) -> Tensor
{
    // Translate the requested shape into the op's representation: `Some(len)`
    // for a fixed dimension, `None` for the single inferred (-1) dimension.
    let mut target: Vec<Option<usize>> = Vec::with_capacity(shape.len());
    let mut seen_minus_one = false;
    for &len in shape {
        if len == -1 {
            if seen_minus_one {
                panic!("`shape` has two or more `-1` dim.");
            }
            seen_minus_one = true;
            target.push(None);
        } else if len < -1 {
            panic!("`shape` contains invalid dim size: {}", len);
        } else {
            target.push(Some(len as usize));
        }
    }
    apply_op(reshape::Reshape { target_shape: target }, &[x])
}
#[inline]
/// Returns 1-ranked tensor (vector)
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref x = ag::zeros(&[3, 2, 2]);
/// assert_eq!(ag::flatten(x).eval().shape(), &[12]);
/// ```
pub fn flatten(x: &Tensor) -> Tensor
{
    // A reshape to a single inferred dimension, i.e. `reshape(x, &[-1])`.
    apply_op(reshape::Reshape { target_shape: vec![None] }, &[x])
}
#[inline]
/// Returns binary tensor: 1 where `x[i] > a`, 0 elsewhere.
pub fn greater(x: &Tensor, a: f32) -> Tensor
{
    let op = cmp_ops::Greater { a: a };
    apply_op(op, &[x])
}
#[inline]
/// Returns binary tensor: 1 where `x[i] >= a`, 0 elsewhere.
pub fn greater_equal(x: &Tensor, a: f32) -> Tensor
{
    let op = cmp_ops::GreaterEqual { a: a };
    apply_op(op, &[x])
}
#[inline]
/// Returns binary tensor: 1 where `x[i] < a`, 0 elsewhere.
pub fn lesser(x: &Tensor, a: f32) -> Tensor
{
    let op = cmp_ops::Lesser { a: a };
    apply_op(op, &[x])
}
#[inline]
/// Returns binary tensor: 1 where `x[i] <= a`, 0 elsewhere.
pub fn lesser_equal(x: &Tensor, a: f32) -> Tensor
{
    let op = cmp_ops::LesserEqual { a: a };
    apply_op(op, &[x])
}
#[inline]
/// Reverses axes of the input tensor.
///
/// Equivalent to a transpose with the axis order fully reversed
/// (see the shape in the example below).
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref a = ag::constant(ag::ndarray_ext::zeros(&[2, 3, 4, 5]));
/// let ref b = ag::reverse_axes(a);
/// assert_eq!(b.eval().shape(), &[5, 4, 3, 2]);
/// ```
pub fn reverse_axes(x: &Tensor) -> Tensor
{
    apply_op(reverse_axes::ReverseAxes, &[x])
}
#[inline]
/// Elementwise logistic sigmoid function.
pub fn sigmoid(x: &Tensor) -> Tensor
{
    apply_op(sigmoid::Sigmoid, &[x])
}
#[inline]
/// Elementwise exponential linear unit function.
///
/// `alpha` scales the negative-side response.
///
/// See https://arxiv.org/abs/1511.07289
pub fn elu(x: &Tensor, alpha: f32) -> Tensor
{
    apply_op(elu::ELU { alpha: alpha }, &[x])
}
#[inline]
/// Elementwise rectified linear unit function.
pub fn relu(x: &Tensor) -> Tensor
{
    apply_op(relu::ReLU, &[x])
}
#[inline]
/// Computes `log(sum(exp(x)))` along specified axis.
pub fn logsumexp(x: &Tensor, axis: isize) -> Tensor
{
    apply_op(logsumexp::LogSumExp { axis: axis }, &[x])
}
#[inline]
/// Log softmax function.
///
/// `axis` can be negative.
///
/// Computes `softmax(x)` along specified axis and
/// takes logarithm of it.
pub fn log_softmax(x: &Tensor, axis: isize) -> Tensor
{
    // TODO: Composing from "node level" LogSumExp.
    apply_op(log_softmax::LogSoftmax { axis: axis }, &[x])
}
#[inline]
/// Takes softmax along specified axis
///
/// `axis` can be negative.
pub fn softmax(x: &Tensor, axis: isize) -> Tensor
{
    apply_op(softmax::Softmax { axis: axis }, &[x])
}
#[inline]
/// Computes `binary_cross_entropy(sigmoid(y), t)`.
///
/// This function is better than that combination in that it can prevent
/// underflow of `log(sigmoid)`.
///
/// # Arguments
/// * `y` - Tensor with arbitrary shape
/// * `t` - Tensor with arbitrary shape
///
/// # Panics
/// When y.shape != t.shape.
///
/// # Returns
/// Loss tensor with same shape as inputs' shapes
pub fn sigmoid_cross_entropy(y: &Tensor, t: &Tensor) -> Tensor
{
    apply_op(sigmoid_cross_entropy::SigmoidCrossEntropy, &[y, t])
}
#[inline]
/// Computes `categorical_cross_entropy(softmax(y), t)`.
///
/// This function is better than that combination in that it can prevent
/// underflow of `log(softmax)`.
///
/// # Arguments
/// * `y` - Tensor with shape (batch_size, num_classes)
/// * `t` - Tensor with shape (batch_size, num_classes)
///
/// # Returns
/// Loss tensor with shape (batch_size, 1)
pub fn softmax_cross_entropy(y: &Tensor, t: &Tensor) -> Tensor
{
    apply_op(softmax_cross_entropy::SoftmaxCrossEntropy, &[y, t])
}
#[inline]
/// A variant of `softmax_cross_entropy`.
///
/// The behavior of this function is same as `softmax_cross_entropy`
/// except that `t` is **not** batch of one-hot distributions but batch of ground truth label ids.
///
/// # Arguments
/// * `y` - Tensor with shape (batch_size, num_classes)
/// * `t` - Tensor with shape (batch_size, 1)
///
/// # Returns
/// Loss tensor with shape (batch_size, 1)
pub fn sparse_softmax_cross_entropy(y: &Tensor, t: &Tensor) -> Tensor
{
    apply_op(sparse_softmax_cross_entropy::SparseSoftmaxCrossEntropy, &[y, t])
}
#[inline]
/// Matrix multiplication.
///
/// Both `a` and `b` must be 2-ranked tensors.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[4, 2]);
/// let ref b = ag::zeros(&[2, 3]);
/// let ref c = ag::matmul(a, b);
/// assert_eq!(c.eval().shape(), &[4, 3]);
/// ```
pub fn matmul(a: &Tensor, b: &Tensor) -> Tensor
{
    apply_op(
        matmul::MatMul { transpose_a: false, transpose_b: false },
        &[a, b],
    )
}
#[inline]
/// Matrix multiplication.
///
/// Similar specification as `matmul` but, if `transpose_a` is true, `a` is transposed
/// before actual matrix multiplication. It is the same for `transpose_b`.
///
/// The performance is better than explicitly computing like `ag::matmul(ag::transpose)`.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[2, 4]);
/// let ref b = ag::zeros(&[2, 3]);
/// let ref c = ag::matmul_t(a, b, true, false);
/// assert_eq!(c.eval().shape(), &[4, 3]);
/// ```
pub fn matmul_t(a: &Tensor, b: &Tensor, transpose_a: bool, transpose_b: bool) -> Tensor
{
    apply_op(
        matmul::MatMul { transpose_a: transpose_a, transpose_b: transpose_b },
        &[a, b],
    )
}
#[inline]
/// Computes tensor dot product (tensor contraction) along specified axes.
///
/// Negative axes are acceptable.
///
/// # Panics
/// When `axes[0].len()` != `axes[1].len()`
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[3, 4, 5]);
/// let ref b = ag::zeros(&[4, 3, 2]);
/// let ref c = ag::tensordot(a, b, &[3, 4, 5], &[4, 3, 2], [&[1, 0], &[0, 1]]);
/// assert_eq!(c.eval().shape(), &[5, 2]);
///
/// // another example (simple matmul broadcast)
/// let ref a = ag::zeros(&[2, 3, 4]);
/// let ref b = ag::zeros(&[4, 2]);
/// let ref c = ag::tensordot(a, b, &[2, 3, 4], &[4, 2], [&[2], &[0]]);
/// assert_eq!(c.eval().shape(), &[2, 3, 2]);
/// ```
///
/// For detailed description,
/// see https://docs.scipy.org/doc/numpy/reference/generated/numpy.tensordot.html.
pub fn tensordot(
    a: &Tensor,
    b: &Tensor,
    a_shape: &[usize],
    b_shape: &[usize],
    axes: [&[isize]; 2],
) -> Tensor
{
    assert_eq!(axes[0].len(), axes[1].len());
    // Moves the contracted axes of `x` to one side, the free axes to the
    // other, and collapses each group into one dimension so the contraction
    // becomes a plain 2-d `matmul`. Returns the reshaped tensor plus the
    // original sizes of the free axes (needed for the final reshape).
    fn preprocess(x: &Tensor, x_shape: &[usize], axes: &[isize], flip: bool)
        -> (Tensor, Vec<isize>)
    {
        // Resolve negative axis indices against the tensor's rank.
        let axes = axes.iter()
            .map(|&i| if i >= 0 {
                i as usize
            } else {
                (i + x_shape.len() as isize) as usize
            })
            .collect::<Vec<_>>();
        // Axes that are NOT contracted, in their original order.
        let free: Vec<usize> = (0..x_shape.len())
            .filter(|i| !axes.contains(i))
            .collect();
        let free_dims: Vec<isize> = free.iter().map(|&i| x_shape[i] as isize).collect();
        let prod_free: isize = free_dims.iter().cloned().product();
        let prod_axes: usize = axes.iter().map(|&a| x_shape[a]).product();
        // For the right-hand operand (`flip`), contracted axes come first so
        // the matmul contracts over its rows; for the left-hand, they go last.
        let (perm, new_shape) = if flip {
            (
                axes.into_iter().chain(free).collect::<Vec<_>>(),
                [prod_axes as isize, prod_free],
            )
        } else {
            (
                free.into_iter().chain(axes).collect::<Vec<_>>(),
                [prod_free, prod_axes as isize],
            )
        };
        (reshape(&transpose(x, perm.as_slice()), &new_shape), free_dims)
    }
    let (a_reshaped, a_free_dims) = preprocess(a, a_shape, axes[0], false);
    let (b_reshaped, b_free_dims) = preprocess(b, b_shape, axes[1], true);
    let ref dot = matmul(&a_reshaped, &b_reshaped);
    // Restore the free dimensions of both operands around the matmul result.
    let final_shape = a_free_dims
        .into_iter()
        .chain(b_free_dims.into_iter())
        .collect::<Vec<isize>>();
    reshape(dot, final_shape.as_slice())
}
#[inline]
/// Batched matrix multiplication.
///
/// Performs matrix multiplication between corresponding dimensions of `a` and `b`.
/// So the rank of `a` and `b` must be equal.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[2, 3, 4, 2]);
/// let ref b = ag::zeros(&[2, 3, 2, 3]);
/// let ref c = ag::batch_matmul(a, b);
/// assert_eq!(c.eval().shape(), &[2, 3, 4, 3]);
/// ```
///
/// For detailed description, see https://www.tensorflow.org/api_docs/python/tf/matmul
pub fn batch_matmul(a: &Tensor, b: &Tensor) -> Tensor
{
    // Neither operand is transposed before multiplication.
    let op = batch_matmul::BatchMatMul {
        transpose_a: false,
        transpose_b: false,
    };
    apply_op(op, &[a, b])
}
#[inline]
/// Permutes dimensions.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[1, 2, 3, 4, 5]);
/// let ref b = ag::transpose(a, &[4, 2, 3, 0, 1]);
/// assert_eq!(b.eval().shape(), &[5, 3, 4, 1, 2]);
/// ```
pub fn transpose(x: &Tensor, perm: &[usize]) -> Tensor
{
    // Pair each source axis with its destination position.
    let mut src_dst = Vec::with_capacity(perm.len());
    for (dst, &src) in perm.iter().enumerate() {
        src_dst.push((src, dst));
    }
    apply_op(transpose::Transpose { src_dst_sorted: src_dst }, &[x])
}
#[inline]
/// Slices input tensor with indices.
///
/// # Arguments
/// * `x` - Tensor with arbitrary shape.
/// * `starts` - Start indices for each dimensions
/// * `ends` - End indices for each dimensions. `-1` representing the last index is acceptable.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[4, 4]);
/// let ref b = ag::slice(a, &[0, 0], &[-1, 2]); // numpy equivalent is a[:, 0:2]
/// assert_eq!(b.eval().shape(), &[4, 2]);
/// ```
pub fn slice(x: &Tensor, starts: &[isize], ends: &[isize]) -> Tensor
{
    assert_eq!(starts.len(), ends.len());
    // Translate each (start, end) pair into an ndarray slice spec with step 1.
    // An end of -1 maps to `None`, meaning "through the last element".
    let mut indices = Vec::with_capacity(starts.len());
    for (&s, &e) in starts.iter().zip(ends.iter()) {
        let end = if e == -1 { None } else { Some(e) };
        indices.push(ndarray::Si(s, end, 1));
    }
    apply_op(slice::Slice { indices: indices.into_boxed_slice() }, &[x])
}
#[inline]
/// Concatenates (stacks) input tensors along specified axis.
///
/// `axis` can be negative.
///
/// # Examples
///
/// ```
/// extern crate autograd as ag;
///
/// let ref a = ag::zeros(&[3, 2]);
/// let ref b = ag::zeros(&[3, 2]);
/// let ref c = ag::zeros(&[3, 2]);
/// let ref d = ag::concat(&[a, b, c], 0);
/// assert_eq!(d.eval().shape(), &[9, 2]);
/// ```
pub fn concat(tensors: &[&Tensor], axis: isize) -> Tensor
{
    let op = concat::Concat { axis: axis };
    apply_op(op, tensors)
}
#[inline]
/// Gathers slices.
///
/// Along `axis`, slices subviews from `param` with `indices`, and then gathers those.
/// For example, this can be used for embedding vector lookup.
/// `axis` can be negative.
///
/// See also https://www.tensorflow.org/api_docs/python/tf/gather.
///
/// # Arguments
/// * `param` - Target of slicing.
/// * `indices` - Index tensor with which slices `param`. This can be arbitrary shape.
/// * `axis` - Slices sub tensors along this axis.
///
/// # Returns
/// Tensor with shape `param.shape[..axis] + indices.shape + param.shape[axis+1..]`
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let ref param = ag::constant(ag::ndarray_ext::zeros(&[5, 4, 8, 2]));
/// let ref indices = ag::constant(ndarray::arr2(&[[5., 4., 3.], [2., 1., 0.]]));
/// let y = ag::gather(param, indices, 2);
/// assert_eq!(y.eval().shape(), &[5, 4, 2, 3, 2])
/// ```
pub fn gather(param: &Tensor, indices: &Tensor, axis: isize) -> Tensor
{
    let op = gather::Gather { axis: axis };
    // Note the input order: the op receives `indices` first, then `param`.
    apply_op(op, &[indices, param])
}
#[inline]
/// Applies recurrent net unit to the input.
///
/// This func processes a time step in the batch of sequences in parallel.
///
/// # Arguments
/// * `x` - Input tensor for this step
/// * `rnn` - RNN struct
/// * `with_new_state` - If true, calls `rnn.reset_state()` before running a step
///
/// # Returns
/// Output of `rnn.step()`
///
/// For the usage, see `lstm_lm()` in `tests/test_tensor_ops_grad.rs` and `nn_impl::rnn`
pub fn rnn_step<T>(x: &Tensor, rnn: &mut T, with_new_state: bool) -> Tensor
where
    T: ::nn_impl::rnn::RNN,
{
    // Optionally start a new sequence by clearing the unit's hidden state.
    if with_new_state {
        rnn.reset_state();
    }
    rnn.step(x)
}
/// Returns a constant tensor filled with zeros.
pub fn zeros(shape: &[usize]) -> Tensor
{
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Constant".to_string() }),
        inputs: vec![],
        param: Some(::ndarray_ext::zeros(shape)),
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
/// Returns a constant tensor filled with ones.
pub fn ones(shape: &[usize]) -> Tensor
{
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Constant".to_string() }),
        inputs: vec![],
        param: Some(::ndarray_ext::ones(shape)),
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
/// Creates a constant tensor from an ndarray array.
///
/// # Examples
///
/// ```
/// extern crate ndarray;
/// extern crate autograd as ag;
///
/// let arr = ndarray::arr1(&[0., 0., 0.]).into_dyn();
/// assert_eq!(arr.clone(), ag::constant(arr).eval())
/// ```
#[inline]
pub fn constant<T: ndarray::Dimension>(array: ndarray::Array<f32, T>) -> Tensor
{
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Constant".to_string() }),
        inputs: vec![],
        param: Some(array.into_dyn()),
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
/// Creates a constant scalar tensor (stored as a 1-element array).
#[inline]
pub fn scalar(a: f32) -> Tensor
{
    let raw = RawTensor {
        op: Box::new(dummy_op::DummyOp { name: "Scalar".to_string() }),
        inputs: vec![],
        param: Some(NdArray::from_elem(ndarray::IxDyn(&[1]), a)),
        rank: 0,
    };
    Tensor(Rc::new(RefCell::new(raw)))
}
/// Creates a constant tensor holding the values `[start, end)` stepped by `step`.
#[inline]
pub fn range(start: usize, end: usize, step: usize) -> Tensor
{
    Tensor(Rc::new(RefCell::new(RawTensor {
        // NOTE(review): the op name "Scalar" looks copy-pasted from
        // `scalar()`; "Range" (or "Constant") seems intended — confirm
        // nothing matches on this name before renaming.
        op: Box::new(dummy_op::DummyOp { name: "Scalar".to_string() }),
        inputs: vec![],
        param: Some(
            ndarray::Array1::range(start as f32, end as f32, step as f32).into_dyn(),
        ),
        rank: 0,
    })))
}
/// Outputs values sampled from the normal distribution.
pub fn random_normal(shape: &[usize], mean: f64, stddev: f64) -> Tensor
{
    apply_op(
        random_ops::RandomNormal {
            shape: shape.to_vec(),
            mean: mean,
            stddev: stddev,
        },
        &[],
    )
}
/// Outputs values sampled from the uniform distribution.
pub fn random_uniform(shape: &[usize], min: f64, max: f64) -> Tensor
{
    apply_op(
        random_ops::RandomUniform {
            shape: shape.to_vec(),
            min: min,
            max: max,
        },
        &[],
    )
}
/// Outputs values sampled from the standard normal distribution.
pub fn standard_normal(shape: &[usize]) -> Tensor
{
    apply_op(random_ops::StandardNormal { shape: shape.to_vec() }, &[])
}
/// Outputs values sampled from the standard uniform distribution.
pub fn standard_uniform(shape: &[usize]) -> Tensor
{
    apply_op(random_ops::StandardUniform { shape: shape.to_vec() }, &[])
}
/// Outputs values sampled from the bernoulli distribution.
pub fn bernoulli(shape: &[usize], p: f64) -> Tensor
{
    apply_op(
        random_ops::Bernoulli {
            shape: shape.to_vec(),
            p: p,
        },
        &[],
    )
}
/// Outputs values sampled from the exponential distribution.
pub fn random_exp(shape: &[usize], lambda: f64) -> Tensor
{
    apply_op(
        random_ops::Exponential {
            shape: shape.to_vec(),
            lambda: lambda,
        },
        &[],
    )
}
/// Outputs values sampled from the gamma distribution.
pub fn gamma(shape: &[usize], shape_param: f64, scale: f64) -> Tensor
{
    apply_op(
        random_ops::Gamma {
            shape: shape.to_vec(),
            shape_param: shape_param,
            scale: scale,
        },
        &[],
    )
}
/// Outputs values sampled from the log-normal distribution.
pub fn log_normal(shape: &[usize], mean: f64, stddev: f64) -> Tensor
{
    apply_op(
        random_ops::LogNormal {
            shape: shape.to_vec(),
            mean: mean,
            stddev: stddev,
        },
        &[],
    )
}
|
extern crate crypto;
extern crate lzma;
extern crate rustc_serialize;
extern crate std;
extern crate tar;
use crypto::digest::Digest;
use std::io::Read;
#[derive(Debug)]
/// Unified error type for package loading and `.PKGINFO` parsing.
pub enum Error {
    /// File or archive I/O failure.
    Io(std::io::Error),
    /// Failure while decompressing the xz stream.
    Lzma(lzma::LzmaError),
    /// A numeric `.PKGINFO` field (builddate, size) failed to parse.
    ParseInt(std::num::ParseIntError),
    /// Free-form error message.
    Custom(std::borrow::Cow<'static, str>),
}
// Conversions into `Error` so `try!` can propagate each failure kind.
impl From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Self {
        Error::Io(e)
    }
}
impl From<lzma::LzmaError> for Error {
    fn from(e: lzma::LzmaError) -> Self {
        Error::Lzma(e)
    }
}
impl From<std::num::ParseIntError> for Error {
    fn from(e: std::num::ParseIntError) -> Self {
        Error::ParseInt(e)
    }
}
impl From<&'static str> for Error {
    fn from(e: &'static str) -> Self {
        Error::Custom(std::borrow::Cow::from(e))
    }
}
impl From<String> for Error {
    fn from(e: String) -> Self {
        Error::Custom(std::borrow::Cow::from(e))
    }
}
#[derive(Debug, Clone)]
/// Metadata of a package archive, as needed for a repository db entry.
pub struct Package {
    // Parsed `.PKGINFO` contents.
    pkginfo: PkgInfo,
    // Compressed archive size in bytes.
    size: u64,
    // Archive file name (no directory part).
    filename: String,
    // Base64 of the detached `.sig` file, or "" if absent.
    pgpsig: String,
    md5sum: String,
    sha256sum: String,
    // Paths of the non-hidden entries inside the archive.
    files: Vec<String>,
}
impl Package {
    /// Reads a package archive from `path` and derives the metadata needed
    /// for a repository entry: `.PKGINFO` fields, file list, md5/sha256
    /// checksums, compressed size, and the optional detached PGP signature.
    pub fn load<P: AsRef<std::path::Path>>(path: &P) -> Result<Package, Error> {
        let (pkginfo, files) = try!(PkgInfo::load(path));
        let filename = path.as_ref().file_name().unwrap().to_string_lossy().into_owned();
        // A detached signature is expected next to the package as "<filename>.sig".
        let sig_path = path.as_ref().parent().unwrap().join(format!("{}.sig", filename));
        let pgpsig = if let Ok(mut f) = std::fs::File::open(sig_path) {
            use rustc_serialize::base64::ToBase64;
            let mut buf = vec![];
            try!(f.read_to_end(&mut buf));
            buf.to_base64(rustc_serialize::base64::STANDARD)
        } else {
            // A missing signature is not an error; the field stays empty.
            "".to_owned()
        };
        // Stream the archive once, feeding both digests in 1 KiB chunks.
        let mut md5 = crypto::md5::Md5::new();
        let mut sha256 = crypto::sha2::Sha256::new();
        let mut f = try!(std::fs::File::open(path));
        loop {
            let mut buf = [0; 1024];
            match f.read(&mut buf) {
                Ok(0) => {
                    break;
                }
                Ok(len) => {
                    md5.input(&buf[..len]);
                    sha256.input(&buf[..len]);
                }
                Err(e) => {
                    return Err(Error::from(e));
                }
            }
        }
        Ok(Package {
            pkginfo: pkginfo,
            size: try!(std::fs::metadata(path)).len(),
            filename: filename,
            pgpsig: pgpsig,
            md5sum: md5.result_str(),
            sha256sum: sha256.result_str(),
            files: files,
        })
    }
    // Read-only accessors over the loaded metadata.
    pub fn groups(&self) -> &Vec<String> {
        &self.pkginfo.groups
    }
    pub fn license(&self) -> &Vec<String> {
        &self.pkginfo.license
    }
    pub fn replaces(&self) -> &Vec<String> {
        &self.pkginfo.replaces
    }
    pub fn filename(&self) -> &str {
        &self.filename
    }
    pub fn pkgname(&self) -> &str {
        &self.pkginfo.pkgname
    }
    pub fn pkgbase(&self) -> &str {
        &self.pkginfo.pkgbase
    }
    pub fn pkgver(&self) -> &str {
        &self.pkginfo.pkgver
    }
    pub fn pkgdesc(&self) -> &str {
        &self.pkginfo.pkgdesc
    }
    /// Compressed (on-disk archive) size.
    pub fn csize(&self) -> u64 {
        self.size
    }
    /// Installed size, as declared in `.PKGINFO`.
    pub fn isize(&self) -> u64 {
        self.pkginfo.size
    }
    pub fn md5sum(&self) -> &str {
        &self.md5sum
    }
    pub fn sha256sum(&self) -> &str {
        &self.sha256sum
    }
    pub fn pgpsig(&self) -> &str {
        &self.pgpsig
    }
    pub fn url(&self) -> &str {
        &self.pkginfo.url
    }
    pub fn arch(&self) -> &str {
        &self.pkginfo.arch
    }
    pub fn builddate(&self) -> u64 {
        self.pkginfo.builddate
    }
    pub fn packager(&self) -> &str {
        &self.pkginfo.packager
    }
    pub fn conflicts(&self) -> &Vec<String> {
        &self.pkginfo.conflicts
    }
    pub fn provides(&self) -> &Vec<String> {
        &self.pkginfo.provides
    }
    pub fn depends(&self) -> &Vec<String> {
        &self.pkginfo.depends
    }
    pub fn makedepends(&self) -> &Vec<String> {
        &self.pkginfo.makedepends
    }
    pub fn checkdepends(&self) -> &Vec<String> {
        &self.pkginfo.checkdepends
    }
    pub fn optdepends(&self) -> &Vec<String> {
        &self.pkginfo.optdepends
    }
    pub fn files(&self) -> &Vec<String> {
        &self.files
    }
}
#[derive(Debug, Default, Clone)]
/// Fields parsed from a package's `.PKGINFO` file. Scalar fields appear at
/// most once; the `Vec` fields accumulate repeatable keys.
pub struct PkgInfo {
    pub pkgname: String,
    pub pkgbase: String,
    pub pkgver: String,
    pub pkgdesc: String,
    pub url: String,
    pub builddate: u64,
    pub packager: String,
    // Installed size ("size" key).
    pub size: u64,
    pub arch: String,
    pub license: Vec<String>,
    pub groups: Vec<String>,
    pub depends: Vec<String>,
    pub makedepends: Vec<String>,
    pub checkdepends: Vec<String>,
    pub optdepends: Vec<String>,
    pub conflicts: Vec<String>,
    pub provides: Vec<String>,
    pub replaces: Vec<String>,
}
impl PkgInfo {
    /// Opens the xz-compressed tarball at `path` and returns the parsed
    /// `.PKGINFO` metadata plus the list of non-hidden entry paths.
    fn load<P: AsRef<std::path::Path>>(path: &P) -> Result<(Self, Vec<String>), Error> {
        let file = try!(std::fs::File::open(path));
        let xz_reader = try!(lzma::LzmaReader::new_decompressor(file));
        let mut tar_reader = tar::Archive::new(xz_reader);
        let mut pkginfo = None;
        let mut files = vec![];
        for entry_result in try!(tar_reader.entries()) {
            let mut entry = try!(entry_result);
            let path = try!(entry.path()).to_mut().to_string_lossy().into_owned();
            if path == ".PKGINFO" && entry.header().entry_type() == tar::EntryType::Regular {
                let mut body = String::new();
                try!(entry.read_to_string(&mut body));
                pkginfo = Some(try!(parse_pkginfo(&body)));
            }
            // Dot-entries (.PKGINFO, .MTREE, ...) are metadata, not contents.
            if !path.starts_with(".") {
                files.push(path);
            }
        }
        if let Some(pkginfo) = pkginfo {
            Ok((pkginfo, files))
        } else {
            Err(Error::from(".PKGINFO not found"))
        }
    }
}
/// Parses a textual `.PKGINFO` body (one `key = value` per line, `#`
/// comment lines allowed) into a `PkgInfo`.
///
/// Repeatable keys (license, group, depend, ...) accumulate into their
/// vectors; unknown keys and lines without `=` are reported as errors.
fn parse_pkginfo(body: &str) -> Result<PkgInfo, Error> {
    let mut info = PkgInfo::default();
    for line in body.lines() {
        if line.starts_with('#') {
            continue;
        }
        // splitn(2, '=') yields at most two items, so a third next() can
        // never be Some; the previous `rest` check was dead code.
        let mut splitn = line.splitn(2, '=');
        if let (Some(key), Some(val)) = (splitn.next(), splitn.next()) {
            let key = key.trim();
            let val = val.trim();
            match key {
                "pkgname" => info.pkgname = val.to_owned(),
                "pkgbase" => info.pkgbase = val.to_owned(),
                "pkgver" => info.pkgver = val.to_owned(),
                "pkgdesc" => info.pkgdesc = val.to_owned(),
                "url" => info.url = val.to_owned(),
                "builddate" => info.builddate = try!(val.parse()),
                "packager" => info.packager = val.to_owned(),
                "size" => info.size = try!(val.parse()),
                "arch" => info.arch = val.to_owned(),
                "license" => info.license.push(val.to_owned()),
                "group" => info.groups.push(val.to_owned()),
                "depend" => info.depends.push(val.to_owned()),
                "makedepend" => info.makedepends.push(val.to_owned()),
                "checkdepend" => info.checkdepends.push(val.to_owned()),
                "optdepend" => info.optdepends.push(val.to_owned()),
                "conflict" => info.conflicts.push(val.to_owned()),
                "provides" => info.provides.push(val.to_owned()),
                "replaces" => info.replaces.push(val.to_owned()),
                _ => return Err(Error::from(format!("Unknown PKGINFO entry '{}': {}", key, line))),
            }
        } else {
            return Err(Error::from(format!("Invalid line: {}", line)));
        }
    }
    Ok(info)
}
Package::load and PkgInfo::load: take `path` by value as `impl AsRef<Path>` instead of by reference (as_ref)
extern crate crypto;
extern crate lzma;
extern crate rustc_serialize;
extern crate std;
extern crate tar;
use crypto::digest::Digest;
use std::io::Read;
#[derive(Debug)]
/// Unified error type for package loading and `.PKGINFO` parsing.
pub enum Error {
    /// File or archive I/O failure.
    Io(std::io::Error),
    /// Failure while decompressing the xz stream.
    Lzma(lzma::LzmaError),
    /// A numeric `.PKGINFO` field (builddate, size) failed to parse.
    ParseInt(std::num::ParseIntError),
    /// Free-form error message.
    Custom(std::borrow::Cow<'static, str>),
}
// Conversions into `Error` so `try!` can propagate each failure kind.
impl From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Self {
        Error::Io(e)
    }
}
impl From<lzma::LzmaError> for Error {
    fn from(e: lzma::LzmaError) -> Self {
        Error::Lzma(e)
    }
}
impl From<std::num::ParseIntError> for Error {
    fn from(e: std::num::ParseIntError) -> Self {
        Error::ParseInt(e)
    }
}
impl From<&'static str> for Error {
    fn from(e: &'static str) -> Self {
        Error::Custom(std::borrow::Cow::from(e))
    }
}
impl From<String> for Error {
    fn from(e: String) -> Self {
        Error::Custom(std::borrow::Cow::from(e))
    }
}
#[derive(Debug, Clone)]
/// Metadata of a package archive, as needed for a repository db entry.
pub struct Package {
    // Parsed `.PKGINFO` contents.
    pkginfo: PkgInfo,
    // Compressed archive size in bytes.
    size: u64,
    // Archive file name (no directory part).
    filename: String,
    // Base64 of the detached `.sig` file, or "" if absent.
    pgpsig: String,
    md5sum: String,
    sha256sum: String,
    // Paths of the non-hidden entries inside the archive.
    files: Vec<String>,
}
impl Package {
    /// Reads a package archive from `path` and derives the metadata needed
    /// for a repository entry: `.PKGINFO` fields, file list, md5/sha256
    /// checksums, compressed size, and the optional detached PGP signature.
    pub fn load<P: AsRef<std::path::Path>>(path: P) -> Result<Package, Error> {
        let (pkginfo, files) = try!(PkgInfo::load(path.as_ref()));
        let filename = path.as_ref().file_name().unwrap().to_string_lossy().into_owned();
        // A detached signature is expected next to the package as "<filename>.sig".
        let sig_path = path.as_ref().parent().unwrap().join(format!("{}.sig", filename));
        let pgpsig = if let Ok(mut f) = std::fs::File::open(sig_path) {
            use rustc_serialize::base64::ToBase64;
            let mut buf = vec![];
            try!(f.read_to_end(&mut buf));
            buf.to_base64(rustc_serialize::base64::STANDARD)
        } else {
            // A missing signature is not an error; the field stays empty.
            "".to_owned()
        };
        // Stream the archive once, feeding both digests in 1 KiB chunks.
        let mut md5 = crypto::md5::Md5::new();
        let mut sha256 = crypto::sha2::Sha256::new();
        let mut f = try!(std::fs::File::open(path.as_ref()));
        loop {
            let mut buf = [0; 1024];
            match f.read(&mut buf) {
                Ok(0) => {
                    break;
                }
                Ok(len) => {
                    md5.input(&buf[..len]);
                    sha256.input(&buf[..len]);
                }
                Err(e) => {
                    return Err(Error::from(e));
                }
            }
        }
        Ok(Package {
            pkginfo: pkginfo,
            size: try!(std::fs::metadata(path)).len(),
            filename: filename,
            pgpsig: pgpsig,
            md5sum: md5.result_str(),
            sha256sum: sha256.result_str(),
            files: files,
        })
    }
    // Read-only accessors over the loaded metadata.
    pub fn groups(&self) -> &Vec<String> {
        &self.pkginfo.groups
    }
    pub fn license(&self) -> &Vec<String> {
        &self.pkginfo.license
    }
    pub fn replaces(&self) -> &Vec<String> {
        &self.pkginfo.replaces
    }
    pub fn filename(&self) -> &str {
        &self.filename
    }
    pub fn pkgname(&self) -> &str {
        &self.pkginfo.pkgname
    }
    pub fn pkgbase(&self) -> &str {
        &self.pkginfo.pkgbase
    }
    pub fn pkgver(&self) -> &str {
        &self.pkginfo.pkgver
    }
    pub fn pkgdesc(&self) -> &str {
        &self.pkginfo.pkgdesc
    }
    /// Compressed (on-disk archive) size.
    pub fn csize(&self) -> u64 {
        self.size
    }
    /// Installed size, as declared in `.PKGINFO`.
    pub fn isize(&self) -> u64 {
        self.pkginfo.size
    }
    pub fn md5sum(&self) -> &str {
        &self.md5sum
    }
    pub fn sha256sum(&self) -> &str {
        &self.sha256sum
    }
    pub fn pgpsig(&self) -> &str {
        &self.pgpsig
    }
    pub fn url(&self) -> &str {
        &self.pkginfo.url
    }
    pub fn arch(&self) -> &str {
        &self.pkginfo.arch
    }
    pub fn builddate(&self) -> u64 {
        self.pkginfo.builddate
    }
    pub fn packager(&self) -> &str {
        &self.pkginfo.packager
    }
    pub fn conflicts(&self) -> &Vec<String> {
        &self.pkginfo.conflicts
    }
    pub fn provides(&self) -> &Vec<String> {
        &self.pkginfo.provides
    }
    pub fn depends(&self) -> &Vec<String> {
        &self.pkginfo.depends
    }
    pub fn makedepends(&self) -> &Vec<String> {
        &self.pkginfo.makedepends
    }
    pub fn checkdepends(&self) -> &Vec<String> {
        &self.pkginfo.checkdepends
    }
    pub fn optdepends(&self) -> &Vec<String> {
        &self.pkginfo.optdepends
    }
    pub fn files(&self) -> &Vec<String> {
        &self.files
    }
}
#[derive(Debug, Default, Clone)]
/// Fields parsed from a package's `.PKGINFO` file. Scalar fields appear at
/// most once; the `Vec` fields accumulate repeatable keys.
pub struct PkgInfo {
    pub pkgname: String,
    pub pkgbase: String,
    pub pkgver: String,
    pub pkgdesc: String,
    pub url: String,
    pub builddate: u64,
    pub packager: String,
    // Installed size ("size" key).
    pub size: u64,
    pub arch: String,
    pub license: Vec<String>,
    pub groups: Vec<String>,
    pub depends: Vec<String>,
    pub makedepends: Vec<String>,
    pub checkdepends: Vec<String>,
    pub optdepends: Vec<String>,
    pub conflicts: Vec<String>,
    pub provides: Vec<String>,
    pub replaces: Vec<String>,
}
impl PkgInfo {
    /// Opens the xz-compressed tarball at `path` and returns the parsed
    /// `.PKGINFO` metadata plus the list of non-hidden entry paths.
    fn load<P: AsRef<std::path::Path>>(path: P) -> Result<(Self, Vec<String>), Error> {
        let file = try!(std::fs::File::open(path));
        let xz_reader = try!(lzma::LzmaReader::new_decompressor(file));
        let mut tar_reader = tar::Archive::new(xz_reader);
        let mut pkginfo = None;
        let mut files = vec![];
        for entry_result in try!(tar_reader.entries()) {
            let mut entry = try!(entry_result);
            let path = try!(entry.path()).to_mut().to_string_lossy().into_owned();
            if path == ".PKGINFO" && entry.header().entry_type() == tar::EntryType::Regular {
                let mut body = String::new();
                try!(entry.read_to_string(&mut body));
                pkginfo = Some(try!(parse_pkginfo(&body)));
            }
            // Dot-entries (.PKGINFO, .MTREE, ...) are metadata, not contents.
            if !path.starts_with(".") {
                files.push(path);
            }
        }
        if let Some(pkginfo) = pkginfo {
            Ok((pkginfo, files))
        } else {
            Err(Error::from(".PKGINFO not found"))
        }
    }
}
/// Parses a textual `.PKGINFO` body (one `key = value` per line, `#`
/// comment lines allowed) into a `PkgInfo`.
///
/// Repeatable keys (license, group, depend, ...) accumulate into their
/// vectors; unknown keys and lines without `=` are reported as errors.
fn parse_pkginfo(body: &str) -> Result<PkgInfo, Error> {
    let mut info = PkgInfo::default();
    for line in body.lines() {
        if line.starts_with('#') {
            continue;
        }
        // splitn(2, '=') yields at most two items, so a third next() can
        // never be Some; the previous `rest` check was dead code.
        let mut splitn = line.splitn(2, '=');
        if let (Some(key), Some(val)) = (splitn.next(), splitn.next()) {
            let key = key.trim();
            let val = val.trim();
            match key {
                "pkgname" => info.pkgname = val.to_owned(),
                "pkgbase" => info.pkgbase = val.to_owned(),
                "pkgver" => info.pkgver = val.to_owned(),
                "pkgdesc" => info.pkgdesc = val.to_owned(),
                "url" => info.url = val.to_owned(),
                "builddate" => info.builddate = try!(val.parse()),
                "packager" => info.packager = val.to_owned(),
                "size" => info.size = try!(val.parse()),
                "arch" => info.arch = val.to_owned(),
                "license" => info.license.push(val.to_owned()),
                "group" => info.groups.push(val.to_owned()),
                "depend" => info.depends.push(val.to_owned()),
                "makedepend" => info.makedepends.push(val.to_owned()),
                "checkdepend" => info.checkdepends.push(val.to_owned()),
                "optdepend" => info.optdepends.push(val.to_owned()),
                "conflict" => info.conflicts.push(val.to_owned()),
                "provides" => info.provides.push(val.to_owned()),
                "replaces" => info.replaces.push(val.to_owned()),
                _ => return Err(Error::from(format!("Unknown PKGINFO entry '{}': {}", key, line))),
            }
        } else {
            return Err(Error::from(format!("Invalid line: {}", line)));
        }
    }
    Ok(info)
}
|
use memory::MemSegment;
use screen::Screen;
use cart::Cart;
use std::rc::Rc;
use std::cell::RefCell;
use std::default::Default;
mod ppu_reg;
use ppu::ppu_reg::*;
mod ppu_memory;
use ppu::ppu_memory::*;
mod sprite_rendering;
use ppu::sprite_rendering::*;
mod background_rendering;
// Output framebuffer dimensions in pixels.
pub const SCREEN_WIDTH: usize = 256;
pub const SCREEN_HEIGHT: usize = 240;
pub const SCREEN_BUFFER_SIZE: usize = SCREEN_WIDTH * SCREEN_HEIGHT;
#[derive(Debug, Copy, Clone, PartialEq)]
#[repr(C)]
/// A palette color value, truncated to 6 bits (see `from_bits_truncate`).
pub struct Color(u8);
impl Color {
    /// Builds a Color, masking the value down to the low 6 bits.
    fn from_bits_truncate(val: u8) -> Color {
        Color(val & 0b0011_1111)
    }
    /// Raw 6-bit value.
    pub fn bits(&self) -> u8 {
        self.0
    }
}
pub struct PPU {
    // Memory-mapped register state.
    reg: PPUReg,
    // Buffered value returned by delayed PPUDATA reads.
    ppudata_read_buffer: u8,
    // 64 sprite entries (Object Attribute Memory).
    oam: [OAMEntry; 64],
    ppu_mem: PPUMemory,
    screen: Box<Screen>,
    screen_buffer: [Color; SCREEN_BUFFER_SIZE],
    // Total PPU cycles since power-on.
    global_cyc: u64,
    // Cycle within the current scanline (0..=340).
    cyc: u16,
    // Current scanline; -1 is the pre-render line.
    sl: i16,
    frame: u32,
}
#[derive(Copy, Debug, PartialEq, Clone)]
/// Result of running the PPU: whether the CPU should take an NMI.
pub enum StepResult {
    NMI,
    Continue,
}
impl PPU {
    /// Builds a PPU wired to the given cartridge and output screen.
    pub fn new(cart: Rc<RefCell<Cart>>, screen: Box<Screen>) -> PPU {
        PPU {
            reg: Default::default(),
            ppudata_read_buffer: 0,
            oam: [OAMEntry::zero(); 64],
            ppu_mem: PPUMemory::new(cart),
            screen_buffer: [Color::from_bits_truncate(0x00); SCREEN_BUFFER_SIZE],
            screen: screen,
            global_cyc: 0,
            cyc: 0,
            // Starts on scanline 241 — presumably so power-on lands in
            // vblank; confirm against the emulator's boot expectations.
            sl: 241,
            frame: 0,
        }
    }
    /// Advances the PPU until it catches up with the CPU (3 PPU cycles per
    /// CPU cycle). Returns NMI if any cycle in the span triggered one.
    pub fn run_to(&mut self, cpu_cycle: u64) -> StepResult {
        let mut hit_nmi = false;
        while self.global_cyc < (cpu_cycle * 3) {
            self.tick_cycle();
            hit_nmi |= self.run_cycle();
        }
        if hit_nmi {
            StepResult::NMI
        } else {
            StepResult::Continue
        }
    }
    /// Advances the cycle/scanline/frame counters: 341 cycles per scanline;
    /// scanline 261 wraps to -1 (pre-render) and starts a new frame.
    fn tick_cycle(&mut self) {
        self.global_cyc += 1;
        self.cyc += 1;
        if self.cyc == 341 {
            self.cyc = 0;
            self.sl += 1;
            if self.sl == 261 {
                self.sl = -1;
                self.frame += 1;
            }
        }
    }
    /// Dispatches work for the current (cycle, scanline) position.
    /// Returns true when this cycle raised an NMI.
    fn run_cycle(&mut self) -> bool {
        match (self.cyc, self.sl) {
            (c, -1) => self.prerender_scanline(c),
            (c, sl @ 0...239) => self.visible_scanline(c, sl),
            (_, 240) => (), //Post-render idle scanline
            (1, 241) => return self.start_vblank(),
            (_, 241...260) => (), //VBlank lines
            _ => (),
        }
        false
    }
    fn prerender_scanline(&mut self, _: u16) {
        // Nothing here yet
    }
    fn visible_scanline(&mut self, pixel: u16, scanline: i16) {
        // Nothing here yet
        // Cycles 256..341 of a visible scanline produce no pixel.
        if pixel >= 256 {
            return;
        }
        let x = pixel as usize;
        let y = scanline as usize;
        self.screen_buffer[y * SCREEN_WIDTH + x] = self.get_pixel(x as u16, y as u16);
    }
    fn get_pixel(&mut self, x: u16, y: u16) -> Color {
        self.get_background_pixel(x, y)
    }
    /// Presents the frame and, except on the very first frame, sets the
    /// VBLANK flag; returns whether an NMI should fire.
    fn start_vblank(&mut self) -> bool {
        let buf = &self.screen_buffer;
        self.screen.draw(buf);
        if self.frame > 0 {
            self.reg.ppustat.insert(VBLANK);
            self.reg.ppuctrl.generate_vblank_nmi()
        } else {
            false
        }
    }
    #[cfg(feature="cputrace")]
    pub fn cycle(&self) -> u16 {
        self.cyc
    }
    #[cfg(feature="cputrace")]
    pub fn scanline(&self) -> i16 {
        self.sl
    }
    #[cfg(feature="cputrace")]
    pub fn vram_addr(&self) -> u16 {
        self.reg.ppuaddr
    }
}
impl MemSegment for PPU {
    /// CPU-visible register reads; registers mirror every 8 bytes.
    fn read(&mut self, idx: u16) -> u8 {
        match idx % 8 {
            // OAMDATA ($2004).
            // NOTE(review): this also increments OAMADDR on read; on real
            // hardware only writes increment OAMADDR — confirm and consider
            // removing the increment.
            0x0004 => {
                let res = self.oam[self.reg.oamaddr as usize / 4].read(self.reg.oamaddr as u16);
                self.reg.incr_oamaddr();
                res
            }
            // PPUDATA ($2007): reads below the palette region are buffered
            // (return the previous byte); palette reads are immediate, and
            // the buffer is refilled from the underlying nametable byte.
            0x0007 => {
                let addr = self.reg.ppuaddr;
                match addr {
                    0x0000...0x3EFF => {
                        let old_buffer = self.ppudata_read_buffer;
                        self.ppudata_read_buffer = self.ppu_mem.read(addr);
                        self.reg.incr_ppuaddr();
                        old_buffer
                    },
                    0x3F00...0x3FFF => {
                        let read_result = self.ppu_mem.read(addr);
                        self.reg.incr_ppuaddr();
                        self.ppudata_read_buffer = self.ppu_mem.read_bypass_palette(addr);
                        read_result
                    },
                    x => invalid_address!(x),
                }
            }
            _ => self.reg.read( idx )
        }
    }
    /// CPU-visible register writes; OAMDATA and PPUDATA auto-increment
    /// their address registers.
    fn write(&mut self, idx: u16, val: u8) {
        match idx % 8 {
            0x0004 => {
                self.oam[self.reg.oamaddr as usize / 4].write(self.reg.oamaddr as u16, val);
                self.reg.incr_oamaddr();
            }
            0x0007 => {
                self.ppu_mem.write(self.reg.ppuaddr, val);
                self.reg.incr_ppuaddr();
            }
            _ => self.reg.write( idx, val )
        }
    }
}
#[cfg(test)]
/// Register-level tests for OAMADDR/PPUADDR auto-increment and
/// VRAM/CHR-ROM access through PPUDATA.
mod tests {
    use super::*;
    use mappers::{Mapper, MapperParams};
    use std::rc::Rc;
    use std::cell::RefCell;
    use cart::Cart;
    use screen::DummyScreen;
    use ppu::ppu_reg::PPUCtrl;
    use memory::MemSegment;
    pub fn create_test_ppu() -> PPU {
        create_test_ppu_with_rom(vec![0u8; 0x1000])
    }
    pub fn create_test_ppu_with_rom(chr_rom: Vec<u8>) -> PPU {
        let mapper = Mapper::new(0, MapperParams::simple(vec![0u8; 0x1000], chr_rom));
        let cart = Cart::new(mapper);
        PPU::new(Rc::new(RefCell::new(cart)), Box::new(DummyScreen::new()))
    }
    #[test]
    fn reading_oamdata_increments_oamaddr() {
        let mut ppu = create_test_ppu();
        ppu.reg.oamaddr = 0;
        ppu.read(0x2004);
        assert_eq!(ppu.reg.oamaddr, 1);
        // Increment wraps at 255.
        ppu.reg.oamaddr = 255;
        ppu.read(0x2004);
        assert_eq!(ppu.reg.oamaddr, 0);
    }
    #[test]
    fn writing_oamdata_increments_oamaddr() {
        let mut ppu = create_test_ppu();
        ppu.reg.oamaddr = 0;
        ppu.write(0x2004, 12);
        assert_eq!(ppu.reg.oamaddr, 1);
        ppu.reg.oamaddr = 255;
        ppu.write(0x2004, 12);
        assert_eq!(ppu.reg.oamaddr, 0);
    }
    #[test]
    fn ppu_can_read_chr_rom() {
        let mut chr_rom = vec![0u8; 0x2000];
        chr_rom[0x0ABC] = 12;
        chr_rom[0x0DBA] = 212;
        let mut ppu = create_test_ppu_with_rom(chr_rom);
        ppu.reg.ppuaddr = 0x0ABC;
        assert_eq!(ppu.read(0x2007), 12);
        ppu.reg.ppuaddr = 0x0DBA;
        assert_eq!(ppu.read(0x2007), 212);
    }
    #[test]
    fn ppu_can_read_write_vram() {
        let mut ppu = create_test_ppu();
        ppu.reg.ppuaddr = 0x2ABC;
        ppu.write(0x2007, 12);
        ppu.reg.ppuaddr = 0x2ABC;
        assert_eq!(ppu.read(0x2007), 12);
        ppu.reg.ppuaddr = 0x2DBA;
        ppu.write(0x2007, 212);
        ppu.reg.ppuaddr = 0x2DBA;
        assert_eq!(ppu.read(0x2007), 212);
        // Mirroring
        ppu.reg.ppuaddr = 0x2EFC;
        ppu.write(0x2007, 128);
        ppu.reg.ppuaddr = 0x3EFC;
        assert_eq!(ppu.read(0x2007), 128);
    }
    #[test]
    fn accessing_ppudata_increments_ppuaddr() {
        let mut ppu = create_test_ppu();
        ppu.reg.ppuaddr = 0x2000;
        ppu.read(0x2007);
        assert_eq!(ppu.reg.ppuaddr, 0x2001);
        ppu.write(0x2007, 0);
        assert_eq!(ppu.reg.ppuaddr, 0x2002);
    }
    #[test]
    fn accessing_ppudata_increments_ppuaddr_by_32_when_ctrl_flag_is_set() {
        let mut ppu = create_test_ppu();
        ppu.reg.ppuctrl = PPUCtrl::new(0b0000_0100);
        ppu.reg.ppuaddr = 0x2000;
        ppu.read(0x2007);
        assert_eq!(ppu.reg.ppuaddr, 0x2020);
        ppu.write(0x2007, 0);
        assert_eq!(ppu.reg.ppuaddr, 0x2040);
    }
}
Don't increment OAMADDR on read from OAMDATA.
use memory::MemSegment;
use screen::Screen;
use cart::Cart;
use std::rc::Rc;
use std::cell::RefCell;
use std::default::Default;
mod ppu_reg;
use ppu::ppu_reg::*;
mod ppu_memory;
use ppu::ppu_memory::*;
mod sprite_rendering;
use ppu::sprite_rendering::*;
mod background_rendering;
// Output framebuffer dimensions in pixels.
pub const SCREEN_WIDTH: usize = 256;
pub const SCREEN_HEIGHT: usize = 240;
pub const SCREEN_BUFFER_SIZE: usize = SCREEN_WIDTH * SCREEN_HEIGHT;
#[derive(Debug, Copy, Clone, PartialEq)]
#[repr(C)]
/// A palette color value, truncated to 6 bits (see `from_bits_truncate`).
pub struct Color(u8);
impl Color {
    /// Builds a Color, masking the value down to the low 6 bits.
    fn from_bits_truncate(val: u8) -> Color {
        Color(val & 0b0011_1111)
    }
    /// Raw 6-bit value.
    pub fn bits(&self) -> u8 {
        self.0
    }
}
pub struct PPU {
    // Memory-mapped register state.
    reg: PPUReg,
    // Buffered value returned by delayed PPUDATA reads.
    ppudata_read_buffer: u8,
    // 64 sprite entries (Object Attribute Memory).
    oam: [OAMEntry; 64],
    ppu_mem: PPUMemory,
    screen: Box<Screen>,
    screen_buffer: [Color; SCREEN_BUFFER_SIZE],
    // Total PPU cycles since power-on.
    global_cyc: u64,
    // Cycle within the current scanline (0..=340).
    cyc: u16,
    // Current scanline; -1 is the pre-render line.
    sl: i16,
    frame: u32,
}
#[derive(Copy, Debug, PartialEq, Clone)]
/// Result of running the PPU: whether the CPU should take an NMI.
pub enum StepResult {
    NMI,
    Continue,
}
impl PPU {
    /// Builds a PPU wired to the given cartridge and output screen.
    pub fn new(cart: Rc<RefCell<Cart>>, screen: Box<Screen>) -> PPU {
        PPU {
            reg: Default::default(),
            ppudata_read_buffer: 0,
            oam: [OAMEntry::zero(); 64],
            ppu_mem: PPUMemory::new(cart),
            screen_buffer: [Color::from_bits_truncate(0x00); SCREEN_BUFFER_SIZE],
            screen: screen,
            global_cyc: 0,
            cyc: 0,
            // Starts on scanline 241 — presumably so power-on lands in
            // vblank; confirm against the emulator's boot expectations.
            sl: 241,
            frame: 0,
        }
    }
    /// Advances the PPU until it catches up with the CPU (3 PPU cycles per
    /// CPU cycle). Returns NMI if any cycle in the span triggered one.
    pub fn run_to(&mut self, cpu_cycle: u64) -> StepResult {
        let mut hit_nmi = false;
        while self.global_cyc < (cpu_cycle * 3) {
            self.tick_cycle();
            hit_nmi |= self.run_cycle();
        }
        if hit_nmi {
            StepResult::NMI
        } else {
            StepResult::Continue
        }
    }
    /// Advances the cycle/scanline/frame counters: 341 cycles per scanline;
    /// scanline 261 wraps to -1 (pre-render) and starts a new frame.
    fn tick_cycle(&mut self) {
        self.global_cyc += 1;
        self.cyc += 1;
        if self.cyc == 341 {
            self.cyc = 0;
            self.sl += 1;
            if self.sl == 261 {
                self.sl = -1;
                self.frame += 1;
            }
        }
    }
    /// Dispatches work for the current (cycle, scanline) position.
    /// Returns true when this cycle raised an NMI.
    fn run_cycle(&mut self) -> bool {
        match (self.cyc, self.sl) {
            (c, -1) => self.prerender_scanline(c),
            (c, sl @ 0...239) => self.visible_scanline(c, sl),
            (_, 240) => (), //Post-render idle scanline
            (1, 241) => return self.start_vblank(),
            (_, 241...260) => (), //VBlank lines
            _ => (),
        }
        false
    }
    fn prerender_scanline(&mut self, _: u16) {
        // Nothing here yet
    }
    fn visible_scanline(&mut self, pixel: u16, scanline: i16) {
        // Nothing here yet
        // Cycles 256..341 of a visible scanline produce no pixel.
        if pixel >= 256 {
            return;
        }
        let x = pixel as usize;
        let y = scanline as usize;
        self.screen_buffer[y * SCREEN_WIDTH + x] = self.get_pixel(x as u16, y as u16);
    }
    fn get_pixel(&mut self, x: u16, y: u16) -> Color {
        self.get_background_pixel(x, y)
    }
    /// Presents the frame and, except on the very first frame, sets the
    /// VBLANK flag; returns whether an NMI should fire.
    fn start_vblank(&mut self) -> bool {
        let buf = &self.screen_buffer;
        self.screen.draw(buf);
        if self.frame > 0 {
            self.reg.ppustat.insert(VBLANK);
            self.reg.ppuctrl.generate_vblank_nmi()
        } else {
            false
        }
    }
    #[cfg(feature="cputrace")]
    pub fn cycle(&self) -> u16 {
        self.cyc
    }
    #[cfg(feature="cputrace")]
    pub fn scanline(&self) -> i16 {
        self.sl
    }
    #[cfg(feature="cputrace")]
    pub fn vram_addr(&self) -> u16 {
        self.reg.ppuaddr
    }
}
impl MemSegment for PPU {
    /// CPU-visible register reads; registers mirror every 8 bytes.
    fn read(&mut self, idx: u16) -> u8 {
        match idx % 8 {
            // OAMDATA ($2004): reads do NOT increment OAMADDR (only writes do).
            0x0004 => self.oam[self.reg.oamaddr as usize / 4].read(self.reg.oamaddr as u16),
            // PPUDATA ($2007): reads below the palette region are buffered
            // (return the previous byte); palette reads are immediate, and
            // the buffer is refilled from the underlying nametable byte.
            0x0007 => {
                let addr = self.reg.ppuaddr;
                match addr {
                    0x0000...0x3EFF => {
                        let old_buffer = self.ppudata_read_buffer;
                        self.ppudata_read_buffer = self.ppu_mem.read(addr);
                        self.reg.incr_ppuaddr();
                        old_buffer
                    },
                    0x3F00...0x3FFF => {
                        let read_result = self.ppu_mem.read(addr);
                        self.reg.incr_ppuaddr();
                        self.ppudata_read_buffer = self.ppu_mem.read_bypass_palette(addr);
                        read_result
                    },
                    x => invalid_address!(x),
                }
            }
            _ => self.reg.read( idx )
        }
    }
    /// CPU-visible register writes; OAMDATA and PPUDATA auto-increment
    /// their address registers.
    fn write(&mut self, idx: u16, val: u8) {
        match idx % 8 {
            0x0004 => {
                self.oam[self.reg.oamaddr as usize / 4].write(self.reg.oamaddr as u16, val);
                self.reg.incr_oamaddr();
            }
            0x0007 => {
                self.ppu_mem.write(self.reg.ppuaddr, val);
                self.reg.incr_ppuaddr();
            }
            _ => self.reg.write( idx, val )
        }
    }
}
#[cfg(test)]
/// Register-level tests for OAMADDR/PPUADDR auto-increment and
/// VRAM/CHR-ROM access through PPUDATA.
mod tests {
    use super::*;
    use mappers::{Mapper, MapperParams};
    use std::rc::Rc;
    use std::cell::RefCell;
    use cart::Cart;
    use screen::DummyScreen;
    use ppu::ppu_reg::PPUCtrl;
    use memory::MemSegment;
    pub fn create_test_ppu() -> PPU {
        create_test_ppu_with_rom(vec![0u8; 0x1000])
    }
    pub fn create_test_ppu_with_rom(chr_rom: Vec<u8>) -> PPU {
        let mapper = Mapper::new(0, MapperParams::simple(vec![0u8; 0x1000], chr_rom));
        let cart = Cart::new(mapper);
        PPU::new(Rc::new(RefCell::new(cart)), Box::new(DummyScreen::new()))
    }
    // Updated to match the new read() behavior: reads from OAMDATA no
    // longer increment OAMADDR (only writes do). The previous test
    // asserted the old incrementing behavior and would fail.
    #[test]
    fn reading_oamdata_does_not_increment_oamaddr() {
        let mut ppu = create_test_ppu();
        ppu.reg.oamaddr = 0;
        ppu.read(0x2004);
        assert_eq!(ppu.reg.oamaddr, 0);
        ppu.reg.oamaddr = 255;
        ppu.read(0x2004);
        assert_eq!(ppu.reg.oamaddr, 255);
    }
    #[test]
    fn writing_oamdata_increments_oamaddr() {
        let mut ppu = create_test_ppu();
        ppu.reg.oamaddr = 0;
        ppu.write(0x2004, 12);
        assert_eq!(ppu.reg.oamaddr, 1);
        // Increment wraps at 255.
        ppu.reg.oamaddr = 255;
        ppu.write(0x2004, 12);
        assert_eq!(ppu.reg.oamaddr, 0);
    }
    #[test]
    fn ppu_can_read_chr_rom() {
        let mut chr_rom = vec![0u8; 0x2000];
        chr_rom[0x0ABC] = 12;
        chr_rom[0x0DBA] = 212;
        let mut ppu = create_test_ppu_with_rom(chr_rom);
        ppu.reg.ppuaddr = 0x0ABC;
        assert_eq!(ppu.read(0x2007), 12);
        ppu.reg.ppuaddr = 0x0DBA;
        assert_eq!(ppu.read(0x2007), 212);
    }
    #[test]
    fn ppu_can_read_write_vram() {
        let mut ppu = create_test_ppu();
        ppu.reg.ppuaddr = 0x2ABC;
        ppu.write(0x2007, 12);
        ppu.reg.ppuaddr = 0x2ABC;
        assert_eq!(ppu.read(0x2007), 12);
        ppu.reg.ppuaddr = 0x2DBA;
        ppu.write(0x2007, 212);
        ppu.reg.ppuaddr = 0x2DBA;
        assert_eq!(ppu.read(0x2007), 212);
        // Mirroring
        ppu.reg.ppuaddr = 0x2EFC;
        ppu.write(0x2007, 128);
        ppu.reg.ppuaddr = 0x3EFC;
        assert_eq!(ppu.read(0x2007), 128);
    }
    #[test]
    fn accessing_ppudata_increments_ppuaddr() {
        let mut ppu = create_test_ppu();
        ppu.reg.ppuaddr = 0x2000;
        ppu.read(0x2007);
        assert_eq!(ppu.reg.ppuaddr, 0x2001);
        ppu.write(0x2007, 0);
        assert_eq!(ppu.reg.ppuaddr, 0x2002);
    }
    #[test]
    fn accessing_ppudata_increments_ppuaddr_by_32_when_ctrl_flag_is_set() {
        let mut ppu = create_test_ppu();
        ppu.reg.ppuctrl = PPUCtrl::new(0b0000_0100);
        ppu.reg.ppuaddr = 0x2000;
        ppu.read(0x2007);
        assert_eq!(ppu.reg.ppuaddr, 0x2020);
        ppu.write(0x2007, 0);
        assert_eq!(ppu.reg.ppuaddr, 0x2040);
    }
}
use preferences::{Preferences, Pref};
use prefreader::{parse, serialize, PrefReaderError};
use std::collections::btree_map::Iter;
use std::fs::File;
use std::io::Result as IoResult;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use tempdir::TempDir;
/// A Firefox profile directory plus lazily-loaded pref files.
pub struct Profile {
    pub path: PathBuf,
    // Keeps the temporary directory alive (and thus on disk) for the
    // lifetime of the profile when no explicit path was given.
    pub temp_dir: Option<TempDir>,
    // Lazily-loaded prefs.js; populated on first access.
    prefs: Option<PrefFile>,
    // Lazily-loaded user.js; populated on first access.
    user_prefs: Option<PrefFile>
}
impl Profile {
    /// Creates a profile rooted at `opt_path`, or in a fresh temporary
    /// directory (owned via `temp_dir`) when no path is given.
    pub fn new(opt_path: Option<&Path>) -> IoResult<Profile> {
        let mut temp_dir = None;
        let path = match opt_path {
            Some(p) => p.to_path_buf(),
            None => {
                let dir = try!(TempDir::new("rust_mozprofile"));
                let temp_path = dir.path().to_path_buf();
                temp_dir = Some(dir);
                temp_path
            }
        };
        info!("Using profile path {}", path.to_str().unwrap());
        Ok(Profile {
            path: path,
            temp_dir: temp_dir,
            prefs: None,
            user_prefs: None
        })
    }
    /// Lazily opens and caches `prefs.js` inside the profile directory.
    pub fn prefs(&mut self) -> Result<&mut PrefFile, PrefReaderError> {
        if self.prefs.is_none() {
            let mut pref_path = PathBuf::from(&self.path);
            pref_path.push("prefs.js");
            self.prefs = Some(try!(PrefFile::new(pref_path)))
        };
        // This error handling doesn't make much sense
        Ok(self.prefs.as_mut().unwrap())
    }
    /// Lazily opens and caches `user.js` inside the profile directory.
    pub fn user_prefs(&mut self) -> Result<&mut PrefFile, PrefReaderError> {
        if self.user_prefs.is_none() {
            let mut pref_path = PathBuf::from(&self.path);
            pref_path.push("user.js");
            self.user_prefs = Some(try!(PrefFile::new(pref_path)))
        };
        // This error handling doesn't make much sense
        Ok(self.user_prefs.as_mut().unwrap())
    }
}
/// An on-disk preference file (prefs.js / user.js) with its parsed contents.
pub struct PrefFile {
    path: PathBuf,
    pub prefs: Preferences
}
impl PrefFile {
    /// Loads and parses the pref file at `path`; a missing file yields an
    /// empty preference set rather than an error.
    pub fn new(path: PathBuf) -> Result<PrefFile, PrefReaderError> {
        let prefs = if !path.exists() {
            Preferences::new()
        } else {
            let mut f = try!(File::open(&path));
            let mut buf = String::with_capacity(4096);
            try!(f.read_to_string(&mut buf));
            try!(parse(buf.as_bytes()))
        };
        Ok(PrefFile {
            path: path,
            prefs: prefs
        })
    }
    /// Serializes the current preferences back to the file, truncating it.
    pub fn write(&self) -> IoResult<()> {
        let mut f = try!(File::create(&self.path));
        serialize(&self.prefs, &mut f)
    }
    /// Inserts every (name, value) pair from the slice.
    pub fn insert_slice<K>(&mut self, preferences: &[(K, Pref)])
    where K: Into<String> + Clone {
        for &(ref name, ref value) in preferences.iter() {
            self.insert((*name).clone(), (*value).clone());
        }
    }
    pub fn insert<K>(&mut self, key: K, value: Pref)
    where K: Into<String> {
        self.prefs.insert(key.into(), value);
    }
    pub fn remove(&mut self, key: &str) -> Option<Pref> {
        self.prefs.remove(key)
    }
    // NOTE(review): takes &mut self but only reads; &self would suffice —
    // confirm no caller relies on the mutable receiver before relaxing.
    pub fn get(&mut self, key: &str) -> Option<&Pref> {
        self.prefs.get(key)
    }
    pub fn contains_key(&self, key: &str) -> bool {
        self.prefs.contains_key(key)
    }
    pub fn iter(&self) -> Iter<String, Pref> {
        self.prefs.iter()
    }
}
profile: provide Debug trait for Profile and PrefFile
use preferences::{Preferences, Pref};
use prefreader::{parse, serialize, PrefReaderError};
use std::collections::btree_map::Iter;
use std::fs::File;
use std::io::Result as IoResult;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use tempdir::TempDir;
#[derive(Debug)]
/// A Firefox profile directory plus lazily-loaded pref files.
pub struct Profile {
    pub path: PathBuf,
    // Keeps the temporary directory alive (and thus on disk) for the
    // lifetime of the profile when no explicit path was given.
    pub temp_dir: Option<TempDir>,
    // Lazily-loaded prefs.js; populated on first access.
    prefs: Option<PrefFile>,
    // Lazily-loaded user.js; populated on first access.
    user_prefs: Option<PrefFile>,
}
impl Profile {
    /// Creates a profile rooted at `opt_path`, or in a fresh temporary
    /// directory (owned via `temp_dir`) when no path is given.
    pub fn new(opt_path: Option<&Path>) -> IoResult<Profile> {
        let mut temp_dir = None;
        let path = match opt_path {
            Some(p) => p.to_path_buf(),
            None => {
                let dir = try!(TempDir::new("rust_mozprofile"));
                let temp_path = dir.path().to_path_buf();
                temp_dir = Some(dir);
                temp_path
            }
        };
        info!("Using profile path {}", path.to_str().unwrap());
        Ok(Profile {
            path: path,
            temp_dir: temp_dir,
            prefs: None,
            user_prefs: None
        })
    }
    /// Lazily opens and caches `prefs.js` inside the profile directory.
    pub fn prefs(&mut self) -> Result<&mut PrefFile, PrefReaderError> {
        if self.prefs.is_none() {
            let mut pref_path = PathBuf::from(&self.path);
            pref_path.push("prefs.js");
            self.prefs = Some(try!(PrefFile::new(pref_path)))
        };
        // This error handling doesn't make much sense
        Ok(self.prefs.as_mut().unwrap())
    }
    /// Lazily opens and caches `user.js` inside the profile directory.
    pub fn user_prefs(&mut self) -> Result<&mut PrefFile, PrefReaderError> {
        if self.user_prefs.is_none() {
            let mut pref_path = PathBuf::from(&self.path);
            pref_path.push("user.js");
            self.user_prefs = Some(try!(PrefFile::new(pref_path)))
        };
        // This error handling doesn't make much sense
        Ok(self.user_prefs.as_mut().unwrap())
    }
}
/// An on-disk preferences file together with its parsed contents.
#[derive(Debug)]
pub struct PrefFile {
    /// Where the file lives; `write` recreates the file at this path.
    path: PathBuf,
    /// The parsed preference map.
    pub prefs: Preferences,
}
impl PrefFile {
pub fn new(path: PathBuf) -> Result<PrefFile, PrefReaderError> {
let prefs = if !path.exists() {
Preferences::new()
} else {
let mut f = try!(File::open(&path));
let mut buf = String::with_capacity(4096);
try!(f.read_to_string(&mut buf));
try!(parse(buf.as_bytes()))
};
Ok(PrefFile {
path: path,
prefs: prefs
})
}
pub fn write(&self) -> IoResult<()> {
let mut f = try!(File::create(&self.path));
serialize(&self.prefs, &mut f)
}
pub fn insert_slice<K>(&mut self, preferences: &[(K, Pref)])
where K: Into<String> + Clone {
for &(ref name, ref value) in preferences.iter() {
self.insert((*name).clone(), (*value).clone());
}
}
pub fn insert<K>(&mut self, key: K, value: Pref)
where K: Into<String> {
self.prefs.insert(key.into(), value);
}
pub fn remove(&mut self, key: &str) -> Option<Pref> {
self.prefs.remove(key)
}
pub fn get(&mut self, key: &str) -> Option<&Pref> {
self.prefs.get(key)
}
pub fn contains_key(&self, key: &str) -> bool {
self.prefs.contains_key(key)
}
pub fn iter(&self) -> Iter<String, Pref> {
self.prefs.iter()
}
}
|
use gl;
use std::{fmt, mem, ptr};
use std::collections::HashMap;
use std::sync::Arc;
use {Display, DisplayImpl};
/// A GL shader object, tied to the display whose context created it.
struct Shader {
    display: Arc<DisplayImpl>,
    id: gl::types::GLuint,
}
impl Drop for Shader {
    fn drop(&mut self) {
        // Copy the id so the task sent to the GL thread doesn't borrow self.
        let id = self.id.clone();
        self.display.context.exec(proc(gl, _state) {
            gl.DeleteShader(id);
        });
    }
}
/// A combination of shaders linked together.
pub struct Program {
    display: Arc<DisplayImpl>,
    // Kept so the shader objects stay alive as long as the program does.
    #[allow(dead_code)]
    shaders: Vec<Arc<Shader>>,
    id: gl::types::GLuint,
    uniforms: Arc<HashMap<String, (gl::types::GLint, gl::types::GLenum, gl::types::GLint)>> // location, type and size of each uniform, ordered by name
}
/// Error that can be triggered when creating a `Program`.
#[deriving(Clone, Show)]
pub enum ProgramCreationError {
    /// Error while compiling one of the shaders.
    CompilationError(String),
    /// Error while linking the program.
    LinkingError(String),
    /// `glCreateProgram` failed.
    ProgramCreationFailure,
    /// One of the requested shader types is not supported by the backend.
    ///
    /// Usually the case of geometry shaders.
    ShaderTypeNotSupported,
}
impl Program {
    /// Builds a new program.
    ///
    /// A program is a group of shaders linked together.
    ///
    /// # Parameters
    ///
    /// - `vertex_shader`: Source code of the vertex shader.
    /// - `fragment_shader`: Source code of the fragment shader.
    /// - `geometry_shader`: Source code of the geometry shader.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # let display: glium::Display = unsafe { std::mem::uninitialized() };
    /// # let vertex_source = ""; let fragment_source = ""; let geometry_source = "";
    /// let program = glium::Program::new(&display, vertex_source, fragment_source, Some(geometry_source));
    /// ```
    ///
    #[experimental = "The list of shaders and the result error will probably change"]
    pub fn new(display: &Display, vertex_shader: &str, fragment_shader: &str,
               geometry_shader: Option<&str>) -> Result<Program, ProgramCreationError>
    {
        // Compile every requested stage up front; try! aborts on the first
        // compilation error.
        let mut shaders_store = Vec::new();
        shaders_store.push(try!(build_shader(display, gl::VERTEX_SHADER, vertex_shader)));
        match geometry_shader {
            Some(gs) => shaders_store.push(try!(build_geometry_shader(display, gs))),
            None => ()
        }
        shaders_store.push(try!(build_shader(display, gl::FRAGMENT_SHADER, fragment_shader)));
        // Collect the raw GL ids so the task below doesn't have to capture
        // the Shader values themselves.
        let mut shaders_ids = Vec::new();
        for sh in shaders_store.iter() {
            shaders_ids.push(sh.id);
        }
        // Create, attach and link on the GL thread; the program id (or an
        // error) comes back over this channel.
        let (tx, rx) = channel();
        display.context.context.exec(proc(gl, _state) {
            unsafe {
                let id = gl.CreateProgram();
                if id == 0 {
                    tx.send(Err(ProgramCreationFailure));
                    return;
                }
                // attaching shaders
                for sh in shaders_ids.iter() {
                    gl.AttachShader(id, sh.clone());
                }
                // linking and checking for errors
                gl.LinkProgram(id);
                {
                    let mut link_success: gl::types::GLint = mem::uninitialized();
                    gl.GetProgramiv(id, gl::LINK_STATUS, &mut link_success);
                    if link_success == 0 {
                        // Report well-known GL error codes specifically;
                        // NO_ERROR falls through to the info-log path below.
                        match gl.GetError() {
                            gl::NO_ERROR => (),
                            gl::INVALID_VALUE => {
                                tx.send(Err(LinkingError(format!("glLinkProgram triggered GL_INVALID_VALUE"))));
                                return;
                            },
                            gl::INVALID_OPERATION => {
                                tx.send(Err(LinkingError(format!("glLinkProgram triggered GL_INVALID_OPERATION"))));
                                return;
                            },
                            _ => {
                                tx.send(Err(LinkingError(format!("glLinkProgram triggered an unknown error"))));
                                return;
                            }
                        };
                        // No specific GL error: fetch the human-readable
                        // program info log instead.
                        let mut error_log_size: gl::types::GLint = mem::uninitialized();
                        gl.GetProgramiv(id, gl::INFO_LOG_LENGTH, &mut error_log_size);
                        let mut error_log: Vec<u8> = Vec::with_capacity(error_log_size as uint);
                        gl.GetProgramInfoLog(id, error_log_size, &mut error_log_size, error_log.as_mut_slice().as_mut_ptr() as *mut gl::types::GLchar);
                        error_log.set_len(error_log_size as uint);
                        let msg = String::from_utf8(error_log).unwrap();
                        tx.send(Err(LinkingError(msg)));
                        return;
                    }
                }
                tx.send(Ok(id));
            }
        });
        let id = try!(rx.recv());
        // Second pass on the GL thread: reflect the program's active
        // uniforms into a name -> (location, type, size) table.
        let (tx, rx) = channel();
        display.context.context.exec(proc(gl, _state) {
            unsafe {
                // reflecting program uniforms
                let mut uniforms = HashMap::new();
                let mut active_uniforms: gl::types::GLint = mem::uninitialized();
                gl.GetProgramiv(id, gl::ACTIVE_UNIFORMS, &mut active_uniforms);
                for uniform_id in range(0, active_uniforms) {
                    // 64-byte name buffer; GL writes the actual length back
                    // into uniform_name_tmp_len.
                    let mut uniform_name_tmp: Vec<u8> = Vec::with_capacity(64);
                    let mut uniform_name_tmp_len = 63;
                    let mut data_type: gl::types::GLenum = mem::uninitialized();
                    let mut data_size: gl::types::GLint = mem::uninitialized();
                    gl.GetActiveUniform(id, uniform_id as gl::types::GLuint, uniform_name_tmp_len, &mut uniform_name_tmp_len, &mut data_size, &mut data_type, uniform_name_tmp.as_mut_slice().as_mut_ptr() as *mut gl::types::GLchar);
                    uniform_name_tmp.set_len(uniform_name_tmp_len as uint);
                    let uniform_name = String::from_utf8(uniform_name_tmp).unwrap();
                    let location = gl.GetUniformLocation(id, uniform_name.to_c_str().unwrap());
                    uniforms.insert(uniform_name, (location, data_type, data_size));
                }
                tx.send(Arc::new(uniforms));
            }
        });
        Ok(Program {
            display: display.context.clone(),
            shaders: shaders_store,
            id: id,
            uniforms: rx.recv(),
        })
    }
}
impl fmt::Show for Program {
    /// Formats as `Program #<gl id>` for debugging output.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::FormatError> {
        (format!("Program #{}", self.id)).fmt(formatter)
    }
}
/// Returns the raw GL object id of the program (crate-internal accessor).
pub fn get_program_id(program: &Program) -> gl::types::GLuint {
    program.id
}
/// Returns the reflected uniform table: name -> (location, type, size).
/// Cloning the Arc is cheap; the map itself is shared.
pub fn get_uniforms_locations(program: &Program) -> Arc<HashMap<String, (gl::types::GLint, gl::types::GLenum, gl::types::GLint)>> {
    program.uniforms.clone()
}
impl Drop for Program {
    fn drop(&mut self) {
        let id = self.id.clone();
        self.display.context.exec(proc(gl, state) {
            // Unbind first if this program is the one currently in use, so
            // the cached GL state stays consistent with reality.
            if state.program == id {
                gl.UseProgram(0);
                state.program = 0;
            }
            gl.DeleteProgram(id);
        });
    }
}
/// Builds an individual shader.
///
/// Compiles `source_code` as a shader of the given `shader_type` on the GL
/// thread, returning the wrapped shader object or a compilation error.
fn build_shader<S: ToCStr>(display: &Display, shader_type: gl::types::GLenum, source_code: S)
    -> Result<Arc<Shader>, ProgramCreationError>
{
    let source_code = source_code.to_c_str();
    let (tx, rx) = channel();
    display.context.context.exec(proc(gl, _state) {
        unsafe {
            let id = gl.CreateShader(shader_type);
            if id == 0 {
                // CreateShader returning 0 is treated as "this shader stage
                // is not available on this backend".
                tx.send(Err(ShaderTypeNotSupported));
                return;
            }
            gl.ShaderSource(id, 1, [ source_code.as_ptr() ].as_ptr(), ptr::null());
            gl.CompileShader(id);
            // checking compilation success
            let compilation_success = {
                let mut compilation_success: gl::types::GLint = mem::uninitialized();
                gl.GetShaderiv(id, gl::COMPILE_STATUS, &mut compilation_success);
                compilation_success
            };
            if compilation_success == 0 {
                // compilation error: fetch the shader info log for the message
                let mut error_log_size: gl::types::GLint = mem::uninitialized();
                gl.GetShaderiv(id, gl::INFO_LOG_LENGTH, &mut error_log_size);
                let mut error_log: Vec<u8> = Vec::with_capacity(error_log_size as uint);
                gl.GetShaderInfoLog(id, error_log_size, &mut error_log_size, error_log.as_mut_slice().as_mut_ptr() as *mut gl::types::GLchar);
                error_log.set_len(error_log_size as uint);
                let msg = String::from_utf8(error_log).unwrap();
                tx.send(Err(CompilationError(msg)));
                return;
            }
            tx.send(Ok(id));
        }
    });
    rx.recv().map(|id| {
        Arc::new(Shader {
            display: display.context.clone(),
            id: id
        })
    })
}
/// Builds a geometry shader on desktop platforms where the stage exists.
#[cfg(any(target_os = "windows", target_os = "linux", target_os = "macos"))]
fn build_geometry_shader<S: ToCStr>(display: &Display, source_code: S)
    -> Result<Arc<Shader>, ProgramCreationError>
{
    build_shader(display, gl::GEOMETRY_SHADER, source_code)
}
/// Geometry shaders are not supported by the Android backend; this stub
/// always fails with `ShaderTypeNotSupported`.
#[cfg(target_os = "android")]
fn build_geometry_shader<S: ToCStr>(display: &Display, source_code: S)
    -> Result<Arc<Shader>, ProgramCreationError>
{
    Err(ShaderTypeNotSupported)
}
No longer use Arcs for Shaders
use gl;
use std::{fmt, mem, ptr};
use std::collections::HashMap;
use std::sync::Arc;
use {Display, DisplayImpl};
/// A GL shader object, tied to the display whose context created it.
struct Shader {
    display: Arc<DisplayImpl>,
    id: gl::types::GLuint,
}
impl Drop for Shader {
    fn drop(&mut self) {
        // Copy the id so the task sent to the GL thread doesn't borrow self.
        let id = self.id.clone();
        self.display.context.exec(proc(gl, _state) {
            gl.DeleteShader(id);
        });
    }
}
/// A combination of shaders linked together.
pub struct Program {
    display: Arc<DisplayImpl>,
    // Owned directly (no Arc): the shaders stay alive exactly as long as
    // the program that linked them.
    #[allow(dead_code)]
    shaders: Vec<Shader>,
    id: gl::types::GLuint,
    uniforms: Arc<HashMap<String, (gl::types::GLint, gl::types::GLenum, gl::types::GLint)>> // location, type and size of each uniform, ordered by name
}
/// Error that can be triggered when creating a `Program`.
#[deriving(Clone, Show)]
pub enum ProgramCreationError {
    /// Error while compiling one of the shaders.
    CompilationError(String),
    /// Error while linking the program.
    LinkingError(String),
    /// `glCreateProgram` failed.
    ProgramCreationFailure,
    /// One of the requested shader types is not supported by the backend.
    ///
    /// Usually the case of geometry shaders.
    ShaderTypeNotSupported,
}
impl Program {
    /// Builds a new program.
    ///
    /// A program is a group of shaders linked together.
    ///
    /// # Parameters
    ///
    /// - `vertex_shader`: Source code of the vertex shader.
    /// - `fragment_shader`: Source code of the fragment shader.
    /// - `geometry_shader`: Source code of the geometry shader.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # let display: glium::Display = unsafe { std::mem::uninitialized() };
    /// # let vertex_source = ""; let fragment_source = ""; let geometry_source = "";
    /// let program = glium::Program::new(&display, vertex_source, fragment_source, Some(geometry_source));
    /// ```
    ///
    #[experimental = "The list of shaders and the result error will probably change"]
    pub fn new(display: &Display, vertex_shader: &str, fragment_shader: &str,
               geometry_shader: Option<&str>) -> Result<Program, ProgramCreationError>
    {
        // Compile every requested stage up front; try! aborts on the first
        // compilation error.
        let mut shaders_store = Vec::new();
        shaders_store.push(try!(build_shader(display, gl::VERTEX_SHADER, vertex_shader)));
        match geometry_shader {
            Some(gs) => shaders_store.push(try!(build_geometry_shader(display, gs))),
            None => ()
        }
        shaders_store.push(try!(build_shader(display, gl::FRAGMENT_SHADER, fragment_shader)));
        // Collect the raw GL ids so the task below doesn't have to capture
        // the Shader values themselves.
        let mut shaders_ids = Vec::new();
        for sh in shaders_store.iter() {
            shaders_ids.push(sh.id);
        }
        // Create, attach and link on the GL thread; the program id (or an
        // error) comes back over this channel.
        let (tx, rx) = channel();
        display.context.context.exec(proc(gl, _state) {
            unsafe {
                let id = gl.CreateProgram();
                if id == 0 {
                    tx.send(Err(ProgramCreationFailure));
                    return;
                }
                // attaching shaders
                for sh in shaders_ids.iter() {
                    gl.AttachShader(id, sh.clone());
                }
                // linking and checking for errors
                gl.LinkProgram(id);
                {
                    let mut link_success: gl::types::GLint = mem::uninitialized();
                    gl.GetProgramiv(id, gl::LINK_STATUS, &mut link_success);
                    if link_success == 0 {
                        // Report well-known GL error codes specifically;
                        // NO_ERROR falls through to the info-log path below.
                        match gl.GetError() {
                            gl::NO_ERROR => (),
                            gl::INVALID_VALUE => {
                                tx.send(Err(LinkingError(format!("glLinkProgram triggered GL_INVALID_VALUE"))));
                                return;
                            },
                            gl::INVALID_OPERATION => {
                                tx.send(Err(LinkingError(format!("glLinkProgram triggered GL_INVALID_OPERATION"))));
                                return;
                            },
                            _ => {
                                tx.send(Err(LinkingError(format!("glLinkProgram triggered an unknown error"))));
                                return;
                            }
                        };
                        // No specific GL error: fetch the human-readable
                        // program info log instead.
                        let mut error_log_size: gl::types::GLint = mem::uninitialized();
                        gl.GetProgramiv(id, gl::INFO_LOG_LENGTH, &mut error_log_size);
                        let mut error_log: Vec<u8> = Vec::with_capacity(error_log_size as uint);
                        gl.GetProgramInfoLog(id, error_log_size, &mut error_log_size, error_log.as_mut_slice().as_mut_ptr() as *mut gl::types::GLchar);
                        error_log.set_len(error_log_size as uint);
                        let msg = String::from_utf8(error_log).unwrap();
                        tx.send(Err(LinkingError(msg)));
                        return;
                    }
                }
                tx.send(Ok(id));
            }
        });
        let id = try!(rx.recv());
        // Second pass on the GL thread: reflect the program's active
        // uniforms into a name -> (location, type, size) table.
        let (tx, rx) = channel();
        display.context.context.exec(proc(gl, _state) {
            unsafe {
                // reflecting program uniforms
                let mut uniforms = HashMap::new();
                let mut active_uniforms: gl::types::GLint = mem::uninitialized();
                gl.GetProgramiv(id, gl::ACTIVE_UNIFORMS, &mut active_uniforms);
                for uniform_id in range(0, active_uniforms) {
                    // 64-byte name buffer; GL writes the actual length back
                    // into uniform_name_tmp_len.
                    let mut uniform_name_tmp: Vec<u8> = Vec::with_capacity(64);
                    let mut uniform_name_tmp_len = 63;
                    let mut data_type: gl::types::GLenum = mem::uninitialized();
                    let mut data_size: gl::types::GLint = mem::uninitialized();
                    gl.GetActiveUniform(id, uniform_id as gl::types::GLuint, uniform_name_tmp_len, &mut uniform_name_tmp_len, &mut data_size, &mut data_type, uniform_name_tmp.as_mut_slice().as_mut_ptr() as *mut gl::types::GLchar);
                    uniform_name_tmp.set_len(uniform_name_tmp_len as uint);
                    let uniform_name = String::from_utf8(uniform_name_tmp).unwrap();
                    let location = gl.GetUniformLocation(id, uniform_name.to_c_str().unwrap());
                    uniforms.insert(uniform_name, (location, data_type, data_size));
                }
                tx.send(Arc::new(uniforms));
            }
        });
        Ok(Program {
            display: display.context.clone(),
            shaders: shaders_store,
            id: id,
            uniforms: rx.recv(),
        })
    }
}
impl fmt::Show for Program {
    /// Formats as `Program #<gl id>` for debugging output.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::FormatError> {
        (format!("Program #{}", self.id)).fmt(formatter)
    }
}
/// Returns the raw GL object id of the program (crate-internal accessor).
pub fn get_program_id(program: &Program) -> gl::types::GLuint {
    program.id
}
/// Returns the reflected uniform table: name -> (location, type, size).
/// Cloning the Arc is cheap; the map itself is shared.
pub fn get_uniforms_locations(program: &Program) -> Arc<HashMap<String, (gl::types::GLint, gl::types::GLenum, gl::types::GLint)>> {
    program.uniforms.clone()
}
impl Drop for Program {
    fn drop(&mut self) {
        let id = self.id.clone();
        self.display.context.exec(proc(gl, state) {
            // Unbind first if this program is the one currently in use, so
            // the cached GL state stays consistent with reality.
            if state.program == id {
                gl.UseProgram(0);
                state.program = 0;
            }
            gl.DeleteProgram(id);
        });
    }
}
/// Builds an individual shader.
///
/// Compiles `source_code` as a shader of the given `shader_type` on the GL
/// thread, returning the shader object or a compilation error.
fn build_shader<S: ToCStr>(display: &Display, shader_type: gl::types::GLenum, source_code: S)
    -> Result<Shader, ProgramCreationError>
{
    let source_code = source_code.to_c_str();
    let (tx, rx) = channel();
    display.context.context.exec(proc(gl, _state) {
        unsafe {
            let id = gl.CreateShader(shader_type);
            if id == 0 {
                // CreateShader returning 0 is treated as "this shader stage
                // is not available on this backend".
                tx.send(Err(ShaderTypeNotSupported));
                return;
            }
            gl.ShaderSource(id, 1, [ source_code.as_ptr() ].as_ptr(), ptr::null());
            gl.CompileShader(id);
            // checking compilation success
            let compilation_success = {
                let mut compilation_success: gl::types::GLint = mem::uninitialized();
                gl.GetShaderiv(id, gl::COMPILE_STATUS, &mut compilation_success);
                compilation_success
            };
            if compilation_success == 0 {
                // compilation error: fetch the shader info log for the message
                let mut error_log_size: gl::types::GLint = mem::uninitialized();
                gl.GetShaderiv(id, gl::INFO_LOG_LENGTH, &mut error_log_size);
                let mut error_log: Vec<u8> = Vec::with_capacity(error_log_size as uint);
                gl.GetShaderInfoLog(id, error_log_size, &mut error_log_size, error_log.as_mut_slice().as_mut_ptr() as *mut gl::types::GLchar);
                error_log.set_len(error_log_size as uint);
                let msg = String::from_utf8(error_log).unwrap();
                tx.send(Err(CompilationError(msg)));
                return;
            }
            tx.send(Ok(id));
        }
    });
    rx.recv().map(|id| {
        Shader {
            display: display.context.clone(),
            id: id
        }
    })
}
/// Builds a geometry shader on desktop platforms where the stage exists.
#[cfg(any(target_os = "windows", target_os = "linux", target_os = "macos"))]
fn build_geometry_shader<S: ToCStr>(display: &Display, source_code: S)
    -> Result<Shader, ProgramCreationError>
{
    build_shader(display, gl::GEOMETRY_SHADER, source_code)
}
/// Geometry shaders are not supported by the Android backend; this stub
/// always fails with `ShaderTypeNotSupported`.
#[cfg(target_os = "android")]
fn build_geometry_shader<S: ToCStr>(display: &Display, source_code: S)
    -> Result<Shader, ProgramCreationError>
{
    Err(ShaderTypeNotSupported)
}
|
use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error};
use crate::scopeguard::guard;
use crate::TryReserveError;
use core::alloc::Layout;
use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
use core::ptr::NonNull;
cfg_if! {
// Use the SSE2 implementation if possible: it allows us to scan 16 buckets
// at once instead of 8. We don't bother with AVX since it would require
// runtime dispatch and wouldn't gain us much anyways: the probability of
// finding a match drops off drastically after the first few buckets.
//
// I attempted an implementation on ARM using NEON instructions, but it
// turns out that most NEON instructions have multi-cycle latency, which in
// the end outweighs any gains over the generic implementation.
if #[cfg(all(
target_feature = "sse2",
any(target_arch = "x86", target_arch = "x86_64"),
not(miri)
))] {
mod sse2;
use sse2 as imp;
} else {
#[path = "generic.rs"]
mod generic;
use generic as imp;
}
}
mod bitmask;
use self::bitmask::BitMask;
use self::imp::Group;
// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
/// Stable fallback for `core::intrinsics::likely`: a plain identity
/// function, so the branch hint is a no-op without the "nightly" feature.
#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
    b
}
/// Stable fallback for `core::intrinsics::unlikely`: a plain identity
/// function, so the branch hint is a no-op without the "nightly" feature.
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
    b
}
/// Element distance between two pointers, nightly version: uses the
/// `offset_from` pointer method. Assumes `to >= from` and both point into
/// the same allocation (per `offset_from`'s contract).
#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
/// Element distance between two pointers, stable version: computed manually
/// from the byte addresses. Assumes `to >= from` and both point into the
/// same allocation of `T`s.
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    let byte_offset = to as usize - from as usize;
    byte_offset / mem::size_of::<T>()
}
/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    /// Report failures to the caller as a `TryReserveError`.
    Fallible,
    /// Panic (capacity overflow) or abort (allocation failure) instead.
    Infallible,
}
impl Fallibility {
    /// Error to return on capacity overflow.
    /// Note the Infallible arm diverges (panics) rather than returning.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
        }
    }
    /// Error to return on allocation error.
    /// Note the Infallible arm diverges (`handle_alloc_error` never returns).
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::AllocError { layout },
            Fallibility::Infallible => handle_alloc_error(layout),
        }
    }
}
/// Control byte value for an empty bucket.
/// Both special values have the top bit set; the low bit distinguishes
/// EMPTY from DELETED (see `special_is_empty`).
const EMPTY: u8 = 0b1111_1111;
/// Control byte value for a deleted bucket (top bit set, low bit clear).
const DELETED: u8 = 0b1000_0000;
/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    // For a u8, "top bit clear" is exactly "value below 0x80".
    ctrl < 0x80
}
/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    // For a u8, "top bit set" is exactly "value at least 0x80".
    ctrl >= 0x80
}
/// Checks whether a special control value is EMPTY (just check 1 bit).
/// Callers must only pass special (non-full) bytes: EMPTY (0xFF, low bit
/// set) or DELETED (0x80, low bit clear).
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    ctrl & 0x01 != 0
}
/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    // (The truncating as-cast is intentional; see the clippy allow above.)
    hash as usize
}
/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Take the top 7 bits of the *usable* part of the hash. Hashers that
    // produce a usize (e.g. FxHash) leave the upper 32 bits zero on 32-bit
    // targets, so only count min(size_of::<usize>(), size_of::<u64>()) bytes.
    let effective_bytes = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
    let shift = effective_bytes * 8 - 7;
    ((hash >> shift) & 0x7f) as u8 // truncation to the low 7 bits is intended
}
/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    /// Mask used to wrap `pos` back into the table (bucket count - 1).
    bucket_mask: usize,
    /// Current position in the table.
    pos: usize,
    /// Current jump size; grows by Group::WIDTH on every step.
    stride: usize,
}
impl Iterator for ProbeSeq {
    type Item = usize;
    #[inline]
    fn next(&mut self) -> Option<usize> {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= self.bucket_mask,
            "Went past end of probe sequence"
        );
        let result = self.pos;
        // Triangular probing: grow the jump by one group width each step,
        // then wrap to the (power-of-two) table size with the mask.
        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= self.bucket_mask;
        Some(result)
    }
}
/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    debug_assert_ne!(cap, 0);
    // Small tables keep at least one empty bucket so that unsuccessful
    // lookups are guaranteed to terminate. A 2-bucket table could only hold
    // a single element, so jump straight to 4 buckets (holds 3) or 8.
    if cap < 8 {
        return Some(if cap < 4 { 4 } else { 8 });
    }
    // Larger tables require 1/8 of the buckets to be empty (87.5% load).
    //
    // Be careful when modifying this: calculate_layout relies on the
    // overflow check here.
    let with_headroom = cap.checked_mul(8)? / 7;
    // checked_mul caught any overflow, and next_power_of_two cleans up the
    // division's rounding; it cannot itself overflow after that division.
    Some(with_headroom.next_power_of_two())
}
/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[cfg_attr(feature = "inline-more", inline)]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    // Remember the mask is one less than the bucket count.
    if bucket_mask >= 8 {
        // Larger tables reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    } else {
        // Tables with 1/2/4/8 buckets always reserve exactly one empty slot.
        bucket_mask
    }
}
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());
    // Array of buckets
    let data = Layout::array::<T>(buckets).ok()?;
    // Array of control bytes. This must be aligned to the group size.
    //
    // We add `Group::WIDTH` control bytes at the end of the array which
    // replicate the bytes at the start of the array and thus avoids the need to
    // perform bounds-checking while probing.
    //
    // There is no possible overflow here since buckets is a power of two and
    // Group::WIDTH is a small number.
    let ctrl = unsafe { Layout::from_size_align_unchecked(buckets + Group::WIDTH, Group::WIDTH) };
    // `extend` yields (combined layout, offset of `ctrl` within it).
    data.extend(ctrl).ok()
}
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(not(feature = "nightly"))]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());
    // Manual layout calculation since Layout methods are not yet stable.
    let ctrl_align = usize::max(mem::align_of::<T>(), Group::WIDTH);
    // Round the data-array size up to the control-byte alignment using the
    // standard add-then-mask align-up trick, checking overflow at each step.
    let ctrl_offset = mem::size_of::<T>()
        .checked_mul(buckets)?
        .checked_add(ctrl_align - 1)?
        & !(ctrl_align - 1);
    // Total size: aligned data array plus buckets + Group::WIDTH control
    // bytes (the trailing group mirrors the first one).
    let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
    Some((
        unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
        ctrl_offset,
    ))
}
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // Actually this is a pointer to the element one PAST this bucket, not to
    // the element itself; this keeps the pointer-arithmetic invariants
    // simple, whereas keeping a direct pointer to the element introduces
    // difficulty (see from_base_index/as_ptr below).
    // Using `NonNull` for variance and niche layout
    ptr: NonNull<T>,
}
// SAFETY: This Send impl is needed for rayon support. This is safe since
// Bucket is never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}
impl<T> Clone for Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        // Plain pointer copy: cloning a Bucket does NOT clone the element.
        Self { ptr: self.ptr }
    }
}
impl<T> Bucket<T> {
    /// Builds a bucket from the table's data-end pointer and a bucket index.
    /// Data pointers grow downwards from `base`, so this subtracts the index;
    /// the stored pointer is one past the element (see the struct comment).
    /// For ZSTs the "pointer" simply encodes index + 1.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            // won't overflow because index must be less than length
            (index + 1) as *mut T
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    /// Inverse of `from_base_index`: recovers the bucket's index in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        if mem::size_of::<T>() == 0 {
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }
    /// Returns a raw pointer to the element itself (stored pointer minus one).
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ptr(&self) -> *mut T {
        if mem::size_of::<T>() == 0 {
            // Just return an arbitrary ZST pointer which is properly aligned
            mem::align_of::<T>() as *mut T
        } else {
            self.ptr.as_ptr().sub(1)
        }
    }
    /// Bucket `offset` positions further along the table. Indices grow while
    /// data pointers shrink, hence the subtraction for non-ZSTs.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            (self.ptr.as_ptr() as usize + offset) as *mut T
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    /// Drops the element in place without freeing its slot.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }
    /// Moves the element out by bitwise copy; the slot must afterwards be
    /// treated as uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }
    /// Writes a value into the slot without dropping any previous contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }
    /// Shared reference with a caller-chosen lifetime.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }
    /// Mutable reference with a caller-chosen lifetime.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }
    /// Copies the element from `other` into this slot; the two buckets must
    /// not overlap.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}
/// A raw hash table with an unsafe API.
pub struct RawTable<T> {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,
    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
    //                                ^ points here
    // i.e. elements are stored BEFORE the control bytes in one allocation.
    ctrl: NonNull<u8>,
    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,
    // Number of elements in the table, only really used by len()
    items: usize,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}
impl<T> RawTable<T> {
/// Creates a new empty hash table without allocating any memory.
///
/// In effect this returns a table with exactly 1 bucket. However we can
/// leave the data pointer dangling since that bucket is never written to
/// due to our load factor forcing us to always have at least 1 free bucket.
#[cfg_attr(feature = "inline-more", inline)]
pub fn new() -> Self {
    Self {
        // Be careful to cast the entire slice to a raw pointer.
        // (Group::static_empty presumably supplies a static all-EMPTY
        // control block, so no heap allocation is needed here.)
        ctrl: unsafe { NonNull::new_unchecked(Group::static_empty().as_ptr() as *mut u8) },
        bucket_mask: 0,
        items: 0,
        growth_left: 0,
        marker: PhantomData,
    }
}
/// Allocates a new hash table with the given number of buckets.
///
/// The control bytes are left uninitialized.
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn new_uninitialized(
    buckets: usize,
    // Renamed from the misspelled `fallability`; private fn, callers are
    // positional so this is interface-compatible.
    fallibility: Fallibility,
) -> Result<Self, TryReserveError> {
    debug_assert!(buckets.is_power_of_two());
    // One allocation holds both the element array and the trailing control
    // bytes; ctrl_offset is where the control bytes begin.
    let (layout, ctrl_offset) =
        calculate_layout::<T>(buckets).ok_or_else(|| fallibility.capacity_overflow())?;
    let ptr = NonNull::new(alloc(layout)).ok_or_else(|| fallibility.alloc_err(layout))?;
    let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
    Ok(Self {
        ctrl,
        bucket_mask: buckets - 1,
        items: 0,
        growth_left: bucket_mask_to_capacity(buckets - 1),
        marker: PhantomData,
    })
}
/// Attempts to allocate a new hash table with at least enough capacity
/// for inserting the given number of elements without reallocating.
fn fallible_with_capacity(
    capacity: usize,
    // Renamed from the misspelled `fallability`; private fn, callers are
    // positional so this is interface-compatible.
    fallibility: Fallibility,
) -> Result<Self, TryReserveError> {
    if capacity == 0 {
        // The canonical empty table never allocates.
        Ok(Self::new())
    } else {
        unsafe {
            let buckets =
                capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
            let result = Self::new_uninitialized(buckets, fallibility)?;
            // Mark every control byte (including the mirrored trailing
            // group) as EMPTY before use.
            result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
            Ok(result)
        }
    }
}
/// Attempts to allocate a new hash table with at least enough capacity
/// for inserting the given number of elements without reallocating.
/// Only exposed with the "raw" feature.
#[cfg(feature = "raw")]
pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
    Self::fallible_with_capacity(capacity, Fallibility::Fallible)
}
/// Allocates a new hash table with at least enough capacity for inserting
/// the given number of elements without reallocating.
pub fn with_capacity(capacity: usize) -> Self {
    // SAFETY of unreachable_unchecked: with Fallibility::Infallible every
    // failure path diverges (capacity_overflow panics, alloc_err calls
    // handle_alloc_error), so Err can never actually be returned here.
    Self::fallible_with_capacity(capacity, Fallibility::Infallible)
        .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() })
}
/// Deallocates the table without dropping any entries.
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn free_buckets(&mut self) {
    // The layout computation succeeded at allocation time, so it cannot
    // fail now for the same bucket count.
    let (layout, ctrl_offset) =
        calculate_layout::<T>(self.buckets()).unwrap_or_else(|| hint::unreachable_unchecked());
    // `ctrl` points into the middle of the allocation; step back by
    // ctrl_offset to recover the allocation's start.
    dealloc(self.ctrl.as_ptr().sub(ctrl_offset), layout);
}
/// Returns pointer to one past last element of data table.
/// The control bytes start right after the data array, so `ctrl` doubles
/// as the one-past-the-end data pointer.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn data_end(&self) -> NonNull<T> {
    NonNull::new_unchecked(self.ctrl.as_ptr() as *mut T)
}
/// Returns pointer to start of data table.
/// wrapping_sub keeps this well-defined even for the unallocated empty
/// table, where the result must not be dereferenced.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
pub unsafe fn data_start(&self) -> *mut T {
    self.data_end().as_ptr().wrapping_sub(self.buckets())
}
/// Returns the index of a bucket from a `Bucket`.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
    bucket.to_base_index(self.data_end())
}
/// Returns a pointer to a control byte.
/// `index` may go up to num_ctrl_bytes(), i.e. into the mirrored trailing
/// group, not just up to the bucket count.
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn ctrl(&self, index: usize) -> *mut u8 {
    debug_assert!(index < self.num_ctrl_bytes());
    self.ctrl.as_ptr().add(index)
}
/// Returns a pointer to an element in the table.
/// The table must not be the unallocated empty singleton (mask 0).
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
    debug_assert_ne!(self.bucket_mask, 0);
    debug_assert!(index < self.buckets());
    Bucket::from_base_index(self.data_end(), index)
}
/// Erases an element from the table without dropping it.
/// Decides between marking the slot EMPTY (reclaims growth budget) and
/// DELETED (a tombstone that keeps probe chains intact).
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
    let index = self.bucket_index(item);
    debug_assert!(is_full(*self.ctrl(index)));
    let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
    let empty_before = Group::load(self.ctrl(index_before)).match_empty();
    let empty_after = Group::load(self.ctrl(index)).match_empty();
    // If we are inside a continuous block of Group::WIDTH full or deleted
    // cells then a probe window may have seen a full block when trying to
    // insert. We therefore need to keep that block non-empty so that
    // lookups will continue searching to the next probe window.
    //
    // Note that in this context `leading_zeros` refers to the bytes at the
    // end of a group, while `trailing_zeros` refers to the bytes at the
    // beginning of a group.
    let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
        DELETED
    } else {
        self.growth_left += 1;
        EMPTY
    };
    self.set_ctrl(index, ctrl);
    self.items -= 1;
}
/// Erases an element from the table, dropping it in place.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
pub unsafe fn erase(&mut self, item: Bucket<T>) {
// Erase the element from the table first since drop might panic.
self.erase_no_drop(&item);
item.drop();
}
/// Removes an element from the table, returning it.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
self.erase_no_drop(&item);
item.read()
}
/// Returns an iterator for a probe sequence on the table.
///
/// This iterator never terminates, but is guaranteed to visit each bucket
/// group exactly once. The loop using `probe_seq` must terminate upon
/// reaching a group containing an empty bucket.
#[cfg_attr(feature = "inline-more", inline)]
fn probe_seq(&self, hash: u64) -> ProbeSeq {
    // Probing starts at the H1 bits of the hash, wrapped into the table.
    let start = h1(hash) & self.bucket_mask;
    ProbeSeq {
        pos: start,
        stride: 0,
        bucket_mask: self.bucket_mask,
    }
}
/// Sets a control byte, and possibly also the replicated control byte at
/// the end of the array.
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
    // Replicate the first Group::WIDTH control bytes at the end of
    // the array without using a branch:
    // - If index >= Group::WIDTH then index == index2.
    // - Otherwise index2 == self.bucket_mask + 1 + index.
    //
    // The very last replicated control byte is never actually read because
    // we mask the initial index for unaligned loads, but we write it
    // anyways because it makes the set_ctrl implementation simpler.
    //
    // If there are fewer buckets than Group::WIDTH then this code will
    // replicate the buckets at the end of the trailing group. For example
    // with 2 buckets and a group size of 4, the control bytes will look
    // like this:
    //
    //     Real    |             Replicated
    // ---------------------------------------------
    // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
    // ---------------------------------------------
    //
    // The wrapping_sub/mask trick below maps index < Group::WIDTH to its
    // mirror position past the last bucket, and any other index to itself.
    let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
    *self.ctrl(index) = ctrl;
    *self.ctrl(index2) = ctrl;
}
/// Searches for an empty or deleted bucket which is suitable for inserting
/// a new element.
///
/// There must be at least 1 empty bucket in the table.
#[cfg_attr(feature = "inline-more", inline)]
fn find_insert_slot(&self, hash: u64) -> usize {
    for pos in self.probe_seq(hash) {
        unsafe {
            let group = Group::load(self.ctrl(pos));
            if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
                let result = (pos + bit) & self.bucket_mask;
                // In tables smaller than the group width, trailing control
                // bytes outside the range of the table are filled with
                // EMPTY entries. These will unfortunately trigger a
                // match, but once masked may point to a full bucket that
                // is already occupied. We detect this situation here and
                // perform a second scan starting at the beginning of the
                // table. This second scan is guaranteed to find an empty
                // slot (due to the load factor) before hitting the trailing
                // control bytes (containing EMPTY).
                if unlikely(is_full(*self.ctrl(result))) {
                    debug_assert!(self.bucket_mask < Group::WIDTH);
                    debug_assert_ne!(pos, 0);
                    return Group::load_aligned(self.ctrl(0))
                        .match_empty_or_deleted()
                        .lowest_set_bit_nonzero();
                } else {
                    return result;
                }
            }
        }
    }
    // probe_seq never returns: the probe sequence is infinite, so the only
    // way out of the loop above is one of the `return`s.
    unreachable!();
}
/// Marks all table buckets as empty without dropping their contents.
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear_no_drop(&mut self) {
    // Reset the bookkeeping unconditionally; the control bytes are only
    // touched when a real allocation exists.
    self.items = 0;
    self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
    if self.is_empty_singleton() {
        // The shared empty singleton owns no writable control array.
        return;
    }
    // SAFETY: a non-singleton table always has `num_ctrl_bytes` control
    // bytes allocated, so this fill stays in bounds.
    unsafe {
        self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
    }
}
/// Removes all elements from the table without freeing the backing memory.
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear(&mut self) {
    // Ensure that the table is reset even if one of the drops panics: the
    // scope guard runs `clear_no_drop` on both normal exit and unwind.
    let self_ = guard(self, |self_| self_.clear_no_drop());
    if mem::needs_drop::<T>() {
        // SAFETY: `iter()` yields only full buckets, and each is dropped
        // exactly once before the guard wipes the control bytes.
        unsafe {
            for item in self_.iter() {
                item.drop();
            }
        }
    }
}
/// Shrinks the table to fit `max(self.len(), min_size)` elements.
#[cfg_attr(feature = "inline-more", inline)]
pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
    // Calculate the minimal number of elements that we need to reserve
    // space for.
    let min_size = usize::max(self.items, min_size);
    if min_size == 0 {
        *self = Self::new();
        return;
    }
    // Calculate the number of buckets that we need for this number of
    // elements. If the calculation overflows then the requested bucket
    // count must be larger than what we have right now, and nothing needs
    // to be done.
    let min_buckets = match capacity_to_buckets(min_size) {
        Some(buckets) => buckets,
        None => return,
    };
    // If we have more buckets than we need, shrink the table.
    if min_buckets < self.buckets() {
        // Fast path if the table is empty
        if self.items == 0 {
            *self = Self::with_capacity(min_size)
        } else {
            // The infallible resize is asserted never to return Err, hence
            // the unreachable hint on the error path.
            self.resize(min_size, hasher, Fallibility::Infallible)
                .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() });
        }
    }
}
/// Ensures that at least `additional` items can be inserted into the table
/// without reallocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
    // Fast path: the requested headroom already fits.
    if additional <= self.growth_left {
        return;
    }
    // SAFETY: the infallible slow path is asserted never to return Err
    // (allocation failure aborts), so the Err arm is untaken.
    if self
        .reserve_rehash(additional, hasher, Fallibility::Infallible)
        .is_err()
    {
        unsafe { hint::unreachable_unchecked() }
    }
}
/// Tries to ensure that at least `additional` items can be inserted into
/// the table without reallocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn try_reserve(
    &mut self,
    additional: usize,
    hasher: impl Fn(&T) -> u64,
) -> Result<(), TryReserveError> {
    // Fast path: nothing to grow.
    if additional <= self.growth_left {
        return Ok(());
    }
    self.reserve_rehash(additional, hasher, Fallibility::Fallible)
}
/// Out-of-line slow path for `reserve` and `try_reserve`.
#[cold]
#[inline(never)]
fn reserve_rehash(
    &mut self,
    additional: usize,
    hasher: impl Fn(&T) -> u64,
    fallibility: Fallibility,
) -> Result<(), TryReserveError> {
    // Overflow while computing the target size is reported through the
    // caller-selected fallibility mode.
    let new_items = self
        .items
        .checked_add(additional)
        .ok_or_else(|| fallibility.capacity_overflow())?;
    let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
    if new_items <= full_capacity / 2 {
        // Most of the nominal capacity is locked up in DELETED entries;
        // reclaim it in place without touching the allocation.
        self.rehash_in_place(hasher);
        return Ok(());
    }
    // Otherwise grow to at least one past the current full capacity so a
    // delete-heavy workload does not churn through frequent rehashes.
    self.resize(
        usize::max(new_items, full_capacity + 1),
        hasher,
        fallibility,
    )
}
/// Rehashes the contents of the table in place (i.e. without changing the
/// allocation).
///
/// If `hasher` panics then some of the table's contents may be lost.
fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
    unsafe {
        // Bulk convert all full control bytes to DELETED, and all DELETED
        // control bytes to EMPTY. This effectively frees up all buckets
        // containing a DELETED entry.
        for i in (0..self.buckets()).step_by(Group::WIDTH) {
            let group = Group::load_aligned(self.ctrl(i));
            let group = group.convert_special_to_empty_and_full_to_deleted();
            group.store_aligned(self.ctrl(i));
        }
        // Fix up the trailing control bytes. See the comments in set_ctrl
        // for the handling of tables smaller than the group width.
        if self.buckets() < Group::WIDTH {
            self.ctrl(0)
                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
        } else {
            self.ctrl(0)
                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
        }
        // If the hash function panics then properly clean up any elements
        // that we haven't rehashed yet. We unfortunately can't preserve the
        // element since we lost their hash and have no way of recovering it
        // without risking another panic.
        let mut guard = guard(self, |self_| {
            if mem::needs_drop::<T>() {
                for i in 0..self_.buckets() {
                    if *self_.ctrl(i) == DELETED {
                        self_.set_ctrl(i, EMPTY);
                        self_.bucket(i).drop();
                        self_.items -= 1;
                    }
                }
            }
            self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
        });
        // At this point, DELETED elements are elements that we haven't
        // rehashed yet. Find them and re-insert them at their ideal
        // position.
        'outer: for i in 0..guard.buckets() {
            if *guard.ctrl(i) != DELETED {
                continue;
            }
            'inner: loop {
                // Hash the current item
                let item = guard.bucket(i);
                let hash = hasher(item.as_ref());
                // Search for a suitable place to put it
                let new_i = guard.find_insert_slot(hash);
                // Probing works by scanning through all of the control
                // bytes in groups, which may not be aligned to the group
                // size. If both the new and old position fall within the
                // same unaligned group, then there is no benefit in moving
                // it and we can just continue to the next item.
                let probe_index = |pos: usize| {
                    (pos.wrapping_sub(guard.probe_seq(hash).pos) & guard.bucket_mask)
                        / Group::WIDTH
                };
                if likely(probe_index(i) == probe_index(new_i)) {
                    // Mark the slot as full again (H2 byte) and move on.
                    guard.set_ctrl(i, h2(hash));
                    continue 'outer;
                }
                // We are moving the current item to a new position. Write
                // our H2 to the control byte of the new position.
                let prev_ctrl = *guard.ctrl(new_i);
                guard.set_ctrl(new_i, h2(hash));
                if prev_ctrl == EMPTY {
                    // If the target slot is empty, simply move the current
                    // element into the new slot and clear the old control
                    // byte.
                    guard.set_ctrl(i, EMPTY);
                    guard.bucket(new_i).copy_from_nonoverlapping(&item);
                    continue 'outer;
                } else {
                    // If the target slot is occupied, swap the two elements
                    // and then continue processing the element that we just
                    // swapped into the old slot.
                    debug_assert_eq!(prev_ctrl, DELETED);
                    mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
                    continue 'inner;
                }
            }
        }
        // All DELETED markers are gone, so the full capacity is available
        // again; disarm the panic guard.
        guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
        mem::forget(guard);
    }
}
/// Allocates a new table of a different size and moves the contents of the
/// current table into it.
fn resize(
    &mut self,
    capacity: usize,
    hasher: impl Fn(&T) -> u64,
    // NOTE: misspelling of "fallibility" kept to match the existing code.
    fallability: Fallibility,
) -> Result<(), TryReserveError> {
    unsafe {
        debug_assert!(self.items <= capacity);
        // Allocate and initialize the new table.
        let mut new_table = Self::fallible_with_capacity(capacity, fallability)?;
        new_table.growth_left -= self.items;
        new_table.items = self.items;
        // The hash function may panic, in which case we simply free the new
        // table without dropping any elements that may have been copied into
        // it.
        //
        // This guard is also used to free the old table on success, see
        // the comment at the bottom of this function.
        let mut new_table = guard(ManuallyDrop::new(new_table), |new_table| {
            if !new_table.is_empty_singleton() {
                new_table.free_buckets();
            }
        });
        // Copy all elements to the new table.
        for item in self.iter() {
            // This may panic.
            let hash = hasher(item.as_ref());
            // We can use a simpler version of insert() here since:
            // - there are no DELETED entries.
            // - we know there is enough space in the table.
            // - all elements are unique.
            let index = new_table.find_insert_slot(hash);
            new_table.set_ctrl(index, h2(hash));
            new_table.bucket(index).copy_from_nonoverlapping(&item);
        }
        // We successfully copied all elements without panicking. Now replace
        // self with the new table. The old table will have its memory freed but
        // the items will not be dropped (since they have been moved into the
        // new table). After the swap, the guard holds the *old* table and
        // frees its buckets when it runs at the end of this scope.
        mem::swap(self, &mut new_table);
        Ok(())
    }
}
/// Inserts a new element into the table.
///
/// This does not check if the given element already exists in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
    unsafe {
        let mut index = self.find_insert_slot(hash);
        // We can avoid growing the table once we have reached our load
        // factor if we are replacing a tombstone. This works since the
        // number of EMPTY slots does not change in this case.
        let old_ctrl = *self.ctrl(index);
        if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
            // Out of budget and not reusing a tombstone: grow, then
            // re-probe since the table layout has changed.
            self.reserve(1, hasher);
            index = self.find_insert_slot(hash);
        }
        let bucket = self.bucket(index);
        // Only consuming an EMPTY slot reduces the growth budget;
        // overwriting a DELETED tombstone does not.
        self.growth_left -= special_is_empty(old_ctrl) as usize;
        self.set_ctrl(index, h2(hash));
        bucket.write(value);
        self.items += 1;
        bucket
    }
}
/// Inserts a new element into the table, without growing the table.
///
/// There must be enough space in the table to insert the new element.
///
/// This does not check if the given element already exists in the table.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "rustc-internal-api")]
pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
    unsafe {
        let index = self.find_insert_slot(hash);
        let bucket = self.bucket(index);
        // If we are replacing a DELETED entry then we don't need to update
        // the load counter.
        let old_ctrl = *self.ctrl(index);
        self.growth_left -= special_is_empty(old_ctrl) as usize;
        self.set_ctrl(index, h2(hash));
        bucket.write(value);
        self.items += 1;
        bucket
    }
}
/// Searches for an element in the table.
#[inline]
pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
    unsafe {
        for pos in self.probe_seq(hash) {
            let group = Group::load(self.ctrl(pos));
            // Check every slot in the group whose H2 byte matches, then
            // confirm with the caller-supplied equality test.
            for bit in group.match_byte(h2(hash)) {
                let index = (pos + bit) & self.bucket_mask;
                let bucket = self.bucket(index);
                if likely(eq(bucket.as_ref())) {
                    return Some(bucket);
                }
            }
            // An EMPTY slot in the group means the element cannot be in a
            // later probe window (insertion would have stopped here too).
            if likely(group.match_empty().any_bit_set()) {
                return None;
            }
        }
    }
    // probe_seq never returns: the probe sequence is infinite, so the loop
    // can only be left through one of the `return`s above.
    unreachable!();
}
/// Returns the number of elements the map can hold without reallocating.
///
/// This number is a lower bound; the table might be able to hold
/// more, but is guaranteed to be able to hold at least this many.
#[cfg_attr(feature = "inline-more", inline)]
pub fn capacity(&self) -> usize {
    // Current occupancy plus the remaining growth budget.
    self.items + self.growth_left
}
/// Returns the number of elements in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn len(&self) -> usize {
    self.items
}
/// Returns the number of buckets in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn buckets(&self) -> usize {
    // The bucket count is always a power of two; the mask is that count
    // minus one.
    self.bucket_mask + 1
}
/// Returns the number of control bytes in the table.
#[cfg_attr(feature = "inline-more", inline)]
fn num_ctrl_bytes(&self) -> usize {
    // One byte per bucket plus Group::WIDTH replicated trailing bytes
    // (see `set_ctrl`).
    self.bucket_mask + 1 + Group::WIDTH
}
/// Returns whether this table points to the empty singleton with a capacity
/// of 0.
#[cfg_attr(feature = "inline-more", inline)]
fn is_empty_singleton(&self) -> bool {
    self.bucket_mask == 0
}
/// Returns an iterator over every element in the table. It is up to
/// the caller to ensure that the `RawTable` outlives the `RawIter`.
/// Because we cannot make the `next` method unsafe on the `RawIter`
/// struct, we have to make the `iter` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn iter(&self) -> RawIter<T> {
    let data = Bucket::from_base_index(self.data_end(), 0);
    RawIter {
        iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
        items: self.items,
    }
}
/// Returns an iterator which removes all elements from the table without
/// freeing the memory. It is up to the caller to ensure that the `RawTable`
/// outlives the `RawDrain`. Because we cannot make the `next` method unsafe
/// on the `RawDrain`, we have to make the `drain` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn drain(&mut self) -> RawDrain<'_, T> {
    let iter = self.iter();
    self.drain_iter_from(iter)
}
/// Returns an iterator which removes all elements from the table without
/// freeing the memory. It is up to the caller to ensure that the `RawTable`
/// outlives the `RawDrain`. Because we cannot make the `next` method unsafe
/// on the `RawDrain`, we have to make the `drain` method unsafe.
///
/// Iteration starts at the provided iterator's current location.
///
/// This method panics if the given iterator does not cover all items remaining in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T> {
    debug_assert_eq!(iter.len(), self.len());
    // `self` is left as an empty table; the drain takes ownership of the
    // old contents and keeps a pointer back so the allocation can be
    // restored/freed when the drain is dropped.
    RawDrain {
        iter,
        table: ManuallyDrop::new(mem::replace(self, Self::new())),
        orig_table: NonNull::from(self),
        marker: PhantomData,
    }
}
/// Returns an iterator which consumes all elements from the table.
///
/// Iteration starts at the provided iterator's current location.
///
/// This method panics if the given iterator does not cover all items remaining in the table.
pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T> {
    debug_assert_eq!(iter.len(), self.len());
    // Strip the table down to its raw allocation; the iterator becomes
    // responsible for dropping elements and freeing the memory.
    let alloc = self.into_alloc();
    RawIntoIter {
        iter,
        alloc,
        marker: PhantomData,
    }
}
/// Converts the table into a raw allocation. The contents of the table
/// should be dropped using a `RawIter` before freeing the allocation.
#[cfg_attr(feature = "inline-more", inline)]
pub(crate) fn into_alloc(self) -> Option<(NonNull<u8>, Layout)> {
    let alloc = if self.is_empty_singleton() {
        // The empty singleton owns no allocation to hand back.
        None
    } else {
        // The layout computation succeeded at allocation time, so it
        // cannot fail for the same bucket count.
        let (layout, ctrl_offset) = calculate_layout::<T>(self.buckets())
            .unwrap_or_else(|| unsafe { hint::unreachable_unchecked() });
        Some((
            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
            layout,
        ))
    };
    // Suppress Drop: ownership of the allocation has been transferred.
    mem::forget(self);
    alloc
}
}
// SAFETY: RawTable owns its elements outright, so sending or sharing the
// table across threads is sound exactly when `T` itself is Send/Sync.
unsafe impl<T> Send for RawTable<T> where T: Send {}
unsafe impl<T> Sync for RawTable<T> where T: Sync {}
impl<T: Clone> Clone for RawTable<T> {
    fn clone(&self) -> Self {
        if self.is_empty_singleton() {
            // Nothing to copy; hand out a fresh empty singleton.
            Self::new()
        } else {
            unsafe {
                // ManuallyDrop prevents a double free if clone_from_spec
                // panics: its on_panic callback frees the buckets instead.
                let mut new_table = ManuallyDrop::new(
                    // The infallible allocation is asserted never to fail.
                    Self::new_uninitialized(self.buckets(), Fallibility::Infallible)
                        .unwrap_or_else(|_| hint::unreachable_unchecked()),
                );
                new_table.clone_from_spec(self, |new_table| {
                    // We need to free the memory allocated for the new table.
                    new_table.free_buckets();
                });
                // Return the newly created table.
                ManuallyDrop::into_inner(new_table)
            }
        }
    }
    fn clone_from(&mut self, source: &Self) {
        if source.is_empty_singleton() {
            *self = Self::new();
        } else {
            unsafe {
                // First, drop all our elements without clearing the control bytes.
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                // If necessary, resize our table to match the source.
                if self.buckets() != source.buckets() {
                    // Skip our drop by using ptr::write.
                    if !self.is_empty_singleton() {
                        self.free_buckets();
                    }
                    // ptr::write overwrites *self without running its Drop,
                    // which would otherwise double-free the buckets we just
                    // released above.
                    (self as *mut Self).write(
                        Self::new_uninitialized(source.buckets(), Fallibility::Infallible)
                            .unwrap_or_else(|_| hint::unreachable_unchecked()),
                    );
                }
                self.clone_from_spec(source, |self_| {
                    // We need to leave the table in an empty state.
                    self_.clear_no_drop()
                });
            }
        }
    }
}
/// Specialization of `clone_from` for `Copy` types
trait RawTableClone {
    // `on_panic` restores the destination table to a sane state if cloning
    // an element panics part-way through.
    unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self));
}
impl<T: Clone> RawTableClone for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    default_fn! {
        // Generic fallback: clone element by element via clone_from_impl.
        unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
            self.clone_from_impl(source, on_panic);
        }
    }
}
#[cfg(feature = "nightly")]
impl<T: Copy> RawTableClone for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) {
        // Copy types cannot panic while being duplicated, so a raw byte
        // copy of the control bytes and the data array is sufficient and
        // the panic callback is unused.
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
        source
            .data_start()
            .copy_to_nonoverlapping(self.data_start(), self.buckets());
        self.items = source.items;
        self.growth_left = source.growth_left;
    }
}
impl<T: Clone> RawTable<T> {
    /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
        // Copy the control bytes unchanged. We do this in a single pass
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
        // The cloning of elements may panic, in which case we need
        // to make sure we drop only the elements that have been
        // cloned so far. The guard tracks the index of the last bucket
        // written, so the unwind path can drop exactly those.
        let mut guard = guard((0, &mut *self), |(index, self_)| {
            if mem::needs_drop::<T>() {
                for i in 0..=*index {
                    if is_full(*self_.ctrl(i)) {
                        self_.bucket(i).drop();
                    }
                }
            }
            // Depending on whether we were called from clone or clone_from, we
            // either need to free the memory for the destination table or just
            // clear the control bytes.
            on_panic(self_);
        });
        for from in source.iter() {
            let index = source.bucket_index(&from);
            let to = guard.1.bucket(index);
            to.write(from.as_ref().clone());
            // Update the index in case we need to unwind.
            guard.0 = index;
        }
        // Successfully cloned all items, no need to clean up.
        mem::forget(guard);
        self.items = source.items;
        self.growth_left = source.growth_left;
    }
    /// Variant of `clone_from` to use when a hasher is available.
    #[cfg(feature = "raw")]
    pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
        // If we have enough capacity in the table, just clear it and insert
        // elements one by one. We don't do this if we have the same number of
        // buckets as the source since we can just copy the contents directly
        // in that case.
        if self.buckets() != source.buckets()
            && bucket_mask_to_capacity(self.bucket_mask) >= source.len()
        {
            self.clear();
            let guard_self = guard(&mut *self, |self_| {
                // Clear the partially copied table if a panic occurs, otherwise
                // items and growth_left will be out of sync with the contents
                // of the table.
                self_.clear();
            });
            unsafe {
                for item in source.iter() {
                    // This may panic.
                    let item = item.as_ref().clone();
                    let hash = hasher(&item);
                    // We can use a simpler version of insert() here since:
                    // - there are no DELETED entries.
                    // - we know there is enough space in the table.
                    // - all elements are unique.
                    let index = guard_self.find_insert_slot(hash);
                    guard_self.set_ctrl(index, h2(hash));
                    guard_self.bucket(index).write(item);
                }
            }
            // Successfully cloned all items, no need to clean up.
            mem::forget(guard_self);
            self.items = source.items;
            self.growth_left -= source.items;
        } else {
            self.clone_from(source);
        }
    }
}
// Two otherwise-identical Drop impls: the nightly one uses `#[may_dangle]`
// so the borrow checker allows T to dangle while the table is dropped.
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        // The empty singleton owns no elements and no allocation.
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
#[cfg(not(feature = "nightly"))]
impl<T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        // Drop every full bucket, then release the allocation; the empty
        // singleton owns neither.
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
impl<T> IntoIterator for RawTable<T> {
    type Item = T;
    type IntoIter = RawIntoIter<T>;
    #[cfg_attr(feature = "inline-more", inline)]
    fn into_iter(self) -> RawIntoIter<T> {
        unsafe {
            // The iterator must be built before `self` is consumed by
            // into_iter_from, hence the two-step dance.
            let iter = self.iter();
            self.into_iter_from(iter)
        }
    }
}
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track an item count.
pub(crate) struct RawIterRange<T> {
    // Mask of full buckets in the current group. Bits are cleared from this
    // mask as each element is processed.
    current_group: BitMask,
    // Pointer to the buckets for the current group.
    data: Bucket<T>,
    // Pointer to the next group of control bytes;
    // must be aligned to the group size.
    next_ctrl: *const u8,
    // Pointer one past the last control byte of this range.
    end: *const u8,
}
impl<T> RawIterRange<T> {
    /// Returns a `RawIterRange` covering a subset of a table.
    ///
    /// The control byte address must be aligned to the group size.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
        debug_assert_ne!(len, 0);
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
        let end = ctrl.add(len);
        // Load the first group and advance ctrl to point to the next group
        let current_group = Group::load_aligned(ctrl).match_full();
        let next_ctrl = ctrl.add(Group::WIDTH);
        Self {
            current_group,
            data,
            next_ctrl,
            end,
        }
    }
    /// Splits a `RawIterRange` into two halves.
    ///
    /// Returns `None` if the remaining range is smaller than or equal to the
    /// group width.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rayon")]
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
        unsafe {
            if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are currently processing
                // is the last one.
                (self, None)
            } else {
                // len is the remaining number of elements after the group that
                // we are currently processing. It must be a multiple of the
                // group size (small tables are caught by the check above).
                let len = offset_from(self.end, self.next_ctrl);
                debug_assert_eq!(len % Group::WIDTH, 0);
                // Split the remaining elements into two halves, but round the
                // midpoint down in case there is an odd number of groups
                // remaining. This ensures that:
                // - The tail is at least 1 group long.
                // - The split is roughly even considering we still have the
                //   current group to process.
                let mid = (len / 2) & !(Group::WIDTH - 1);
                // The tail's data pointer skips the current group
                // (Group::WIDTH) plus the head's share (mid).
                let tail = Self::new(
                    self.next_ctrl.add(mid),
                    self.data.next_n(Group::WIDTH).next_n(mid),
                    len - mid,
                );
                debug_assert_eq!(
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                    tail.data.ptr
                );
                debug_assert_eq!(self.end, tail.end);
                self.end = self.next_ctrl.add(mid);
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
                (self, Some(tail))
            }
        }
    }
}
// SAFETY: we make raw iterators unconditionally Send and Sync, and let the
// PhantomData in the actual iterator implementations determine the real
// Send/Sync bounds (the range itself only holds raw pointers).
unsafe impl<T> Send for RawIterRange<T> {}
unsafe impl<T> Sync for RawIterRange<T> {}
impl<T> Clone for RawIterRange<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        // Written by hand rather than derived: a derived Clone would add a
        // `T: Clone` bound even though only pointers and a bitmask are
        // duplicated here.
        Self {
            current_group: self.current_group,
            data: self.data.clone(),
            end: self.end,
            next_ctrl: self.next_ctrl,
        }
    }
}
impl<T> Iterator for RawIterRange<T> {
    type Item = Bucket<T>;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                // Drain the cached bitmask of full slots first.
                if let Some(index) = self.current_group.lowest_set_bit() {
                    self.current_group = self.current_group.remove_lowest_bit();
                    return Some(self.data.next_n(index));
                }
                if self.next_ctrl >= self.end {
                    return None;
                }
                // We might read past self.end up to the next group boundary,
                // but this is fine because it only occurs on tables smaller
                // than the group size where the trailing control bytes are all
                // EMPTY. On larger tables self.end is guaranteed to be aligned
                // to the group size (since tables are power-of-two sized).
                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                self.data = self.data.next_n(Group::WIDTH);
                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
            }
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // We don't have an item count, so just guess based on the range size.
        (
            0,
            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
        )
    }
}
// `next` keeps returning None once `next_ctrl` passes `end`, so the
// iterator is fused.
impl<T> FusedIterator for RawIterRange<T> {}
/// Iterator which returns a raw pointer to every full bucket in the table.
pub struct RawIter<T> {
    pub(crate) iter: RawIterRange<T>,
    // Number of items remaining; kept in sync by `next` and the
    // `reflect_*` methods so size_hint can be exact.
    items: usize,
}
impl<T> RawIter<T> {
    /// Refresh the iterator so that it reflects a removal from the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each removed bucket before `next` is called again.
    ///
    /// This method should be called _before_ the removal is made. It is not necessary to call this
    /// method if you are removing an item that this iterator yielded in the past.
    #[cfg(feature = "raw")]
    pub fn reflect_remove(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, false);
    }
    /// Refresh the iterator so that it reflects an insertion into the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each insert before `next` is called again.
    ///
    /// This method does not guarantee that an insertion of a bucket with a greater
    /// index than the last one yielded will be reflected in the iterator.
    ///
    /// This method should be called _after_ the given insert is made.
    #[cfg(feature = "raw")]
    pub fn reflect_insert(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, true);
    }
    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
    #[cfg(feature = "raw")]
    fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
        unsafe {
            if b.as_ptr() > self.iter.data.as_ptr() {
                // The iterator has already passed the bucket's group.
                // So the toggle isn't relevant to this iterator.
                return;
            }
            if self.iter.next_ctrl < self.iter.end
                && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
            {
                // The iterator has not yet reached the bucket's group.
                // We don't need to reload anything, but we do need to adjust the item count.
                if cfg!(debug_assertions) {
                    // Double-check that the user isn't lying to us by checking the bucket state.
                    // To do that, we need to find its control byte. We know that self.iter.data is
                    // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
                    let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
                    // This method should be called _before_ a removal, or _after_ an insert,
                    // so in both cases the ctrl byte should indicate that the bucket is full.
                    assert!(is_full(*ctrl));
                }
                if is_insert {
                    self.items += 1;
                } else {
                    self.items -= 1;
                }
                return;
            }
            // The iterator is at the bucket group that the toggled bucket is in.
            // We need to do two things:
            //
            // - Determine if the iterator already yielded the toggled bucket.
            //   If it did, we're done.
            // - Otherwise, update the iterator cached group so that it won't
            //   yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
            //   We'll also need to update the item count accordingly.
            if let Some(index) = self.iter.current_group.lowest_set_bit() {
                let next_bucket = self.iter.data.next_n(index);
                if b.as_ptr() > next_bucket.as_ptr() {
                    // The toggled bucket is "before" the bucket the iterator would yield next. We
                    // therefore don't need to do anything --- the iterator has already passed the
                    // bucket in question.
                    //
                    // The item count must already be correct, since a removal or insert "prior" to
                    // the iterator's position wouldn't affect the item count.
                } else {
                    // The removed bucket is an upcoming bucket. We need to make sure it does _not_
                    // get yielded, and also that it's no longer included in the item count.
                    //
                    // NOTE: We can't just reload the group here, both since that might reflect
                    // inserts we've already passed, and because that might inadvertently unset the
                    // bits for _other_ removals. If we do that, we'd have to also decrement the
                    // item count for those other bits that we unset. But the presumably subsequent
                    // call to reflect for those buckets might _also_ decrement the item count.
                    // Instead, we _just_ flip the bit for the particular bucket the caller asked
                    // us to reflect.
                    let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let was_full = self.iter.current_group.flip(our_bit);
                    debug_assert_ne!(was_full, is_insert);
                    if is_insert {
                        self.items += 1;
                    } else {
                        self.items -= 1;
                    }
                    if cfg!(debug_assertions) {
                        if b.as_ptr() == next_bucket.as_ptr() {
                            // The removed bucket should no longer be next
                            debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
                        } else {
                            // We should not have changed what bucket comes next.
                            debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
                        }
                    }
                }
            } else {
                // We must have already iterated past the removed item.
            }
        }
    }
}
// Manual `Clone` impl: a derive would add an unnecessary `T: Clone` bound,
// but cloning the iterator only copies its cursor and item count.
impl<T> Clone for RawIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        let iter = self.iter.clone();
        let items = self.items;
        Self { iter, items }
    }
}
impl<T> Iterator for RawIter<T> {
    type Item = Bucket<T>;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        match self.iter.next() {
            Some(bucket) => {
                // One fewer item left to yield.
                self.items -= 1;
                Some(bucket)
            }
            None => {
                // Deliberately a debug-only check rather than a release-mode
                // `items == 0` test: avoiding the release check lets the
                // compiler optimize the item count away entirely when the
                // iterator length is never queried.
                debug_assert_eq!(self.items, 0);
                None
            }
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The count is exact, enabling ExactSizeIterator.
        (self.items, Some(self.items))
    }
}
// `RawIter` tracks an exact remaining-item count (see `size_hint`), and
// `next` keeps returning `None` once exhausted, so both marker traits hold.
impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
/// Iterator which consumes a table and returns elements.
pub struct RawIntoIter<T> {
    // Iterator over the remaining (not yet consumed) elements.
    iter: RawIter<T>,
    // The backing allocation (pointer + layout), freed on drop. `None` when
    // the table never allocated (the empty-singleton case).
    alloc: Option<(NonNull<u8>, Layout)>,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}
impl<T> RawIntoIter<T> {
    /// Returns a borrowing iterator over the elements not yet consumed.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
// SAFETY: `RawIntoIter` owns its elements and allocation outright, so sending
// or sharing it is equivalent to sending/sharing the `T`s themselves — hence
// the `T: Send` / `T: Sync` bounds.
unsafe impl<T> Send for RawIntoIter<T> where T: Send {}
unsafe impl<T> Sync for RawIntoIter<T> where T: Sync {}
// Nightly-only variant: `#[may_dangle]` tells dropck that this drop impl does
// not access `T` beyond dropping it, permitting more flexible lifetimes.
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements (skipped entirely when `T` has no
            // drop glue).
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }
            // Free the table's backing allocation, if it made one.
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}
// Stable fallback: identical to the nightly impl above but without the
// `#[may_dangle]` dropck relaxation.
#[cfg(not(feature = "nightly"))]
impl<T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements (skipped entirely when `T` has no
            // drop glue).
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }
            // Free the table's backing allocation, if it made one.
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}
impl<T> Iterator for RawIntoIter<T> {
    type Item = T;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        let bucket = self.iter.next()?;
        // The bucket came from our own iterator, which only yields full
        // buckets, and ownership is consumed by value here.
        unsafe { Some(bucket.read()) }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
// Delegates to the inner `RawIter`, which is exact-sized and fused.
impl<T> ExactSizeIterator for RawIntoIter<T> {}
impl<T> FusedIterator for RawIntoIter<T> {}
/// Iterator which consumes elements without freeing the table storage.
pub struct RawDrain<'a, T> {
    // Iterator over the elements being drained.
    iter: RawIter<T>,
    // The table is moved into the iterator for the duration of the drain. This
    // ensures that an empty table is left if the drain iterator is leaked
    // without dropping.
    table: ManuallyDrop<RawTable<T>>,
    // Location to move the (emptied) table back to when the drain is dropped.
    orig_table: NonNull<RawTable<T>>,
    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
    // covariant over T.
    marker: PhantomData<&'a RawTable<T>>,
}
impl<T> RawDrain<'_, T> {
    /// Returns a borrowing iterator over the elements not yet drained.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
// SAFETY: the drain holds the table (and thus its elements) by value for its
// duration, so thread-transfer/sharing safety reduces to that of `T`.
unsafe impl<T> Send for RawDrain<'_, T> where T: Send {}
unsafe impl<T> Sync for RawDrain<'_, T> where T: Sync {}
impl<T> Drop for RawDrain<'_, T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements. Note that this may panic.
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }
            // Reset the contents of the table now that all elements have been
            // dropped.
            self.table.clear_no_drop();
            // Move the now empty table back to its original location. The
            // bitwise copy is sound because `table` is `ManuallyDrop` and is
            // never touched again after this point.
            self.orig_table
                .as_ptr()
                .copy_from_nonoverlapping(&*self.table, 1);
        }
    }
}
impl<T> Iterator for RawDrain<'_, T> {
    type Item = T;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        let bucket = self.iter.next()?;
        // Full bucket yielded by our own iterator; reading moves the value
        // out, and `Drop for RawDrain` clears the slots afterwards.
        unsafe { Some(bucket.read()) }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
// Delegates to the inner `RawIter`, which is exact-sized and fused.
impl<T> ExactSizeIterator for RawDrain<'_, T> {}
impl<T> FusedIterator for RawDrain<'_, T> {}
// NOTE(review): stray commit-message line from a file concatenation, kept as
// a comment so the file parses: "Clarify contract for iter_from methods".
use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error};
use crate::scopeguard::guard;
use crate::TryReserveError;
use core::alloc::Layout;
use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
use core::ptr::NonNull;
cfg_if! {
    // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
    // at once instead of 8. We don't bother with AVX since it would require
    // runtime dispatch and wouldn't gain us much anyways: the probability of
    // finding a match drops off drastically after the first few buckets.
    //
    // I attempted an implementation on ARM using NEON instructions, but it
    // turns out that most NEON instructions have multi-cycle latency, which in
    // the end outweighs any gains over the generic implementation.
    //
    // The `not(miri)` clause forces the generic (non-SIMD) path when running
    // under Miri.
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri)
    ))] {
        mod sse2;
        use sse2 as imp;
    } else {
        // Portable fallback implementation.
        #[path = "generic.rs"]
        mod generic;
        use generic as imp;
    }
}
mod bitmask;
use self::bitmask::BitMask;
use self::imp::Group;
// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
    // Stable fallback: no branch-prediction intrinsic available, so this is
    // just an identity function.
    b
}
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
    // Stable fallback: no branch-prediction intrinsic available, so this is
    // just an identity function.
    b
}
// Nightly variant: uses the `offset_from` intrinsic directly. Caller must
// guarantee `to >= from` and that both point into the same allocation.
#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
// Stable fallback for `pointer::offset_from`: subtract the raw addresses and
// divide by the element size. Caller must guarantee `to >= from`, that both
// point into the same allocation, and that `T` is not zero-sized.
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    let byte_diff = to as usize - from as usize;
    byte_diff / mem::size_of::<T>()
}
/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    // Report failures to the caller as `TryReserveError`.
    Fallible,
    // Panic (capacity overflow) or abort via `handle_alloc_error` on failure.
    Infallible,
}
impl Fallibility {
    /// Error to return on capacity overflow.
    ///
    /// In infallible mode this panics instead of returning.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
        }
    }
    /// Error to return on allocation error.
    ///
    /// In infallible mode this diverges via `handle_alloc_error` instead of
    /// returning.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::AllocError { layout },
            Fallibility::Infallible => handle_alloc_error(layout),
        }
    }
}
/// Control byte value for an empty bucket.
///
/// All bits set: the top bit marks the slot as non-full, and the low bit
/// distinguishes EMPTY from DELETED (see `special_is_empty`).
const EMPTY: u8 = 0b1111_1111;
/// Control byte value for a deleted bucket (tombstone): top bit set, low bit
/// clear.
const DELETED: u8 = 0b1000_0000;
/// Checks whether a control byte represents a full bucket (top bit is clear).
///
/// Full buckets store a 7-bit `h2` hash, so their value is always below
/// `0x80`; EMPTY and DELETED both have the top bit set.
#[inline]
fn is_full(ctrl: u8) -> bool {
    ctrl < 0x80
}
/// Checks whether a control byte represents a special value (top bit is set),
/// i.e. EMPTY or DELETED rather than a full bucket.
#[inline]
fn is_special(ctrl: u8) -> bool {
    // Top bit set <=> value is at least 0x80.
    ctrl >= 0x80
}
/// Checks whether a special control value is EMPTY (just check 1 bit).
///
/// EMPTY is `0b1111_1111` and DELETED is `0b1000_0000`, so among special
/// values the lowest bit alone tells them apart.
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    ctrl & 0x01 == 0x01
}
/// Primary hash function, used to select the initial bucket to probe from.
///
/// The result is always masked with `bucket_mask` (see `probe_seq`), so the
/// truncation on 32-bit targets is harmless.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}
/// Secondary hash function, saved in the low 7 bits of the control byte.
///
/// Takes the top 7 bits of the *effective* hash width: some hash functions
/// (such as FxHash) produce a `usize`-wide result, leaving the top 32 bits
/// zero on 32-bit platforms, so we take the top of the low `usize` bits
/// rather than the (possibly all-zero) top of the full `u64`.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    let effective_bytes = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
    let shift = effective_bytes * 8 - 7;
    ((hash >> shift) & 0x7f) as u8 // truncation is deliberate
}
/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    // Mask used to wrap `pos` back into the table (bucket count - 1).
    bucket_mask: usize,
    // Current probe position (a bucket index).
    pos: usize,
    // Current stride; grows by `Group::WIDTH` each step.
    stride: usize,
}
impl Iterator for ProbeSeq {
    type Item = usize;
    /// Never returns `None`; the loop driving this iterator must break out
    /// once it reaches a group containing an empty bucket.
    #[inline]
    fn next(&mut self) -> Option<usize> {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= self.bucket_mask,
            "Went past end of probe sequence"
        );
        let result = self.pos;
        // Triangular probing: increase the stride by one group, then advance
        // by the new stride, wrapping within the table.
        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= self.bucket_mask;
        Some(result)
    }
}
/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    debug_assert_ne!(cap, 0);
    // Small tables: always keep at least one empty bucket so that lookups for
    // absent keys are guaranteed to terminate. A 2-bucket table could only
    // hold a single element, so the smallest size used is 4 (holding 3).
    if cap < 8 {
        let buckets = if cap < 4 { 4 } else { 8 };
        return Some(buckets);
    }
    // Larger tables: require 1/8 of the buckets to be empty (87.5% max load).
    //
    // Be careful when modifying this: calculate_layout relies on this
    // overflow check.
    let adjusted_cap = cap.checked_mul(8)? / 7;
    // Any overflow was caught by checked_mul, and the division only rounds
    // down, so next_power_of_two cannot overflow here.
    Some(adjusted_cap.next_power_of_two())
}
/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[cfg_attr(feature = "inline-more", inline)]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    match bucket_mask {
        // Tables with 1/2/4/8 buckets always reserve exactly one empty slot.
        // (Keep in mind the mask is one less than the bucket count.)
        0..=7 => bucket_mask,
        // Larger tables keep 12.5% of the slots empty (87.5% load factor).
        mask => ((mask + 1) / 8) * 7,
    }
}
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());
    // Array of buckets
    let data = Layout::array::<T>(buckets).ok()?;
    // Array of control bytes. This must be aligned to the group size.
    //
    // We add `Group::WIDTH` control bytes at the end of the array which
    // replicate the bytes at the start of the array and thus avoids the need to
    // perform bounds-checking while probing.
    //
    // There is no possible overflow here since buckets is a power of two and
    // Group::WIDTH is a small number.
    let ctrl = unsafe { Layout::from_size_align_unchecked(buckets + Group::WIDTH, Group::WIDTH) };
    // `extend` returns the combined layout plus the control-byte offset.
    data.extend(ctrl).ok()
}
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(not(feature = "nightly"))]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());
    // Manual layout calculation since Layout methods are not yet stable.
    let ctrl_align = usize::max(mem::align_of::<T>(), Group::WIDTH);
    // Size of the element array, rounded up to `ctrl_align` so that the
    // control bytes start at a properly aligned offset.
    let ctrl_offset = mem::size_of::<T>()
        .checked_mul(buckets)?
        .checked_add(ctrl_align - 1)?
        & !(ctrl_align - 1);
    // `Group::WIDTH` extra control bytes replicate the start of the array,
    // matching the nightly `calculate_layout` above.
    let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
    Some((
        unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
        ctrl_offset,
    ))
}
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // NOTE: this points one element *past* the referenced element (see
    // `as_ptr`, which subtracts one). Data grows downwards from the control
    // bytes, so storing the one-past pointer keeps the usual pointer
    // arithmetic invariants; a direct element pointer would complicate them.
    // `NonNull` is used for covariance and the niche layout optimization.
    ptr: NonNull<T>,
}
// SAFETY: This Send impl is needed for rayon support. It is sound because
// `Bucket` is never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}
impl<T> Clone for Bucket<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self { ptr: self.ptr }
}
}
impl<T> Bucket<T> {
    /// Creates a bucket for the element at `index`, counting *backwards* from
    /// `base` (the one-past-the-end data pointer). For ZSTs the "pointer"
    /// stores `index + 1` instead, since there is no real address to track.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            // won't overflow because index must be less than length
            (index + 1) as *mut T
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    /// Inverse of `from_base_index`: recovers the bucket index from `base`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        if mem::size_of::<T>() == 0 {
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }
    /// Returns a pointer to the referenced element. Note the `sub(1)`: the
    /// stored pointer is one element past the one this bucket refers to.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ptr(&self) -> *mut T {
        if mem::size_of::<T>() == 0 {
            // Just return an arbitrary ZST pointer which is properly aligned
            mem::align_of::<T>() as *mut T
        } else {
            self.ptr.as_ptr().sub(1)
        }
    }
    /// Returns the bucket `offset` positions after this one. Elements are
    /// laid out downwards in memory, hence the pointer subtraction; for ZSTs
    /// the stored index is simply advanced.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            (self.ptr.as_ptr() as usize + offset) as *mut T
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    /// Drops the element in place without touching its control byte.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }
    /// Moves the element out by value; the slot must not be read again.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }
    /// Overwrites the slot with `val` without dropping any previous contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }
    /// Borrows the element. The caller chooses the lifetime `'a`, which must
    /// not outlive the table or the element's occupancy.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }
    /// Mutably borrows the element; same lifetime caveat as `as_ref`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }
    /// Bitwise-copies the element from `other` into this bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}
/// A raw hash table with an unsafe API.
pub struct RawTable<T> {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,
    // Pointer to the control bytes; the element array sits immediately
    // *before* this pointer in the same allocation:
    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
    //                                ^ points here
    ctrl: NonNull<u8>,
    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,
    // Number of elements in the table, only really used by len()
    items: usize,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}
impl<T> RawTable<T> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn new() -> Self {
        Self {
            // Point the control bytes at `Group::static_empty()` — a shared
            // static — so no heap allocation is needed for an empty table.
            // Be careful to cast the entire slice to a raw pointer.
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty().as_ptr() as *mut u8) },
            bucket_mask: 0,
            items: 0,
            growth_left: 0,
            marker: PhantomData,
        }
    }
    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    ///
    /// `buckets` must be a non-zero power of two; whether failure returns an
    /// error or diverges is chosen by `fallability`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        buckets: usize,
        fallability: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());
        let (layout, ctrl_offset) =
            calculate_layout::<T>(buckets).ok_or_else(|| fallability.capacity_overflow())?;
        let ptr = NonNull::new(alloc(layout)).ok_or_else(|| fallability.alloc_err(layout))?;
        // Control bytes live `ctrl_offset` bytes into the allocation, right
        // after the element array (see calculate_layout).
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
        Ok(Self {
            ctrl,
            bucket_mask: buckets - 1,
            items: 0,
            growth_left: bucket_mask_to_capacity(buckets - 1),
            marker: PhantomData,
        })
    }
/// Attempts to allocate a new hash table with at least enough capacity
/// for inserting the given number of elements without reallocating.
fn fallible_with_capacity(
capacity: usize,
fallability: Fallibility,
) -> Result<Self, TryReserveError> {
if capacity == 0 {
Ok(Self::new())
} else {
unsafe {
let buckets =
capacity_to_buckets(capacity).ok_or_else(|| fallability.capacity_overflow())?;
let result = Self::new_uninitialized(buckets, fallability)?;
result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
Ok(result)
}
}
}
/// Attempts to allocate a new hash table with at least enough capacity
/// for inserting the given number of elements without reallocating.
#[cfg(feature = "raw")]
pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
Self::fallible_with_capacity(capacity, Fallibility::Fallible)
}
/// Allocates a new hash table with at least enough capacity for inserting
/// the given number of elements without reallocating.
pub fn with_capacity(capacity: usize) -> Self {
Self::fallible_with_capacity(capacity, Fallibility::Infallible)
.unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() })
}
    /// Deallocates the table without dropping any entries.
    ///
    /// Callers must not invoke this on the empty singleton, which owns no
    /// allocation (they guard with `is_empty_singleton`).
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn free_buckets(&mut self) {
        // calculate_layout succeeded when this table was allocated, so it
        // cannot fail now for the same bucket count.
        let (layout, ctrl_offset) =
            calculate_layout::<T>(self.buckets()).unwrap_or_else(|| hint::unreachable_unchecked());
        // The allocation begins `ctrl_offset` bytes before the control bytes.
        dealloc(self.ctrl.as_ptr().sub(ctrl_offset), layout);
    }
    /// Returns pointer to one past last element of data table.
    ///
    /// The elements end exactly where the control bytes begin, so this is
    /// just `ctrl` reinterpreted as a `T` pointer.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn data_end(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.ctrl.as_ptr() as *mut T)
    }
/// Returns pointer to start of data table.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
pub unsafe fn data_start(&self) -> *mut T {
self.data_end().as_ptr().wrapping_sub(self.buckets())
}
    /// Returns the index of a bucket from a `Bucket`.
    ///
    /// Inverse of `bucket`: `bucket` must have come from this table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        bucket.to_base_index(self.data_end())
    }
    /// Returns a pointer to a control byte.
    ///
    /// `index` may reach into the replicated trailing group (the bound is
    /// `num_ctrl_bytes`, not `buckets`).
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
        debug_assert!(index < self.num_ctrl_bytes());
        self.ctrl.as_ptr().add(index)
    }
    /// Returns a pointer to an element in the table.
    ///
    /// Must not be called on the empty singleton (`bucket_mask == 0`).
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        debug_assert_ne!(self.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }
    /// Erases an element from the table without dropping it.
    ///
    /// `item` must refer to a full bucket of this table. The slot is marked
    /// either DELETED (tombstone) or EMPTY depending on its neighborhood.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        debug_assert!(is_full(*self.ctrl(index)));
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
        let empty_after = Group::load(self.ctrl(index)).match_empty();
        // If we are inside a continuous block of Group::WIDTH full or deleted
        // cells then a probe window may have seen a full block when trying to
        // insert. We therefore need to keep that block non-empty so that
        // lookups will continue searching to the next probe window.
        //
        // Note that in this context `leading_zeros` refers to the bytes at the
        // end of a group, while `trailing_zeros` refers to the bytes at the
        // beginning of a group.
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
            DELETED
        } else {
            // Truly freeing the slot gives back one unit of growth budget.
            self.growth_left += 1;
            EMPTY
        };
        self.set_ctrl(index, ctrl);
        self.items -= 1;
    }
    /// Erases an element from the table, dropping it in place.
    ///
    /// If `T`'s drop panics, the slot has already been unlinked, so the
    /// table remains consistent.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
        // Erase the element from the table first since drop might panic.
        self.erase_no_drop(&item);
        item.drop();
    }
    /// Removes an element from the table, returning it.
    ///
    /// Like `erase`, but the value is moved out to the caller instead of
    /// being dropped.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
        self.erase_no_drop(&item);
        item.read()
    }
    /// Returns an iterator for a probe sequence on the table.
    ///
    /// This iterator never terminates, but is guaranteed to visit each bucket
    /// group exactly once. The loop using `probe_seq` must terminate upon
    /// reaching a group containing an empty bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
        ProbeSeq {
            bucket_mask: self.bucket_mask,
            // Start at the bucket selected by the primary hash, wrapped into
            // the table.
            pos: h1(hash) & self.bucket_mask,
            stride: 0,
        }
    }
    /// Sets a control byte, and possibly also the replicated control byte at
    /// the end of the array.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
        // Replicate the first Group::WIDTH control bytes at the end of
        // the array without using a branch:
        // - If index >= Group::WIDTH then index == index2.
        // - Otherwise index2 == self.bucket_mask + 1 + index.
        //
        // The very last replicated control byte is never actually read because
        // we mask the initial index for unaligned loads, but we write it
        // anyways because it makes the set_ctrl implementation simpler.
        //
        // If there are fewer buckets than Group::WIDTH then this code will
        // replicate the buckets at the end of the trailing group. For example
        // with 2 buckets and a group size of 4, the control bytes will look
        // like this:
        //
        // Real | Replicated
        // ---------------------------------------------
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
        // ---------------------------------------------
        //
        // `index2` is the mirror position of `index` in the replicated tail
        // (or `index` itself when no mirroring is needed).
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
        *self.ctrl(index) = ctrl;
        *self.ctrl(index2) = ctrl;
    }
    /// Searches for an empty or deleted bucket which is suitable for inserting
    /// a new element.
    ///
    /// There must be at least 1 empty bucket in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    fn find_insert_slot(&self, hash: u64) -> usize {
        for pos in self.probe_seq(hash) {
            unsafe {
                let group = Group::load(self.ctrl(pos));
                if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
                    let result = (pos + bit) & self.bucket_mask;
                    // In tables smaller than the group width, trailing control
                    // bytes outside the range of the table are filled with
                    // EMPTY entries. These will unfortunately trigger a
                    // match, but once masked may point to a full bucket that
                    // is already occupied. We detect this situation here and
                    // perform a second scan starting at the beginning of the
                    // table. This second scan is guaranteed to find an empty
                    // slot (due to the load factor) before hitting the trailing
                    // control bytes (containing EMPTY).
                    if unlikely(is_full(*self.ctrl(result))) {
                        debug_assert!(self.bucket_mask < Group::WIDTH);
                        debug_assert_ne!(pos, 0);
                        return Group::load_aligned(self.ctrl(0))
                            .match_empty_or_deleted()
                            .lowest_set_bit_nonzero();
                    } else {
                        return result;
                    }
                }
            }
        }
        // probe_seq never returns.
        unreachable!();
    }
    /// Marks all table buckets as empty without dropping their contents.
    ///
    /// Any remaining elements are leaked, not dropped.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                // Reset every control byte (including the replicated tail).
                self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
            }
        }
        self.items = 0;
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
    }
    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        // Ensure that the table is reset even if one of the drops panic
        let self_ = guard(self, |self_| self_.clear_no_drop());
        if mem::needs_drop::<T>() {
            unsafe {
                for item in self_.iter() {
                    item.drop();
                }
            }
        }
        // The scopeguard runs `clear_no_drop` here, on both the normal and
        // the panicking path.
    }
    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.items, min_size);
        if min_size == 0 {
            *self = Self::new();
            return;
        }
        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size) {
            Some(buckets) => buckets,
            None => return,
        };
        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.items == 0 {
                *self = Self::with_capacity(min_size)
            } else {
                // Resize is infallible here; the table is only getting
                // smaller, so the `unreachable_unchecked` arm cannot fire.
                self.resize(min_size, hasher, Fallibility::Infallible)
                    .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() });
            }
        }
    }
/// Ensures that at least `additional` items can be inserted into the table
/// without reallocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
if additional > self.growth_left {
self.reserve_rehash(additional, hasher, Fallibility::Infallible)
.unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() });
}
}
/// Tries to ensure that at least `additional` items can be inserted into
/// the table without reallocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn try_reserve(
&mut self,
additional: usize,
hasher: impl Fn(&T) -> u64,
) -> Result<(), TryReserveError> {
if additional > self.growth_left {
self.reserve_rehash(additional, hasher, Fallibility::Fallible)
} else {
Ok(())
}
}
    /// Out-of-line slow path for `reserve` and `try_reserve`.
    ///
    /// Either rehashes in place (reclaiming tombstoned slots) or resizes,
    /// depending on how much capacity is locked up by DELETED entries.
    #[cold]
    #[inline(never)]
    fn reserve_rehash(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
        fallability: Fallibility,
    ) -> Result<(), TryReserveError> {
        let new_items = self
            .items
            .checked_add(additional)
            .ok_or_else(|| fallability.capacity_overflow())?;
        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
        if new_items <= full_capacity / 2 {
            // Rehash in-place without re-allocating if we have plenty of spare
            // capacity that is locked up due to DELETED entries.
            self.rehash_in_place(hasher);
            Ok(())
        } else {
            // Otherwise, conservatively resize to at least the next size up
            // to avoid churning deletes into frequent rehashes.
            self.resize(
                usize::max(new_items, full_capacity + 1),
                hasher,
                fallability,
            )
        }
    }
    /// Rehashes the contents of the table in place (i.e. without changing the
    /// allocation).
    ///
    /// If `hasher` panics then some of the table's contents may be lost.
    fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
        unsafe {
            // Bulk convert all full control bytes to DELETED, and all DELETED
            // control bytes to EMPTY. This effectively frees up all buckets
            // containing a DELETED entry.
            for i in (0..self.buckets()).step_by(Group::WIDTH) {
                let group = Group::load_aligned(self.ctrl(i));
                let group = group.convert_special_to_empty_and_full_to_deleted();
                group.store_aligned(self.ctrl(i));
            }
            // Fix up the trailing control bytes. See the comments in set_ctrl
            // for the handling of tables smaller than the group width.
            if self.buckets() < Group::WIDTH {
                self.ctrl(0)
                    .copy_to(self.ctrl(Group::WIDTH), self.buckets());
            } else {
                self.ctrl(0)
                    .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
            }
            // If the hash function panics then properly clean up any elements
            // that we haven't rehashed yet. We unfortunately can't preserve the
            // element since we lost their hash and have no way of recovering it
            // without risking another panic.
            let mut guard = guard(self, |self_| {
                if mem::needs_drop::<T>() {
                    for i in 0..self_.buckets() {
                        if *self_.ctrl(i) == DELETED {
                            self_.set_ctrl(i, EMPTY);
                            self_.bucket(i).drop();
                            self_.items -= 1;
                        }
                    }
                }
                self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
            });
            // At this point, DELETED elements are elements that we haven't
            // rehashed yet. Find them and re-insert them at their ideal
            // position.
            'outer: for i in 0..guard.buckets() {
                if *guard.ctrl(i) != DELETED {
                    continue;
                }
                'inner: loop {
                    // Hash the current item
                    let item = guard.bucket(i);
                    let hash = hasher(item.as_ref());
                    // Search for a suitable place to put it
                    let new_i = guard.find_insert_slot(hash);
                    // Probing works by scanning through all of the control
                    // bytes in groups, which may not be aligned to the group
                    // size. If both the new and old position fall within the
                    // same unaligned group, then there is no benefit in moving
                    // it and we can just continue to the next item.
                    let probe_index = |pos: usize| {
                        (pos.wrapping_sub(guard.probe_seq(hash).pos) & guard.bucket_mask)
                            / Group::WIDTH
                    };
                    if likely(probe_index(i) == probe_index(new_i)) {
                        guard.set_ctrl(i, h2(hash));
                        continue 'outer;
                    }
                    // We are moving the current item to a new position. Write
                    // our H2 to the control byte of the new position.
                    let prev_ctrl = *guard.ctrl(new_i);
                    guard.set_ctrl(new_i, h2(hash));
                    if prev_ctrl == EMPTY {
                        // If the target slot is empty, simply move the current
                        // element into the new slot and clear the old control
                        // byte.
                        guard.set_ctrl(i, EMPTY);
                        guard.bucket(new_i).copy_from_nonoverlapping(&item);
                        continue 'outer;
                    } else {
                        // If the target slot is occupied, swap the two elements
                        // and then continue processing the element that we just
                        // swapped into the old slot.
                        debug_assert_eq!(prev_ctrl, DELETED);
                        mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
                        continue 'inner;
                    }
                }
            }
            // Everything rehashed successfully: recompute the growth budget
            // and disarm the panic-cleanup guard.
            guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
            mem::forget(guard);
        }
    }
    /// Allocates a new table of a different size and moves the contents of the
    /// current table into it.
    fn resize(
        &mut self,
        capacity: usize,
        hasher: impl Fn(&T) -> u64,
        fallability: Fallibility,
    ) -> Result<(), TryReserveError> {
        unsafe {
            debug_assert!(self.items <= capacity);
            // Allocate and initialize the new table.
            let mut new_table = Self::fallible_with_capacity(capacity, fallability)?;
            new_table.growth_left -= self.items;
            new_table.items = self.items;
            // The hash function may panic, in which case we simply free the new
            // table without dropping any elements that may have been copied into
            // it.
            //
            // This guard is also used to free the old table on success, see
            // the comment at the bottom of this function.
            let mut new_table = guard(ManuallyDrop::new(new_table), |new_table| {
                if !new_table.is_empty_singleton() {
                    new_table.free_buckets();
                }
            });
            // Copy all elements to the new table.
            for item in self.iter() {
                // This may panic.
                let hash = hasher(item.as_ref());
                // We can use a simpler version of insert() here since:
                // - there are no DELETED entries.
                // - we know there is enough space in the table.
                // - all elements are unique.
                let index = new_table.find_insert_slot(hash);
                new_table.set_ctrl(index, h2(hash));
                new_table.bucket(index).copy_from_nonoverlapping(&item);
            }
            // We successfully copied all elements without panicking. Now replace
            // self with the new table. The old table will have its memory freed but
            // the items will not be dropped (since they have been moved into the
            // new table).
            mem::swap(self, &mut new_table);
            Ok(())
        }
    }
    /// Inserts a new element into the table.
    ///
    /// This does not check if the given element already exists in the table.
    /// `hasher` is only invoked (on existing elements) if the table has to
    /// grow.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
        unsafe {
            let mut index = self.find_insert_slot(hash);
            // We can avoid growing the table once we have reached our load
            // factor if we are replacing a tombstone. This works since the
            // number of EMPTY slots does not change in this case.
            let old_ctrl = *self.ctrl(index);
            if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
                self.reserve(1, hasher);
                // The table may have been reallocated; find a slot again.
                index = self.find_insert_slot(hash);
            }
            let bucket = self.bucket(index);
            // Only consuming an EMPTY slot (not a tombstone) reduces the
            // growth budget.
            self.growth_left -= special_is_empty(old_ctrl) as usize;
            self.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.items += 1;
            bucket
        }
    }
/// Inserts a new element into the table, without growing the table.
///
/// There must be enough space in the table to insert the new element.
///
/// This does not check if the given element already exists in the table.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "rustc-internal-api")]
pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
    unsafe {
        let index = self.find_insert_slot(hash);
        let bucket = self.bucket(index);

        // If we are replacing a DELETED entry then we don't need to update
        // the load counter.
        let old_ctrl = *self.ctrl(index);
        self.growth_left -= special_is_empty(old_ctrl) as usize;

        self.set_ctrl(index, h2(hash));
        bucket.write(value);
        self.items += 1;
        bucket
    }
}
/// Searches for an element in the table.
///
/// `hash` is the element's full 64-bit hash; `eq` is the equality check run
/// against each candidate bucket whose control byte matches `h2(hash)`.
#[inline]
pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
    unsafe {
        for pos in self.probe_seq(hash) {
            let group = Group::load(self.ctrl(pos));
            for bit in group.match_byte(h2(hash)) {
                let index = (pos + bit) & self.bucket_mask;
                let bucket = self.bucket(index);
                if likely(eq(bucket.as_ref())) {
                    return Some(bucket);
                }
            }
            // An EMPTY byte in this group means the element cannot live in
            // any later group of the probe sequence, so stop searching.
            if likely(group.match_empty().any_bit_set()) {
                return None;
            }
        }
    }

    // probe_seq never returns.
    unreachable!();
}
/// Returns the number of elements the map can hold without reallocating.
///
/// This number is a lower bound; the table might be able to hold
/// more, but is guaranteed to be able to hold at least this many.
#[cfg_attr(feature = "inline-more", inline)]
pub fn capacity(&self) -> usize {
    // Current occupancy plus the remaining growth budget.
    self.items + self.growth_left
}
/// Returns the number of elements in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn len(&self) -> usize {
    self.items
}
/// Returns the number of buckets in the table.
///
/// Always a power of two, since `bucket_mask` is `buckets - 1`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn buckets(&self) -> usize {
    self.bucket_mask + 1
}
/// Returns the number of control bytes in the table.
///
/// One byte per bucket plus `Group::WIDTH` trailing bytes so that a full
/// group load at the last bucket never reads out of bounds.
#[cfg_attr(feature = "inline-more", inline)]
fn num_ctrl_bytes(&self) -> usize {
    self.bucket_mask + 1 + Group::WIDTH
}
/// Returns whether this table points to the empty singleton with a capacity
/// of 0.
///
/// Such a table owns no heap allocation and must never be freed.
#[cfg_attr(feature = "inline-more", inline)]
fn is_empty_singleton(&self) -> bool {
    self.bucket_mask == 0
}
/// Returns an iterator over every element in the table. It is up to
/// the caller to ensure that the `RawTable` outlives the `RawIter`.
/// Because we cannot make the `next` method unsafe on the `RawIter`
/// struct, we have to make the `iter` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn iter(&self) -> RawIter<T> {
    // Start at logical bucket index 0; the iterator also tracks the item
    // count so it can implement `ExactSizeIterator`.
    let data = Bucket::from_base_index(self.data_end(), 0);
    RawIter {
        iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
        items: self.items,
    }
}
/// Returns an iterator which removes all elements from the table without
/// freeing the memory.
///
/// It is up to the caller to ensure that the `RawTable` outlives the `RawDrain`.
/// Because we cannot make the `next` method unsafe on the `RawDrain`,
/// we have to make the `drain` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn drain(&mut self) -> RawDrain<'_, T> {
    // Drain starting from the beginning; delegates to `drain_iter_from`.
    let iter = self.iter();
    self.drain_iter_from(iter)
}
/// Returns an iterator which removes all elements from the table without
/// freeing the memory.
///
/// It is up to the caller to ensure that the `RawTable` outlives the `RawDrain`.
/// Because we cannot make the `next` method unsafe on the `RawDrain`,
/// we have to make the `drain` method unsafe.
///
/// Iteration starts at the provided iterator's current location.
/// You must ensure that the iterator covers all items that remain in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T> {
    debug_assert_eq!(iter.len(), self.len());
    RawDrain {
        iter,
        // Move the table out of `self`, leaving an empty table in its
        // place; this keeps `*self` valid even if the drain is leaked
        // without its destructor running.
        table: ManuallyDrop::new(mem::replace(self, Self::new())),
        orig_table: NonNull::from(self),
        marker: PhantomData,
    }
}
/// Returns an iterator which consumes all elements from the table.
///
/// It is up to the caller to ensure that the `RawTable` outlives the `RawIntoIter`.
/// Because we cannot make the `next` method unsafe on the `RawIntoIter`,
/// we have to make the `into_iter_from` method unsafe.
///
/// Iteration starts at the provided iterator's current location.
/// You must ensure that the iterator covers all items that remain in the table.
pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T> {
    debug_assert_eq!(iter.len(), self.len());

    // Take ownership of the raw allocation so the table's Drop does not
    // run; the RawIntoIter frees it once iteration is done.
    let alloc = self.into_alloc();
    RawIntoIter {
        iter,
        alloc,
        marker: PhantomData,
    }
}
/// Converts the table into a raw allocation. The contents of the table
/// should be dropped using a `RawIter` before freeing the allocation.
///
/// Returns `None` for the unallocated empty singleton.
#[cfg_attr(feature = "inline-more", inline)]
pub(crate) fn into_alloc(self) -> Option<(NonNull<u8>, Layout)> {
    let alloc = if self.is_empty_singleton() {
        None
    } else {
        let (layout, ctrl_offset) = calculate_layout::<T>(self.buckets())
            .unwrap_or_else(|| unsafe { hint::unreachable_unchecked() });
        Some((
            // The allocation begins `ctrl_offset` bytes before the control
            // bytes (the bucket storage precedes them in the layout).
            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
            layout,
        ))
    };
    // Suppress Drop: ownership of the allocation moves to the caller.
    mem::forget(self);
    alloc
}
}
// SAFETY: a `RawTable<T>` owns its elements, so moving or sharing the table
// across threads is equivalent to moving or sharing the `T`s themselves.
unsafe impl<T> Send for RawTable<T> where T: Send {}
unsafe impl<T> Sync for RawTable<T> where T: Sync {}
impl<T: Clone> Clone for RawTable<T> {
    fn clone(&self) -> Self {
        if self.is_empty_singleton() {
            // Nothing to copy; hand out another empty singleton.
            Self::new()
        } else {
            unsafe {
                // `ManuallyDrop` prevents a panic inside `clone_from_spec`
                // from double-freeing: the closure passed below is the sole
                // cleanup path for the new allocation.
                let mut new_table = ManuallyDrop::new(
                    Self::new_uninitialized(self.buckets(), Fallibility::Infallible)
                        .unwrap_or_else(|_| hint::unreachable_unchecked()),
                );

                new_table.clone_from_spec(self, |new_table| {
                    // We need to free the memory allocated for the new table.
                    new_table.free_buckets();
                });

                // Return the newly created table.
                ManuallyDrop::into_inner(new_table)
            }
        }
    }

    fn clone_from(&mut self, source: &Self) {
        if source.is_empty_singleton() {
            *self = Self::new();
        } else {
            unsafe {
                // First, drop all our elements without clearing the control bytes.
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }

                // If necessary, resize our table to match the source.
                if self.buckets() != source.buckets() {
                    // Skip our drop by using ptr::write.
                    if !self.is_empty_singleton() {
                        self.free_buckets();
                    }
                    (self as *mut Self).write(
                        Self::new_uninitialized(source.buckets(), Fallibility::Infallible)
                            .unwrap_or_else(|_| hint::unreachable_unchecked()),
                    );
                }

                self.clone_from_spec(source, |self_| {
                    // We need to leave the table in an empty state.
                    self_.clear_no_drop()
                });
            }
        }
    }
}
/// Specialization of `clone_from` for `Copy` types
///
/// `on_panic` is the cleanup hook run if cloning an element panics part-way
/// through (free the new buckets, or clear the control bytes).
trait RawTableClone {
    unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self));
}
// Generic fallback: element-by-element clone via `clone_from_impl`. The
// `default_fn!` macro presumably marks this as a `default fn` when
// specialization is available, so the `Copy` fast path below can override
// it — see the macro's definition to confirm.
impl<T: Clone> RawTableClone for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    default_fn! {
        unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
            self.clone_from_impl(source, on_panic);
        }
    }
}
// Fast path for `Copy` types (nightly specialization): the control bytes
// and the buckets are both plain bytes, so two bulk copies suffice. No
// panic cleanup is needed since no user code runs during the copy.
#[cfg(feature = "nightly")]
impl<T: Copy> RawTableClone for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) {
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
        source
            .data_start()
            .copy_to_nonoverlapping(self.data_start(), self.buckets());

        self.items = source.items;
        self.growth_left = source.growth_left;
    }
}
impl<T: Clone> RawTable<T> {
    /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
    ///
    /// `on_panic` runs after partially cloned elements are dropped and must
    /// restore `self` to a safe state (free the buckets, or clear ctrl bytes).
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
        // Copy the control bytes unchanged. We do this in a single pass
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());

        // The cloning of elements may panic, in which case we need
        // to make sure we drop only the elements that have been
        // cloned so far. `guard.0` is one past the index of the last bucket
        // successfully written; since `source.iter()` yields buckets in
        // increasing index order, every full bucket below that bound has
        // been initialized.
        let mut guard = guard((0, &mut *self), |(index, self_)| {
            if mem::needs_drop::<T>() {
                // Exclusive upper bound: using `0..=*index` would drop
                // bucket 0 — whose control byte was already copied as full
                // but whose storage is still uninitialized — if the very
                // first clone panicked.
                for i in 0..*index {
                    if is_full(*self_.ctrl(i)) {
                        self_.bucket(i).drop();
                    }
                }
            }

            // Depending on whether we were called from clone or clone_from, we
            // either need to free the memory for the destination table or just
            // clear the control bytes.
            on_panic(self_);
        });

        for from in source.iter() {
            let index = source.bucket_index(&from);
            let to = guard.1.bucket(index);
            // This may panic.
            to.write(from.as_ref().clone());

            // Update the unwind bound now that this bucket is initialized.
            guard.0 = index + 1;
        }

        // Successfully cloned all items, no need to clean up.
        mem::forget(guard);

        self.items = source.items;
        self.growth_left = source.growth_left;
    }

    /// Variant of `clone_from` to use when a hasher is available.
    #[cfg(feature = "raw")]
    pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
        // If we have enough capacity in the table, just clear it and insert
        // elements one by one. We don't do this if we have the same number of
        // buckets as the source since we can just copy the contents directly
        // in that case.
        if self.buckets() != source.buckets()
            && bucket_mask_to_capacity(self.bucket_mask) >= source.len()
        {
            self.clear();

            let guard_self = guard(&mut *self, |self_| {
                // Clear the partially copied table if a panic occurs, otherwise
                // items and growth_left will be out of sync with the contents
                // of the table.
                self_.clear();
            });

            unsafe {
                for item in source.iter() {
                    // This may panic.
                    let item = item.as_ref().clone();
                    let hash = hasher(&item);

                    // We can use a simpler version of insert() here since:
                    // - there are no DELETED entries.
                    // - we know there is enough space in the table.
                    // - all elements are unique.
                    let index = guard_self.find_insert_slot(hash);
                    guard_self.set_ctrl(index, h2(hash));
                    guard_self.bucket(index).write(item);
                }
            }

            // Successfully cloned all items, no need to clean up.
            mem::forget(guard_self);

            self.items = source.items;
            self.growth_left -= source.items;
        } else {
            self.clone_from(source);
        }
    }
}
// `#[may_dangle]` (nightly) lets a `RawTable<T>` outlive borrows inside `T`,
// since dropping the table only runs the destructors of the `T`s it owns.
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        // The empty singleton owns no allocation and holds no elements.
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
// Stable fallback of the Drop impl above, without `#[may_dangle]`.
#[cfg(not(feature = "nightly"))]
impl<T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        // The empty singleton owns no allocation and holds no elements.
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
impl<T> IntoIterator for RawTable<T> {
    type Item = T;
    type IntoIter = RawIntoIter<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn into_iter(self) -> RawIntoIter<T> {
        unsafe {
            // SAFETY: `into_iter_from` consumes `self`, so the iterator
            // cannot outlive the table's allocation.
            let iter = self.iter();
            self.into_iter_from(iter)
        }
    }
}
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track an item count.
pub(crate) struct RawIterRange<T> {
    // Mask of full buckets in the current group. Bits are cleared from this
    // mask as each element is processed.
    current_group: BitMask,

    // Pointer to the buckets for the current group.
    data: Bucket<T>,

    // Pointer to the next group of control bytes,
    // Must be aligned to the group size.
    next_ctrl: *const u8,

    // Pointer one past the last control byte of this range.
    end: *const u8,
}
impl<T> RawIterRange<T> {
    /// Returns a `RawIterRange` covering a subset of a table.
    ///
    /// The control byte address must be aligned to the group size.
    /// `len` is the number of control bytes in the range and must be non-zero.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
        debug_assert_ne!(len, 0);
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
        let end = ctrl.add(len);

        // Load the first group and advance ctrl to point to the next group
        let current_group = Group::load_aligned(ctrl).match_full();
        let next_ctrl = ctrl.add(Group::WIDTH);

        Self {
            current_group,
            data,
            next_ctrl,
            end,
        }
    }

    /// Splits a `RawIterRange` into two halves.
    ///
    /// Returns `None` if the remaining range is smaller than or equal to the
    /// group width.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rayon")]
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
        unsafe {
            if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are currently
                // processing is the last one.
                (self, None)
            } else {
                // len is the remaining number of elements after the group that
                // we are currently processing. It must be a multiple of the
                // group size (small tables are caught by the check above).
                let len = offset_from(self.end, self.next_ctrl);
                debug_assert_eq!(len % Group::WIDTH, 0);

                // Split the remaining elements into two halves, but round the
                // midpoint down in case there is an odd number of groups
                // remaining. This ensures that:
                // - The tail is at least 1 group long.
                // - The split is roughly even considering we still have the
                //   current group to process.
                let mid = (len / 2) & !(Group::WIDTH - 1);

                let tail = Self::new(
                    self.next_ctrl.add(mid),
                    self.data.next_n(Group::WIDTH).next_n(mid),
                    len - mid,
                );
                debug_assert_eq!(
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                    tail.data.ptr
                );
                debug_assert_eq!(self.end, tail.end);

                // Truncate the head so the two halves do not overlap.
                self.end = self.next_ctrl.add(mid);
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);

                (self, Some(tail))
            }
        }
    }
}
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
// in the actual iterator implementations (`RawIntoIter`, `RawDrain`)
// determine the real Send/Sync bounds.
unsafe impl<T> Send for RawIterRange<T> {}
unsafe impl<T> Sync for RawIterRange<T> {}
impl<T> Clone for RawIterRange<T> {
    /// Duplicates the cursor state only; the elements it points at are not
    /// cloned, so no `T: Clone` bound is required (a derive would add one).
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self {
            current_group: self.current_group,
            end: self.end,
            data: self.data.clone(),
            next_ctrl: self.next_ctrl,
        }
    }
}
impl<T> Iterator for RawIterRange<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                // Drain the cached bitmask of full buckets first.
                if let Some(index) = self.current_group.lowest_set_bit() {
                    self.current_group = self.current_group.remove_lowest_bit();
                    return Some(self.data.next_n(index));
                }

                if self.next_ctrl >= self.end {
                    return None;
                }

                // We might read past self.end up to the next group boundary,
                // but this is fine because it only occurs on tables smaller
                // than the group size where the trailing control bytes are all
                // EMPTY. On larger tables self.end is guaranteed to be aligned
                // to the group size (since tables are power-of-two sized).
                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                self.data = self.data.next_n(Group::WIDTH);
                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
            }
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // We don't have an item count, so just guess based on the range size:
        // at worst every remaining control byte (plus the cached group) is full.
        (
            0,
            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
        )
    }
}

// `next` keeps returning `None` once the range is exhausted.
impl<T> FusedIterator for RawIterRange<T> {}
/// Iterator which returns a raw pointer to every full bucket in the table.
pub struct RawIter<T> {
    pub(crate) iter: RawIterRange<T>,
    // Number of full buckets not yet yielded; keeps `size_hint` exact.
    items: usize,
}
impl<T> RawIter<T> {
    /// Refresh the iterator so that it reflects a removal from the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each removed bucket before `next` is called again.
    ///
    /// This method should be called _before_ the removal is made. It is not necessary to call this
    /// method if you are removing an item that this iterator yielded in the past.
    #[cfg(feature = "raw")]
    pub fn reflect_remove(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, false);
    }

    /// Refresh the iterator so that it reflects an insertion into the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each insert before `next` is called again.
    ///
    /// This method does not guarantee that an insertion of a bucket with a greater
    /// index than the last one yielded will be reflected in the iterator.
    ///
    /// This method should be called _after_ the given insert is made.
    #[cfg(feature = "raw")]
    pub fn reflect_insert(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, true);
    }

    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
    ///
    /// `is_insert` is true for an insertion and false for a removal.
    #[cfg(feature = "raw")]
    fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
        unsafe {
            // NOTE(review): the pointer comparisons below imply bucket data
            // grows downwards (a higher pointer is an earlier iteration
            // position) — confirm against `Bucket::from_base_index`.
            if b.as_ptr() > self.iter.data.as_ptr() {
                // The iterator has already passed the bucket's group.
                // So the toggle isn't relevant to this iterator.
                return;
            }

            if self.iter.next_ctrl < self.iter.end
                && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
            {
                // The iterator has not yet reached the bucket's group.
                // We don't need to reload anything, but we do need to adjust the item count.

                if cfg!(debug_assertions) {
                    // Double-check that the user isn't lying to us by checking the bucket state.
                    // To do that, we need to find its control byte. We know that self.iter.data is
                    // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
                    let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
                    // This method should be called _before_ a removal, or _after_ an insert,
                    // so in both cases the ctrl byte should indicate that the bucket is full.
                    assert!(is_full(*ctrl));
                }

                if is_insert {
                    self.items += 1;
                } else {
                    self.items -= 1;
                }

                return;
            }

            // The iterator is at the bucket group that the toggled bucket is in.
            // We need to do two things:
            //
            //  - Determine if the iterator already yielded the toggled bucket.
            //    If it did, we're done.
            //  - Otherwise, update the iterator cached group so that it won't
            //    yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
            //    We'll also need to update the item count accordingly.
            if let Some(index) = self.iter.current_group.lowest_set_bit() {
                let next_bucket = self.iter.data.next_n(index);
                if b.as_ptr() > next_bucket.as_ptr() {
                    // The toggled bucket is "before" the bucket the iterator would yield next. We
                    // therefore don't need to do anything --- the iterator has already passed the
                    // bucket in question.
                    //
                    // The item count must already be correct, since a removal or insert "prior" to
                    // the iterator's position wouldn't affect the item count.
                } else {
                    // The removed bucket is an upcoming bucket. We need to make sure it does _not_
                    // get yielded, and also that it's no longer included in the item count.
                    //
                    // NOTE: We can't just reload the group here, both since that might reflect
                    // inserts we've already passed, and because that might inadvertently unset the
                    // bits for _other_ removals. If we do that, we'd have to also decrement the
                    // item count for those other bits that we unset. But the presumably subsequent
                    // call to reflect for those buckets might _also_ decrement the item count.
                    // Instead, we _just_ flip the bit for the particular bucket the caller asked
                    // us to reflect.
                    let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let was_full = self.iter.current_group.flip(our_bit);
                    debug_assert_ne!(was_full, is_insert);

                    if is_insert {
                        self.items += 1;
                    } else {
                        self.items -= 1;
                    }

                    if cfg!(debug_assertions) {
                        if b.as_ptr() == next_bucket.as_ptr() {
                            // The removed bucket should no longer be next
                            debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
                        } else {
                            // We should not have changed what bucket comes next.
                            debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
                        }
                    }
                }
            } else {
                // We must have already iterated past the removed item.
            }
        }
    }
}
impl<T> Clone for RawIter<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
iter: self.iter.clone(),
items: self.items,
}
}
}
impl<T> Iterator for RawIter<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        match self.iter.next() {
            Some(bucket) => {
                // One fewer full bucket left to hand out.
                self.items -= 1;
                Some(bucket)
            }
            None => {
                // Deliberately no `items == 0` branch in the hot path above:
                // keeping the count out of `next`'s control flow lets the
                // compiler optimize it away entirely when the length is
                // never queried. The debug assertion still catches drift.
                debug_assert_eq!(self.items, 0);
                None
            }
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The count is maintained exactly, so lower and upper bound agree.
        let remaining = self.items;
        (remaining, Some(remaining))
    }
}
// `items` is maintained exactly by `next`, so the length promise holds.
impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
/// Iterator which consumes a table and returns elements.
pub struct RawIntoIter<T> {
    iter: RawIter<T>,
    // The table's raw allocation, freed when the iterator is dropped;
    // `None` for the unallocated empty singleton.
    alloc: Option<(NonNull<u8>, Layout)>,
    // Conveys ownership of the remaining `T`s for drop-check purposes.
    marker: PhantomData<T>,
}
impl<T> RawIntoIter<T> {
    /// Returns a borrowing snapshot of the remaining elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
// SAFETY: the iterator owns the remaining elements and their allocation.
unsafe impl<T> Send for RawIntoIter<T> where T: Send {}
unsafe impl<T> Sync for RawIntoIter<T> where T: Sync {}
// See the matching `#[may_dangle]` note on `RawTable`'s Drop impl above.
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Free the table
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}
// Stable fallback of the Drop impl above, without `#[may_dangle]`.
#[cfg(not(feature = "nightly"))]
impl<T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Free the table
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}
impl<T> Iterator for RawIntoIter<T> {
    type Item = T;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        let bucket = self.iter.next()?;
        // Move the value out of the bucket; the backing allocation itself
        // is freed separately when the iterator is dropped.
        unsafe { Some(bucket.read()) }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Delegate to the inner iterator's exact count.
        self.iter.size_hint()
    }
}
// Length comes straight from the inner `RawIter`, which is exact.
impl<T> ExactSizeIterator for RawIntoIter<T> {}
impl<T> FusedIterator for RawIntoIter<T> {}
/// Iterator which consumes elements without freeing the table storage.
pub struct RawDrain<'a, T> {
    iter: RawIter<T>,

    // The table is moved into the iterator for the duration of the drain. This
    // ensures that an empty table is left if the drain iterator is leaked
    // without dropping.
    table: ManuallyDrop<RawTable<T>>,
    // Where to write the (emptied) table back when the drain is dropped.
    orig_table: NonNull<RawTable<T>>,

    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
    // covariant over T.
    marker: PhantomData<&'a RawTable<T>>,
}
impl<T> RawDrain<'_, T> {
    /// Returns a borrowing snapshot of the elements not yet drained.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
// SAFETY: the drain owns the remaining elements for its lifetime.
unsafe impl<T> Send for RawDrain<'_, T> where T: Send {}
unsafe impl<T> Sync for RawDrain<'_, T> where T: Sync {}
impl<T> Drop for RawDrain<'_, T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements. Note that this may panic.
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Reset the contents of the table now that all elements have been
            // dropped.
            self.table.clear_no_drop();

            // Move the now empty table back to its original location. A
            // bitwise copy is sufficient since the table no longer owns any
            // elements (and `ManuallyDrop` suppresses the local's Drop).
            self.orig_table
                .as_ptr()
                .copy_from_nonoverlapping(&*self.table, 1);
        }
    }
}
impl<T> Iterator for RawDrain<'_, T> {
    type Item = T;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        // Move each value out of its bucket; the control bytes are reset in
        // bulk when the drain itself is dropped.
        unsafe { self.iter.next().map(|bucket| bucket.read()) }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Delegate to the inner iterator's exact count.
        self.iter.size_hint()
    }
}
// Length comes straight from the inner `RawIter`, which is exact.
impl<T> ExactSizeIterator for RawDrain<'_, T> {}
impl<T> FusedIterator for RawDrain<'_, T> {}
|
/*!
Operations on raw finite state transducers.
This sub-module exposes the guts of a finite state transducer. Many parts of
it, such as construction and traversal, are mirrored in the `set` and `map`
sub-modules. Other parts of it, such as direct access to nodes and transitions
in the transducer, do not have any analog.
# Overview of types
`Fst` is a read only interface to pre-constructed finite state transducers.
`Node` is a read only interface to a single node in a transducer. `Builder` is
used to create new finite state transducers. (Once a transducer is created, it
can never be modified.) `Stream` is a stream of all inputs and outputs in a
transducer. `StreamBuilder` builds range queries. `OpBuilder` collects streams
and executes set operations like `union` or `intersection` on them with the
option of specifying a merge strategy for output values.
Most of the rest of the types are streams from set operations.
*/
use std::cmp;
use std::fmt;
use crate::automaton::{AlwaysMatch, Automaton};
use crate::bytes;
use crate::error::Result;
use crate::stream::{IntoStreamer, Streamer};
pub use crate::raw::build::Builder;
pub use crate::raw::error::Error;
pub use crate::raw::node::{Node, Transitions};
pub use crate::raw::ops::{
Difference, IndexedValue, Intersection, OpBuilder, SymmetricDifference,
Union,
};
mod build;
mod common_inputs;
mod counting_writer;
mod crc32;
mod crc32_table;
mod error;
mod node;
mod ops;
mod registry;
mod registry_minimal;
#[cfg(test)]
mod tests;
/// The API version of this crate.
///
/// This version number is written to every finite state transducer created by
/// this crate. When a finite state transducer is read, its version number is
/// checked against this value.
///
/// Currently, any version mismatch results in an error. Fixing this requires
/// regenerating the finite state transducer or switching to a version of this
/// crate that is compatible with the serialized transducer. This particular
/// behavior may be relaxed in future versions.
pub const VERSION: u64 = 3;

/// A sentinel value used to indicate an empty final state.
const EMPTY_ADDRESS: CompiledAddr = 0;

/// A sentinel value used to indicate an invalid state.
///
/// This is never the address of a node in a serialized transducer.
///
/// Note that this is deliberately distinct from `EMPTY_ADDRESS`, so an empty
/// final state can be told apart from "no state".
const NONE_ADDRESS: CompiledAddr = 1;

/// FstType is a convention used to indicate the type of the underlying
/// transducer.
///
/// This crate reserves the range 0-255 (inclusive) but currently leaves the
/// meaning of 0-255 unspecified.
pub type FstType = u64;

/// CompiledAddr is the type used to address nodes in a finite state
/// transducer.
///
/// It is most useful as a pointer to nodes. It can be used in the `Fst::node`
/// method to resolve the pointer.
pub type CompiledAddr = usize;
/// An acyclic deterministic finite state transducer.
///
/// # How does it work?
///
/// The short answer: it's just like a prefix trie, which compresses keys
/// based only on their prefixes, except that an automaton/transducer also
/// compresses suffixes.
///
/// The longer answer is that keys in an automaton are stored only in the
/// transitions from one state to another. A key can be acquired by tracing
/// a path from the root of the automaton to any match state. The inputs along
/// each transition are concatenated. Once a match state is reached, the
/// concatenation of inputs up until that point corresponds to a single key.
///
/// But why is it called a transducer instead of an automaton? A finite state
/// transducer is just like a finite state automaton, except that it has output
/// transitions in addition to input transitions. Namely, the value associated
/// with any particular key is determined by summing the outputs along every
/// input transition that leads to the key's corresponding match state.
///
/// This is best demonstrated with a couple images. First, let's ignore the
/// "transducer" aspect and focus on a plain automaton.
///
/// Consider that your keys are abbreviations of some of the months in the
/// Gregorian calendar:
///
/// ```plain
/// jan
/// feb
/// mar
/// apr
/// may
/// jun
/// jul
/// ```
///
/// The corresponding automaton that stores all of these as keys looks like
/// this:
///
/// 
///
/// Notice here how the prefix and suffix of `jan` and `jun` are shared.
/// Similarly, the prefixes of `jun` and `jul` are shared and the prefixes
/// of `mar` and `may` are shared.
///
/// All of the keys from this automaton can be enumerated in lexicographic
/// order by following every transition from each node in lexicographic
/// order. Since it is acyclic, the procedure will terminate.
///
/// A key can be found by tracing it through the transitions in the automaton.
/// For example, the key `aug` is known not to be in the automaton by only
/// visiting the root state (because there is no `a` transition). For another
/// example, the key `jax` is known not to be in the set only after moving
/// through the transitions for `j` and `a`. Namely, after those transitions
/// are followed, there are no transitions for `x`.
///
/// Notice here that looking up a key is proportional to the length of the key
/// itself. Namely, lookup time is not affected by the number of keys in the
/// automaton!
///
/// Additionally, notice that the automaton exploits the fact that many keys
/// share common prefixes and suffixes. For example, `jun` and `jul` are
/// represented with no more states than would be required to represent either
/// one on its own. Instead, the only change is a single extra transition. This
/// is a form of compression and is key to how the automatons produced by this
/// crate are so small.
///
/// Let's move on to finite state transducers. Consider the same set of keys
/// as above, but let's assign their numeric month values:
///
/// ```plain
/// jan,1
/// feb,2
/// mar,3
/// apr,4
/// may,5
/// jun,6
/// jul,7
/// ```
///
/// The corresponding transducer looks very similar to the automaton above,
/// except outputs have been added to some of the transitions:
///
/// 
///
/// All of the operations with a transducer are the same as described above
/// for automatons. Additionally, the same compression techniques are used:
/// common prefixes and suffixes in keys are exploited.
///
/// The key difference is that some transitions have been given an output.
/// As one follows input transitions, one must sum the outputs as they
/// are seen. (A transition with no output represents the additive identity,
/// or `0` in this case.) For example, when looking up `feb`, the transition
/// `f` has output `2`, the transition `e` has output `0`, and the transition
/// `b` also has output `0`. The sum of these is `2`, which is exactly the
/// value we associated with `feb`.
///
/// For another more interesting example, consider `jul`. The `j` transition
/// has output `1`, the `u` transition has output `5` and the `l` transition
/// has output `1`. Summing these together gets us `7`, which is again the
/// correct value associated with `jul`. Notice that if we instead looked up
/// the `jun` key, then the `n` transition would be followed instead of the
/// `l` transition, which has no output. Therefore, the `jun` key equals
/// `1+5+0=6`.
///
/// The trick to transducers is that there exists a unique path through the
/// transducer for every key, and its outputs are stored appropriately along
/// this path such that the correct value is returned when they are all summed
/// together. This process also enables the data that makes up each value to be
/// shared across many values in the transducer in exactly the same way that
/// keys are shared. This is yet another form of compression!
///
/// # Bonus: a billion strings
///
/// The amount of compression one can get from automata can be absolutely
/// ridiculous. Consider the particular case of storing all billion strings
/// in the range `0000000001-1000000000`, e.g.,
///
/// ```plain
/// 0000000001
/// 0000000002
/// ...
/// 0000000100
/// 0000000101
/// ...
/// 0999999999
/// 1000000000
/// ```
///
/// The corresponding automaton looks like this:
///
/// 
///
/// Indeed, the on disk size of this automaton is a mere **251 bytes**.
///
/// Of course, this is a bit of a pathological best case, but it does serve
/// to show how good compression can be in the optimal case.
///
/// Also, check out the
/// [corresponding transducer](http://burntsushi.net/stuff/one-billion-map.svg)
/// that maps each string to its integer value. It's a bit bigger, but still
/// only takes up **896 bytes** of space on disk. This demonstrates that
/// output values are also compressible.
///
/// # Does this crate produce minimal transducers?
///
/// For any non-trivial sized set of keys, it is unlikely that this crate will
/// produce a minimal transducer. As far as this author knows, guaranteeing a
/// minimal transducer requires working memory proportional to the number of
/// states. This can be quite costly and is anathema to the main design goal of
/// this crate: provide the ability to work with gigantic sets of strings with
/// constant memory overhead.
///
/// Instead, construction of a finite state transducer uses a cache of
/// states. More frequently used states are cached and reused, which provides
/// reasonably good compression ratios. (No comprehensive benchmarks exist to
/// back up this claim.)
///
/// It is possible that this crate may expose a way to guarantee minimal
/// construction of transducers at the expense of exorbitant memory
/// requirements.
///
/// # Bibliography
///
/// I initially got the idea to use finite state transducers to represent
/// ordered sets/maps from
/// [Michael
/// McCandless'](http://blog.mikemccandless.com/2010/12/using-finite-state-transducers-in.html)
/// work on incorporating transducers in Lucene.
///
/// However, my work would also not have been possible without the hard work
/// of many academics, especially
/// [Jan Daciuk](http://galaxy.eti.pg.gda.pl/katedry/kiw/pracownicy/Jan.Daciuk/personal/).
///
/// * [Incremental construction of minimal acyclic finite-state automata](http://www.mitpressjournals.org/doi/pdfplus/10.1162/089120100561601)
/// (Section 3 provides a decent overview of the algorithm used to construct
/// transducers in this crate, assuming all outputs are `0`.)
/// * [Direct Construction of Minimal Acyclic Subsequential Transducers](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.24.3698&rep=rep1&type=pdf)
/// (The whole thing. The proof is dense but illuminating. The algorithm at
/// the end is the money shot, namely, it incorporates output values.)
/// * [Experiments with Automata Compression](http://www.researchgate.net/profile/Jii_Dvorsky/publication/221568039_Word_Random_Access_Compression/links/0c96052c095630d5b3000000.pdf#page=116), [Smaller Representation of Finite State Automata](http://www.cs.put.poznan.pl/dweiss/site/publications/download/fsacomp.pdf)
/// (various compression techniques for representing states/transitions)
/// * [Jan Daciuk's dissertation](http://www.pg.gda.pl/~jandac/thesis.ps.gz)
/// (excellent for in depth overview)
/// * [Comparison of Construction Algorithms for Minimal, Acyclic, Deterministic, Finite-State Automata from Sets of Strings](http://www.cs.mun.ca/~harold/Courses/Old/CS4750/Diary/q3p2qx4lv71m5vew.pdf)
/// (excellent for surface level overview)
#[derive(Clone)]
pub struct Fst<D> {
    // Header/footer information (version, type, root address, key count and
    // optional checksum) parsed out of `data` by `Fst::new`.
    meta: Meta,
    // The raw serialized bytes of the transducer.
    data: D,
}
/// Metadata parsed from the fixed-size header/footer of a serialized FST.
#[derive(Debug, Clone)]
struct Meta {
    // The format version, read from the first 8 bytes of the FST.
    version: u64,
    // The address of the root node. It is read from the footer and points at
    // the *last* byte of the root node.
    root_addr: CompiledAddr,
    // The FST type tag, read from bytes 8..16. See `FstType`.
    ty: FstType,
    // The total number of keys in this FST, read from the footer.
    len: usize,
    /// A checksum is missing when the FST version is <= 2. (Checksums were
    /// added in version 3.)
    checksum: Option<u32>,
}
impl Fst<Vec<u8>> {
    /// Builds an in-memory FST from an iterator of byte strings in
    /// lexicographic order. Every key is mapped to the value `0`.
    ///
    /// If the iterator yields keys out of lexicographic order, an error is
    /// returned.
    ///
    /// This is a convenience for building an FST entirely in memory. To
    /// stream an FST to an arbitrary `io::Write`, use `raw::Builder`.
    pub fn from_iter_set<K, I>(iter: I) -> Result<Fst<Vec<u8>>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = K>,
    {
        let mut builder = Builder::memory();
        iter.into_iter().try_for_each(|key| builder.add(key))?;
        Ok(builder.into_fst())
    }

    /// Builds an in-memory FST from an iterator of `(key, value)` pairs
    /// whose keys are byte strings in lexicographic order.
    ///
    /// If the iterator yields duplicate keys or keys out of lexicographic
    /// order, an error is returned.
    ///
    /// This is a convenience for building an FST entirely in memory. To
    /// stream an FST to an arbitrary `io::Write`, use `raw::Builder`.
    pub fn from_iter_map<K, I>(iter: I) -> Result<Fst<Vec<u8>>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = (K, u64)>,
    {
        let mut builder = Builder::memory();
        iter.into_iter().try_for_each(|(key, val)| builder.insert(key, val))?;
        Ok(builder.into_fst())
    }
}
impl<D: AsRef<[u8]>> Fst<D> {
    /// Creates a transducer from its representation as a raw byte sequence.
    ///
    /// This operation is intentionally very cheap (no allocations and no
    /// copies). In particular, no verification on the integrity of the
    /// FST is performed. Callers may opt into integrity checks via the
    /// [`Fst::verify`](struct.Fst.html#method.verify) method.
    ///
    /// The fst must have been written with a compatible finite state
    /// transducer builder (`Builder` qualifies). If the format is invalid or
    /// if there is a mismatch between the API version of this library and the
    /// fst, then an error is returned.
    #[inline]
    pub fn new(data: D) -> Result<Fst<D>> {
        let bytes = data.as_ref();
        // 36 bytes is the smallest possible FST (see the longer comment
        // below about the empty FST).
        if bytes.len() < 36 {
            return Err(Error::Format { size: bytes.len() }.into());
        }
        // The read_u64 unwraps below are OK because they can never fail.
        // They can only fail when there is an IO error or if there is an
        // unexpected EOF. However, we are reading from a byte slice (no
        // IO errors possible) and we've confirmed the byte slice is at least
        // N bytes (no unexpected EOF).
        let version = bytes::read_u64_le(&bytes);
        if version == 0 || version > VERSION {
            return Err(
                Error::Version { expected: VERSION, got: version }.into()
            );
        }
        let ty = bytes::read_u64_le(&bytes[8..]);
        // Checksums were added in version 3 and occupy the trailing 4 bytes.
        // `end` marks where the footer (len + root address) ends.
        let (end, checksum) = if version <= 2 {
            (bytes.len(), None)
        } else {
            let checksum = bytes::read_u32_le(&bytes[bytes.len() - 4..]);
            (bytes.len() - 4, Some(checksum))
        };
        let root_addr = {
            let last = &bytes[end - 8..];
            u64_to_usize(bytes::read_u64_le(last))
        };
        let len = {
            let last2 = &bytes[end - 16..];
            u64_to_usize(bytes::read_u64_le(last2))
        };
        // The root node is always the last node written, so its address should
        // be near the end. After the root node is written, we still have to
        // write the root *address* and the number of keys in the FST, along
        // with the checksum. That's 20 bytes. The extra byte used below (21
        // and not 20) comes from the fact that the root address points to
        // the last byte in the root node, rather than the byte immediately
        // following the root node.
        //
        // If this check passes, it is still possible that the FST is invalid
        // but probably unlikely. If this check reports a false positive, then
        // the program will probably panic. In the worst case, the FST will
        // operate but be subtly wrong. (This would require the bytes to be in
        // a format expected by an FST, which is incredibly unlikely.)
        //
        // The special check for EMPTY_ADDRESS is needed since an empty FST
        // has a root node that is empty and final, which means it has the
        // special address `0`. In that case, the FST is the smallest it can
        // be: the version, type, root address and number of nodes. That's
        // 36 bytes (8 byte u64 each).
        //
        // And finally, our calculation changes somewhat based on version.
        // If the FST version is less than 3, then it does not have a checksum.
        let (empty_total, addr_offset) =
            if version <= 2 { (32, 17) } else { (36, 21) };
        if (root_addr == EMPTY_ADDRESS && bytes.len() != empty_total)
            && root_addr + addr_offset != bytes.len()
        {
            return Err(Error::Format { size: bytes.len() }.into());
        }
        let meta = Meta { version, root_addr, ty, len, checksum };
        Ok(Fst { meta, data })
    }

    /// Retrieves the value associated with a key.
    ///
    /// If the key does not exist, then `None` is returned.
    #[inline]
    pub fn get<B: AsRef<[u8]>>(&self, key: B) -> Option<Output> {
        self.as_ref().get(key.as_ref())
    }

    /// Returns true if and only if the given key is in this FST.
    #[inline]
    pub fn contains_key<B: AsRef<[u8]>>(&self, key: B) -> bool {
        self.as_ref().contains_key(key.as_ref())
    }

    /// Retrieves the key associated with the given value.
    ///
    /// This is like `get_key_into`, but will return the key itself without
    /// allowing the caller to reuse an allocation.
    ///
    /// If the given value does not exist, then `None` is returned.
    ///
    /// If the values in this FST are not monotonically increasing when sorted
    /// lexicographically by key, then this routine has unspecified behavior.
    #[inline]
    pub fn get_key(&self, value: u64) -> Option<Vec<u8>> {
        let mut key = vec![];
        if self.get_key_into(value, &mut key) {
            Some(key)
        } else {
            None
        }
    }

    /// Retrieves the key associated with the given value.
    ///
    /// If the given value does not exist, then `false` is returned. In this
    /// case, the contents of `key` are unspecified.
    ///
    /// The given buffer is not cleared before the key is written to it.
    ///
    /// If the values in this FST are not monotonically increasing when sorted
    /// lexicographically by key, then this routine has unspecified behavior.
    #[inline]
    pub fn get_key_into(&self, value: u64, key: &mut Vec<u8>) -> bool {
        self.as_ref().get_key_into(value, key)
    }

    /// Return a lexicographically ordered stream of all key-value pairs in
    /// this fst.
    #[inline]
    pub fn stream(&self) -> Stream<'_> {
        StreamBuilder::new(self.as_ref(), AlwaysMatch).into_stream()
    }

    /// Return a builder for range queries.
    ///
    /// A range query returns a subset of key-value pairs in this fst in a
    /// range given in lexicographic order.
    #[inline]
    pub fn range(&self) -> StreamBuilder<'_> {
        StreamBuilder::new(self.as_ref(), AlwaysMatch)
    }

    /// Executes an automaton on the keys of this FST.
    #[inline]
    pub fn search<A: Automaton>(&self, aut: A) -> StreamBuilder<'_, A> {
        StreamBuilder::new(self.as_ref(), aut)
    }

    /// Executes an automaton on the keys of this FST and yields matching
    /// keys along with the corresponding matching states in the given
    /// automaton.
    #[inline]
    pub fn search_with_state<A: Automaton>(
        &self,
        aut: A,
    ) -> StreamWithStateBuilder<'_, A> {
        StreamWithStateBuilder::new(self.as_ref(), aut)
    }

    /// Returns the number of keys in this fst.
    #[inline]
    pub fn len(&self) -> usize {
        self.as_ref().len()
    }

    /// Returns true if and only if this fst has no keys.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.as_ref().is_empty()
    }

    /// Returns the number of bytes used by this fst.
    #[inline]
    pub fn size(&self) -> usize {
        self.as_ref().size()
    }

    /// Attempts to verify this FST by computing its checksum.
    ///
    /// This will scan over all of the bytes in the underlying FST, so this
    /// may be an expensive operation depending on the size of the FST.
    ///
    /// This returns an error in two cases:
    ///
    /// 1. When a checksum does not exist, which is the case for FSTs that were
    /// produced by the `fst` crate before version `0.4`.
    /// 2. When the checksum in the FST does not match the computed checksum
    /// performed by this procedure.
    #[inline]
    pub fn verify(&self) -> Result<()> {
        use crate::raw::crc32::CheckSummer;

        let expected = match self.as_ref().meta.checksum {
            None => return Err(Error::ChecksumMissing.into()),
            Some(expected) => expected,
        };
        // Checksum everything except the trailing 4 bytes, which hold the
        // stored checksum itself.
        let mut summer = CheckSummer::new();
        summer.update(&self.as_bytes()[..self.as_bytes().len() - 4]);
        let got = summer.masked();
        if expected == got {
            return Ok(());
        }
        Err(Error::ChecksumMismatch { expected, got }.into())
    }

    /// Creates a new fst operation with this fst added to it.
    ///
    /// The `OpBuilder` type can be used to add additional fst streams
    /// and perform set operations like union, intersection, difference and
    /// symmetric difference on the keys of the fst. These set operations also
    /// allow one to specify how conflicting values are merged in the stream.
    #[inline]
    pub fn op(&self) -> OpBuilder<'_> {
        OpBuilder::new().add(self)
    }

    /// Returns true if and only if the `self` fst is disjoint with the fst
    /// `stream`.
    ///
    /// `stream` must be a lexicographically ordered sequence of byte strings
    /// with associated values.
    #[inline]
    pub fn is_disjoint<'f, I, S>(&self, stream: I) -> bool
    where
        I: for<'a> IntoStreamer<'a, Into = S, Item = (&'a [u8], Output)>,
        S: 'f + for<'a> Streamer<'a, Item = (&'a [u8], Output)>,
    {
        // Disjoint iff the intersection yields nothing.
        self.op().add(stream).intersection().next().is_none()
    }

    /// Returns true if and only if the `self` fst is a subset of the fst
    /// `stream`.
    ///
    /// `stream` must be a lexicographically ordered sequence of byte strings
    /// with associated values.
    #[inline]
    pub fn is_subset<'f, I, S>(&self, stream: I) -> bool
    where
        I: for<'a> IntoStreamer<'a, Into = S, Item = (&'a [u8], Output)>,
        S: 'f + for<'a> Streamer<'a, Item = (&'a [u8], Output)>,
    {
        // Subset iff every one of our keys survives the intersection.
        let mut op = self.op().add(stream).intersection();
        let mut count = 0;
        while let Some(_) = op.next() {
            count += 1;
        }
        count == self.len()
    }

    /// Returns true if and only if the `self` fst is a superset of the fst
    /// `stream`.
    ///
    /// `stream` must be a lexicographically ordered sequence of byte strings
    /// with associated values.
    #[inline]
    pub fn is_superset<'f, I, S>(&self, stream: I) -> bool
    where
        I: for<'a> IntoStreamer<'a, Into = S, Item = (&'a [u8], Output)>,
        S: 'f + for<'a> Streamer<'a, Item = (&'a [u8], Output)>,
    {
        // Superset iff the union adds no keys beyond our own.
        let mut op = self.op().add(stream).union();
        let mut count = 0;
        while let Some(_) = op.next() {
            count += 1;
        }
        count == self.len()
    }

    /// Returns the underlying type of this fst.
    ///
    /// FstType is a convention used to indicate the type of the underlying
    /// transducer.
    ///
    /// This crate reserves the range 0-255 (inclusive) but currently leaves
    /// the meaning of 0-255 unspecified.
    #[inline]
    pub fn fst_type(&self) -> FstType {
        self.as_ref().fst_type()
    }

    /// Returns the root node of this fst.
    #[inline]
    pub fn root(&self) -> Node<'_> {
        self.as_ref().root()
    }

    /// Returns the node at the given address.
    ///
    /// Node addresses can be obtained by reading transitions on `Node` values.
    #[inline]
    pub fn node(&self, addr: CompiledAddr) -> Node<'_> {
        self.as_ref().node(addr)
    }

    /// Returns a copy of the binary contents of this FST.
    #[inline]
    pub fn to_vec(&self) -> Vec<u8> {
        self.as_ref().to_vec()
    }

    /// Returns the binary contents of this FST.
    #[inline]
    pub fn as_bytes(&self) -> &[u8] {
        self.as_ref().as_bytes()
    }

    // Borrows the metadata and raw bytes as a lightweight view. All read
    // operations above delegate to this view.
    #[inline]
    fn as_ref(&self) -> FstRef {
        FstRef { meta: &self.meta, data: self.data.as_ref() }
    }
}
impl<D> Fst<D> {
    /// Returns the underlying data which constitutes the FST itself.
    #[inline]
    pub fn into_inner(self) -> D {
        self.data
    }

    /// Maps the underlying data of the fst to another data type.
    #[inline]
    pub fn map_data<F, T>(self, mut f: F) -> Result<Fst<T>>
    where
        F: FnMut(D) -> T,
        T: AsRef<[u8]>,
    {
        // Re-parse the mapped bytes so the metadata stays consistent.
        let mapped = f(self.into_inner());
        Fst::new(mapped)
    }
}
impl<'a, 'f, D: AsRef<[u8]>> IntoStreamer<'a> for &'f Fst<D> {
    type Item = (&'a [u8], Output);
    type Into = Stream<'f>;

    /// Streaming a borrowed FST is identical to calling `Fst::stream`.
    #[inline]
    fn into_stream(self) -> Stream<'f> {
        self.stream()
    }
}
/// A cheap, borrowed view of an `Fst`: its parsed metadata plus the raw
/// serialized bytes. All read operations are implemented on this type.
struct FstRef<'f> {
    meta: &'f Meta,
    data: &'f [u8],
}
impl<'f> FstRef<'f> {
    /// Looks up `key` by following one transition per input byte starting at
    /// the root, summing transition outputs along the way. Returns the sum
    /// (plus the final output) only if the walk ends in a final state.
    #[inline]
    fn get(&self, key: &[u8]) -> Option<Output> {
        let mut node = self.root();
        let mut out = Output::zero();
        for &b in key {
            node = match node.find_input(b) {
                None => return None,
                Some(i) => {
                    let t = node.transition(i);
                    out = out.cat(t.out);
                    self.node(t.addr)
                }
            }
        }
        if !node.is_final() {
            None
        } else {
            Some(out.cat(node.final_output()))
        }
    }

    /// Like `get`, but only reports whether the walk ends in a final state.
    /// Outputs are not accumulated.
    #[inline]
    fn contains_key(&self, key: &[u8]) -> bool {
        let mut node = self.root();
        for &b in key {
            node = match node.find_input(b) {
                None => return false,
                Some(i) => self.node(node.transition_addr(i)),
            }
        }
        node.is_final()
    }

    /// Reverse lookup: reconstructs the key whose summed outputs equal
    /// `value`, appending its bytes to `key`. At each node, the last
    /// transition whose output does not exceed the remaining value is
    /// followed. This relies on values being monotonically increasing when
    /// keys are sorted lexicographically (see `Fst::get_key_into`).
    #[inline]
    fn get_key_into(&self, mut value: u64, key: &mut Vec<u8>) -> bool {
        let mut node = self.root();
        while value != 0 || !node.is_final() {
            let trans = node
                .transitions()
                .take_while(|t| t.out.value() <= value)
                .last();
            node = match trans {
                None => return false,
                Some(t) => {
                    value -= t.out.value();
                    key.push(t.inp);
                    self.node(t.addr)
                }
            };
        }
        true
    }

    /// Returns the number of keys, as recorded in the FST footer.
    #[inline]
    fn len(&self) -> usize {
        self.meta.len
    }

    /// Returns true if and only if this FST has no keys.
    #[inline]
    fn is_empty(&self) -> bool {
        self.meta.len == 0
    }

    /// Returns the total number of bytes in the serialized FST.
    #[inline]
    fn size(&self) -> usize {
        self.as_bytes().len()
    }

    /// Returns the FST type tag from the header.
    #[inline]
    fn fst_type(&self) -> FstType {
        self.meta.ty
    }

    /// Returns the address of the root node, as recorded in the footer.
    #[inline]
    fn root_addr(&self) -> CompiledAddr {
        self.meta.root_addr
    }

    /// Returns the root node.
    #[inline]
    fn root(&self) -> Node<'f> {
        self.node(self.root_addr())
    }

    /// Decodes the node at the given address.
    #[inline]
    fn node(&self, addr: CompiledAddr) -> Node<'f> {
        Node::new(self.meta.version, addr, self.as_bytes())
    }

    /// Returns a copy of the serialized bytes.
    #[inline]
    fn to_vec(&self) -> Vec<u8> {
        self.as_bytes().to_vec()
    }

    /// Returns the serialized bytes.
    #[inline]
    fn as_bytes(&self) -> &'f [u8] {
        self.data
    }

    /// Returns the output of the empty key, if the empty key is present
    /// (i.e., if the root node is final).
    #[inline]
    fn empty_final_output(&self) -> Option<Output> {
        let root = self.root();
        if root.is_final() {
            Some(root.final_output())
        } else {
            None
        }
    }
}
/// A builder for constructing range queries on streams.
///
/// Once all bounds are set, one should call `into_stream` to get a
/// `Stream`.
///
/// Bounds are not additive. That is, if `ge` is called twice on the same
/// builder, then the second setting wins.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
pub struct StreamBuilder<'f, A = AlwaysMatch> {
    // The FST being queried.
    fst: FstRef<'f>,
    // The automaton used to filter keys.
    aut: A,
    // Lower and upper range endpoints; `Bound::Unbounded` until set.
    min: Bound,
    max: Bound,
}
impl<'f, A: Automaton> StreamBuilder<'f, A> {
    /// Creates a builder over `fst` filtered by `aut`, with both range
    /// endpoints unbounded.
    fn new(fst: FstRef<'f>, aut: A) -> StreamBuilder<'f, A> {
        let (min, max) = (Bound::Unbounded, Bound::Unbounded);
        StreamBuilder { fst, aut, min, max }
    }

    /// Specify a greater-than-or-equal-to bound.
    pub fn ge<T: AsRef<[u8]>>(mut self, bound: T) -> StreamBuilder<'f, A> {
        self.min = Bound::Included(bound.as_ref().to_vec());
        self
    }

    /// Specify a greater-than bound.
    pub fn gt<T: AsRef<[u8]>>(mut self, bound: T) -> StreamBuilder<'f, A> {
        self.min = Bound::Excluded(bound.as_ref().to_vec());
        self
    }

    /// Specify a less-than-or-equal-to bound.
    pub fn le<T: AsRef<[u8]>>(mut self, bound: T) -> StreamBuilder<'f, A> {
        self.max = Bound::Included(bound.as_ref().to_vec());
        self
    }

    /// Specify a less-than bound.
    pub fn lt<T: AsRef<[u8]>>(mut self, bound: T) -> StreamBuilder<'f, A> {
        self.max = Bound::Excluded(bound.as_ref().to_vec());
        self
    }
}
impl<'a, 'f, A: Automaton> IntoStreamer<'a> for StreamBuilder<'f, A> {
type Item = (&'a [u8], Output);
type Into = Stream<'f, A>;
fn into_stream(self) -> Stream<'f, A> {
Stream::new(self.fst, self.aut, self.min, self.max)
}
}
/// A builder for constructing range queries on streams that include automaton
/// states.
///
/// In general, one should use `StreamBuilder` unless you have a specific need
/// for accessing the states of the underlying automaton that is being used to
/// filter this stream.
///
/// Once all bounds are set, one should call `into_stream` to get a
/// `Stream`.
///
/// Bounds are not additive. That is, if `ge` is called twice on the same
/// builder, then the second setting wins.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
pub struct StreamWithStateBuilder<'f, A = AlwaysMatch> {
    // The FST being queried.
    fst: FstRef<'f>,
    // The automaton used to filter keys.
    aut: A,
    // Lower and upper range endpoints; `Bound::Unbounded` until set.
    min: Bound,
    max: Bound,
}
impl<'f, A: Automaton> StreamWithStateBuilder<'f, A> {
    /// Creates a builder over `fst` filtered by `aut`, with both range
    /// endpoints unbounded.
    fn new(fst: FstRef<'f>, aut: A) -> StreamWithStateBuilder<'f, A> {
        let (min, max) = (Bound::Unbounded, Bound::Unbounded);
        StreamWithStateBuilder { fst, aut, min, max }
    }

    /// Specify a greater-than-or-equal-to bound.
    pub fn ge<T: AsRef<[u8]>>(
        mut self,
        bound: T,
    ) -> StreamWithStateBuilder<'f, A> {
        self.min = Bound::Included(bound.as_ref().to_vec());
        self
    }

    /// Specify a greater-than bound.
    pub fn gt<T: AsRef<[u8]>>(
        mut self,
        bound: T,
    ) -> StreamWithStateBuilder<'f, A> {
        self.min = Bound::Excluded(bound.as_ref().to_vec());
        self
    }

    /// Specify a less-than-or-equal-to bound.
    pub fn le<T: AsRef<[u8]>>(
        mut self,
        bound: T,
    ) -> StreamWithStateBuilder<'f, A> {
        self.max = Bound::Included(bound.as_ref().to_vec());
        self
    }

    /// Specify a less-than bound.
    pub fn lt<T: AsRef<[u8]>>(
        mut self,
        bound: T,
    ) -> StreamWithStateBuilder<'f, A> {
        self.max = Bound::Excluded(bound.as_ref().to_vec());
        self
    }
}
impl<'a, 'f, A: 'a + Automaton> IntoStreamer<'a>
for StreamWithStateBuilder<'f, A>
where
A::State: Clone,
{
type Item = (&'a [u8], Output, A::State);
type Into = StreamWithState<'f, A>;
fn into_stream(self) -> StreamWithState<'f, A> {
StreamWithState::new(self.fst, self.aut, self.min, self.max)
}
}
/// One endpoint of a range query over keys.
#[derive(Debug)]
enum Bound {
    Included(Vec<u8>),
    Excluded(Vec<u8>),
    Unbounded,
}

impl Bound {
    /// Returns true if `inp` lies strictly beyond this bound when the bound
    /// is interpreted as an upper endpoint.
    #[inline]
    fn exceeded_by(&self, inp: &[u8]) -> bool {
        match self {
            Bound::Included(v) => inp > v.as_slice(),
            Bound::Excluded(v) => inp >= v.as_slice(),
            Bound::Unbounded => false,
        }
    }

    /// Returns true if the bound's key is empty, or if there is no bound.
    #[inline]
    fn is_empty(&self) -> bool {
        match self {
            Bound::Included(v) | Bound::Excluded(v) => v.is_empty(),
            Bound::Unbounded => true,
        }
    }

    /// Returns false only for an excluded endpoint.
    #[inline]
    fn is_inclusive(&self) -> bool {
        match self {
            Bound::Excluded(_) => false,
            Bound::Included(_) | Bound::Unbounded => true,
        }
    }
}
/// A lexicographically ordered stream of key-value pairs from an fst.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
// A thin wrapper around `StreamWithState` that drops the automaton state
// from each yielded item.
pub struct Stream<'f, A: Automaton = AlwaysMatch>(StreamWithState<'f, A>);
impl<'f, A: Automaton> Stream<'f, A> {
fn new(fst: FstRef<'f>, aut: A, min: Bound, max: Bound) -> Stream<'f, A> {
Stream(StreamWithState::new(fst, aut, min, max))
}
/// Convert this stream into a vector of byte strings and outputs.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_byte_vec(mut self) -> Vec<(Vec<u8>, u64)> {
let mut vs = vec![];
while let Some((k, v)) = self.next() {
vs.push((k.to_vec(), v.value()));
}
vs
}
/// Convert this stream into a vector of Unicode strings and outputs.
///
/// If any key is not valid UTF-8, then iteration on the stream is stopped
/// and a UTF-8 decoding error is returned.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_str_vec(mut self) -> Result<Vec<(String, u64)>> {
let mut vs = vec![];
while let Some((k, v)) = self.next() {
let k = String::from_utf8(k.to_vec()).map_err(Error::from)?;
vs.push((k, v.value()));
}
Ok(vs)
}
/// Convert this stream into a vector of byte strings.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_byte_keys(mut self) -> Vec<Vec<u8>> {
let mut vs = vec![];
while let Some((k, _)) = self.next() {
vs.push(k.to_vec());
}
vs
}
/// Convert this stream into a vector of Unicode strings.
///
/// If any key is not valid UTF-8, then iteration on the stream is stopped
/// and a UTF-8 decoding error is returned.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_str_keys(mut self) -> Result<Vec<String>> {
let mut vs = vec![];
while let Some((k, _)) = self.next() {
let k = String::from_utf8(k.to_vec()).map_err(Error::from)?;
vs.push(k);
}
Ok(vs)
}
/// Convert this stream into a vector of outputs.
pub fn into_values(mut self) -> Vec<u64> {
let mut vs = vec![];
while let Some((_, v)) = self.next() {
vs.push(v.value());
}
vs
}
}
impl<'f, 'a, A: Automaton> Streamer<'a> for Stream<'f, A> {
    type Item = (&'a [u8], Output);

    /// Advances the inner stream, discarding the (unit) automaton state.
    fn next(&'a mut self) -> Option<(&'a [u8], Output)> {
        match self.0.next_with(|_| ()) {
            Some((key, out, ())) => Some((key, out)),
            None => None,
        }
    }
}
/// A lexicographically ordered stream of key-value-state triples from an fst
/// and an automaton.
///
/// The key-values are from the underlying FST while the states are from the
/// automaton.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'m` lifetime parameter refers to the lifetime of the underlying map.
pub struct StreamWithState<'f, A = AlwaysMatch>
where
    A: Automaton,
{
    // The FST being streamed.
    fst: FstRef<'f>,
    // The automaton used to filter keys.
    aut: A,
    // The key bytes accumulated along the current path from the root.
    inp: Vec<u8>,
    // The output of the empty key, if present; yielded once and taken.
    empty_output: Option<Output>,
    // Explicit traversal stack: one frame per node on the current path.
    stack: Vec<StreamState<'f, A::State>>,
    // The upper bound; streaming stops once the input exceeds it.
    end_at: Bound,
}
/// A single frame of the traversal stack used by `StreamWithState`.
#[derive(Clone, Debug)]
struct StreamState<'f, S> {
    // The node this frame is visiting.
    node: Node<'f>,
    // The index of the next transition of `node` to follow.
    trans: usize,
    // The sum of outputs accumulated on the path up to (but not including)
    // this node's outgoing transition.
    out: Output,
    // The automaton state upon arriving at `node`.
    aut_state: S,
}
impl<'f, A: Automaton> StreamWithState<'f, A> {
    /// Creates a new stream over `fst` filtered by `aut`, positioned at the
    /// smallest key satisfying `min` and ending once `max` is exceeded.
    fn new(
        fst: FstRef<'f>,
        aut: A,
        min: Bound,
        max: Bound,
    ) -> StreamWithState<'f, A> {
        let mut rdr = StreamWithState {
            fst,
            aut,
            inp: Vec::with_capacity(16),
            empty_output: None,
            stack: vec![],
            end_at: max,
        };
        rdr.seek_min(min);
        rdr
    }

    /// Seeks the underlying stream such that the next key to be read is the
    /// smallest key in the underlying fst that satisfies the given minimum
    /// bound.
    ///
    /// This theoretically should be straight-forward, but we need to make
    /// sure our stack is correct, which includes accounting for automaton
    /// states.
    fn seek_min(&mut self, min: Bound) {
        if min.is_empty() {
            // No lower bound (or an empty one): start at the root. The empty
            // key itself is only eligible when the bound is inclusive.
            if min.is_inclusive() {
                self.empty_output = self.fst.empty_final_output();
            }
            self.stack = vec![StreamState {
                node: self.fst.root(),
                trans: 0,
                out: Output::zero(),
                aut_state: self.aut.start(),
            }];
            return;
        }
        let (key, inclusive) = match min {
            Bound::Excluded(ref min) => (min, false),
            Bound::Included(ref min) => (min, true),
            Bound::Unbounded => unreachable!(),
        };
        // At this point, we need to find the starting location of `min` in
        // the FST. However, as we search, we need to maintain a stack of
        // reader states so that the reader can pick up where we left off.
        // N.B. We do not necessarily need to stop in a final state, unlike
        // the one-off `find` method. For example, the given bound might
        // not actually exist in the FST.
        let mut node = self.fst.root();
        let mut out = Output::zero();
        let mut aut_state = self.aut.start();
        for &b in key {
            match node.find_input(b) {
                Some(i) => {
                    let t = node.transition(i);
                    let prev_state = aut_state;
                    aut_state = self.aut.accept(&prev_state, b);
                    self.inp.push(b);
                    self.stack.push(StreamState {
                        node,
                        trans: i + 1,
                        out,
                        aut_state: prev_state,
                    });
                    out = out.cat(t.out);
                    node = self.fst.node(t.addr);
                }
                None => {
                    // This is a little tricky. We're in this case if the
                    // given bound is not a prefix of any key in the FST.
                    // Since this is a minimum bound, we need to find the
                    // first transition in this node that comes after the
                    // current input byte.
                    self.stack.push(StreamState {
                        node,
                        trans: node
                            .transitions()
                            .position(|t| t.inp > b)
                            .unwrap_or(node.len()),
                        out,
                        aut_state,
                    });
                    return;
                }
            }
        }
        if !self.stack.is_empty() {
            let last = self.stack.len() - 1;
            if inclusive {
                // Back up one transition so the bound key itself is the
                // first item yielded.
                self.stack[last].trans -= 1;
                self.inp.pop();
            } else {
                // Exclusive bound: descend into the bound key's node so
                // iteration resumes strictly after it.
                let node = self.stack[last].node;
                let trans = self.stack[last].trans;
                self.stack.push(StreamState {
                    node: self.fst.node(node.transition(trans - 1).addr),
                    trans: 0,
                    out,
                    aut_state,
                });
            }
        }
    }

    /// Advances to the next matching key, applying `map` to the automaton
    /// state corresponding to the match. Returns `None` once the stream is
    /// exhausted or the upper bound has been exceeded.
    fn next_with<T>(
        &mut self,
        mut map: impl FnMut(&A::State) -> T,
    ) -> Option<(&[u8], Output, T)> {
        // The empty key, if present and in range, is yielded exactly once.
        if let Some(out) = self.empty_output.take() {
            if self.end_at.exceeded_by(&[]) {
                self.stack.clear();
                return None;
            }
            let start = self.aut.start();
            if self.aut.is_match(&start) {
                return Some((&[], out, map(&start)));
            }
        }
        while let Some(state) = self.stack.pop() {
            if state.trans >= state.node.len()
                || !self.aut.can_match(&state.aut_state)
            {
                // Backtrack: this node is exhausted (or the automaton can no
                // longer match). Pop its input byte, except for the root,
                // which contributes no byte.
                if state.node.addr() != self.fst.root_addr() {
                    self.inp.pop().unwrap();
                }
                continue;
            }
            let trans = state.node.transition(state.trans);
            let out = state.out.cat(trans.out);
            let next_state = self.aut.accept(&state.aut_state, trans.inp);
            let t = map(&next_state);
            let mut is_match = self.aut.is_match(&next_state);
            let next_node = self.fst.node(trans.addr);
            self.inp.push(trans.inp);
            if next_node.is_final() {
                if let Some(eof_state) = self.aut.accept_eof(&next_state) {
                    is_match = self.aut.is_match(&eof_state);
                }
            }
            // Re-push the current frame (advanced to the next transition)
            // followed by the frame for the node we just descended into.
            self.stack.push(StreamState { trans: state.trans + 1, ..state });
            self.stack.push(StreamState {
                node: next_node,
                trans: 0,
                out,
                aut_state: next_state,
            });
            if self.end_at.exceeded_by(&self.inp) {
                // We are done, forever.
                self.stack.clear();
                return None;
            }
            if next_node.is_final() && is_match {
                return Some((
                    &self.inp,
                    out.cat(next_node.final_output()),
                    t,
                ));
            }
        }
        None
    }
}
impl<'a, 'f, A: 'a + Automaton> Streamer<'a> for StreamWithState<'f, A>
where
    A::State: Clone,
{
    type Item = (&'a [u8], Output, A::State);

    /// Advances the stream, cloning the automaton state for the caller.
    fn next(&'a mut self) -> Option<Self::Item> {
        self.next_with(|state| A::State::clone(state))
    }
}
/// An output is a value that is associated with a key in a finite state
/// transducer.
///
/// Note that outputs must satisfy an algebra. Namely, it must have an additive
/// identity and the following binary operations defined: `prefix`,
/// `concatenation` and `subtraction`. `prefix` and `concatenation` are
/// commutative while `subtraction` is not. `subtraction` is only defined on
/// pairs of operands where the first operand is greater than or equal to the
/// second operand.
///
/// Currently, output values must be `u64`. However, in theory, an output value
/// can be anything that satisfies the above algebra. Future versions of this
/// crate may make outputs generic on this algebra.
#[derive(Copy, Clone, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Output(u64);

impl Output {
    /// Create a new output from a `u64`.
    #[inline]
    pub fn new(v: u64) -> Output {
        Output(v)
    }

    /// Create a zero output.
    ///
    /// Zero is the additive identity of the output algebra.
    #[inline]
    pub fn zero() -> Output {
        Output(0)
    }

    /// Retrieve the value inside this output.
    #[inline]
    pub fn value(self) -> u64 {
        self.0
    }

    /// Returns true if this is a zero output.
    #[inline]
    pub fn is_zero(self) -> bool {
        self.0 == 0
    }

    /// Returns the prefix of this output and `o`.
    ///
    /// For `u64` outputs, the prefix is the minimum of the two values.
    #[inline]
    pub fn prefix(self, o: Output) -> Output {
        Output(cmp::min(self.0, o.0))
    }

    /// Returns the concatenation of this output and `o`.
    ///
    /// For `u64` outputs, concatenation is addition.
    #[inline]
    pub fn cat(self, o: Output) -> Output {
        Output(self.0 + o.0)
    }

    /// Returns the subtraction of `o` from this output.
    ///
    /// # Panics
    ///
    /// This function panics if `o > self`, since subtraction is only defined
    /// when the first operand is greater than or equal to the second.
    // NOTE: the previous doc comment claimed a panic when `self > o`, which
    // was backwards: `checked_sub(o.0)` fails when `o.0 > self.0`.
    #[inline]
    pub fn sub(self, o: Output) -> Output {
        Output(
            self.0
                .checked_sub(o.0)
                .expect("BUG: underflow subtraction not allowed"),
        )
    }
}
/// A transition from one node to another.
#[derive(Copy, Clone, Hash, Eq, PartialEq)]
pub struct Transition {
    /// The byte input associated with this transition.
    pub inp: u8,
    /// The output associated with this transition.
    pub out: Output,
    /// The address of the node that this transition points to.
    pub addr: CompiledAddr,
}

impl Default for Transition {
    /// A default transition has input `0`, a zero output and points at the
    /// invalid sentinel address (`NONE_ADDRESS`).
    #[inline]
    fn default() -> Transition {
        Transition { inp: 0, out: Output::zero(), addr: NONE_ADDRESS }
    }
}
impl fmt::Debug for Transition {
    /// Formats as `inp -> addr`, including the output (as `(inp, out)`)
    /// only when it is non-zero.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.out.is_zero() {
            true => write!(f, "{} -> {}", self.inp as char, self.addr),
            false => write!(
                f,
                "({}, {}) -> {}",
                self.inp as char,
                self.out.value(),
                self.addr
            ),
        }
    }
}
/// Converts a serialized node address from `u64` to `usize`.
///
/// On 64-bit targets every `u64` fits, so this is a plain cast.
#[inline]
#[cfg(target_pointer_width = "64")]
fn u64_to_usize(n: u64) -> usize {
    n as usize
}

/// Converts a serialized node address from `u64` to `usize`.
///
/// On targets with pointers smaller than 64 bits, an address that does not
/// fit in `usize` cannot be represented, so this panics rather than
/// silently truncating.
#[inline]
#[cfg(not(target_pointer_width = "64"))]
fn u64_to_usize(n: u64) -> usize {
    if n > std::usize::MAX as u64 {
        panic!(
            "\
Cannot convert node address {} to a pointer sized variable. If this FST
is very large and was generated on a system with a larger pointer size
than this system, then it is not possible to read this FST on this
system.",
            n
        );
    }
    n as usize
}
// api: Fst::as_inner to give access to underlying data
// Closes #116
/*!
Operations on raw finite state transducers.
This sub-module exposes the guts of a finite state transducer. Many parts of
it, such as construction and traversal, are mirrored in the `set` and `map`
sub-modules. Other parts of it, such as direct access to nodes and transitions
in the transducer, do not have any analog.
# Overview of types
`Fst` is a read only interface to pre-constructed finite state transducers.
`Node` is a read only interface to a single node in a transducer. `Builder` is
used to create new finite state transducers. (Once a transducer is created, it
can never be modified.) `Stream` is a stream of all inputs and outputs in a
transducer. `StreamBuilder` builds range queries. `OpBuilder` collects streams
and executes set operations like `union` or `intersection` on them with the
option of specifying a merge strategy for output values.
Most of the rest of the types are streams from set operations.
*/
use std::cmp;
use std::fmt;
use crate::automaton::{AlwaysMatch, Automaton};
use crate::bytes;
use crate::error::Result;
use crate::stream::{IntoStreamer, Streamer};
pub use crate::raw::build::Builder;
pub use crate::raw::error::Error;
pub use crate::raw::node::{Node, Transitions};
pub use crate::raw::ops::{
Difference, IndexedValue, Intersection, OpBuilder, SymmetricDifference,
Union,
};
mod build;
mod common_inputs;
mod counting_writer;
mod crc32;
mod crc32_table;
mod error;
mod node;
mod ops;
mod registry;
mod registry_minimal;
#[cfg(test)]
mod tests;
/// The API version of this crate.
///
/// This version number is written to every finite state transducer created by
/// this crate. When a finite state transducer is read, its version number is
/// checked against this value.
///
/// Currently, any version mismatch results in an error. Fixing this requires
/// regenerating the finite state transducer or switching to a version of this
/// crate that is compatible with the serialized transducer. This particular
/// behavior may be relaxed in future versions.
pub const VERSION: u64 = 3;
/// A sentinel value used to indicate an empty final state.
///
/// This is also the root address of an FST that contains no keys.
const EMPTY_ADDRESS: CompiledAddr = 0;
/// A sentinel value used to indicate an invalid state.
///
/// This is never the address of a node in a serialized transducer.
const NONE_ADDRESS: CompiledAddr = 1;
/// FstType is a convention used to indicate the type of the underlying
/// transducer.
///
/// This crate reserves the range 0-255 (inclusive) but currently leaves the
/// meaning of 0-255 unspecified.
pub type FstType = u64;
/// CompiledAddr is the type used to address nodes in a finite state
/// transducer.
///
/// It is most useful as a pointer to nodes. It can be used in the `Fst::node`
/// method to resolve the pointer.
pub type CompiledAddr = usize;
/// An acyclic deterministic finite state transducer.
///
/// # How does it work?
///
/// The short answer: it's just like a prefix trie, which compresses keys
/// based only on their prefixes, except that an automaton/transducer also
/// compresses suffixes.
///
/// The longer answer is that keys in an automaton are stored only in the
/// transitions from one state to another. A key can be acquired by tracing
/// a path from the root of the automaton to any match state. The inputs along
/// each transition are concatenated. Once a match state is reached, the
/// concatenation of inputs up until that point corresponds to a single key.
///
/// But why is it called a transducer instead of an automaton? A finite state
/// transducer is just like a finite state automaton, except that it has output
/// transitions in addition to input transitions. Namely, the value associated
/// with any particular key is determined by summing the outputs along every
/// input transition that leads to the key's corresponding match state.
///
/// This is best demonstrated with a couple images. First, let's ignore the
/// "transducer" aspect and focus on a plain automaton.
///
/// Consider that your keys are abbreviations of some of the months in the
/// Gregorian calendar:
///
/// ```plain
/// jan
/// feb
/// mar
/// apr
/// may
/// jun
/// jul
/// ```
///
/// The corresponding automaton that stores all of these as keys looks like
/// this:
///
/// 
///
/// Notice here how the prefix and suffix of `jan` and `jun` are shared.
/// Similarly, the prefixes of `jun` and `jul` are shared and the prefixes
/// of `mar` and `may` are shared.
///
/// All of the keys from this automaton can be enumerated in lexicographic
/// order by following every transition from each node in lexicographic
/// order. Since it is acyclic, the procedure will terminate.
///
/// A key can be found by tracing it through the transitions in the automaton.
/// For example, the key `aug` is known not to be in the automaton by only
/// visiting the root state (because there is no `a` transition). For another
/// example, the key `jax` is known not to be in the set only after moving
/// through the transitions for `j` and `a`. Namely, after those transitions
/// are followed, there are no transitions for `x`.
///
/// Notice here that looking up a key is proportional the length of the key
/// itself. Namely, lookup time is not affected by the number of keys in the
/// automaton!
///
/// Additionally, notice that the automaton exploits the fact that many keys
/// share common prefixes and suffixes. For example, `jun` and `jul` are
/// represented with no more states than would be required to represent either
/// one on its own. Instead, the only change is a single extra transition. This
/// is a form of compression and is key to how the automatons produced by this
/// crate are so small.
///
/// Let's move on to finite state transducers. Consider the same set of keys
/// as above, but let's assign their numeric month values:
///
/// ```plain
/// jan,1
/// feb,2
/// mar,3
/// apr,4
/// may,5
/// jun,6
/// jul,7
/// ```
///
/// The corresponding transducer looks very similar to the automaton above,
/// except outputs have been added to some of the transitions:
///
/// 
///
/// All of the operations with a transducer are the same as described above
/// for automatons. Additionally, the same compression techniques are used:
/// common prefixes and suffixes in keys are exploited.
///
/// The key difference is that some transitions have been given an output.
/// As one follows input transitions, one must sum the outputs as they
/// are seen. (A transition with no output represents the additive identity,
/// or `0` in this case.) For example, when looking up `feb`, the transition
/// `f` has output `2`, the transition `e` has output `0`, and the transition
/// `b` also has output `0`. The sum of these is `2`, which is exactly the
/// value we associated with `feb`.
///
/// For another more interesting example, consider `jul`. The `j` transition
/// has output `1`, the `u` transition has output `5` and the `l` transition
/// has output `1`. Summing these together gets us `7`, which is again the
/// correct value associated with `jul`. Notice that if we instead looked up
/// the `jun` key, then the `n` transition would be followed instead of the
/// `l` transition, which has no output. Therefore, the `jun` key equals
/// `1+5+0=6`.
///
/// The trick to transducers is that there exists a unique path through the
/// transducer for every key, and its outputs are stored appropriately along
/// this path such that the correct value is returned when they are all summed
/// together. This process also enables the data that makes up each value to be
/// shared across many values in the transducer in exactly the same way that
/// keys are shared. This is yet another form of compression!
///
/// # Bonus: a billion strings
///
/// The amount of compression one can get from automata can be absolutely
/// ridiculous. Consider the particular case of storing all billion strings
/// in the range `0000000001-1000000000`, e.g.,
///
/// ```plain
/// 0000000001
/// 0000000002
/// ...
/// 0000000100
/// 0000000101
/// ...
/// 0999999999
/// 1000000000
/// ```
///
/// The corresponding automaton looks like this:
///
/// 
///
/// Indeed, the on disk size of this automaton is a mere **251 bytes**.
///
/// Of course, this is a bit of a pathological best case, but it does serve
/// to show how good compression can be in the optimal case.
///
/// Also, check out the
/// [corresponding transducer](http://burntsushi.net/stuff/one-billion-map.svg)
/// that maps each string to its integer value. It's a bit bigger, but still
/// only takes up **896 bytes** of space on disk. This demonstrates that
/// output values are also compressible.
///
/// # Does this crate produce minimal transducers?
///
/// For any non-trivial sized set of keys, it is unlikely that this crate will
/// produce a minimal transducer. As far as this author knows, guaranteeing a
/// minimal transducer requires working memory proportional to the number of
/// states. This can be quite costly and is anathema to the main design goal of
/// this crate: provide the ability to work with gigantic sets of strings with
/// constant memory overhead.
///
/// Instead, construction of a finite state transducer uses a cache of
/// states. More frequently used states are cached and reused, which provides
/// reasonably good compression ratios. (No comprehensive benchmarks exist to
/// back up this claim.)
///
/// It is possible that this crate may expose a way to guarantee minimal
/// construction of transducers at the expense of exorbitant memory
/// requirements.
///
/// # Bibliography
///
/// I initially got the idea to use finite state tranducers to represent
/// ordered sets/maps from
/// [Michael
/// McCandless'](http://blog.mikemccandless.com/2010/12/using-finite-state-transducers-in.html)
/// work on incorporating transducers in Lucene.
///
/// However, my work would also not have been possible without the hard work
/// of many academics, especially
/// [Jan Daciuk](http://galaxy.eti.pg.gda.pl/katedry/kiw/pracownicy/Jan.Daciuk/personal/).
///
/// * [Incremental construction of minimal acyclic finite-state automata](http://www.mitpressjournals.org/doi/pdfplus/10.1162/089120100561601)
/// (Section 3 provides a decent overview of the algorithm used to construct
/// transducers in this crate, assuming all outputs are `0`.)
/// * [Direct Construction of Minimal Acyclic Subsequential Transducers](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.24.3698&rep=rep1&type=pdf)
/// (The whole thing. The proof is dense but illuminating. The algorithm at
/// the end is the money shot, namely, it incorporates output values.)
/// * [Experiments with Automata Compression](http://www.researchgate.net/profile/Jii_Dvorsky/publication/221568039_Word_Random_Access_Compression/links/0c96052c095630d5b3000000.pdf#page=116), [Smaller Representation of Finite State Automata](http://www.cs.put.poznan.pl/dweiss/site/publications/download/fsacomp.pdf)
/// (various compression techniques for representing states/transitions)
/// * [Jan Daciuk's dissertation](http://www.pg.gda.pl/~jandac/thesis.ps.gz)
/// (excellent for in depth overview)
/// * [Comparison of Construction Algorithms for Minimal, Acyclic, Deterministic, Finite-State Automata from Sets of Strings](http://www.cs.mun.ca/~harold/Courses/Old/CS4750/Diary/q3p2qx4lv71m5vew.pdf)
/// (excellent for surface level overview)
#[derive(Clone)]
pub struct Fst<D> {
    /// Metadata (version, root address, type, key count, checksum) parsed
    /// from `data` once, at construction time in `Fst::new`.
    meta: Meta,
    /// The raw serialized bytes of the transducer.
    data: D,
}
/// Metadata read from the framing of a serialized FST.
#[derive(Debug, Clone)]
struct Meta {
    /// The API version the transducer was serialized with.
    version: u64,
    /// The address of the root node.
    root_addr: CompiledAddr,
    /// The type of the underlying transducer (see `FstType`).
    ty: FstType,
    /// The number of keys in the transducer.
    len: usize,
    /// A checksum is missing when the FST version is <= 2. (Checksums were
    /// added in version 3.)
    checksum: Option<u32>,
}
impl Fst<Vec<u8>> {
    /// Create a new FST from an iterator of lexicographically ordered byte
    /// strings. Every key's value is set to `0`.
    ///
    /// If the iterator does not yield values in lexicographic order, then an
    /// error is returned.
    ///
    /// Note that this is a convenience function to build an FST in memory.
    /// To build an FST that streams to an arbitrary `io::Write`, use
    /// `raw::Builder`.
    pub fn from_iter_set<K, I>(iter: I) -> Result<Fst<Vec<u8>>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = K>,
    {
        let mut builder = Builder::memory();
        iter.into_iter().try_for_each(|key| builder.add(key))?;
        Ok(builder.into_fst())
    }
    /// Create a new FST from an iterator of lexicographically ordered byte
    /// strings. The iterator should consist of tuples, where the first element
    /// is the byte string and the second element is its corresponding value.
    ///
    /// If the iterator does not yield unique keys in lexicographic order, then
    /// an error is returned.
    ///
    /// Note that this is a convenience function to build an FST in memory.
    /// To build an FST that streams to an arbitrary `io::Write`, use
    /// `raw::Builder`.
    pub fn from_iter_map<K, I>(iter: I) -> Result<Fst<Vec<u8>>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = (K, u64)>,
    {
        let mut builder = Builder::memory();
        iter.into_iter().try_for_each(|(key, val)| builder.insert(key, val))?;
        Ok(builder.into_fst())
    }
}
impl<D: AsRef<[u8]>> Fst<D> {
    /// Creates a transducer from its representation as a raw byte sequence.
    ///
    /// This operation is intentionally very cheap (no allocations and no
    /// copies). In particular, no verification on the integrity of the
    /// FST is performed. Callers may opt into integrity checks via the
    /// [`Fst::verify`](struct.Fst.html#method.verify) method.
    ///
    /// The fst must have been written with a compatible finite state
    /// transducer builder (`Builder` qualifies). If the format is invalid or
    /// if there is a mismatch between the API version of this library and the
    /// fst, then an error is returned.
    #[inline]
    pub fn new(data: D) -> Result<Fst<D>> {
        let bytes = data.as_ref();
        // 36 bytes is the size of the smallest possible (empty, version 3)
        // FST, so anything shorter is malformed.
        if bytes.len() < 36 {
            return Err(Error::Format { size: bytes.len() }.into());
        }
        // The read_u64 unwraps below are OK because they can never fail.
        // They can only fail when there is an IO error or if there is an
        // unexpected EOF. However, we are reading from a byte slice (no
        // IO errors possible) and we've confirmed the byte slice is at least
        // N bytes (no unexpected EOF).
        let version = bytes::read_u64_le(&bytes);
        if version == 0 || version > VERSION {
            return Err(
                Error::Version { expected: VERSION, got: version }.into()
            );
        }
        let ty = bytes::read_u64_le(&bytes[8..]);
        // Checksums were only introduced in version 3, so for older versions
        // the serialized data extends to the very end of the byte slice.
        let (end, checksum) = if version <= 2 {
            (bytes.len(), None)
        } else {
            let checksum = bytes::read_u32_le(&bytes[bytes.len() - 4..]);
            (bytes.len() - 4, Some(checksum))
        };
        // The root address and key count are the last two u64s written
        // (before the checksum, if any).
        let root_addr = {
            let last = &bytes[end - 8..];
            u64_to_usize(bytes::read_u64_le(last))
        };
        let len = {
            let last2 = &bytes[end - 16..];
            u64_to_usize(bytes::read_u64_le(last2))
        };
        // The root node is always the last node written, so its address should
        // be near the end. After the root node is written, we still have to
        // write the root *address* and the number of keys in the FST, along
        // with the checksum. That's 20 bytes. The extra byte used below (21
        // and not 20) comes from the fact that the root address points to
        // the last byte in the root node, rather than the byte immediately
        // following the root node.
        //
        // If this check passes, it is still possible that the FST is invalid
        // but probably unlikely. If this check reports a false positive, then
        // the program will probably panic. In the worst case, the FST will
        // operate but be subtly wrong. (This would require the bytes to be in
        // a format expected by an FST, which is incredibly unlikely.)
        //
        // The special check for EMPTY_ADDRESS is needed since an empty FST
        // has a root node that is empty and final, which means it has the
        // special address `0`. In that case, the FST is the smallest it can
        // be: the version, type, root address and number of nodes. That's
        // 36 bytes (8 byte u64 each).
        //
        // And finally, our calculation changes somewhat based on version.
        // If the FST version is less than 3, then it does not have a checksum.
        let (empty_total, addr_offset) =
            if version <= 2 { (32, 17) } else { (36, 21) };
        if (root_addr == EMPTY_ADDRESS && bytes.len() != empty_total)
            && root_addr + addr_offset != bytes.len()
        {
            return Err(Error::Format { size: bytes.len() }.into());
        }
        let meta = Meta { version, root_addr, ty, len, checksum };
        Ok(Fst { meta, data })
    }
    /// Retrieves the value associated with a key.
    ///
    /// If the key does not exist, then `None` is returned.
    #[inline]
    pub fn get<B: AsRef<[u8]>>(&self, key: B) -> Option<Output> {
        self.as_ref().get(key.as_ref())
    }
    /// Returns true if and only if the given key is in this FST.
    #[inline]
    pub fn contains_key<B: AsRef<[u8]>>(&self, key: B) -> bool {
        self.as_ref().contains_key(key.as_ref())
    }
    /// Retrieves the key associated with the given value.
    ///
    /// This is like `get_key_into`, but will return the key itself without
    /// allowing the caller to reuse an allocation.
    ///
    /// If the given value does not exist, then `None` is returned.
    ///
    /// If the values in this FST are not monotonically increasing when
    /// sorted lexicographically by key, then this routine has unspecified
    /// behavior.
    #[inline]
    pub fn get_key(&self, value: u64) -> Option<Vec<u8>> {
        let mut key = vec![];
        if self.get_key_into(value, &mut key) {
            Some(key)
        } else {
            None
        }
    }
    /// Retrieves the key associated with the given value.
    ///
    /// If the given value does not exist, then `false` is returned. In this
    /// case, the contents of `key` are unspecified.
    ///
    /// The given buffer is not cleared before the key is written to it.
    ///
    /// If the values in this FST are not monotonically increasing when
    /// sorted lexicographically by key, then this routine has unspecified
    /// behavior.
    #[inline]
    pub fn get_key_into(&self, value: u64, key: &mut Vec<u8>) -> bool {
        self.as_ref().get_key_into(value, key)
    }
    /// Return a lexicographically ordered stream of all key-value pairs in
    /// this fst.
    #[inline]
    pub fn stream(&self) -> Stream<'_> {
        StreamBuilder::new(self.as_ref(), AlwaysMatch).into_stream()
    }
    /// Return a builder for range queries.
    ///
    /// A range query returns a subset of key-value pairs in this fst in a
    /// range given in lexicographic order.
    #[inline]
    pub fn range(&self) -> StreamBuilder<'_> {
        StreamBuilder::new(self.as_ref(), AlwaysMatch)
    }
    /// Executes an automaton on the keys of this FST.
    #[inline]
    pub fn search<A: Automaton>(&self, aut: A) -> StreamBuilder<'_, A> {
        StreamBuilder::new(self.as_ref(), aut)
    }
    /// Executes an automaton on the keys of this FST and yields matching
    /// keys along with the corresponding matching states in the given
    /// automaton.
    #[inline]
    pub fn search_with_state<A: Automaton>(
        &self,
        aut: A,
    ) -> StreamWithStateBuilder<'_, A> {
        StreamWithStateBuilder::new(self.as_ref(), aut)
    }
    /// Returns the number of keys in this fst.
    #[inline]
    pub fn len(&self) -> usize {
        self.as_ref().len()
    }
    /// Returns true if and only if this fst has no keys.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.as_ref().is_empty()
    }
    /// Returns the number of bytes used by this fst.
    #[inline]
    pub fn size(&self) -> usize {
        self.as_ref().size()
    }
    /// Attempts to verify this FST by computing its checksum.
    ///
    /// This will scan over all of the bytes in the underlying FST, so this
    /// may be an expensive operation depending on the size of the FST.
    ///
    /// This returns an error in two cases:
    ///
    /// 1. When a checksum does not exist, which is the case for FSTs that were
    /// produced by the `fst` crate before version `0.4`.
    /// 2. When the checksum in the FST does not match the computed checksum
    /// performed by this procedure.
    #[inline]
    pub fn verify(&self) -> Result<()> {
        use crate::raw::crc32::CheckSummer;
        let expected = match self.as_ref().meta.checksum {
            None => return Err(Error::ChecksumMissing.into()),
            Some(expected) => expected,
        };
        // The checksum covers everything except the trailing 4 bytes, which
        // hold the checksum itself.
        let mut summer = CheckSummer::new();
        summer.update(&self.as_bytes()[..self.as_bytes().len() - 4]);
        let got = summer.masked();
        if expected == got {
            return Ok(());
        }
        Err(Error::ChecksumMismatch { expected, got }.into())
    }
    /// Creates a new fst operation with this fst added to it.
    ///
    /// The `OpBuilder` type can be used to add additional fst streams
    /// and perform set operations like union, intersection, difference and
    /// symmetric difference on the keys of the fst. These set operations also
    /// allow one to specify how conflicting values are merged in the stream.
    #[inline]
    pub fn op(&self) -> OpBuilder<'_> {
        OpBuilder::new().add(self)
    }
    /// Returns true if and only if the `self` fst is disjoint with the fst
    /// `stream`.
    ///
    /// `stream` must be a lexicographically ordered sequence of byte strings
    /// with associated values.
    #[inline]
    pub fn is_disjoint<'f, I, S>(&self, stream: I) -> bool
    where
        I: for<'a> IntoStreamer<'a, Into = S, Item = (&'a [u8], Output)>,
        S: 'f + for<'a> Streamer<'a, Item = (&'a [u8], Output)>,
    {
        // Disjoint iff the intersection yields nothing.
        self.op().add(stream).intersection().next().is_none()
    }
    /// Returns true if and only if the `self` fst is a subset of the fst
    /// `stream`.
    ///
    /// `stream` must be a lexicographically ordered sequence of byte strings
    /// with associated values.
    #[inline]
    pub fn is_subset<'f, I, S>(&self, stream: I) -> bool
    where
        I: for<'a> IntoStreamer<'a, Into = S, Item = (&'a [u8], Output)>,
        S: 'f + for<'a> Streamer<'a, Item = (&'a [u8], Output)>,
    {
        // `self` is a subset iff the intersection contains exactly as many
        // keys as `self` itself.
        let mut op = self.op().add(stream).intersection();
        let mut count = 0;
        while let Some(_) = op.next() {
            count += 1;
        }
        count == self.len()
    }
    /// Returns true if and only if the `self` fst is a superset of the fst
    /// `stream`.
    ///
    /// `stream` must be a lexicographically ordered sequence of byte strings
    /// with associated values.
    #[inline]
    pub fn is_superset<'f, I, S>(&self, stream: I) -> bool
    where
        I: for<'a> IntoStreamer<'a, Into = S, Item = (&'a [u8], Output)>,
        S: 'f + for<'a> Streamer<'a, Item = (&'a [u8], Output)>,
    {
        // `self` is a superset iff the union adds no keys beyond those
        // already in `self`, i.e., the union has exactly `self.len()` keys.
        let mut op = self.op().add(stream).union();
        let mut count = 0;
        while let Some(_) = op.next() {
            count += 1;
        }
        count == self.len()
    }
    /// Returns the underlying type of this fst.
    ///
    /// FstType is a convention used to indicate the type of the underlying
    /// transducer.
    ///
    /// This crate reserves the range 0-255 (inclusive) but currently leaves
    /// the meaning of 0-255 unspecified.
    #[inline]
    pub fn fst_type(&self) -> FstType {
        self.as_ref().fst_type()
    }
    /// Returns the root node of this fst.
    #[inline]
    pub fn root(&self) -> Node<'_> {
        self.as_ref().root()
    }
    /// Returns the node at the given address.
    ///
    /// Node addresses can be obtained by reading transitions on `Node` values.
    #[inline]
    pub fn node(&self, addr: CompiledAddr) -> Node<'_> {
        self.as_ref().node(addr)
    }
    /// Returns a copy of the binary contents of this FST.
    #[inline]
    pub fn to_vec(&self) -> Vec<u8> {
        self.as_ref().to_vec()
    }
    /// Returns the binary contents of this FST.
    #[inline]
    pub fn as_bytes(&self) -> &[u8] {
        self.as_ref().as_bytes()
    }
    // Creates the cheap, borrowed view of this FST that all read operations
    // above are delegated to.
    #[inline]
    fn as_ref(&self) -> FstRef {
        FstRef { meta: &self.meta, data: self.data.as_ref() }
    }
}
impl<D> Fst<D> {
    /// Returns the underlying data which constitutes the FST itself.
    #[inline]
    pub fn into_inner(self) -> D {
        self.data
    }
    /// Returns a borrow to the underlying data which constitutes the FST
    /// itself.
    #[inline]
    pub fn as_inner(&self) -> &D {
        &self.data
    }
    /// Maps the underlying data of the fst to another data type.
    ///
    /// The mapped data is passed through `Fst::new`, so an error is returned
    /// if it does not form a valid FST.
    #[inline]
    pub fn map_data<F, T>(self, mut f: F) -> Result<Fst<T>>
    where
        F: FnMut(D) -> T,
        T: AsRef<[u8]>,
    {
        let mapped = f(self.into_inner());
        Fst::new(mapped)
    }
}
impl<'a, 'f, D: AsRef<[u8]>> IntoStreamer<'a> for &'f Fst<D> {
    type Item = (&'a [u8], Output);
    type Into = Stream<'f>;
    #[inline]
    fn into_stream(self) -> Stream<'f> {
        // Delegate to the inherent `stream` method, which builds an
        // unbounded, unfiltered stream over every key-value pair.
        self.stream()
    }
}
/// A borrowed view of an FST: its parsed metadata plus its raw bytes.
///
/// Created by `Fst::as_ref`; all of `Fst`'s read operations are delegated to
/// this type.
struct FstRef<'f> {
    meta: &'f Meta,
    data: &'f [u8],
}
impl<'f> FstRef<'f> {
    /// Returns the output value for `key`, or `None` if absent.
    #[inline]
    fn get(&self, key: &[u8]) -> Option<Output> {
        let mut node = self.root();
        let mut out = Output::zero();
        for &byte in key {
            // `?` bails out with `None` as soon as a byte has no
            // corresponding transition.
            let i = node.find_input(byte)?;
            let trans = node.transition(i);
            out = out.cat(trans.out);
            node = self.node(trans.addr);
        }
        if node.is_final() {
            Some(out.cat(node.final_output()))
        } else {
            None
        }
    }
    /// Returns true if and only if `key` is in the FST.
    #[inline]
    fn contains_key(&self, key: &[u8]) -> bool {
        let mut node = self.root();
        for &byte in key {
            let i = match node.find_input(byte) {
                None => return false,
                Some(i) => i,
            };
            node = self.node(node.transition_addr(i));
        }
        node.is_final()
    }
    /// Reconstructs the key associated with `value` into `key`, returning
    /// false when no such key exists.
    #[inline]
    fn get_key_into(&self, mut value: u64, key: &mut Vec<u8>) -> bool {
        let mut node = self.root();
        // Greedily follow, at each node, the last transition whose output
        // does not exceed the value still to be accounted for.
        while value != 0 || !node.is_final() {
            let chosen = node
                .transitions()
                .take_while(|t| t.out.value() <= value)
                .last();
            match chosen {
                None => return false,
                Some(t) => {
                    value -= t.out.value();
                    key.push(t.inp);
                    node = self.node(t.addr);
                }
            }
        }
        true
    }
    /// The number of keys, as recorded in the FST's metadata.
    #[inline]
    fn len(&self) -> usize {
        self.meta.len
    }
    /// True when the FST contains no keys.
    #[inline]
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// The total number of bytes in the serialized FST.
    #[inline]
    fn size(&self) -> usize {
        self.as_bytes().len()
    }
    /// The `FstType` recorded in the FST's metadata.
    #[inline]
    fn fst_type(&self) -> FstType {
        self.meta.ty
    }
    /// The address of the root node.
    #[inline]
    fn root_addr(&self) -> CompiledAddr {
        self.meta.root_addr
    }
    /// The root node itself.
    #[inline]
    fn root(&self) -> Node<'f> {
        self.node(self.root_addr())
    }
    /// Resolves `addr` to a node in this FST.
    #[inline]
    fn node(&self, addr: CompiledAddr) -> Node<'f> {
        Node::new(self.meta.version, addr, self.as_bytes())
    }
    /// Copies the serialized FST into a fresh vector.
    #[inline]
    fn to_vec(&self) -> Vec<u8> {
        self.as_bytes().to_vec()
    }
    /// The serialized bytes of the FST.
    #[inline]
    fn as_bytes(&self) -> &'f [u8] {
        self.data
    }
    /// The output of the empty key, if the empty key is in the FST.
    #[inline]
    fn empty_final_output(&self) -> Option<Output> {
        let root = self.root();
        if !root.is_final() {
            return None;
        }
        Some(root.final_output())
    }
}
/// A builder for constructing range queries on streams.
///
/// Once all bounds are set, one should call `into_stream` to get a
/// `Stream`.
///
/// Bounds are not additive. That is, if `ge` is called twice on the same
/// builder, then the second setting wins.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
pub struct StreamBuilder<'f, A = AlwaysMatch> {
    fst: FstRef<'f>,
    aut: A,
    /// The lower bound. Defaults to `Bound::Unbounded`.
    min: Bound,
    /// The upper bound. Defaults to `Bound::Unbounded`.
    max: Bound,
}
impl<'f, A: Automaton> StreamBuilder<'f, A> {
    /// Builds a stream over the entire FST, unbounded on both ends.
    fn new(fst: FstRef<'f>, aut: A) -> StreamBuilder<'f, A> {
        let (min, max) = (Bound::Unbounded, Bound::Unbounded);
        StreamBuilder { fst, aut, min, max }
    }
    /// Specify a greater-than-or-equal-to bound.
    pub fn ge<T: AsRef<[u8]>>(mut self, bound: T) -> StreamBuilder<'f, A> {
        self.min = Bound::Included(bound.as_ref().to_vec());
        self
    }
    /// Specify a greater-than bound.
    pub fn gt<T: AsRef<[u8]>>(mut self, bound: T) -> StreamBuilder<'f, A> {
        self.min = Bound::Excluded(bound.as_ref().to_vec());
        self
    }
    /// Specify a less-than-or-equal-to bound.
    pub fn le<T: AsRef<[u8]>>(mut self, bound: T) -> StreamBuilder<'f, A> {
        self.max = Bound::Included(bound.as_ref().to_vec());
        self
    }
    /// Specify a less-than bound.
    pub fn lt<T: AsRef<[u8]>>(mut self, bound: T) -> StreamBuilder<'f, A> {
        self.max = Bound::Excluded(bound.as_ref().to_vec());
        self
    }
}
impl<'a, 'f, A: Automaton> IntoStreamer<'a> for StreamBuilder<'f, A> {
type Item = (&'a [u8], Output);
type Into = Stream<'f, A>;
fn into_stream(self) -> Stream<'f, A> {
Stream::new(self.fst, self.aut, self.min, self.max)
}
}
/// A builder for constructing range queries on streams that include automaton
/// states.
///
/// In general, one should use `StreamBuilder` unless you have a specific need
/// for accessing the states of the underlying automaton that is being used to
/// filter this stream.
///
/// Once all bounds are set, one should call `into_stream` to get a
/// `Stream`.
///
/// Bounds are not additive. That is, if `ge` is called twice on the same
/// builder, then the second setting wins.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
pub struct StreamWithStateBuilder<'f, A = AlwaysMatch> {
    fst: FstRef<'f>,
    aut: A,
    /// The lower bound. Defaults to `Bound::Unbounded`.
    min: Bound,
    /// The upper bound. Defaults to `Bound::Unbounded`.
    max: Bound,
}
impl<'f, A: Automaton> StreamWithStateBuilder<'f, A> {
    /// Builds a stream over the entire FST, unbounded on both ends.
    fn new(fst: FstRef<'f>, aut: A) -> StreamWithStateBuilder<'f, A> {
        let (min, max) = (Bound::Unbounded, Bound::Unbounded);
        StreamWithStateBuilder { fst, aut, min, max }
    }
    /// Specify a greater-than-or-equal-to bound.
    pub fn ge<T: AsRef<[u8]>>(
        mut self,
        bound: T,
    ) -> StreamWithStateBuilder<'f, A> {
        self.min = Bound::Included(bound.as_ref().to_vec());
        self
    }
    /// Specify a greater-than bound.
    pub fn gt<T: AsRef<[u8]>>(
        mut self,
        bound: T,
    ) -> StreamWithStateBuilder<'f, A> {
        self.min = Bound::Excluded(bound.as_ref().to_vec());
        self
    }
    /// Specify a less-than-or-equal-to bound.
    pub fn le<T: AsRef<[u8]>>(
        mut self,
        bound: T,
    ) -> StreamWithStateBuilder<'f, A> {
        self.max = Bound::Included(bound.as_ref().to_vec());
        self
    }
    /// Specify a less-than bound.
    pub fn lt<T: AsRef<[u8]>>(
        mut self,
        bound: T,
    ) -> StreamWithStateBuilder<'f, A> {
        self.max = Bound::Excluded(bound.as_ref().to_vec());
        self
    }
}
impl<'a, 'f, A: 'a + Automaton> IntoStreamer<'a>
for StreamWithStateBuilder<'f, A>
where
A::State: Clone,
{
type Item = (&'a [u8], Output, A::State);
type Into = StreamWithState<'f, A>;
fn into_stream(self) -> StreamWithState<'f, A> {
StreamWithState::new(self.fst, self.aut, self.min, self.max)
}
}
/// One end point of a range query over keys.
#[derive(Debug)]
enum Bound {
    /// The bound key itself is part of the range.
    Included(Vec<u8>),
    /// The bound key itself is excluded from the range.
    Excluded(Vec<u8>),
    /// No restriction on this end of the range.
    Unbounded,
}
impl Bound {
    /// Returns true when `inp` lies beyond this bound, interpreting the
    /// bound as an upper bound.
    #[inline]
    fn exceeded_by(&self, inp: &[u8]) -> bool {
        match *self {
            Bound::Unbounded => false,
            Bound::Included(ref v) => inp > v,
            Bound::Excluded(ref v) => inp >= v,
        }
    }
    /// Returns true when the bound's key is empty, or when there is no bound
    /// at all.
    #[inline]
    fn is_empty(&self) -> bool {
        match *self {
            Bound::Unbounded => true,
            Bound::Included(ref v) | Bound::Excluded(ref v) => v.is_empty(),
        }
    }
    /// Returns true for every bound except an exclusive one.
    #[inline]
    fn is_inclusive(&self) -> bool {
        match *self {
            Bound::Excluded(_) => false,
            Bound::Included(_) | Bound::Unbounded => true,
        }
    }
}
/// A lexicographically ordered stream of key-value pairs from an fst.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
///
/// Internally, this wraps a `StreamWithState` and drops the automaton state
/// from each item it yields.
pub struct Stream<'f, A: Automaton = AlwaysMatch>(StreamWithState<'f, A>);
impl<'f, A: Automaton> Stream<'f, A> {
fn new(fst: FstRef<'f>, aut: A, min: Bound, max: Bound) -> Stream<'f, A> {
Stream(StreamWithState::new(fst, aut, min, max))
}
/// Convert this stream into a vector of byte strings and outputs.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_byte_vec(mut self) -> Vec<(Vec<u8>, u64)> {
let mut vs = vec![];
while let Some((k, v)) = self.next() {
vs.push((k.to_vec(), v.value()));
}
vs
}
/// Convert this stream into a vector of Unicode strings and outputs.
///
/// If any key is not valid UTF-8, then iteration on the stream is stopped
/// and a UTF-8 decoding error is returned.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_str_vec(mut self) -> Result<Vec<(String, u64)>> {
let mut vs = vec![];
while let Some((k, v)) = self.next() {
let k = String::from_utf8(k.to_vec()).map_err(Error::from)?;
vs.push((k, v.value()));
}
Ok(vs)
}
/// Convert this stream into a vector of byte strings.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_byte_keys(mut self) -> Vec<Vec<u8>> {
let mut vs = vec![];
while let Some((k, _)) = self.next() {
vs.push(k.to_vec());
}
vs
}
/// Convert this stream into a vector of Unicode strings.
///
/// If any key is not valid UTF-8, then iteration on the stream is stopped
/// and a UTF-8 decoding error is returned.
///
/// Note that this creates a new allocation for every key in the stream.
pub fn into_str_keys(mut self) -> Result<Vec<String>> {
let mut vs = vec![];
while let Some((k, _)) = self.next() {
let k = String::from_utf8(k.to_vec()).map_err(Error::from)?;
vs.push(k);
}
Ok(vs)
}
/// Convert this stream into a vector of outputs.
pub fn into_values(mut self) -> Vec<u64> {
let mut vs = vec![];
while let Some((_, v)) = self.next() {
vs.push(v.value());
}
vs
}
}
impl<'f, 'a, A: Automaton> Streamer<'a> for Stream<'f, A> {
    type Item = (&'a [u8], Output);
    fn next(&'a mut self) -> Option<(&'a [u8], Output)> {
        // Forward to the inner stream, mapping the automaton state to `()`
        // and dropping it from the yielded triple.
        match self.0.next_with(|_| ()) {
            None => None,
            Some((key, out, _)) => Some((key, out)),
        }
    }
}
/// A lexicographically ordered stream of key-value-state triples from an fst
/// and an automaton.
///
/// The key-values are from the underlying FST while the states are from the
/// automaton.
///
/// The `A` type parameter corresponds to an optional automaton to filter
/// the stream. By default, no filtering is done.
///
/// The `'f` lifetime parameter refers to the lifetime of the underlying fst.
pub struct StreamWithState<'f, A = AlwaysMatch>
where
    A: Automaton,
{
    fst: FstRef<'f>,
    aut: A,
    /// The key bytes accumulated so far along the current traversal path.
    inp: Vec<u8>,
    /// The output of the empty key, when the fst contains the empty key and
    /// the range's minimum bound includes it.
    empty_output: Option<Output>,
    /// An explicit stack of traversal frames, used instead of recursion.
    stack: Vec<StreamState<'f, A::State>>,
    /// The upper bound of the range; iteration stops here.
    end_at: Bound,
}
/// A single frame of the traversal performed by `StreamWithState`.
#[derive(Clone, Debug)]
struct StreamState<'f, S> {
    /// The node this frame is visiting.
    node: Node<'f>,
    /// The index of the next transition to follow out of `node`.
    trans: usize,
    /// The accumulated output on the path leading to `node`.
    out: Output,
    /// The automaton state upon arrival at `node`.
    aut_state: S,
}
impl<'f, A: Automaton> StreamWithState<'f, A> {
fn new(
fst: FstRef<'f>,
aut: A,
min: Bound,
max: Bound,
) -> StreamWithState<'f, A> {
let mut rdr = StreamWithState {
fst,
aut,
inp: Vec::with_capacity(16),
empty_output: None,
stack: vec![],
end_at: max,
};
rdr.seek_min(min);
rdr
}
    /// Seeks the underlying stream such that the next key to be read is the
    /// smallest key in the underlying fst that satisfies the given minimum
    /// bound.
    ///
    /// This theoretically should be straight-forward, but we need to make
    /// sure our stack is correct, which includes accounting for automaton
    /// states.
    fn seek_min(&mut self, min: Bound) {
        if min.is_empty() {
            // Unbounded (or empty-key) minimum: start the traversal at the
            // root. The empty key itself is only yielded when the bound is
            // inclusive.
            if min.is_inclusive() {
                self.empty_output = self.fst.empty_final_output();
            }
            self.stack = vec![StreamState {
                node: self.fst.root(),
                trans: 0,
                out: Output::zero(),
                aut_state: self.aut.start(),
            }];
            return;
        }
        let (key, inclusive) = match min {
            Bound::Excluded(ref min) => (min, false),
            Bound::Included(ref min) => (min, true),
            // `min.is_empty()` returned false above, so the bound has a key.
            Bound::Unbounded => unreachable!(),
        };
        // At this point, we need to find the starting location of `min` in
        // the FST. However, as we search, we need to maintain a stack of
        // reader states so that the reader can pick up where we left off.
        // N.B. We do not necessarily need to stop in a final state, unlike
        // the one-off `find` method. For the example, the given bound might
        // not actually exist in the FST.
        let mut node = self.fst.root();
        let mut out = Output::zero();
        let mut aut_state = self.aut.start();
        for &b in key {
            match node.find_input(b) {
                Some(i) => {
                    // Follow the transition for `b`, pushing a frame that
                    // resumes *after* this transition (`trans: i + 1`) with
                    // the automaton state as it was *before* accepting `b`.
                    let t = node.transition(i);
                    let prev_state = aut_state;
                    aut_state = self.aut.accept(&prev_state, b);
                    self.inp.push(b);
                    self.stack.push(StreamState {
                        node,
                        trans: i + 1,
                        out,
                        aut_state: prev_state,
                    });
                    out = out.cat(t.out);
                    node = self.fst.node(t.addr);
                }
                None => {
                    // This is a little tricky. We're in this case if the
                    // given bound is not a prefix of any key in the FST.
                    // Since this is a minimum bound, we need to find the
                    // first transition in this node that proceeds the current
                    // input byte.
                    self.stack.push(StreamState {
                        node,
                        trans: node
                            .transitions()
                            .position(|t| t.inp > b)
                            .unwrap_or(node.len()),
                        out,
                        aut_state,
                    });
                    return;
                }
            }
        }
        if !self.stack.is_empty() {
            let last = self.stack.len() - 1;
            if inclusive {
                // Rewind the last step so that the bound key itself (if it
                // is in the fst) is the first key produced by `next_with`.
                self.stack[last].trans -= 1;
                self.inp.pop();
            } else {
                // Exclusive bound: skip the bound key itself but still visit
                // its extensions by descending into the node it leads to.
                let node = self.stack[last].node;
                let trans = self.stack[last].trans;
                self.stack.push(StreamState {
                    node: self.fst.node(node.transition(trans - 1).addr),
                    trans: 0,
                    out,
                    aut_state,
                });
            }
        }
    }
    /// Core iteration step: produce the next in-range, automaton-matching
    /// key/output pair, applying `map` to the automaton state reached at the
    /// end of the key.
    ///
    /// `map` lets callers decide how much of the automaton state to keep
    /// (e.g. `|_| ()` to discard it, or a clone to return it).
    fn next_with<T>(
        &mut self,
        mut map: impl FnMut(&A::State) -> T,
    ) -> Option<(&[u8], Output, T)> {
        // The empty key, if present and in range, is emitted first (once).
        if let Some(out) = self.empty_output.take() {
            if self.end_at.exceeded_by(&[]) {
                self.stack.clear();
                return None;
            }
            let start = self.aut.start();
            if self.aut.is_match(&start) {
                return Some((&[], out, map(&start)));
            }
        }
        // Iterative depth-first traversal, pruned by the automaton.
        while let Some(state) = self.stack.pop() {
            if state.trans >= state.node.len()
                || !self.aut.can_match(&state.aut_state)
            {
                // Node exhausted (or subtree pruned): unwind one input byte,
                // except at the root, which contributed no byte.
                if state.node.addr() != self.fst.root_addr() {
                    self.inp.pop().unwrap();
                }
                continue;
            }
            let trans = state.node.transition(state.trans);
            let out = state.out.cat(trans.out);
            let next_state = self.aut.accept(&state.aut_state, trans.inp);
            let t = map(&next_state);
            let mut is_match = self.aut.is_match(&next_state);
            let next_node = self.fst.node(trans.addr);
            self.inp.push(trans.inp);
            if next_node.is_final() {
                // Give the automaton a final chance to accept or reject at
                // the end of the key.
                if let Some(eof_state) = self.aut.accept_eof(&next_state) {
                    is_match = self.aut.is_match(&eof_state);
                }
            }
            // Re-push the current frame advanced by one transition, then
            // descend into the child node.
            self.stack.push(StreamState { trans: state.trans + 1, ..state });
            self.stack.push(StreamState {
                node: next_node,
                trans: 0,
                out,
                aut_state: next_state,
            });
            if self.end_at.exceeded_by(&self.inp) {
                // We are done, forever.
                self.stack.clear();
                return None;
            }
            if next_node.is_final() && is_match {
                return Some((
                    &self.inp,
                    out.cat(next_node.final_output()),
                    t,
                ));
            }
        }
        None
    }
}
impl<'a, 'f, A: 'a + Automaton> Streamer<'a> for StreamWithState<'f, A>
where
    A::State: Clone,
{
    type Item = (&'a [u8], Output, A::State);

    /// Yield the next key and output along with a clone of the automaton
    /// state reached at the end of the key.
    fn next(&'a mut self) -> Option<(&'a [u8], Output, A::State)> {
        self.next_with(|state| state.clone())
    }
}
/// An output is a value that is associated with a key in a finite state
/// transducer.
///
/// Note that outputs must satisfy an algebra. Namely, it must have an additive
/// identity and the following binary operations defined: `prefix`,
/// `concatenation` and `subtraction`. `prefix` and `concatenation` are
/// commutative while `subtraction` is not. `subtraction` is only defined on
/// pairs of operands where the first operand is greater than or equal to the
/// second operand.
///
/// Currently, output values must be `u64`. However, in theory, an output value
/// can be anything that satisfies the above algebra. Future versions of this
/// crate may make outputs generic on this algebra.
#[derive(Copy, Clone, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Output(u64);
impl Output {
    /// Create a new output from a `u64`.
    #[inline]
    pub fn new(v: u64) -> Output {
        Output(v)
    }
    /// Create a zero output.
    #[inline]
    pub fn zero() -> Output {
        Output(0)
    }
    /// Retrieve the value inside this output.
    #[inline]
    pub fn value(self) -> u64 {
        self.0
    }
    /// Returns true if this is a zero output.
    #[inline]
    pub fn is_zero(self) -> bool {
        self.0 == 0
    }
    /// Returns the prefix of this output and `o`.
    ///
    /// In the `u64` algebra, the "prefix" is the minimum of the two values.
    #[inline]
    pub fn prefix(self, o: Output) -> Output {
        Output(cmp::min(self.0, o.0))
    }
    /// Returns the concatenation of this output and `o`.
    ///
    /// In the `u64` algebra, concatenation is addition.
    #[inline]
    pub fn cat(self, o: Output) -> Output {
        Output(self.0 + o.0)
    }
    /// Returns the subtraction of `o` from this output.
    ///
    /// This function panics if `o > self`, since subtraction is only defined
    /// when the first operand is greater than or equal to the second.
    // Fixed doc: the previous comment claimed a panic when `self > o`, but
    // `checked_sub` underflows (and hence `expect` panics) when `o > self`.
    #[inline]
    pub fn sub(self, o: Output) -> Output {
        Output(
            self.0
                .checked_sub(o.0)
                .expect("BUG: underflow subtraction not allowed"),
        )
    }
}
/// A transition from one node to another.
#[derive(Copy, Clone, Hash, Eq, PartialEq)]
pub struct Transition {
    /// The byte input associated with this transition.
    pub inp: u8,
    /// The output associated with this transition.
    pub out: Output,
    /// The address of the node that this transition points to.
    pub addr: CompiledAddr,
}
impl Default for Transition {
    /// The default transition has a zero input byte, a zero output and points
    /// at the sentinel `NONE_ADDRESS`.
    #[inline]
    fn default() -> Transition {
        Transition { inp: 0, out: Output::zero(), addr: NONE_ADDRESS }
    }
}
impl fmt::Debug for Transition {
    /// Render the transition as `input -> addr`, including the output value
    /// only when it is non-zero.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if !self.out.is_zero() {
            write!(
                f,
                "({}, {}) -> {}",
                self.inp as char,
                self.out.value(),
                self.addr
            )
        } else {
            write!(f, "{} -> {}", self.inp as char, self.addr)
        }
    }
}
/// Convert a node address to `usize`.
///
/// On 64-bit targets the cast is lossless, so no range check is needed.
#[inline]
#[cfg(target_pointer_width = "64")]
fn u64_to_usize(n: u64) -> usize {
    n as usize
}
/// Convert a node address to `usize`, panicking if it does not fit.
///
/// On targets with pointers narrower than 64 bits, an FST built on a wider
/// system may contain addresses that a plain `as` cast would silently
/// truncate, so check the range explicitly first.
#[inline]
#[cfg(not(target_pointer_width = "64"))]
fn u64_to_usize(n: u64) -> usize {
    if n > std::usize::MAX as u64 {
        panic!(
            "\
Cannot convert node address {} to a pointer sized variable. If this FST
is very large and was generated on a system with a larger pointer size
than this system, then it is not possible to read this FST on this
system.",
            n
        );
    }
    n as usize
}
|
use crate::alloc::alloc::{handle_alloc_error, Layout};
use crate::scopeguard::guard;
use crate::TryReserveError;
use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
use core::ptr::NonNull;
cfg_if! {
    // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
    // at once instead of 8. We don't bother with AVX since it would require
    // runtime dispatch and wouldn't gain us much anyways: the probability of
    // finding a match drops off drastically after the first few buckets.
    //
    // I attempted an implementation on ARM using NEON instructions, but it
    // turns out that most NEON instructions have multi-cycle latency, which in
    // the end outweighs any gains over the generic implementation.
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri)
    ))] {
        mod sse2;
        use sse2 as imp;
    } else {
        // Portable fallback implementation; also used under Miri, which is
        // excluded from the SSE2 arm above.
        #[path = "generic.rs"]
        mod generic;
        use generic as imp;
    }
}
mod alloc;
pub(crate) use self::alloc::{do_alloc, Allocator, Global};
mod bitmask;
use self::bitmask::{BitMask, BitMaskIter};
use self::imp::Group;
// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
// On stable we can use #[cold] to get an equivalent effect: this attribute
// suggests to the optimizer that the function is unlikely to be called.
#[cfg(not(feature = "nightly"))]
#[inline]
#[cold]
fn cold() {}
#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
    // The `false` path is expected to be rare: route it through the
    // `#[cold]` marker so the optimizer lays it out off the hot path.
    match b {
        true => true,
        false => {
            cold();
            false
        }
    }
}
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
    // The `true` path is expected to be rare: route it through the
    // `#[cold]` marker so the optimizer lays it out off the hot path.
    match b {
        true => {
            cold();
            true
        }
        false => false,
    }
}
/// Computes the distance in elements between two pointers into the same
/// allocation. `to` must not be before `from`, otherwise the cast of the
/// (negative) `isize` result wraps.
#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
/// Computes the distance in elements between two pointers into the same
/// allocation, for toolchains without `pointer::offset_from`.
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    // Derive the element count from the raw byte distance.
    let byte_distance = to as usize - from as usize;
    byte_distance / mem::size_of::<T>()
}
/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    /// Report failures to the caller via `Err`.
    Fallible,
    /// Treat failures as fatal: panic or divert to the alloc error handler.
    Infallible,
}
impl Fallibility {
    /// Error to return on capacity overflow.
    ///
    /// In `Infallible` mode this panics instead of returning.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
        }
    }
    /// Error to return on allocation error.
    ///
    /// In `Infallible` mode this diverges via `handle_alloc_error`.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::AllocError { layout },
            Fallibility::Infallible => handle_alloc_error(layout),
        }
    }
}
/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;

/// Control byte value for a deleted bucket.
const DELETED: u8 = 0b1000_0000;

/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    // Full buckets hold a 7-bit hash fragment, so their value is below 0x80.
    ctrl < 0x80
}

/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    ctrl >= 0x80
}

/// Checks whether a special control value is EMPTY (just check 1 bit).
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    // EMPTY is all ones while DELETED has a clear low bit, so one bit
    // distinguishes the two special values.
    ctrl & 1 == 1
}
/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    // Callers mask the result with `bucket_mask`, so truncation is harmless.
    hash as usize
}
/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Take the top 7 bits of the meaningful part of the hash. While the hash
    // is normally a full 64-bit value, some hash functions (such as FxHash)
    // produce a usize result instead, which means that the top 32 bits are 0
    // on 32-bit platforms.
    let hash_bits = 8 * usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
    ((hash >> (hash_bits - 7)) & 0x7f) as u8 // truncation
}
/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    /// Current probe position: an index into the control bytes.
    pos: usize,
    /// Current stride; it grows by `Group::WIDTH` on every step, producing
    /// the triangular sequence described above.
    stride: usize,
}
impl ProbeSeq {
    /// Advances to the start of the next probe group, wrapping around the
    /// table via `bucket_mask`.
    #[inline]
    fn move_next(&mut self, bucket_mask: usize) {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= bucket_mask,
            "Went past end of probe sequence"
        );
        self.stride += Group::WIDTH;
        self.pos = (self.pos + self.stride) & bucket_mask;
    }
}
/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    debug_assert_ne!(cap, 0);
    match cap {
        // For small tables we require at least 1 empty bucket so that lookups
        // are guaranteed to terminate if an element doesn't exist in the
        // table. We don't bother with a table size of 2 buckets since that
        // can only hold a single element; skip directly to a 4 bucket table
        // which can hold 3 elements.
        0..=3 => Some(4),
        4..=7 => Some(8),
        _ => {
            // Otherwise require 1/8 buckets to be empty (87.5% load).
            //
            // Be careful when modifying this, calculate_layout relies on the
            // overflow check here.
            let adjusted_cap = cap.checked_mul(8)? / 7;
            // Any overflows will have been caught by the checked_mul. Also,
            // any rounding errors from the division above will be cleaned up
            // by next_power_of_two (which can't overflow because of the
            // previous division).
            Some(adjusted_cap.next_power_of_two())
        }
    }
}
/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        let buckets = bucket_mask + 1;
        buckets / 8 * 7
    }
}
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());
    // Array of buckets
    let data = Layout::array::<T>(buckets).ok()?;
    // Array of control bytes. This must be aligned to the group size.
    //
    // We add `Group::WIDTH` control bytes at the end of the array which
    // replicate the bytes at the start of the array and thus avoids the need to
    // perform bounds-checking while probing.
    //
    // There is no possible overflow here since buckets is a power of two and
    // Group::WIDTH is a small number.
    let ctrl = unsafe { Layout::from_size_align_unchecked(buckets + Group::WIDTH, Group::WIDTH) };
    // `extend` yields both the combined layout and the offset of `ctrl`
    // within it, which is exactly the (layout, ctrl_offset) pair we need.
    data.extend(ctrl).ok()
}
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(not(feature = "nightly"))]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());
    // Manual layout calculation since Layout methods are not yet stable.
    let ctrl_align = usize::max(mem::align_of::<T>(), Group::WIDTH);
    // Size of the bucket array, rounded up to `ctrl_align` so that the
    // control bytes that follow it are properly aligned.
    let ctrl_offset = mem::size_of::<T>()
        .checked_mul(buckets)?
        .checked_add(ctrl_align - 1)?
        & !(ctrl_align - 1);
    // Total size: the bucket array, then `buckets + Group::WIDTH` control
    // bytes (the extra `Group::WIDTH` bytes replicate the start of the
    // control array).
    let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
    Some((
        unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
        ctrl_offset,
    ))
}
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // This is actually a pointer to the *next* element, not the element
    // itself: this is needed to maintain the pointer arithmetic invariants,
    // since keeping a direct pointer to the element introduces difficulty.
    // `NonNull` is used for variance and niche layout optimization.
    ptr: NonNull<T>,
}
// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API; it is only used internally by the table.
unsafe impl<T> Send for Bucket<T> {}
impl<T> Clone for Bucket<T> {
    /// Cloning a bucket copies only the raw pointer; it does not duplicate
    /// the referenced element.
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}
impl<T> Bucket<T> {
    /// Creates the bucket for element `index`, where `base` is the
    /// one-past-the-end pointer of the data array. For sized types, elements
    /// are laid out *below* `base`, so the pointer is `base - index` (one
    /// past the element; `as_ptr` subtracts the final 1).
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            // For ZSTs the pointer has no address to offer, so we store the
            // index itself (+1 to keep the pointer non-null).
            // won't overflow because index must be less than length
            (index + 1) as *mut T
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    /// Recovers the element index from a bucket; inverse of
    /// `from_base_index`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        if mem::size_of::<T>() == 0 {
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }
    /// Returns a raw pointer to the element. The stored pointer is one past
    /// the element, hence the `sub(1)`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ptr(&self) -> *mut T {
        if mem::size_of::<T>() == 0 {
            // Just return an arbitrary ZST pointer which is properly aligned
            mem::align_of::<T>() as *mut T
        } else {
            self.ptr.as_ptr().sub(1)
        }
    }
    /// Returns the bucket `offset` positions further into the table. Note
    /// that for sized types the raw pointer moves *down* (see
    /// `from_base_index`), while for ZSTs the stored index moves up.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            (self.ptr.as_ptr() as usize + offset) as *mut T
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    /// Drops the element in place without deallocating its slot.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }
    /// Moves the element out of the slot by bitwise copy.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }
    /// Writes `val` into the slot without dropping any previous value.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }
    /// Returns a shared reference to the element; the caller chooses the
    /// lifetime and must ensure it does not outlive the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }
    /// Returns a mutable reference to the element; same lifetime caveat as
    /// `as_ref`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }
    /// Bitwise-copies the element from `other` into this slot; the two
    /// buckets must not overlap.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}
/// A raw hash table with an unsafe API.
pub struct RawTable<T, A: Allocator + Clone = Global> {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,
    // Pointer to the start of the control bytes. The buckets are stored
    // immediately before this pointer in the same allocation:
    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
    //                                ^ points here
    ctrl: NonNull<u8>,
    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,
    // Number of elements in the table, only really used by len()
    items: usize,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
    // Allocator used for the table's backing storage.
    alloc: A,
}
impl<T> RawTable<T, Global> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub const fn new() -> Self {
        Self {
            // Be careful to cast the entire slice to a raw pointer. The
            // static empty group doubles as the (all-EMPTY) control bytes.
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
            bucket_mask: 0,
            items: 0,
            // Zero growth: the first insertion must allocate.
            growth_left: 0,
            marker: PhantomData,
            alloc: Global,
        }
    }
    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    #[cfg(feature = "raw")]
    pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
        Self::try_with_capacity_in(capacity, Global)
    }
    /// Allocates a new hash table with at least enough capacity for inserting
    /// the given number of elements without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }
}
impl<T, A: Allocator + Clone> RawTable<T, A> {
    /// Creates a new empty hash table without allocating any memory, using the
    /// given allocator.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn new_in(alloc: A) -> Self {
        Self {
            // Be careful to cast the entire slice to a raw pointer. The
            // static empty group doubles as the (all-EMPTY) control bytes.
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
            bucket_mask: 0,
            items: 0,
            // Zero growth: the first insertion must allocate.
            growth_left: 0,
            marker: PhantomData,
            alloc,
        }
    }
    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        alloc: A,
        buckets: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match calculate_layout::<T>(buckets) {
            Some(lco) => lco,
            None => return Err(fallibility.capacity_overflow()),
        };
        let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
            Ok(block) => block.cast(),
            Err(_) => return Err(fallibility.alloc_err(layout)),
        };
        // `ctrl` points into the middle of the allocation, just past the
        // bucket array; `free_buckets` rewinds this offset on deallocation.
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
        Ok(Self {
            ctrl,
            bucket_mask: buckets - 1,
            items: 0,
            growth_left: bucket_mask_to_capacity(buckets - 1),
            marker: PhantomData,
            alloc,
        })
    }
    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    fn fallible_with_capacity(
        alloc: A,
        capacity: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        if capacity == 0 {
            // No allocation needed: use the shared empty-singleton table.
            Ok(Self::new_in(alloc))
        } else {
            unsafe {
                // Avoid `Option::ok_or_else` because it bloats LLVM IR.
                let buckets = match capacity_to_buckets(capacity) {
                    Some(buckets) => buckets,
                    None => return Err(fallibility.capacity_overflow()),
                };
                let result = Self::new_uninitialized(alloc, buckets, fallibility)?;
                // Initialize every control byte (including the replicated
                // trailing group) to EMPTY.
                result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
                Ok(result)
            }
        }
    }
/// Attempts to allocate a new hash table using the given allocator, with at least enough
/// capacity for inserting the given number of elements without reallocating.
#[cfg(feature = "raw")]
pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible)
}
/// Allocates a new hash table using the given allocator, with at least enough capacity for
/// inserting the given number of elements without reallocating.
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
// Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) {
Ok(capacity) => capacity,
Err(_) => unsafe { hint::unreachable_unchecked() },
}
}
    /// Deallocates the table without dropping any entries.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn free_buckets(&mut self) {
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match calculate_layout::<T>(self.buckets()) {
            Some(lco) => lco,
            // The table was allocated with this same layout, so the
            // computation cannot overflow now.
            None => hint::unreachable_unchecked(),
        };
        // `ctrl` points into the middle of the allocation; rewind to its
        // start before deallocating.
        self.alloc.deallocate(
            NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)),
            layout,
        );
    }
    /// Returns pointer to one past last element of data table.
    ///
    /// This is the same address as the start of the control bytes; buckets
    /// are laid out immediately before it.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn data_end(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.ctrl.as_ptr().cast())
    }
/// Returns pointer to start of data table.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
pub unsafe fn data_start(&self) -> *mut T {
self.data_end().as_ptr().wrapping_sub(self.buckets())
}
    /// Returns the index of a bucket from a `Bucket`.
    ///
    /// Inverse of `bucket`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        bucket.to_base_index(self.data_end())
    }
    /// Returns a pointer to a control byte.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
        // `index` may address the replicated trailing group, hence the bound
        // is `num_ctrl_bytes` rather than `buckets`.
        debug_assert!(index < self.num_ctrl_bytes());
        self.ctrl.as_ptr().add(index)
    }
    /// Returns a pointer to an element in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        // The unallocated empty-singleton table (see `new_in`) has
        // bucket_mask == 0 and must never be indexed.
        debug_assert_ne!(self.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }
    /// Erases an element from the table without dropping it.
    ///
    /// The slot's control byte is set to EMPTY when it is safe to do so, or
    /// to DELETED (a tombstone) when probe sequences must keep searching
    /// past it.
    #[cfg_attr(feature = "inline-more", inline)]
    #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
    pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        debug_assert!(is_full(*self.ctrl(index)));
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
        let empty_after = Group::load(self.ctrl(index)).match_empty();
        // If we are inside a continuous block of Group::WIDTH full or deleted
        // cells then a probe window may have seen a full block when trying to
        // insert. We therefore need to keep that block non-empty so that
        // lookups will continue searching to the next probe window.
        //
        // Note that in this context `leading_zeros` refers to the bytes at the
        // end of a group, while `trailing_zeros` refers to the bytes at the
        // begining of a group.
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
            DELETED
        } else {
            // An EMPTY slot frees up a unit of growth budget again.
            self.growth_left += 1;
            EMPTY
        };
        self.set_ctrl(index, ctrl);
        self.items -= 1;
    }
    /// Erases an element from the table, dropping it in place.
    ///
    /// Table bookkeeping is updated before the value is dropped, so a
    /// panicking `Drop` impl leaves the table in a consistent state.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    #[allow(deprecated)]
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
        // Erase the element from the table first since drop might panic.
        self.erase_no_drop(&item);
        item.drop();
    }
/// Finds and erases an element from the table, dropping it in place.
/// Returns true if an element was found.
#[cfg(feature = "raw")]
#[cfg_attr(feature = "inline-more", inline)]
pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
// Avoid `Option::map` because it bloats LLVM IR.
if let Some(bucket) = self.find(hash, eq) {
unsafe { self.erase(bucket) };
true
} else {
false
}
}
    /// Removes an element from the table, returning it.
    ///
    /// The slot is released via `erase_no_drop` and ownership of the value
    /// moves to the caller.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    #[allow(deprecated)]
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
        self.erase_no_drop(&item);
        item.read()
    }
/// Finds and removes an element from the table, returning it.
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
// Avoid `Option::map` because it bloats LLVM IR.
match self.find(hash, eq) {
Some(bucket) => Some(unsafe { self.remove(bucket) }),
None => None,
}
}
    /// Returns an iterator-like object for a probe sequence on the table.
    ///
    /// This iterator never terminates, but is guaranteed to visit each bucket
    /// group exactly once. The loop using `probe_seq` must terminate upon
    /// reaching a group containing an empty bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
        ProbeSeq {
            // Start at the bucket selected by the primary hash, truncated to
            // the table size via the mask.
            pos: h1(hash) & self.bucket_mask,
            stride: 0,
        }
    }
    /// Sets a control byte, and possibly also the replicated control byte at
    /// the end of the array.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
        // Replicate the first Group::WIDTH control bytes at the end of
        // the array without using a branch:
        // - If index >= Group::WIDTH then index == index2.
        // - Otherwise index2 == self.bucket_mask + 1 + index.
        //
        // The very last replicated control byte is never actually read because
        // we mask the initial index for unaligned loads, but we write it
        // anyways because it makes the set_ctrl implementation simpler.
        //
        // If there are fewer buckets than Group::WIDTH then this code will
        // replicate the buckets at the end of the trailing group. For example
        // with 2 buckets and a group size of 4, the control bytes will look
        // like this:
        //
        //     Real    |             Replicated
        // ---------------------------------------------
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
        // ---------------------------------------------
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
        // One of these two writes may alias (index == index2); both are
        // unconditional to keep the code branch-free.
        *self.ctrl(index) = ctrl;
        *self.ctrl(index2) = ctrl;
    }
    /// Searches for an empty or deleted bucket which is suitable for inserting
    /// a new element.
    ///
    /// There must be at least 1 empty bucket in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    fn find_insert_slot(&self, hash: u64) -> usize {
        let mut probe_seq = self.probe_seq(hash);
        loop {
            unsafe {
                let group = Group::load(self.ctrl(probe_seq.pos));
                if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
                    let result = (probe_seq.pos + bit) & self.bucket_mask;
                    // In tables smaller than the group width, trailing control
                    // bytes outside the range of the table are filled with
                    // EMPTY entries. These will unfortunately trigger a
                    // match, but once masked may point to a full bucket that
                    // is already occupied. We detect this situation here and
                    // perform a second scan starting at the begining of the
                    // table. This second scan is guaranteed to find an empty
                    // slot (due to the load factor) before hitting the trailing
                    // control bytes (containing EMPTY).
                    if unlikely(is_full(*self.ctrl(result))) {
                        debug_assert!(self.bucket_mask < Group::WIDTH);
                        debug_assert_ne!(probe_seq.pos, 0);
                        return Group::load_aligned(self.ctrl(0))
                            .match_empty_or_deleted()
                            .lowest_set_bit_nonzero();
                    }
                    return result;
                }
            }
            // No free slot in this group; continue the triangular probe.
            probe_seq.move_next(self.bucket_mask);
        }
    }
    /// Marks all table buckets as empty without dropping their contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                // Reset every control byte, including the replicated
                // trailing group, to EMPTY.
                self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
            }
        }
        self.items = 0;
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
    }
    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        // Ensure that the table is reset even if one of the drops panic:
        // `clear_no_drop` runs when the guard goes out of scope.
        let self_ = guard(self, |self_| self_.clear_no_drop());
        if mem::needs_drop::<T>() && self_.len() != 0 {
            unsafe {
                for item in self_.iter() {
                    item.drop();
                }
            }
        }
    }
    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.items, min_size);
        if min_size == 0 {
            // Nothing to keep: replace with the unallocated empty table.
            *self = Self::new_in(self.alloc.clone());
            return;
        }
        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now, and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size) {
            Some(buckets) => buckets,
            None => return,
        };
        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.items == 0 {
                *self = Self::with_capacity_in(min_size, self.alloc.clone())
            } else {
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                // Infallible resize panics/aborts on failure, so `Err` is
                // unreachable here.
                if self
                    .resize(min_size, hasher, Fallibility::Infallible)
                    .is_err()
                {
                    unsafe { hint::unreachable_unchecked() }
                }
            }
        }
    }
    /// Ensures that at least `additional` items can be inserted into the table
    /// without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
        if additional > self.growth_left {
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
            // With `Fallibility::Infallible` failures panic or abort, so the
            // `Err` branch is unreachable.
            if self
                .reserve_rehash(additional, hasher, Fallibility::Infallible)
                .is_err()
            {
                unsafe { hint::unreachable_unchecked() }
            }
        }
    }
/// Tries to ensure that at least `additional` items can be inserted into
/// the table without reallocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn try_reserve(
&mut self,
additional: usize,
hasher: impl Fn(&T) -> u64,
) -> Result<(), TryReserveError> {
if additional > self.growth_left {
self.reserve_rehash(additional, hasher, Fallibility::Fallible)
} else {
Ok(())
}
}
    /// Out-of-line slow path for `reserve` and `try_reserve`.
    ///
    /// Either rehashes in place (to reclaim DELETED tombstones) or resizes
    /// the allocation, depending on how much dead capacity is available.
    #[cold]
    #[inline(never)]
    fn reserve_rehash(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
        let new_items = match self.items.checked_add(additional) {
            Some(new_items) => new_items,
            None => return Err(fallibility.capacity_overflow()),
        };
        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
        if new_items <= full_capacity / 2 {
            // Rehash in-place without re-allocating if we have plenty of spare
            // capacity that is locked up due to DELETED entries.
            self.rehash_in_place(hasher);
            Ok(())
        } else {
            // Otherwise, conservatively resize to at least the next size up
            // to avoid churning deletes into frequent rehashes.
            self.resize(
                usize::max(new_items, full_capacity + 1),
                hasher,
                fallibility,
            )
        }
    }
/// Rehashes the contents of the table in place (i.e. without changing the
/// allocation).
///
/// If `hasher` panics then some of the table's contents may be lost.
fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
    unsafe {
        // Bulk convert all full control bytes to DELETED, and all DELETED
        // control bytes to EMPTY. This effectively frees up all buckets
        // containing a DELETED entry.
        for i in (0..self.buckets()).step_by(Group::WIDTH) {
            let group = Group::load_aligned(self.ctrl(i));
            let group = group.convert_special_to_empty_and_full_to_deleted();
            group.store_aligned(self.ctrl(i));
        }
        // Fix up the trailing control bytes. See the comments in set_ctrl
        // for the handling of tables smaller than the group width.
        if self.buckets() < Group::WIDTH {
            self.ctrl(0)
                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
        } else {
            self.ctrl(0)
                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
        }
        // If the hash function panics then properly clean up any elements
        // that we haven't rehashed yet. We unfortunately can't preserve the
        // element since we lost their hash and have no way of recovering it
        // without risking another panic.
        let mut guard = guard(self, |self_| {
            if mem::needs_drop::<T>() {
                // A bucket still marked DELETED at this point holds a live
                // element that was never re-inserted: drop it and free the
                // slot so the table remains consistent.
                for i in 0..self_.buckets() {
                    if *self_.ctrl(i) == DELETED {
                        self_.set_ctrl(i, EMPTY);
                        self_.bucket(i).drop();
                        self_.items -= 1;
                    }
                }
            }
            self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
        });
        // At this point, DELETED elements are elements that we haven't
        // rehashed yet. Find them and re-insert them at their ideal
        // position.
        'outer: for i in 0..guard.buckets() {
            if *guard.ctrl(i) != DELETED {
                continue;
            }
            'inner: loop {
                // Hash the current item
                let item = guard.bucket(i);
                let hash = hasher(item.as_ref());
                // Search for a suitable place to put it
                let new_i = guard.find_insert_slot(hash);
                // Probing works by scanning through all of the control
                // bytes in groups, which may not be aligned to the group
                // size. If both the new and old position fall within the
                // same unaligned group, then there is no benefit in moving
                // it and we can just continue to the next item.
                let probe_index = |pos: usize| {
                    (pos.wrapping_sub(guard.probe_seq(hash).pos) & guard.bucket_mask)
                        / Group::WIDTH
                };
                if likely(probe_index(i) == probe_index(new_i)) {
                    guard.set_ctrl(i, h2(hash));
                    continue 'outer;
                }
                // We are moving the current item to a new position. Write
                // our H2 to the control byte of the new position.
                let prev_ctrl = *guard.ctrl(new_i);
                guard.set_ctrl(new_i, h2(hash));
                if prev_ctrl == EMPTY {
                    // If the target slot is empty, simply move the current
                    // element into the new slot and clear the old control
                    // byte.
                    guard.set_ctrl(i, EMPTY);
                    guard.bucket(new_i).copy_from_nonoverlapping(&item);
                    continue 'outer;
                } else {
                    // If the target slot is occupied, swap the two elements
                    // and then continue processing the element that we just
                    // swapped into the old slot.
                    debug_assert_eq!(prev_ctrl, DELETED);
                    mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
                    continue 'inner;
                }
            }
        }
        // All items rehashed successfully: recompute the growth budget and
        // disarm the panic-cleanup guard.
        guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
        mem::forget(guard);
    }
}
/// Allocates a new table of a different size and moves the contents of the
/// current table into it.
fn resize(
    &mut self,
    capacity: usize,
    hasher: impl Fn(&T) -> u64,
    fallibility: Fallibility,
) -> Result<(), TryReserveError> {
    unsafe {
        debug_assert!(self.items <= capacity);
        // Allocate and initialize the new table.
        let mut new_table =
            Self::fallible_with_capacity(self.alloc.clone(), capacity, fallibility)?;
        // Pre-account for the elements we are about to move: the item
        // count carries over, and the growth budget shrinks accordingly.
        new_table.growth_left -= self.items;
        new_table.items = self.items;
        // The hash function may panic, in which case we simply free the new
        // table without dropping any elements that may have been copied into
        // it.
        //
        // This guard is also used to free the old table on success, see
        // the comment at the bottom of this function.
        let mut new_table = guard(ManuallyDrop::new(new_table), |new_table| {
            if !new_table.is_empty_singleton() {
                new_table.free_buckets();
            }
        });
        // Copy all elements to the new table.
        for item in self.iter() {
            // This may panic.
            let hash = hasher(item.as_ref());
            // We can use a simpler version of insert() here since:
            // - there are no DELETED entries.
            // - we know there is enough space in the table.
            // - all elements are unique.
            let index = new_table.find_insert_slot(hash);
            new_table.set_ctrl(index, h2(hash));
            new_table.bucket(index).copy_from_nonoverlapping(&item);
        }
        // We successfully copied all elements without panicking. Now replace
        // self with the new table. The old table will have its memory freed but
        // the items will not be dropped (since they have been moved into the
        // new table).
        mem::swap(self, &mut new_table);
        Ok(())
    }
}
/// Inserts a new element into the table, and returns its raw bucket.
///
/// This does not check if the given element already exists in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
    unsafe {
        let mut index = self.find_insert_slot(hash);
        // We can avoid growing the table once we have reached our load
        // factor if we are replacing a tombstone. This works since the
        // number of EMPTY slots does not change in this case.
        let old_ctrl = *self.ctrl(index);
        if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
            // No spare capacity and the slot is genuinely EMPTY: grow,
            // then re-probe since the buckets have moved.
            self.reserve(1, hasher);
            index = self.find_insert_slot(hash);
        }
        let bucket = self.bucket(index);
        // Only consume growth budget when filling a previously-EMPTY slot;
        // reusing a tombstone is free.
        self.growth_left -= special_is_empty(old_ctrl) as usize;
        self.set_ctrl(index, h2(hash));
        bucket.write(value);
        self.items += 1;
        bucket
    }
}
/// Attempts to insert a new element without growing the table and return its raw bucket.
///
/// Returns an `Err` containing the given element if inserting it would require growing the
/// table.
///
/// This does not check if the given element already exists in the table.
#[cfg(feature = "raw")]
#[cfg_attr(feature = "inline-more", inline)]
pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result<Bucket<T>, T> {
    unsafe {
        let index = self.find_insert_slot(hash);
        let old_ctrl = *self.ctrl(index);
        // Same load-factor check as `insert`, but hand the value back to
        // the caller instead of reallocating.
        if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
            Err(value)
        } else {
            let bucket = self.bucket(index);
            // Reusing a tombstone does not consume growth budget.
            self.growth_left -= special_is_empty(old_ctrl) as usize;
            self.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.items += 1;
            Ok(bucket)
        }
    }
}
/// Inserts a new element into the table, and returns a mutable reference to it.
///
/// This does not check if the given element already exists in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
    // SAFETY: the bucket returned by `insert` refers to the freshly
    // written element; the `&mut self` borrow keeps it valid.
    unsafe { self.insert(hash, value, hasher).as_mut() }
}
/// Inserts a new element into the table, without growing the table.
///
/// There must be enough space in the table to insert the new element.
///
/// This does not check if the given element already exists in the table.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
    unsafe {
        // NOTE: unlike `insert`, there is no capacity check here — the
        // caller guarantees a free slot exists.
        let index = self.find_insert_slot(hash);
        let bucket = self.bucket(index);
        // If we are replacing a DELETED entry then we don't need to update
        // the load counter.
        let old_ctrl = *self.ctrl(index);
        self.growth_left -= special_is_empty(old_ctrl) as usize;
        self.set_ctrl(index, h2(hash));
        bucket.write(value);
        self.items += 1;
        bucket
    }
}
/// Temporarily removes a bucket, applying the given function to the removed
/// element and optionally puts back the returned value in the same bucket.
///
/// Returns `true` if the bucket still contains an element
///
/// This does not check if the given bucket is actually occupied.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
where
    F: FnOnce(T) -> Option<T>,
{
    let index = self.bucket_index(&bucket);
    let old_ctrl = *self.ctrl(index);
    debug_assert!(is_full(old_ctrl));
    // Snapshot the growth budget so it can be restored if the element is
    // put back (the `remove` below adjusts it).
    let old_growth_left = self.growth_left;
    let item = self.remove(bucket);
    if let Some(new_item) = f(item) {
        // Reinstate the slot exactly as it was before the removal.
        self.growth_left = old_growth_left;
        self.set_ctrl(index, old_ctrl);
        self.items += 1;
        self.bucket(index).write(new_item);
        true
    } else {
        false
    }
}
/// Searches for an element in the table.
#[inline]
pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
    unsafe {
        // Probe only the buckets that could hold `hash`; `eq` confirms a
        // real match (`iter_hash` may rarely yield false positives).
        for bucket in self.iter_hash(hash) {
            let elm = bucket.as_ref();
            if likely(eq(elm)) {
                return Some(bucket);
            }
        }
        None
    }
}
/// Gets a reference to an element in the table.
#[inline]
pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
    // Avoid `Option::map` because it bloats LLVM IR.
    match self.find(hash, eq) {
        // SAFETY: the bucket comes from `find` on `self`, so it is live
        // for the duration of the `&self` borrow.
        Some(bucket) => Some(unsafe { bucket.as_ref() }),
        None => None,
    }
}
/// Gets a mutable reference to an element in the table.
#[inline]
pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
    // Avoid `Option::map` because it bloats LLVM IR.
    match self.find(hash, eq) {
        // SAFETY: the bucket comes from `find` on `self`; exclusivity is
        // guaranteed by the `&mut self` borrow.
        Some(bucket) => Some(unsafe { bucket.as_mut() }),
        None => None,
    }
}
/// Returns the number of elements the map can hold without reallocating.
///
/// This number is a lower bound; the table might be able to hold
/// more, but is guaranteed to be able to hold at least this many.
#[cfg_attr(feature = "inline-more", inline)]
pub fn capacity(&self) -> usize {
    // Live items plus the remaining growth budget before a resize.
    self.items + self.growth_left
}
/// Returns the number of elements in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn len(&self) -> usize {
    self.items
}
/// Returns the number of buckets in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn buckets(&self) -> usize {
    // Bucket counts are powers of two, so `bucket_mask` is always
    // `buckets - 1`.
    self.bucket_mask + 1
}
/// Returns the number of control bytes in the table.
#[cfg_attr(feature = "inline-more", inline)]
fn num_ctrl_bytes(&self) -> usize {
    // One control byte per bucket plus a trailing group (kept in sync with
    // the head of the array, see `rehash_in_place`) so that group-wide
    // loads never read out of bounds.
    self.bucket_mask + 1 + Group::WIDTH
}
/// Returns whether this table points to the empty singleton with a capacity
/// of 0.
#[cfg_attr(feature = "inline-more", inline)]
fn is_empty_singleton(&self) -> bool {
    self.bucket_mask == 0
}
/// Returns an iterator over every element in the table. It is up to
/// the caller to ensure that the `RawTable` outlives the `RawIter`.
/// Because we cannot make the `next` method unsafe on the `RawIter`
/// struct, we have to make the `iter` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn iter(&self) -> RawIter<T> {
    let data = Bucket::from_base_index(self.data_end(), 0);
    RawIter {
        iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
        // Track the item count so the iterator is `ExactSizeIterator`.
        items: self.items,
    }
}
/// Returns an iterator over occupied buckets that could match a given hash.
///
/// In rare cases, the iterator may return a bucket with a different hash
/// (candidates are matched on the control byte only, not the full hash).
///
/// It is up to the caller to ensure that the `RawTable` outlives the
/// `RawIterHash`. Because we cannot make the `next` method unsafe on the
/// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> {
    RawIterHash::new(self, hash)
}
/// Returns an iterator which removes all elements from the table without
/// freeing the memory.
#[cfg_attr(feature = "inline-more", inline)]
pub fn drain(&mut self) -> RawDrain<'_, T, A> {
    unsafe {
        // SAFETY: the iterator is created from `self` and covers every
        // remaining item, satisfying `drain_iter_from`'s contract.
        let iter = self.iter();
        self.drain_iter_from(iter)
    }
}
/// Returns an iterator which removes all elements from the table without
/// freeing the memory.
///
/// Iteration starts at the provided iterator's current location.
///
/// It is up to the caller to ensure that the iterator is valid for this
/// `RawTable` and covers all items that remain in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
    debug_assert_eq!(iter.len(), self.len());
    RawDrain {
        iter,
        // Move the table into the drain and leave an empty table behind,
        // so leaking the drain cannot expose freed elements through `self`.
        table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.alloc.clone()))),
        orig_table: NonNull::from(self),
        marker: PhantomData,
    }
}
/// Returns an iterator which consumes all elements from the table.
///
/// Iteration starts at the provided iterator's current location.
///
/// It is up to the caller to ensure that the iterator is valid for this
/// `RawTable` and covers all items that remain in the table.
pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
    debug_assert_eq!(iter.len(), self.len());
    // Clone the allocator before `into_allocation` consumes `self`.
    let alloc = self.alloc.clone();
    let allocation = self.into_allocation();
    RawIntoIter {
        iter,
        allocation,
        marker: PhantomData,
        alloc,
    }
}
/// Converts the table into a raw allocation. The contents of the table
/// should be dropped using a `RawIter` before freeing the allocation.
#[cfg_attr(feature = "inline-more", inline)]
pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout)> {
    let alloc = if self.is_empty_singleton() {
        // The empty singleton owns no heap memory.
        None
    } else {
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match calculate_layout::<T>(self.buckets()) {
            Some(lco) => lco,
            // The layout was computed successfully when the table was
            // allocated, so it cannot fail for the same bucket count now.
            None => unsafe { hint::unreachable_unchecked() },
        };
        Some((
            // Rewind from the ctrl array to the start of the allocation.
            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
            layout,
        ))
    };
    // Suppress Drop: ownership of the allocation passes to the caller.
    mem::forget(self);
    alloc
}
}
// SAFETY: `RawTable` owns its elements, so sending or sharing the table is
// equivalent to sending or sharing the contained `T`s.
// NOTE(review): the allocator `A` carries no `Send`/`Sync` bound here —
// confirm this is intended for allocators with thread affinity.
unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A> where T: Send {}
unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A> where T: Sync {}
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
    fn clone(&self) -> Self {
        if self.is_empty_singleton() {
            // No allocation to copy; just make a fresh empty table.
            Self::new_in(self.alloc.clone())
        } else {
            unsafe {
                // `ManuallyDrop` prevents a half-initialized table from
                // being dropped if the element cloning below panics; the
                // guard passed to `clone_from_spec` frees its memory.
                let mut new_table = ManuallyDrop::new(
                    // Avoid `Result::ok_or_else` because it bloats LLVM IR.
                    match Self::new_uninitialized(
                        self.alloc.clone(),
                        self.buckets(),
                        Fallibility::Infallible,
                    ) {
                        Ok(table) => table,
                        Err(_) => hint::unreachable_unchecked(),
                    },
                );
                new_table.clone_from_spec(self, |new_table| {
                    // We need to free the memory allocated for the new table.
                    new_table.free_buckets();
                });
                // Return the newly created table.
                ManuallyDrop::into_inner(new_table)
            }
        }
    }
    fn clone_from(&mut self, source: &Self) {
        if source.is_empty_singleton() {
            *self = Self::new_in(self.alloc.clone());
        } else {
            unsafe {
                // First, drop all our elements without clearing the control bytes.
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                // If necessary, resize our table to match the source.
                if self.buckets() != source.buckets() {
                    // Skip our drop by using ptr::write.
                    if !self.is_empty_singleton() {
                        self.free_buckets();
                    }
                    // `ptr::write` overwrites `self` without running its
                    // destructor (the old buckets were just freed above).
                    (self as *mut Self).write(
                        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                        match Self::new_uninitialized(
                            self.alloc.clone(),
                            source.buckets(),
                            Fallibility::Infallible,
                        ) {
                            Ok(table) => table,
                            Err(_) => hint::unreachable_unchecked(),
                        },
                    );
                }
                self.clone_from_spec(source, |self_| {
                    // We need to leave the table in an empty state.
                    self_.clear_no_drop()
                });
            }
        }
    }
}
/// Specialization of `clone_from` for `Copy` types
trait RawTableClone {
    // `on_panic` is the cleanup callback invoked if cloning an element
    // panics partway through.
    unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self));
}
impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
    // Generic fallback: clone element by element via `clone_from_impl`.
    // NOTE(review): `default_fn!` presumably expands to `default fn` on
    // nightly so the `T: Copy` impl below can specialize it — confirm.
    #[cfg_attr(feature = "inline-more", inline)]
    default_fn! {
        unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
            self.clone_from_impl(source, on_panic);
        }
    }
}
#[cfg(feature = "nightly")]
impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
    // `T: Copy` means cloning cannot panic and a bitwise copy of the
    // control bytes and data array is sufficient, so `_on_panic` is unused.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) {
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
        source
            .data_start()
            .copy_to_nonoverlapping(self.data_start(), self.buckets());
        self.items = source.items;
        self.growth_left = source.growth_left;
    }
}
impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
    /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
        // Copy the control bytes unchanged. We do this in a single pass
        // over the whole control-byte array, including the trailing group.
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
        // The cloning of elements may panic, in which case we need
        // to make sure we drop only the elements that have been
        // cloned so far.
        let mut guard = guard((0, &mut *self), |(index, self_)| {
            if mem::needs_drop::<T>() && self_.len() != 0 {
                // Drop everything cloned up to (and including) `index`.
                for i in 0..=*index {
                    if is_full(*self_.ctrl(i)) {
                        self_.bucket(i).drop();
                    }
                }
            }
            // Depending on whether we were called from clone or clone_from, we
            // either need to free the memory for the destination table or just
            // clear the control bytes.
            on_panic(self_);
        });
        for from in source.iter() {
            // Same bucket layout as the source, so reuse the source index.
            let index = source.bucket_index(&from);
            let to = guard.1.bucket(index);
            to.write(from.as_ref().clone());
            // Update the index in case we need to unwind.
            guard.0 = index;
        }
        // Successfully cloned all items, no need to clean up.
        mem::forget(guard);
        self.items = source.items;
        self.growth_left = source.growth_left;
    }
    /// Variant of `clone_from` to use when a hasher is available.
    #[cfg(feature = "raw")]
    pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
        // If we have enough capacity in the table, just clear it and insert
        // elements one by one. We don't do this if we have the same number of
        // buckets as the source since we can just copy the contents directly
        // in that case.
        if self.buckets() != source.buckets()
            && bucket_mask_to_capacity(self.bucket_mask) >= source.len()
        {
            self.clear();
            let guard_self = guard(&mut *self, |self_| {
                // Clear the partially copied table if a panic occurs, otherwise
                // items and growth_left will be out of sync with the contents
                // of the table.
                self_.clear();
            });
            unsafe {
                for item in source.iter() {
                    // This may panic.
                    let item = item.as_ref().clone();
                    let hash = hasher(&item);
                    // We can use a simpler version of insert() here since:
                    // - there are no DELETED entries.
                    // - we know there is enough space in the table.
                    // - all elements are unique.
                    let index = guard_self.find_insert_slot(hash);
                    guard_self.set_ctrl(index, h2(hash));
                    guard_self.bucket(index).write(item);
                }
            }
            // Successfully cloned all items, no need to clean up.
            mem::forget(guard_self);
            // Fix up the counters in one step rather than per insert.
            self.items = source.items;
            self.growth_left -= source.items;
        } else {
            self.clone_from(source);
        }
    }
}
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable<T, A> {
    // `#[may_dangle]` lets the drop checker accept tables whose `T`
    // contains references that may already be dangling at drop time.
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                // Drop all live elements, then free the allocation.
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
#[cfg(not(feature = "nightly"))]
impl<T, A: Allocator + Clone> Drop for RawTable<T, A> {
    // Stable-channel equivalent of the `nightly` Drop impl above, minus
    // the `#[may_dangle]` attribute (not available on stable).
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                // Drop all live elements, then free the allocation.
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
    type Item = T;
    type IntoIter = RawIntoIter<T, A>;
    #[cfg_attr(feature = "inline-more", inline)]
    fn into_iter(self) -> RawIntoIter<T, A> {
        unsafe {
            // SAFETY: the iterator is created from `self` and covers all
            // items, which is exactly what `into_iter_from` requires.
            let iter = self.iter();
            self.into_iter_from(iter)
        }
    }
}
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track an item count.
pub(crate) struct RawIterRange<T> {
    // Mask of full buckets in the current group. Bits are cleared from this
    // mask as each element is processed.
    current_group: BitMask,
    // Pointer to the buckets for the current group.
    data: Bucket<T>,
    // Pointer to the next group of control bytes.
    // Must be aligned to the group size.
    next_ctrl: *const u8,
    // Pointer one past the last control byte of this range.
    end: *const u8,
}
impl<T> RawIterRange<T> {
    /// Returns a `RawIterRange` covering a subset of a table.
    ///
    /// The control byte address must be aligned to the group size, and
    /// `len` must be non-zero (both checked via debug assertions).
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
        debug_assert_ne!(len, 0);
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
        let end = ctrl.add(len);
        // Load the first group and advance ctrl to point to the next group
        let current_group = Group::load_aligned(ctrl).match_full();
        let next_ctrl = ctrl.add(Group::WIDTH);
        Self {
            current_group,
            data,
            next_ctrl,
            end,
        }
    }
    /// Splits a `RawIterRange` into two halves.
    ///
    /// Returns `None` if the remaining range is smaller than or equal to the
    /// group width.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rayon")]
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
        unsafe {
            if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are current processing
                // is the last one.
                (self, None)
            } else {
                // len is the remaining number of elements after the group that
                // we are currently processing. It must be a multiple of the
                // group size (small tables are caught by the check above).
                let len = offset_from(self.end, self.next_ctrl);
                debug_assert_eq!(len % Group::WIDTH, 0);
                // Split the remaining elements into two halves, but round the
                // midpoint down in case there is an odd number of groups
                // remaining. This ensures that:
                // - The tail is at least 1 group long.
                // - The split is roughly even considering we still have the
                //   current group to process.
                let mid = (len / 2) & !(Group::WIDTH - 1);
                let tail = Self::new(
                    self.next_ctrl.add(mid),
                    self.data.next_n(Group::WIDTH).next_n(mid),
                    len - mid,
                );
                // Sanity-check that head and tail tile the original range.
                debug_assert_eq!(
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                    tail.data.ptr
                );
                debug_assert_eq!(self.end, tail.end);
                self.end = self.next_ctrl.add(mid);
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
                (self, Some(tail))
            }
        }
    }
}
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
// in the actual iterator implementations determine the real Send/Sync bounds
// (e.g. `RawIntoIter` below restricts `Send`/`Sync` to `T: Send`/`T: Sync`).
unsafe impl<T> Send for RawIterRange<T> {}
unsafe impl<T> Sync for RawIterRange<T> {}
impl<T> Clone for RawIterRange<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        // `Bucket<T>` is not `Copy`, so it is cloned explicitly; the raw
        // pointers and the full-bucket bitmask are plain copies.
        let data = self.data.clone();
        Self {
            current_group: self.current_group,
            data,
            next_ctrl: self.next_ctrl,
            end: self.end,
        }
    }
}
impl<T> Iterator for RawIterRange<T> {
    type Item = Bucket<T>;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                // Drain the full buckets of the current group first,
                // lowest bit (lowest bucket index) first.
                if let Some(index) = self.current_group.lowest_set_bit() {
                    self.current_group = self.current_group.remove_lowest_bit();
                    return Some(self.data.next_n(index));
                }
                if self.next_ctrl >= self.end {
                    return None;
                }
                // We might read past self.end up to the next group boundary,
                // but this is fine because it only occurs on tables smaller
                // than the group size where the trailing control bytes are all
                // EMPTY. On larger tables self.end is guaranteed to be aligned
                // to the group size (since tables are power-of-two sized).
                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                self.data = self.data.next_n(Group::WIDTH);
                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
            }
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // We don't have an item count, so just guess based on the range size.
        (
            0,
            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
        )
    }
}
// `next` never yields again after returning `None` (the cursor is at the
// end), so the fused contract holds.
impl<T> FusedIterator for RawIterRange<T> {}
/// Iterator which returns a raw pointer to every full bucket in the table.
///
/// For maximum flexibility this iterator is not bound by a lifetime, but you
/// must observe several rules when using it:
/// - You must not free the hash table while iterating (including via growing/shrinking).
/// - It is fine to erase a bucket that has been yielded by the iterator.
/// - Erasing a bucket that has not yet been yielded by the iterator may still
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
/// - It is unspecified whether an element inserted after the iterator was
///   created will be yielded by that iterator (unless `reflect_insert` is called).
/// - The order in which the iterator yields buckets is unspecified and may
///   change in the future.
pub struct RawIter<T> {
    pub(crate) iter: RawIterRange<T>,
    // Number of items not yet yielded; kept in sync by `reflect_*`.
    items: usize,
}
impl<T> RawIter<T> {
    /// Refresh the iterator so that it reflects a removal from the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each removed bucket before `next` is called again.
    ///
    /// This method should be called _before_ the removal is made. It is not necessary to call this
    /// method if you are removing an item that this iterator yielded in the past.
    #[cfg(feature = "raw")]
    pub fn reflect_remove(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, false);
    }
    /// Refresh the iterator so that it reflects an insertion into the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each insert before `next` is called again.
    ///
    /// This method does not guarantee that an insertion of a bucket with a greater
    /// index than the last one yielded will be reflected in the iterator.
    ///
    /// This method should be called _after_ the given insert is made.
    #[cfg(feature = "raw")]
    pub fn reflect_insert(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, true);
    }
    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
    ///
    /// `is_insert` is `true` for an insertion, `false` for a removal.
    #[cfg(feature = "raw")]
    fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
        unsafe {
            if b.as_ptr() > self.iter.data.as_ptr() {
                // The iterator has already passed the bucket's group.
                // So the toggle isn't relevant to this iterator.
                return;
            }
            if self.iter.next_ctrl < self.iter.end
                && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
            {
                // The iterator has not yet reached the bucket's group.
                // We don't need to reload anything, but we do need to adjust the item count.
                if cfg!(debug_assertions) {
                    // Double-check that the user isn't lying to us by checking the bucket state.
                    // To do that, we need to find its control byte. We know that self.iter.data is
                    // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
                    let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
                    // This method should be called _before_ a removal, or _after_ an insert,
                    // so in both cases the ctrl byte should indicate that the bucket is full.
                    assert!(is_full(*ctrl));
                }
                if is_insert {
                    self.items += 1;
                } else {
                    self.items -= 1;
                }
                return;
            }
            // The iterator is at the bucket group that the toggled bucket is in.
            // We need to do two things:
            //
            // - Determine if the iterator already yielded the toggled bucket.
            //   If it did, we're done.
            // - Otherwise, update the iterator cached group so that it won't
            //   yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
            //   We'll also need to update the item count accordingly.
            if let Some(index) = self.iter.current_group.lowest_set_bit() {
                let next_bucket = self.iter.data.next_n(index);
                if b.as_ptr() > next_bucket.as_ptr() {
                    // The toggled bucket is "before" the bucket the iterator would yield next. We
                    // therefore don't need to do anything --- the iterator has already passed the
                    // bucket in question.
                    //
                    // The item count must already be correct, since a removal or insert "prior" to
                    // the iterator's position wouldn't affect the item count.
                } else {
                    // The removed bucket is an upcoming bucket. We need to make sure it does _not_
                    // get yielded, and also that it's no longer included in the item count.
                    //
                    // NOTE: We can't just reload the group here, both since that might reflect
                    // inserts we've already passed, and because that might inadvertently unset the
                    // bits for _other_ removals. If we do that, we'd have to also decrement the
                    // item count for those other bits that we unset. But the presumably subsequent
                    // call to reflect for those buckets might _also_ decrement the item count.
                    // Instead, we _just_ flip the bit for the particular bucket the caller asked
                    // us to reflect.
                    let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let was_full = self.iter.current_group.flip(our_bit);
                    debug_assert_ne!(was_full, is_insert);
                    if is_insert {
                        self.items += 1;
                    } else {
                        self.items -= 1;
                    }
                    if cfg!(debug_assertions) {
                        if b.as_ptr() == next_bucket.as_ptr() {
                            // The removed bucket should no longer be next
                            debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
                        } else {
                            // We should not have changed what bucket comes next.
                            debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
                        }
                    }
                }
            } else {
                // We must have already iterated past the removed item.
            }
        }
    }
}
impl<T> Clone for RawIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        // Duplicate the range cursor and carry over the remaining-item
        // count unchanged.
        let iter = self.iter.clone();
        let items = self.items;
        Self { iter, items }
    }
}
impl<T> Iterator for RawIter<T> {
    type Item = Bucket<T>;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        if let Some(b) = self.iter.next() {
            // Keep the exact-size count in sync with the yielded buckets.
            self.items -= 1;
            Some(b)
        } else {
            // We don't check against items == 0 here to allow the
            // compiler to optimize away the item count entirely if the
            // iterator length is never queried.
            debug_assert_eq!(self.items, 0);
            None
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The count is exact, which is what makes `ExactSizeIterator` valid.
        (self.items, Some(self.items))
    }
}
impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
/// Iterator which consumes a table and returns elements.
pub struct RawIntoIter<T, A: Allocator + Clone = Global> {
    iter: RawIter<T>,
    // The raw allocation backing the buckets; `None` for the empty
    // singleton. Freed in Drop after remaining elements are dropped.
    allocation: Option<(NonNull<u8>, Layout)>,
    // Marks logical ownership of the `T`s for variance/drop-check purposes.
    marker: PhantomData<T>,
    alloc: A,
}
impl<T, A: Allocator + Clone> RawIntoIter<T, A> {
    /// Returns a borrowing iterator over the remaining (not yet consumed)
    /// buckets without advancing `self`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
// SAFETY: `RawIntoIter` owns its remaining elements, so thread-safety
// follows that of `T`.
unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A> where T: Send {}
unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A> where T: Sync {}
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }
            // Free the table
            if let Some((ptr, layout)) = self.allocation {
                self.alloc.deallocate(ptr, layout);
            }
        }
    }
}
#[cfg(not(feature = "nightly"))]
impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
    // Stable-channel equivalent of the `nightly` Drop impl above.
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }
            // Free the table. `ptr` is already a `NonNull<u8>`, so pass it
            // through directly instead of round-tripping it through
            // `NonNull::new_unchecked(ptr.as_ptr())`; this also keeps this
            // impl in sync with the `nightly` version above.
            if let Some((ptr, layout)) = self.allocation {
                self.alloc.deallocate(ptr, layout);
            }
        }
    }
}
impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
    type Item = T;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        // SAFETY: the bucket was just yielded by the inner iterator and is
        // read exactly once (ownership of the value moves to the caller).
        unsafe { Some(self.iter.next()?.read()) }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Delegates to `RawIter`, whose count is exact.
        self.iter.size_hint()
    }
}
impl<T, A: Allocator + Clone> ExactSizeIterator for RawIntoIter<T, A> {}
impl<T, A: Allocator + Clone> FusedIterator for RawIntoIter<T, A> {}
/// Iterator which consumes elements without freeing the table storage.
pub struct RawDrain<'a, T, A: Allocator + Clone = Global> {
    iter: RawIter<T>,
    // The table is moved into the iterator for the duration of the drain. This
    // ensures that an empty table is left if the drain iterator is leaked
    // without dropping.
    table: ManuallyDrop<RawTable<T, A>>,
    // Where to move the (emptied) table back to when the drain is dropped.
    orig_table: NonNull<RawTable<T, A>>,
    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
    // covariant over T.
    marker: PhantomData<&'a RawTable<T, A>>,
}
impl<T, A: Allocator + Clone> RawDrain<'_, T, A> {
    /// Returns a copy of the underlying raw iterator over the elements still
    /// to be drained; does not advance `self`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
// NOTE(review): these bounds require `A: Allocator + Copy`, whereas every
// other impl in this file uses `A: Allocator + Clone` — confirm whether the
// stricter `Copy` bound is intentional or a leftover inconsistency.
unsafe impl<T, A: Allocator + Copy> Send for RawDrain<'_, T, A> where T: Send {}
unsafe impl<T, A: Allocator + Copy> Sync for RawDrain<'_, T, A> where T: Sync {}
impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements. Note that this may panic.
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }
            // Reset the contents of the table now that all elements have been
            // dropped.
            self.table.clear_no_drop();
            // Move the now empty table back to its original location.
            // (The table was moved into `self.table` when the drain started;
            // this bitwise copy restores it without running any destructor.)
            self.orig_table
                .as_ptr()
                .copy_from_nonoverlapping(&*self.table, 1);
        }
    }
}
impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> {
    type Item = T;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        unsafe {
            // Move the element out by value; storage is freed only when the
            // original table is eventually dropped, not by the drain.
            let item = self.iter.next()?;
            Some(item.read())
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
// The underlying `RawIter` tracks its exact remaining length, so these
// forwarding marker impls are sound.
impl<T, A: Allocator + Clone> ExactSizeIterator for RawDrain<'_, T, A> {}
impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {}
/// Iterator over occupied buckets that could match a given hash.
///
/// In rare cases, the iterator may return a bucket with a different hash.
pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> {
    // The table being probed (borrowed; never mutated here).
    table: &'a RawTable<T, A>,
    // The top 7 bits of the hash.
    h2_hash: u8,
    // The sequence of groups to probe in the search.
    probe_seq: ProbeSeq,
    // The group of control bytes currently being scanned.
    group: Group,
    // The elements within the group with a matching h2-hash.
    bitmask: BitMaskIter,
}
impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> {
    /// Starts a probe for `hash`: loads the first control-byte group of the
    /// probe sequence and precomputes its h2 match mask.
    fn new(table: &'a RawTable<T, A>, hash: u64) -> Self {
        unsafe {
            let h2_hash = h2(hash);
            let probe_seq = table.probe_seq(hash);
            // Load the first group of control bytes at the probe start.
            let group = Group::load(table.ctrl(probe_seq.pos));
            // Candidate positions within the group whose h2 matches.
            let bitmask = group.match_byte(h2_hash).into_iter();
            RawIterHash {
                table,
                h2_hash,
                probe_seq,
                group,
                bitmask,
            }
        }
    }
}
impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> {
    type Item = Bucket<T>;
    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                // First exhaust the h2 matches in the current group.
                if let Some(bit) = self.bitmask.next() {
                    let index = (self.probe_seq.pos + bit) & self.table.bucket_mask;
                    let bucket = self.table.bucket(index);
                    return Some(bucket);
                }
                // An EMPTY byte in the group terminates the probe sequence:
                // the element cannot be further along.
                if likely(self.group.match_empty().any_bit_set()) {
                    return None;
                }
                // Otherwise advance to the next group in the probe sequence.
                self.probe_seq.move_next(self.table.bucket_mask);
                self.group = Group::load(self.table.ctrl(self.probe_seq.pos));
                self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
            }
        }
    }
}
refactor: Extract a non-generic part of RawTable
use crate::alloc::alloc::{handle_alloc_error, Layout};
use crate::scopeguard::guard;
use crate::TryReserveError;
use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
use core::ptr::NonNull;
cfg_if! {
    // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
    // at once instead of 8. We don't bother with AVX since it would require
    // runtime dispatch and wouldn't gain us much anyways: the probability of
    // finding a match drops off drastically after the first few buckets.
    //
    // I attempted an implementation on ARM using NEON instructions, but it
    // turns out that most NEON instructions have multi-cycle latency, which in
    // the end outweighs any gains over the generic implementation.
    //
    // NOTE: miri is excluded (presumably because it cannot execute the SIMD
    // intrinsics) and falls back to the generic path.
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri)
    ))] {
        mod sse2;
        use sse2 as imp;
    } else {
        #[path = "generic.rs"]
        mod generic;
        use generic as imp;
    }
}
mod alloc;
pub(crate) use self::alloc::{do_alloc, Allocator, Global};
mod bitmask;
use self::bitmask::{BitMask, BitMaskIter};
use self::imp::Group;
// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
// On stable we can use #[cold] to get an equivalent effect: this attribute
// suggests that the function is unlikely to be called
#[cfg(not(feature = "nightly"))]
#[inline]
#[cold]
fn cold() {}
/// Stable stand-in for `core::intrinsics::likely`: routes the `false` case
/// through the `#[cold]` function so the optimizer lays out `true` as the
/// hot path.
#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
    match b {
        true => {}
        false => cold(),
    }
    b
}
/// Stable stand-in for `core::intrinsics::unlikely`: routes the `true` case
/// through the `#[cold]` function so the optimizer lays out `false` as the
/// hot path.
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
    match b {
        true => cold(),
        false => {}
    }
    b
}
/// Element-wise distance between two pointers into the same allocation;
/// `to` must be at or above `from`.
#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
/// Element-wise distance between two pointers into the same allocation;
/// `to` must be at or above `from`. Stable fallback for
/// `pointer::offset_from`.
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    let byte_distance = to as usize - from as usize;
    byte_distance / mem::size_of::<T>()
}
/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    /// Report failures to the caller as a `TryReserveError`.
    Fallible,
    /// Panic (capacity overflow) or abort (allocation failure) on error.
    Infallible,
}
impl Fallibility {
    /// Error to return on capacity overflow.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        if let Fallibility::Fallible = self {
            TryReserveError::CapacityOverflow
        } else {
            // Infallible mode treats overflow as a programmer error.
            panic!("Hash table capacity overflow")
        }
    }
    /// Error to return on allocation error.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        if let Fallibility::Fallible = self {
            TryReserveError::AllocError { layout }
        } else {
            // Delegates to the global allocation error handler (aborts).
            handle_alloc_error(layout)
        }
    }
}
// Both special values have the top bit set (see `is_special`); EMPTY is
// distinguished from DELETED by its low bit (see `special_is_empty`).
/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;
/// Control byte value for a deleted bucket.
const DELETED: u8 = 0b1000_0000;
/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    // Full buckets store the 7-bit h2 hash, so their byte value is < 0x80.
    ctrl < 0x80
}
/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    // EMPTY and DELETED both have the high bit set, so their value is >= 0x80.
    ctrl >= 0x80
}
/// Checks whether a special control value is EMPTY (just check 1 bit).
///
/// Must only be called on special bytes (EMPTY = 0xFF, DELETED = 0x80);
/// among those, only EMPTY has its low bit set.
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    (ctrl & 0x01) == 0x01
}
/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // The cast simply discards the upper hash bits on 32-bit platforms.
    hash as usize
}
/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Some hash functions (such as FxHash) produce a usize rather than a full
    // u64, leaving the top 32 bits zero on 32-bit platforms — so only shift
    // within the width the hasher can actually populate.
    let effective_bytes = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
    let shift = effective_bytes * 8 - 7;
    // Keep the top 7 bits of the effective hash.
    ((hash >> shift) & 0x7f) as u8 // truncation
}
/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    // Current control-byte position (masked into the table).
    pos: usize,
    // Current jump distance; grows by one group width per step.
    stride: usize,
}
impl ProbeSeq {
    /// Advances to the next group in the triangular probe sequence.
    #[inline]
    fn move_next(&mut self, bucket_mask: usize) {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= bucket_mask,
            "Went past end of probe sequence"
        );
        // Grow the jump by one group, then step and wrap within the table.
        self.stride += Group::WIDTH;
        self.pos = (self.pos + self.stride) & bucket_mask;
    }
}
/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    debug_assert_ne!(cap, 0);
    // Small tables: require at least one empty bucket so that lookups always
    // terminate even when the key is absent. A table of 2 buckets could only
    // ever hold a single element, so the smallest size handed out is 4
    // buckets (which holds up to 3 elements).
    if cap < 8 {
        let buckets = if cap < 4 { 4 } else { 8 };
        return Some(buckets);
    }
    // Larger tables: keep 1/8 of the buckets empty (87.5% max load).
    //
    // Be careful when modifying this: calculate_layout relies on this
    // checked_mul as its overflow guard.
    let adjusted_cap = cap.checked_mul(8)? / 7;
    // Any overflow was caught by checked_mul, and the rounding slop from the
    // division is absorbed by next_power_of_two (which cannot overflow here
    // because of the previous division).
    Some(adjusted_cap.next_power_of_two())
}
/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    // Remember: the mask is one less than the bucket count.
    match bucket_mask {
        // Tables with 1/2/4/8 buckets always reserve a single empty slot.
        mask if mask < 8 => mask,
        // Larger tables reserve 12.5% of their slots as empty.
        mask => (mask + 1) / 8 * 7,
    }
}
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());
    // Array of buckets
    let data = Layout::array::<T>(buckets).ok()?;
    // Array of control bytes. This must be aligned to the group size.
    //
    // We add `Group::WIDTH` control bytes at the end of the array which
    // replicate the bytes at the start of the array and thus avoids the need to
    // perform bounds-checking while probing.
    //
    // There is no possible overflow here since buckets is a power of two and
    // Group::WIDTH is a small number.
    let ctrl = unsafe { Layout::from_size_align_unchecked(buckets + Group::WIDTH, Group::WIDTH) };
    // `extend` returns both the combined layout and the ctrl-byte offset.
    data.extend(ctrl).ok()
}
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(not(feature = "nightly"))]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());
    // Manual layout calculation since Layout methods are not yet stable.
    let ctrl_align = usize::max(mem::align_of::<T>(), Group::WIDTH);
    // Size of the bucket array, rounded up to the control-byte alignment
    // (the classic `(x + a - 1) & !(a - 1)` round-up).
    let ctrl_offset = mem::size_of::<T>()
        .checked_mul(buckets)?
        .checked_add(ctrl_align - 1)?
        & !(ctrl_align - 1);
    // Total size: data array + control bytes, including the Group::WIDTH
    // replicated trailing control bytes.
    let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
    Some((
        unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
        ctrl_offset,
    ))
}
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // This actually points one past the element (elements are laid out below
    // the control bytes at decreasing addresses — see `from_base_index` /
    // `as_ptr`). Keeping the one-past pointer preserves the usual pointer
    // arithmetic invariants; a direct pointer to the element would complicate
    // them. `NonNull` is used for variance and the niche layout.
    ptr: NonNull<T>,
}
// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}
// Manual impl (rather than derive) avoids a `T: Clone` bound: only the raw
// pointer is copied, never the element.
impl<T> Clone for Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}
impl<T> Bucket<T> {
    /// Creates a bucket for the element at `index`, where `base` is the
    /// one-past-the-first-element "data end" pointer (see `RawTable::data_end`).
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            // ZST: there is no real storage, so encode the index itself in
            // the pointer value.
            // won't overflow because index must be less than length
            (index + 1) as *mut T
        } else {
            // Elements sit below `base` at decreasing addresses.
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    /// Inverse of `from_base_index`: recovers the bucket's index in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        if mem::size_of::<T>() == 0 {
            // ZST: the pointer value is the `index + 1` encoding.
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }
    /// Returns a raw pointer to the element itself (the stored pointer is
    /// one past the element, so step back by one).
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ptr(&self) -> *mut T {
        if mem::size_of::<T>() == 0 {
            // Just return an arbitrary ZST pointer which is properly aligned
            mem::align_of::<T>() as *mut T
        } else {
            self.ptr.as_ptr().sub(1)
        }
    }
    /// Returns the bucket `offset` positions further into the table
    /// (a *lower* address, since elements are laid out downwards).
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            (self.ptr.as_ptr() as usize + offset) as *mut T
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    /// Drops the element in place without touching its slot's control byte.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }
    /// Moves the element out of the bucket by value.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }
    /// Writes `val` into the slot without dropping any previous contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }
    /// Borrows the element; the caller chooses (and must justify) the lifetime.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }
    /// Mutably borrows the element; the caller chooses (and must justify)
    /// the lifetime.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }
    /// Bitwise-copies the element from `other` into this bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}
/// A raw hash table with an unsafe API.
pub struct RawTable<T, A: Allocator + Clone = Global> {
    // The non-generic part of the table (control bytes, masks, counters);
    // shared across all `T` instantiations to reduce monomorphization bloat.
    table: RawTableInner<A>,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}
/// The `T`-independent part of `RawTable`, extracted so it is only
/// monomorphized per-allocator rather than per-element-type.
struct RawTableInner<A> {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,
    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
    //                                ^ points here
    ctrl: NonNull<u8>,
    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,
    // Number of elements in the table, only really used by len()
    items: usize,
    // Allocator that owns the table's backing memory.
    alloc: A,
}
impl<T> RawTable<T, Global> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub const fn new() -> Self {
        Self {
            table: RawTableInner::new_in(Global),
            marker: PhantomData,
        }
    }
    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    #[cfg(feature = "raw")]
    pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
        Self::try_with_capacity_in(capacity, Global)
    }
    /// Allocates a new hash table with at least enough capacity for inserting
    /// the given number of elements without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }
}
impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Creates a new empty hash table without allocating any memory, using the
/// given allocator.
///
/// In effect this returns a table with exactly 1 bucket. However we can
/// leave the data pointer dangling since that bucket is never written to
/// due to our load factor forcing us to always have at least 1 free bucket.
#[cfg_attr(feature = "inline-more", inline)]
pub fn new_in(alloc: A) -> Self {
    Self {
        table: RawTableInner::new_in(alloc),
        marker: PhantomData,
    }
}
/// Allocates a new hash table with the given number of buckets.
///
/// The control bytes are left uninitialized.
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn new_uninitialized(
    alloc: A,
    buckets: usize,
    fallibility: Fallibility,
) -> Result<Self, TryReserveError> {
    debug_assert!(buckets.is_power_of_two());
    // Avoid `Option::ok_or_else` because it bloats LLVM IR.
    let (layout, ctrl_offset) = match calculate_layout::<T>(buckets) {
        Some(lco) => lco,
        None => return Err(fallibility.capacity_overflow()),
    };
    // The actual allocation is done by the non-generic inner table.
    Ok(Self {
        table: RawTableInner::new_uninitialized(
            alloc,
            buckets,
            fallibility,
            layout,
            ctrl_offset,
        )?,
        marker: PhantomData,
    })
}
/// Attempts to allocate a new hash table with at least enough capacity
/// for inserting the given number of elements without reallocating.
fn fallible_with_capacity(
    alloc: A,
    capacity: usize,
    fallibility: Fallibility,
) -> Result<Self, TryReserveError> {
    if capacity == 0 {
        // Zero capacity never allocates (single dangling bucket).
        Ok(Self::new_in(alloc))
    } else {
        unsafe {
            // Avoid `Option::ok_or_else` because it bloats LLVM IR.
            let buckets = match capacity_to_buckets(capacity) {
                Some(buckets) => buckets,
                None => return Err(fallibility.capacity_overflow()),
            };
            let result = Self::new_uninitialized(alloc, buckets, fallibility)?;
            // Mark every bucket (including the replicated trailing group)
            // as EMPTY before the table is used.
            result
                .table
                .ctrl(0)
                .write_bytes(EMPTY, result.table.num_ctrl_bytes());
            Ok(result)
        }
    }
}
/// Attempts to allocate a new hash table using the given allocator, with at least enough
/// capacity for inserting the given number of elements without reallocating.
#[cfg(feature = "raw")]
pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
    Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible)
}
/// Allocates a new hash table using the given allocator, with at least enough capacity for
/// inserting the given number of elements without reallocating.
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
    // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
    match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) {
        Ok(table) => table,
        // Infallible mode panics/aborts instead of returning Err.
        Err(_) => unsafe { hint::unreachable_unchecked() },
    }
}
/// Deallocates the table without dropping any entries.
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn free_buckets(&mut self) {
    // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
    let (layout, ctrl_offset) = match calculate_layout::<T>(self.buckets()) {
        Some(lco) => lco,
        // This layout was computed successfully at allocation time, so it
        // cannot overflow now.
        None => hint::unreachable_unchecked(),
    };
    // `ctrl` points into the middle of the allocation; subtracting
    // `ctrl_offset` recovers the allocation's base pointer.
    self.table.alloc.deallocate(
        NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)),
        layout,
    );
}
/// Returns pointer to one past last element of data table.
/// (The control-byte pointer doubles as the data end, since the layout is
/// `[Padding], T1..Tlast, C1, C2, ...`.)
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn data_end(&self) -> NonNull<T> {
    NonNull::new_unchecked(self.table.ctrl.as_ptr().cast())
}
/// Returns pointer to start of data table.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
pub unsafe fn data_start(&self) -> *mut T {
    // The data array ends at `data_end` and extends `buckets()` elements
    // downwards in memory.
    self.data_end().as_ptr().wrapping_sub(self.buckets())
}
/// Returns the index of a bucket from a `Bucket`.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
    bucket.to_base_index(self.data_end())
}
/// Returns a pointer to an element in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
    // A bucket_mask of 0 would mean the empty singleton table, which has
    // no real storage to point into.
    debug_assert_ne!(self.table.bucket_mask, 0);
    debug_assert!(index < self.buckets());
    Bucket::from_base_index(self.data_end(), index)
}
/// Erases an element from the table without dropping it.
#[cfg_attr(feature = "inline-more", inline)]
#[deprecated(since = "0.8.1", note = "use erase or remove instead")]
pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
    let index = self.bucket_index(item);
    debug_assert!(is_full(*self.table.ctrl(index)));
    // Examine the groups ending at and starting at this slot.
    let index_before = index.wrapping_sub(Group::WIDTH) & self.table.bucket_mask;
    let empty_before = Group::load(self.table.ctrl(index_before)).match_empty();
    let empty_after = Group::load(self.table.ctrl(index)).match_empty();
    // If we are inside a continuous block of Group::WIDTH full or deleted
    // cells then a probe window may have seen a full block when trying to
    // insert. We therefore need to keep that block non-empty so that
    // lookups will continue searching to the next probe window.
    //
    // Note that in this context `leading_zeros` refers to the bytes at the
    // end of a group, while `trailing_zeros` refers to the bytes at the
    // beginning of a group.
    let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
        DELETED
    } else {
        // An EMPTY byte truly frees the slot, so capacity is regained.
        self.table.growth_left += 1;
        EMPTY
    };
    self.table.set_ctrl(index, ctrl);
    self.table.items -= 1;
}
/// Erases an element from the table, dropping it in place.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
#[allow(deprecated)]
pub unsafe fn erase(&mut self, item: Bucket<T>) {
    // Erase the element from the table first since drop might panic.
    self.erase_no_drop(&item);
    item.drop();
}
/// Finds and erases an element from the table, dropping it in place.
/// Returns true if an element was found.
#[cfg(feature = "raw")]
#[cfg_attr(feature = "inline-more", inline)]
pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
    // Avoid `Option::map` because it bloats LLVM IR.
    match self.find(hash, eq) {
        Some(bucket) => {
            // SAFETY: `bucket` was just produced by `find` on this table.
            unsafe { self.erase(bucket) };
            true
        }
        None => false,
    }
}
/// Removes an element from the table, returning it.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
#[allow(deprecated)]
pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
    // Vacate the slot first, then move the value out by value.
    self.erase_no_drop(&item);
    item.read()
}
/// Finds and removes an element from the table, returning it.
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
    // Avoid `Option::map` because it bloats LLVM IR.
    match self.find(hash, eq) {
        Some(bucket) => Some(unsafe { self.remove(bucket) }),
        None => None,
    }
}
/// Searches for an empty or deleted bucket which is suitable for inserting
/// a new element.
///
/// There must be at least 1 empty bucket in the table.
#[cfg_attr(feature = "inline-more", inline)]
fn find_insert_slot(&self, hash: u64) -> usize {
    let mut probe_seq = self.table.probe_seq(hash);
    loop {
        unsafe {
            let group = Group::load(self.table.ctrl(probe_seq.pos));
            if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
                let result = (probe_seq.pos + bit) & self.table.bucket_mask;
                // In tables smaller than the group width, trailing control
                // bytes outside the range of the table are filled with
                // EMPTY entries. These will unfortunately trigger a
                // match, but once masked may point to a full bucket that
                // is already occupied. We detect this situation here and
                // perform a second scan starting at the beginning of the
                // table. This second scan is guaranteed to find an empty
                // slot (due to the load factor) before hitting the trailing
                // control bytes (containing EMPTY).
                if unlikely(is_full(*self.table.ctrl(result))) {
                    debug_assert!(self.table.bucket_mask < Group::WIDTH);
                    debug_assert_ne!(probe_seq.pos, 0);
                    return Group::load_aligned(self.table.ctrl(0))
                        .match_empty_or_deleted()
                        .lowest_set_bit_nonzero();
                }
                return result;
            }
        }
        probe_seq.move_next(self.table.bucket_mask);
    }
}
/// Marks all table buckets as empty without dropping their contents.
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear_no_drop(&mut self) {
    if !self.is_empty_singleton() {
        unsafe {
            // Reset every control byte (including the replicated trailing
            // group) to EMPTY.
            self.table
                .ctrl(0)
                .write_bytes(EMPTY, self.table.num_ctrl_bytes());
        }
    }
    self.table.items = 0;
    self.table.growth_left = bucket_mask_to_capacity(self.table.bucket_mask);
}
/// Removes all elements from the table without freeing the backing memory.
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear(&mut self) {
    // Ensure that the table is reset even if one of the drops panic
    let self_ = guard(self, |self_| self_.clear_no_drop());
    if mem::needs_drop::<T>() && self_.len() != 0 {
        unsafe {
            for item in self_.iter() {
                item.drop();
            }
        }
    }
}
/// Shrinks the table to fit `max(self.len(), min_size)` elements.
#[cfg_attr(feature = "inline-more", inline)]
pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
    // Calculate the minimal number of elements that we need to reserve
    // space for.
    let min_size = usize::max(self.table.items, min_size);
    if min_size == 0 {
        // Shrinking to zero: replace with the non-allocating singleton.
        *self = Self::new_in(self.table.alloc.clone());
        return;
    }
    // Calculate the number of buckets that we need for this number of
    // elements. If the calculation overflows then the requested bucket
    // count must be larger than what we have right now, and nothing needs
    // to be done.
    let min_buckets = match capacity_to_buckets(min_size) {
        Some(buckets) => buckets,
        None => return,
    };
    // If we have more buckets than we need, shrink the table.
    if min_buckets < self.buckets() {
        // Fast path if the table is empty
        if self.table.items == 0 {
            *self = Self::with_capacity_in(min_size, self.table.alloc.clone())
        } else {
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
            if self
                .resize(min_size, hasher, Fallibility::Infallible)
                .is_err()
            {
                unsafe { hint::unreachable_unchecked() }
            }
        }
    }
}
/// Ensures that at least `additional` items can be inserted into the table
/// without reallocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
    if additional > self.table.growth_left {
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
        if self
            .reserve_rehash(additional, hasher, Fallibility::Infallible)
            .is_err()
        {
            // Infallible mode panics/aborts instead of returning Err.
            unsafe { hint::unreachable_unchecked() }
        }
    }
}
/// Tries to ensure that at least `additional` items can be inserted into
/// the table without reallocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn try_reserve(
    &mut self,
    additional: usize,
    hasher: impl Fn(&T) -> u64,
) -> Result<(), TryReserveError> {
    if additional > self.table.growth_left {
        self.reserve_rehash(additional, hasher, Fallibility::Fallible)
    } else {
        // Enough spare capacity already; nothing to do.
        Ok(())
    }
}
/// Out-of-line slow path for `reserve` and `try_reserve`.
#[cold]
#[inline(never)]
fn reserve_rehash(
    &mut self,
    additional: usize,
    hasher: impl Fn(&T) -> u64,
    fallibility: Fallibility,
) -> Result<(), TryReserveError> {
    // Avoid `Option::ok_or_else` because it bloats LLVM IR.
    let new_items = match self.table.items.checked_add(additional) {
        Some(new_items) => new_items,
        None => return Err(fallibility.capacity_overflow()),
    };
    let full_capacity = bucket_mask_to_capacity(self.table.bucket_mask);
    if new_items <= full_capacity / 2 {
        // Rehash in-place without re-allocating if we have plenty of spare
        // capacity that is locked up due to DELETED entries.
        self.rehash_in_place(hasher);
        Ok(())
    } else {
        // Otherwise, conservatively resize to at least the next size up
        // to avoid churning deletes into frequent rehashes.
        self.resize(
            usize::max(new_items, full_capacity + 1),
            hasher,
            fallibility,
        )
    }
}
/// Rehashes the contents of the table in place (i.e. without changing the
/// allocation).
///
/// If `hasher` panics then some of the table's contents may be lost.
fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
    unsafe {
        // Bulk convert all full control bytes to DELETED, and all DELETED
        // control bytes to EMPTY. This effectively frees up all buckets
        // containing a DELETED entry.
        for i in (0..self.buckets()).step_by(Group::WIDTH) {
            let group = Group::load_aligned(self.table.ctrl(i));
            let group = group.convert_special_to_empty_and_full_to_deleted();
            group.store_aligned(self.table.ctrl(i));
        }
        // Fix up the trailing control bytes. See the comments in set_ctrl
        // for the handling of tables smaller than the group width.
        if self.buckets() < Group::WIDTH {
            self.table
                .ctrl(0)
                .copy_to(self.table.ctrl(Group::WIDTH), self.buckets());
        } else {
            self.table
                .ctrl(0)
                .copy_to(self.table.ctrl(self.buckets()), Group::WIDTH);
        }
        // If the hash function panics then properly clean up any elements
        // that we haven't rehashed yet. We unfortunately can't preserve the
        // element since we lost their hash and have no way of recovering it
        // without risking another panic.
        let mut guard = guard(self, |self_| {
            if mem::needs_drop::<T>() {
                for i in 0..self_.buckets() {
                    // After the bulk conversion, DELETED marks elements
                    // that have not been re-inserted yet.
                    if *self_.table.ctrl(i) == DELETED {
                        self_.table.set_ctrl(i, EMPTY);
                        self_.bucket(i).drop();
                        self_.table.items -= 1;
                    }
                }
            }
            self_.table.growth_left =
                bucket_mask_to_capacity(self_.table.bucket_mask) - self_.table.items;
        });
        // At this point, DELETED elements are elements that we haven't
        // rehashed yet. Find them and re-insert them at their ideal
        // position.
        'outer: for i in 0..guard.buckets() {
            if *guard.table.ctrl(i) != DELETED {
                continue;
            }
            'inner: loop {
                // Hash the current item
                let item = guard.bucket(i);
                let hash = hasher(item.as_ref());
                // Search for a suitable place to put it
                let new_i = guard.find_insert_slot(hash);
                // Probing works by scanning through all of the control
                // bytes in groups, which may not be aligned to the group
                // size. If both the new and old position fall within the
                // same unaligned group, then there is no benefit in moving
                // it and we can just continue to the next item.
                let probe_index = |pos: usize| {
                    (pos.wrapping_sub(guard.table.probe_seq(hash).pos)
                        & guard.table.bucket_mask)
                        / Group::WIDTH
                };
                if likely(probe_index(i) == probe_index(new_i)) {
                    guard.table.set_ctrl(i, h2(hash));
                    continue 'outer;
                }
                // We are moving the current item to a new position. Write
                // our H2 to the control byte of the new position.
                let prev_ctrl = *guard.table.ctrl(new_i);
                guard.table.set_ctrl(new_i, h2(hash));
                if prev_ctrl == EMPTY {
                    // If the target slot is empty, simply move the current
                    // element into the new slot and clear the old control
                    // byte.
                    guard.table.set_ctrl(i, EMPTY);
                    guard.bucket(new_i).copy_from_nonoverlapping(&item);
                    continue 'outer;
                } else {
                    // If the target slot is occupied, swap the two elements
                    // and then continue processing the element that we just
                    // swapped into the old slot.
                    debug_assert_eq!(prev_ctrl, DELETED);
                    mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
                    continue 'inner;
                }
            }
        }
        // All elements rehashed successfully: recompute the growth budget
        // and disarm the panic-cleanup guard.
        guard.table.growth_left =
            bucket_mask_to_capacity(guard.table.bucket_mask) - guard.table.items;
        mem::forget(guard);
    }
}
/// Allocates a new table of a different size and moves the contents of the
/// current table into it.
fn resize(
    &mut self,
    capacity: usize,
    hasher: impl Fn(&T) -> u64,
    fallibility: Fallibility,
) -> Result<(), TryReserveError> {
    unsafe {
        debug_assert!(self.table.items <= capacity);
        // Allocate and initialize the new table.
        let mut new_table =
            Self::fallible_with_capacity(self.table.alloc.clone(), capacity, fallibility)?;
        new_table.table.growth_left -= self.table.items;
        new_table.table.items = self.table.items;
        // The hash function may panic, in which case we simply free the new
        // table without dropping any elements that may have been copied into
        // it.
        //
        // This guard is also used to free the old table on success, see
        // the comment at the bottom of this function.
        let mut new_table = guard(ManuallyDrop::new(new_table), |new_table| {
            if !new_table.is_empty_singleton() {
                new_table.free_buckets();
            }
        });
        // Copy all elements to the new table.
        for item in self.iter() {
            // This may panic.
            let hash = hasher(item.as_ref());
            // We can use a simpler version of insert() here since:
            // - there are no DELETED entries.
            // - we know there is enough space in the table.
            // - all elements are unique.
            let index = new_table.find_insert_slot(hash);
            new_table.table.set_ctrl(index, h2(hash));
            new_table.bucket(index).copy_from_nonoverlapping(&item);
        }
        // We successfully copied all elements without panicking. Now replace
        // self with the new table. The old table will have its memory freed but
        // the items will not be dropped (since they have been moved into the
        // new table).
        mem::swap(self, &mut new_table);
        Ok(())
    }
}
/// Inserts a new element into the table, and returns its raw bucket.
///
/// This does not check if the given element already exists in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
    unsafe {
        let mut index = self.find_insert_slot(hash);
        // We can avoid growing the table once we have reached our load
        // factor if we are replacing a tombstone. This works since the
        // number of EMPTY slots does not change in this case.
        let old_ctrl = *self.table.ctrl(index);
        if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
            // Growing rehashes every element, so the slot must be re-found.
            self.reserve(1, hasher);
            index = self.find_insert_slot(hash);
        }
        let bucket = self.bucket(index);
        // Only filling a previously-EMPTY slot consumes growth budget;
        // overwriting a DELETED tombstone leaves the EMPTY count unchanged.
        self.table.growth_left -= special_is_empty(old_ctrl) as usize;
        self.table.set_ctrl(index, h2(hash));
        bucket.write(value);
        self.table.items += 1;
        bucket
    }
}
/// Attempts to insert a new element without growing the table and return its raw bucket.
///
/// Returns an `Err` containing the given element if inserting it would require growing the
/// table.
///
/// This does not check if the given element already exists in the table.
#[cfg(feature = "raw")]
#[cfg_attr(feature = "inline-more", inline)]
pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result<Bucket<T>, T> {
    unsafe {
        let index = self.find_insert_slot(hash);
        // BUGFIX: `ctrl`, `growth_left`, `set_ctrl` and `items` live on the
        // inner `RawTableInner` (`self.table`), not on `RawTable` itself --
        // access them through `self.table` like every sibling method
        // (`insert`, `insert_no_grow`) does. The previous direct accesses
        // could not compile under `feature = "raw"`.
        let old_ctrl = *self.table.ctrl(index);
        if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
            // No budget left and the slot is genuinely EMPTY: inserting would
            // require growth, so hand the value back to the caller.
            Err(value)
        } else {
            let bucket = self.bucket(index);
            // Replacing a DELETED tombstone does not consume growth budget.
            self.table.growth_left -= special_is_empty(old_ctrl) as usize;
            self.table.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.table.items += 1;
            Ok(bucket)
        }
    }
}
/// Inserts a new element into the table, and returns a mutable reference to it.
///
/// This does not check if the given element already exists in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
    // Thin wrapper: delegate to `insert` and borrow the freshly written slot.
    unsafe { self.insert(hash, value, hasher).as_mut() }
}
/// Inserts a new element into the table, without growing the table.
///
/// There must be enough space in the table to insert the new element.
///
/// This does not check if the given element already exists in the table.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
    unsafe {
        let index = self.find_insert_slot(hash);
        let bucket = self.bucket(index);
        // If we are replacing a DELETED entry then we don't need to update
        // the load counter.
        let old_ctrl = *self.table.ctrl(index);
        self.table.growth_left -= special_is_empty(old_ctrl) as usize;
        self.table.set_ctrl(index, h2(hash));
        bucket.write(value);
        self.table.items += 1;
        bucket
    }
}
/// Temporarily removes a bucket, applying the given function to the removed
/// element and optionally putting back the returned value in the same bucket.
///
/// Returns `true` if the bucket still contains an element.
///
/// This does not check if the given bucket is actually occupied.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
where
    F: FnOnce(T) -> Option<T>,
{
    let index = self.bucket_index(&bucket);
    // Snapshot the control byte and growth budget so they can be restored
    // if `f` puts a value back.
    let old_ctrl = *self.table.ctrl(index);
    debug_assert!(is_full(old_ctrl));
    let old_growth_left = self.table.growth_left;
    let item = self.remove(bucket);
    if let Some(new_item) = f(item) {
        // Reinstate the slot exactly as it was before the removal.
        self.table.growth_left = old_growth_left;
        self.table.set_ctrl(index, old_ctrl);
        self.table.items += 1;
        self.bucket(index).write(new_item);
        true
    } else {
        false
    }
}
/// Searches for an element in the table.
///
/// Probes only buckets whose control byte matches `h2(hash)`, then confirms
/// each candidate with `eq`; returns the first match, if any.
#[inline]
pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
    unsafe {
        for bucket in self.iter_hash(hash) {
            let elm = bucket.as_ref();
            if likely(eq(elm)) {
                return Some(bucket);
            }
        }
        None
    }
}
/// Gets a reference to an element in the table.
#[inline]
pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
    // Hand-rolled instead of `Option::map` because `map` bloats LLVM IR.
    if let Some(bucket) = self.find(hash, eq) {
        // SAFETY: `find` only yields occupied buckets of this table, and
        // the resulting borrow is tied to `&self`.
        Some(unsafe { bucket.as_ref() })
    } else {
        None
    }
}
/// Gets a mutable reference to an element in the table.
#[inline]
pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
    // Hand-rolled instead of `Option::map` because `map` bloats LLVM IR.
    if let Some(bucket) = self.find(hash, eq) {
        // SAFETY: `find` only yields occupied buckets of this table, and
        // the resulting borrow is tied to `&mut self`.
        Some(unsafe { bucket.as_mut() })
    } else {
        None
    }
}
/// Returns the number of elements the map can hold without reallocating.
///
/// This number is a lower bound; the table might be able to hold
/// more, but is guaranteed to be able to hold at least this many.
#[cfg_attr(feature = "inline-more", inline)]
pub fn capacity(&self) -> usize {
    // Occupied slots plus the remaining growth budget.
    self.table.items + self.table.growth_left
}
/// Returns the number of elements in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub fn len(&self) -> usize {
    self.table.items
}
/// Returns the number of buckets in the table.
///
/// Always a power of two (`bucket_mask` is `buckets - 1`).
#[cfg_attr(feature = "inline-more", inline)]
pub fn buckets(&self) -> usize {
    self.table.bucket_mask + 1
}
/// Returns whether this table points to the empty singleton with a capacity
/// of 0.
///
/// Such tables own no allocation and must never be freed.
#[cfg_attr(feature = "inline-more", inline)]
fn is_empty_singleton(&self) -> bool {
    self.table.bucket_mask == 0
}
/// Returns an iterator over every element in the table. It is up to
/// the caller to ensure that the `RawTable` outlives the `RawIter`.
/// Because we cannot make the `next` method unsafe on the `RawIter`
/// struct, we have to make the `iter` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn iter(&self) -> RawIter<T> {
    // Anchor a bucket pointer at base index 0 of the data area; the iterator
    // walks control bytes and data in lockstep from there.
    let data = Bucket::from_base_index(self.data_end(), 0);
    RawIter {
        iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()),
        // Track the element count so the iterator is ExactSize.
        items: self.table.items,
    }
}
/// Returns an iterator over occupied buckets that could match a given hash.
///
/// In rare cases, the iterator may return a bucket with a different hash
/// (h2 values are only 7 bits, so distinct hashes can collide).
///
/// It is up to the caller to ensure that the `RawTable` outlives the
/// `RawIterHash`. Because we cannot make the `next` method unsafe on the
/// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> {
    RawIterHash::new(self, hash)
}
/// Returns an iterator which removes all elements from the table without
/// freeing the memory.
#[cfg_attr(feature = "inline-more", inline)]
pub fn drain(&mut self) -> RawDrain<'_, T, A> {
    unsafe {
        // A full-table iterator trivially satisfies drain_iter_from's
        // "covers all remaining items" requirement.
        let iter = self.iter();
        self.drain_iter_from(iter)
    }
}
/// Returns an iterator which removes all elements from the table without
/// freeing the memory.
///
/// Iteration starts at the provided iterator's current location.
///
/// It is up to the caller to ensure that the iterator is valid for this
/// `RawTable` and covers all items that remain in the table.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
    debug_assert_eq!(iter.len(), self.len());
    RawDrain {
        iter,
        // Move the table into the drain and leave an empty table in `self`;
        // this keeps `self` valid even if the drain is leaked.
        table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))),
        // Remember where to put the (emptied) table back on drop.
        orig_table: NonNull::from(self),
        marker: PhantomData,
    }
}
/// Returns an iterator which consumes all elements from the table.
///
/// Iteration starts at the provided iterator's current location.
///
/// It is up to the caller to ensure that the iterator is valid for this
/// `RawTable` and covers all items that remain in the table.
pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
    debug_assert_eq!(iter.len(), self.len());
    // Clone the allocator first: `into_allocation` consumes `self`.
    let alloc = self.table.alloc.clone();
    // Take ownership of the raw allocation; elements are dropped by the
    // iterator, the memory by RawIntoIter's Drop.
    let allocation = self.into_allocation();
    RawIntoIter {
        iter,
        allocation,
        marker: PhantomData,
        alloc,
    }
}
/// Converts the table into a raw allocation. The contents of the table
/// should be dropped using a `RawIter` before freeing the allocation.
///
/// Returns `None` for the unallocated empty singleton.
#[cfg_attr(feature = "inline-more", inline)]
pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout)> {
    let alloc = if self.table.is_empty_singleton() {
        None
    } else {
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match calculate_layout::<T>(self.table.buckets()) {
            Some(lco) => lco,
            // This layout was computed successfully at allocation time,
            // so it cannot fail here.
            None => unsafe { hint::unreachable_unchecked() },
        };
        Some((
            // Rewind from the ctrl array to the true allocation start.
            unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
            layout,
        ))
    };
    // Suppress Drop: ownership of the memory moved to the caller.
    mem::forget(self);
    alloc
}
}
// SAFETY: RawTable owns its elements, so it is Send/Sync exactly when T is.
unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A> where T: Send {}
unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A> where T: Sync {}
impl<A> RawTableInner<A> {
    /// Creates the allocation-free empty singleton table (capacity 0).
    /// All such tables share a static, all-EMPTY control-byte group.
    #[cfg_attr(feature = "inline-more", inline)]
    const fn new_in(alloc: A) -> Self {
        Self {
            // Be careful to cast the entire slice to a raw pointer.
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
            bucket_mask: 0,
            items: 0,
            growth_left: 0,
            alloc,
        }
    }
}
impl<A: Allocator + Clone> RawTableInner<A> {
    /// Allocates a table for `buckets` slots without initializing the
    /// control bytes (hence `unsafe`); the caller must fill them before use.
    ///
    /// `layout`/`ctrl_offset` must come from `calculate_layout` for the same
    /// bucket count -- TODO(review): confirm against the (unseen) caller.
    unsafe fn new_uninitialized(
        alloc: A,
        buckets: usize,
        fallibility: Fallibility,
        layout: Layout,
        ctrl_offset: usize,
    ) -> Result<Self, TryReserveError> {
        // The probing scheme relies on power-of-two table sizes.
        debug_assert!(buckets.is_power_of_two());
        let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
            Ok(block) => block.cast(),
            Err(_) => return Err(fallibility.alloc_err(layout)),
        };
        // The ctrl array lives `ctrl_offset` bytes into the allocation.
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
        Ok(Self {
            ctrl,
            bucket_mask: buckets - 1,
            items: 0,
            growth_left: bucket_mask_to_capacity(buckets - 1),
            alloc,
        })
    }
    /// Returns an iterator-like object for a probe sequence on the table.
    ///
    /// This iterator never terminates, but is guaranteed to visit each bucket
    /// group exactly once. The loop using `probe_seq` must terminate upon
    /// reaching a group containing an empty bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
        ProbeSeq {
            // Start at the slot selected by the top hash bits, masked into range.
            pos: h1(hash) & self.bucket_mask,
            stride: 0,
        }
    }
    /// Sets a control byte, and possibly also the replicated control byte at
    /// the end of the array.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
        // Replicate the first Group::WIDTH control bytes at the end of
        // the array without using a branch:
        // - If index >= Group::WIDTH then index == index2.
        // - Otherwise index2 == self.bucket_mask + 1 + index.
        //
        // The very last replicated control byte is never actually read because
        // we mask the initial index for unaligned loads, but we write it
        // anyways because it makes the set_ctrl implementation simpler.
        //
        // If there are fewer buckets than Group::WIDTH then this code will
        // replicate the buckets at the end of the trailing group. For example
        // with 2 buckets and a group size of 4, the control bytes will look
        // like this:
        //
        // Real | Replicated
        // ---------------------------------------------
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
        // ---------------------------------------------
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
        *self.ctrl(index) = ctrl;
        *self.ctrl(index2) = ctrl;
    }
    /// Returns a pointer to a control byte.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
        // Index may address the replicated tail, hence num_ctrl_bytes, not buckets.
        debug_assert!(index < self.num_ctrl_bytes());
        self.ctrl.as_ptr().add(index)
    }
    /// Total capacity: occupied slots plus remaining growth budget.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity(&self) -> usize {
        self.items + self.growth_left
    }
    /// Number of occupied slots.
    #[cfg_attr(feature = "inline-more", inline)]
    fn len(&self) -> usize {
        self.items
    }
    /// Number of buckets (always a power of two).
    #[cfg_attr(feature = "inline-more", inline)]
    fn buckets(&self) -> usize {
        self.bucket_mask + 1
    }
    /// Length of the control-byte array, including the replicated tail group.
    #[cfg_attr(feature = "inline-more", inline)]
    fn num_ctrl_bytes(&self) -> usize {
        self.bucket_mask + 1 + Group::WIDTH
    }
    /// Whether this is the shared, allocation-free empty table.
    #[cfg_attr(feature = "inline-more", inline)]
    fn is_empty_singleton(&self) -> bool {
        self.bucket_mask == 0
    }
}
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
    fn clone(&self) -> Self {
        if self.table.is_empty_singleton() {
            // Nothing to copy; just hand out a fresh empty singleton.
            Self::new_in(self.table.alloc.clone())
        } else {
            unsafe {
                let mut new_table = ManuallyDrop::new(
                    // Avoid `Result::ok_or_else` because it bloats LLVM IR.
                    match Self::new_uninitialized(
                        self.table.alloc.clone(),
                        self.table.buckets(),
                        Fallibility::Infallible,
                    ) {
                        Ok(table) => table,
                        // Infallible mode aborts on failure instead of returning Err.
                        Err(_) => hint::unreachable_unchecked(),
                    },
                );
                new_table.clone_from_spec(self, |new_table| {
                    // We need to free the memory allocated for the new table.
                    new_table.free_buckets();
                });
                // Return the newly created table.
                ManuallyDrop::into_inner(new_table)
            }
        }
    }
    fn clone_from(&mut self, source: &Self) {
        if source.table.is_empty_singleton() {
            // Dropping the old self frees its buckets; replace with empty.
            *self = Self::new_in(self.table.alloc.clone());
        } else {
            unsafe {
                // First, drop all our elements without clearing the control bytes.
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                // If necessary, resize our table to match the source.
                if self.buckets() != source.buckets() {
                    // Skip our drop by using ptr::write.
                    if !self.is_empty_singleton() {
                        self.free_buckets();
                    }
                    (self as *mut Self).write(
                        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                        match Self::new_uninitialized(
                            self.table.alloc.clone(),
                            source.buckets(),
                            Fallibility::Infallible,
                        ) {
                            Ok(table) => table,
                            Err(_) => hint::unreachable_unchecked(),
                        },
                    );
                }
                self.clone_from_spec(source, |self_| {
                    // We need to leave the table in an empty state.
                    self_.clear_no_drop()
                });
            }
        }
    }
}
/// Specialization of `clone_from` for `Copy` types
///
/// `on_panic` restores the destination to a sane state if cloning panics.
trait RawTableClone {
    unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self));
}
// Default (non-specialized) path: element-by-element clone via clone_from_impl.
impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    default_fn! {
        unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
            self.clone_from_spec_impl(source, on_panic);
        }
    }
}
// Nightly specialization: `Copy` elements can be cloned with two memcpys
// (control bytes + data), with no panic path, so `_on_panic` is unused.
#[cfg(feature = "nightly")]
impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) {
        source
            .table
            .ctrl(0)
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
        source
            .data_start()
            .copy_to_nonoverlapping(self.data_start(), self.buckets());
        self.table.items = source.table.items;
        self.table.growth_left = source.table.growth_left;
    }
}
impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
    /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
        // Copy the control bytes unchanged. We do this in a single pass
        source
            .table
            .ctrl(0)
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
        // The cloning of elements may panic, in which case we need
        // to make sure we drop only the elements that have been
        // cloned so far.
        let mut guard = guard((0, &mut *self), |(index, self_)| {
            if mem::needs_drop::<T>() && self_.len() != 0 {
                // Only buckets up to and including `index` have been written;
                // the control bytes tell us which of those are full.
                for i in 0..=*index {
                    if is_full(*self_.table.ctrl(i)) {
                        self_.bucket(i).drop();
                    }
                }
            }
            // Depending on whether we were called from clone or clone_from, we
            // either need to free the memory for the destination table or just
            // clear the control bytes.
            on_panic(self_);
        });
        for from in source.iter() {
            let index = source.bucket_index(&from);
            let to = guard.1.bucket(index);
            // May panic; covered by the guard above.
            to.write(from.as_ref().clone());
            // Update the index in case we need to unwind.
            guard.0 = index;
        }
        // Successfully cloned all items, no need to clean up.
        mem::forget(guard);
        self.table.items = source.table.items;
        self.table.growth_left = source.table.growth_left;
    }
    /// Variant of `clone_from` to use when a hasher is available.
    #[cfg(feature = "raw")]
    pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
        // If we have enough capacity in the table, just clear it and insert
        // elements one by one. We don't do this if we have the same number of
        // buckets as the source since we can just copy the contents directly
        // in that case.
        if self.buckets() != source.buckets()
            && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len()
        {
            self.clear();
            let guard_self = guard(&mut *self, |self_| {
                // Clear the partially copied table if a panic occurs, otherwise
                // items and growth_left will be out of sync with the contents
                // of the table.
                self_.clear();
            });
            unsafe {
                for item in source.iter() {
                    // This may panic.
                    let item = item.as_ref().clone();
                    let hash = hasher(&item);
                    // We can use a simpler version of insert() here since:
                    // - there are no DELETED entries.
                    // - we know there is enough space in the table.
                    // - all elements are unique.
                    let index = guard_self.find_insert_slot(hash);
                    guard_self.table.set_ctrl(index, h2(hash));
                    guard_self.bucket(index).write(item);
                }
            }
            // Successfully cloned all items, no need to clean up.
            mem::forget(guard_self);
            self.table.items = source.table.items;
            self.table.growth_left -= source.table.items;
        } else {
            self.clone_from(source);
        }
    }
}
// Nightly version: `#[may_dangle]` tells dropck that dropping the table
// does not access `T` beyond dropping it, allowing more flexible lifetimes.
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        // The empty singleton owns no allocation and must not be freed.
        if !self.is_empty_singleton() {
            unsafe {
                // Drop elements first (skipped entirely for trivially-droppable T),
                // then release the bucket storage.
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
// Stable version: identical logic to the nightly impl, minus `#[may_dangle]`.
#[cfg(not(feature = "nightly"))]
impl<T, A: Allocator + Clone> Drop for RawTable<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        // The empty singleton owns no allocation and must not be freed.
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
    type Item = T;
    type IntoIter = RawIntoIter<T, A>;
    #[cfg_attr(feature = "inline-more", inline)]
    fn into_iter(self) -> RawIntoIter<T, A> {
        unsafe {
            // A full-table iterator; ownership of elements and allocation
            // transfers to the RawIntoIter.
            let iter = self.iter();
            self.into_iter_from(iter)
        }
    }
}
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track an item count.
pub(crate) struct RawIterRange<T> {
    // Mask of full buckets in the current group. Bits are cleared from this
    // mask as each element is processed.
    current_group: BitMask,
    // Pointer to the buckets for the current group.
    data: Bucket<T>,
    // Pointer to the next group of control bytes,
    // Must be aligned to the group size.
    next_ctrl: *const u8,
    // Pointer one past the last control byte of this range.
    end: *const u8,
}
impl<T> RawIterRange<T> {
    /// Returns a `RawIterRange` covering a subset of a table.
    ///
    /// The control byte address must be aligned to the group size.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
        debug_assert_ne!(len, 0);
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
        let end = ctrl.add(len);
        // Load the first group and advance ctrl to point to the next group
        let current_group = Group::load_aligned(ctrl).match_full();
        let next_ctrl = ctrl.add(Group::WIDTH);
        Self {
            current_group,
            data,
            next_ctrl,
            end,
        }
    }
    /// Splits a `RawIterRange` into two halves.
    ///
    /// Returns `None` if the remaining range is smaller than or equal to the
    /// group width.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rayon")]
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
        unsafe {
            if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are current processing
                // is the last one.
                (self, None)
            } else {
                // len is the remaining number of elements after the group that
                // we are currently processing. It must be a multiple of the
                // group size (small tables are caught by the check above).
                let len = offset_from(self.end, self.next_ctrl);
                debug_assert_eq!(len % Group::WIDTH, 0);
                // Split the remaining elements into two halves, but round the
                // midpoint down in case there is an odd number of groups
                // remaining. This ensures that:
                // - The tail is at least 1 group long.
                // - The split is roughly even considering we still have the
                //   current group to process.
                let mid = (len / 2) & !(Group::WIDTH - 1);
                let tail = Self::new(
                    self.next_ctrl.add(mid),
                    self.data.next_n(Group::WIDTH).next_n(mid),
                    len - mid,
                );
                debug_assert_eq!(
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                    tail.data.ptr
                );
                debug_assert_eq!(self.end, tail.end);
                // Truncate the head half; it keeps the group currently in flight.
                self.end = self.next_ctrl.add(mid);
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
                (self, Some(tail))
            }
        }
    }
}
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
// in the actual iterator implementations determine the real Send/Sync bounds.
unsafe impl<T> Send for RawIterRange<T> {}
unsafe impl<T> Sync for RawIterRange<T> {}
impl<T> Clone for RawIterRange<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
data: self.data.clone(),
next_ctrl: self.next_ctrl,
current_group: self.current_group,
end: self.end,
}
}
}
impl<T> Iterator for RawIterRange<T> {
    type Item = Bucket<T>;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                // Drain the current group's full-bucket bitmask first.
                if let Some(index) = self.current_group.lowest_set_bit() {
                    self.current_group = self.current_group.remove_lowest_bit();
                    return Some(self.data.next_n(index));
                }
                if self.next_ctrl >= self.end {
                    return None;
                }
                // We might read past self.end up to the next group boundary,
                // but this is fine because it only occurs on tables smaller
                // than the group size where the trailing control bytes are all
                // EMPTY. On larger tables self.end is guaranteed to be aligned
                // to the group size (since tables are power-of-two sized).
                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                self.data = self.data.next_n(Group::WIDTH);
                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
            }
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // We don't have an item count, so just guess based on the range size.
        (
            0,
            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
        )
    }
}
impl<T> FusedIterator for RawIterRange<T> {}
/// Iterator which returns a raw pointer to every full bucket in the table.
///
/// For maximum flexibility this iterator is not bound by a lifetime, but you
/// must observe several rules when using it:
/// - You must not free the hash table while iterating (including via growing/shrinking).
/// - It is fine to erase a bucket that has been yielded by the iterator.
/// - Erasing a bucket that has not yet been yielded by the iterator may still
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
/// - It is unspecified whether an element inserted after the iterator was
///   created will be yielded by that iterator (unless `reflect_insert` is called).
/// - The order in which the iterator yields buckets is unspecified and may
///   change in the future.
pub struct RawIter<T> {
    pub(crate) iter: RawIterRange<T>,
    // Remaining element count, kept in sync by `next` (and the reflect_* hooks)
    // so the iterator can be ExactSize.
    items: usize,
}
impl<T> RawIter<T> {
    /// Refresh the iterator so that it reflects a removal from the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each removed bucket before `next` is called again.
    ///
    /// This method should be called _before_ the removal is made. It is not necessary to call this
    /// method if you are removing an item that this iterator yielded in the past.
    #[cfg(feature = "raw")]
    pub fn reflect_remove(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, false);
    }
    /// Refresh the iterator so that it reflects an insertion into the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each insert before `next` is called again.
    ///
    /// This method does not guarantee that an insertion of a bucket with a greater
    /// index than the last one yielded will be reflected in the iterator.
    ///
    /// This method should be called _after_ the given insert is made.
    #[cfg(feature = "raw")]
    pub fn reflect_insert(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, true);
    }
    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
    ///
    /// `is_insert == true` means the bucket was just filled; `false` means it
    /// is about to be emptied.
    #[cfg(feature = "raw")]
    fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
        unsafe {
            // NOTE(review): bucket pointers appear to DECREASE as iteration
            // advances here (data grows downward), hence the direction of
            // these comparisons -- confirm against `Bucket`'s layout.
            if b.as_ptr() > self.iter.data.as_ptr() {
                // The iterator has already passed the bucket's group.
                // So the toggle isn't relevant to this iterator.
                return;
            }
            if self.iter.next_ctrl < self.iter.end
                && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
            {
                // The iterator has not yet reached the bucket's group.
                // We don't need to reload anything, but we do need to adjust the item count.
                if cfg!(debug_assertions) {
                    // Double-check that the user isn't lying to us by checking the bucket state.
                    // To do that, we need to find its control byte. We know that self.iter.data is
                    // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
                    let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
                    // This method should be called _before_ a removal, or _after_ an insert,
                    // so in both cases the ctrl byte should indicate that the bucket is full.
                    assert!(is_full(*ctrl));
                }
                if is_insert {
                    self.items += 1;
                } else {
                    self.items -= 1;
                }
                return;
            }
            // The iterator is at the bucket group that the toggled bucket is in.
            // We need to do two things:
            //
            // - Determine if the iterator already yielded the toggled bucket.
            //   If it did, we're done.
            // - Otherwise, update the iterator cached group so that it won't
            //   yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
            //   We'll also need to update the item count accordingly.
            if let Some(index) = self.iter.current_group.lowest_set_bit() {
                let next_bucket = self.iter.data.next_n(index);
                if b.as_ptr() > next_bucket.as_ptr() {
                    // The toggled bucket is "before" the bucket the iterator would yield next. We
                    // therefore don't need to do anything --- the iterator has already passed the
                    // bucket in question.
                    //
                    // The item count must already be correct, since a removal or insert "prior" to
                    // the iterator's position wouldn't affect the item count.
                } else {
                    // The removed bucket is an upcoming bucket. We need to make sure it does _not_
                    // get yielded, and also that it's no longer included in the item count.
                    //
                    // NOTE: We can't just reload the group here, both since that might reflect
                    // inserts we've already passed, and because that might inadvertently unset the
                    // bits for _other_ removals. If we do that, we'd have to also decrement the
                    // item count for those other bits that we unset. But the presumably subsequent
                    // call to reflect for those buckets might _also_ decrement the item count.
                    // Instead, we _just_ flip the bit for the particular bucket the caller asked
                    // us to reflect.
                    let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let was_full = self.iter.current_group.flip(our_bit);
                    debug_assert_ne!(was_full, is_insert);
                    if is_insert {
                        self.items += 1;
                    } else {
                        self.items -= 1;
                    }
                    if cfg!(debug_assertions) {
                        if b.as_ptr() == next_bucket.as_ptr() {
                            // The removed bucket should no longer be next
                            debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
                        } else {
                            // We should not have changed what bucket comes next.
                            debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
                        }
                    }
                }
            } else {
                // We must have already iterated past the removed item.
            }
        }
    }
}
impl<T> Clone for RawIter<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
iter: self.iter.clone(),
items: self.items,
}
}
}
impl<T> Iterator for RawIter<T> {
    type Item = Bucket<T>;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        if let Some(b) = self.iter.next() {
            // Keep the ExactSize count in sync with the underlying range.
            self.items -= 1;
            Some(b)
        } else {
            // We don't check against items == 0 here to allow the
            // compiler to optimize away the item count entirely if the
            // iterator length is never queried.
            debug_assert_eq!(self.items, 0);
            None
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact: `items` is maintained by `next` and the reflect_* hooks.
        (self.items, Some(self.items))
    }
}
// `items` gives an exact length; once exhausted the iterator stays exhausted.
impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
/// Iterator which consumes a table and returns elements.
pub struct RawIntoIter<T, A: Allocator + Clone = Global> {
    // Cursor over the (still allocated) buckets.
    iter: RawIter<T>,
    // The raw allocation to free on drop; None for the empty singleton.
    allocation: Option<(NonNull<u8>, Layout)>,
    // Declares logical ownership of the `T`s for drop-check purposes.
    marker: PhantomData<T>,
    alloc: A,
}
impl<T, A: Allocator + Clone> RawIntoIter<T, A> {
    /// Returns a snapshot of the remaining-element cursor (does not consume).
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
// SAFETY: RawIntoIter owns its elements, so it is Send/Sync exactly when T is.
unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A> where T: Send {}
unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A> where T: Sync {}
// Nightly version with `#[may_dangle]` for relaxed drop-check.
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }
            // Free the table
            if let Some((ptr, layout)) = self.allocation {
                self.alloc.deallocate(ptr, layout);
            }
        }
    }
}
// Stable version: identical logic to the nightly impl, minus `#[may_dangle]`.
#[cfg(not(feature = "nightly"))]
impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }
            // Free the table. `ptr` is already a `NonNull<u8>`, so pass it
            // straight through instead of round-tripping it via
            // `NonNull::new_unchecked(ptr.as_ptr())`; this matches the
            // nightly impl above.
            if let Some((ptr, layout)) = self.allocation {
                self.alloc.deallocate(ptr, layout);
            }
        }
    }
}
impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
    type Item = T;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        // Move the value out of its bucket; the slot's ctrl byte is left
        // untouched, which is fine since the allocation is freed wholesale
        // on drop.
        unsafe { Some(self.iter.next()?.read()) }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
// Exact length and fused behavior are inherited from the inner RawIter.
impl<T, A: Allocator + Clone> ExactSizeIterator for RawIntoIter<T, A> {}
impl<T, A: Allocator + Clone> FusedIterator for RawIntoIter<T, A> {}
/// Iterator which consumes elements without freeing the table storage.
pub struct RawDrain<'a, T, A: Allocator + Clone = Global> {
    iter: RawIter<T>,
    // The table is moved into the iterator for the duration of the drain. This
    // ensures that an empty table is left if the drain iterator is leaked
    // without dropping.
    table: ManuallyDrop<RawTable<T, A>>,
    // Where to move the (emptied) table back to when the drain drops.
    orig_table: NonNull<RawTable<T, A>>,
    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
    // covariant over T.
    marker: PhantomData<&'a RawTable<T, A>>,
}
impl<T, A: Allocator + Clone> RawDrain<'_, T, A> {
    /// Returns a snapshot of the remaining-element cursor (does not consume).
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
// SAFETY: RawDrain owns the drained elements, so Send/Sync follow T's.
// NOTE(review): these bounds use `A: Allocator + Copy` while every other
// impl in this file uses `+ Clone` -- looks intentional in this snapshot,
// but worth confirming against upstream.
unsafe impl<T, A: Allocator + Copy> Send for RawDrain<'_, T, A> where T: Send {}
unsafe impl<T, A: Allocator + Copy> Sync for RawDrain<'_, T, A> where T: Sync {}
impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements. Note that this may panic.
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }
            // Reset the contents of the table now that all elements have been
            // dropped.
            self.table.clear_no_drop();
            // Move the now empty table back to its original location. The
            // allocation is reused; only the bookkeeping was reset.
            self.orig_table
                .as_ptr()
                .copy_from_nonoverlapping(&*self.table, 1);
        }
    }
}
impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> {
    type Item = T;
    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        unsafe {
            // Move the value out; control bytes are cleared en masse when
            // the drain finishes (see Drop above).
            let item = self.iter.next()?;
            Some(item.read())
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
// Exact length and fused behavior are inherited from the inner RawIter.
impl<T, A: Allocator + Clone> ExactSizeIterator for RawDrain<'_, T, A> {}
impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {}
/// Iterator over occupied buckets that could match a given hash.
///
/// In rare cases, the iterator may return a bucket with a different hash
/// (only the 7-bit h2 value is compared).
pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> {
    table: &'a RawTable<T, A>,
    // The top 7 bits of the hash.
    h2_hash: u8,
    // The sequence of groups to probe in the search.
    probe_seq: ProbeSeq,
    // Control-byte group currently being scanned.
    group: Group,
    // The elements within the group with a matching h2-hash.
    bitmask: BitMaskIter,
}
impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> {
    /// Creates an iterator positioned at the first probe group for `hash`,
    /// with the group's h2 matches precomputed into `bitmask`.
    fn new(table: &'a RawTable<T, A>, hash: u64) -> Self {
        unsafe {
            let h2_hash = h2(hash);
            let probe_seq = table.table.probe_seq(hash);
            // Load the control bytes of the first group in the probe sequence.
            let group = Group::load(table.table.ctrl(probe_seq.pos));
            // Candidate slots in this group whose control byte matches h2.
            let bitmask = group.match_byte(h2_hash).into_iter();
            RawIterHash {
                table,
                h2_hash,
                probe_seq,
                group,
                bitmask,
            }
        }
    }
}
impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> {
    type Item = Bucket<T>;

    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                // First exhaust the h2 matches within the current group.
                if let Some(bit) = self.bitmask.next() {
                    // Wrap the slot index into the table with the bucket mask.
                    let index = (self.probe_seq.pos + bit) & self.table.table.bucket_mask;
                    let bucket = self.table.bucket(index);
                    return Some(bucket);
                }
                // A group containing an EMPTY control byte ends the search.
                if likely(self.group.match_empty().any_bit_set()) {
                    return None;
                }
                // Advance to the next group in the probe sequence and rescan
                // it for h2 matches.
                self.probe_seq.move_next(self.table.table.bucket_mask);
                self.group = Group::load(self.table.table.ctrl(self.probe_seq.pos));
                self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
            }
        }
    }
}
|
// Copyright (c) 2016
// Jeff Nettleton
//
// Licensed under the MIT license (http://opensource.org/licenses/MIT). This
// file may not be copied, modified, or distributed except according to those
// terms
use std::collections::HashMap;
use rustc_serialize::{json, Decodable};
/// This enum represents the various types of HTTP requests.
#[derive(PartialEq, Eq, Hash, Debug, Copy, Clone)]
pub enum Method {
    Get,
    Put,
    Post,
    Delete,
    // Fallback for request methods this server does not implement.
    NoImpl,
}
/// A trait that allows for extracting variables from URIs.
pub trait FromUri {
    /// A function to parse a string into the correct type.
    ///
    /// Implementations may panic when `data` cannot be parsed (see the
    /// numeric impls below).
    fn from_uri(data: &str) -> Self;
}
impl FromUri for String {
    /// String captures pass through unchanged as an owned copy.
    fn from_uri(data: &str) -> String {
        data.to_string()
    }
}
impl FromUri for i32 {
    /// Parse the matched segment as a signed 32-bit integer.
    ///
    /// Panics when the segment is not a valid `i32`.
    fn from_uri(data: &str) -> i32 {
        data.parse::<i32>()
            .unwrap_or_else(|e| panic!("matched integer can't be parsed: {:?}", e))
    }
}
impl FromUri for u32 {
    /// Parse the matched segment as an unsigned 32-bit integer.
    ///
    /// Panics when the segment is not a valid `u32`.
    fn from_uri(data: &str) -> u32 {
        data.parse::<u32>()
            .unwrap_or_else(|e| panic!("matched integer can't be parsed: {:?}", e))
    }
}
impl FromUri for f32 {
    /// Parse the matched segment as a 32-bit float.
    ///
    /// Panics when the segment is not a valid `f32`.
    fn from_uri(data: &str) -> f32 {
        data.parse::<f32>()
            .unwrap_or_else(|e| panic!("matched float can't be parsed: {:?}", e))
    }
}
/// This struct represents a request from an HTTP client.
#[derive(Debug)]
pub struct Request {
    pub method: Method,
    pub path: String,
    // Raw request body bytes (empty when the request carried no payload).
    pub payload: Vec<u8>,
    // Route parameters captured from the URI.
    pub params: HashMap<String, String>,
    // Parsed request headers, keyed by header name.
    headers: HashMap<String, String>,
}
impl Request {
    /// Create a new, empty Request.
    pub fn new() -> Request {
        Request {
            method: Method::NoImpl,
            path: String::new(),
            headers: HashMap::new(),
            params: HashMap::new(),
            // Preallocate a reasonable buffer for typical request bodies.
            payload: Vec::with_capacity(2048),
        }
    }

    /// Create a Request from an HTTP request string.
    pub fn from_str(rqstr: &str) -> Request {
        let mut req = Request::new();
        req.parse(rqstr);
        req
    }

    /// Get an HTTP header contained in the Request.
    ///
    /// Returns `None` when the header was not present in the request. Added
    /// because the `headers` field is private and was otherwise unreachable.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use canteen::{Request, Response};
    /// use canteen::utils;
    ///
    /// // Given the route "/hello"
    /// fn handler(req: &Request) -> Response {
    ///     let browser = req.get_header("User-Agent");
    ///
    ///     match browser {
    ///         Some(ua) => utils::make_response(format!("You're using {}!", ua), "text/plain", 200),
    ///         None => utils::make_response("Bad browser, no user agent!", "text/plain", 200),
    ///     }
    /// }
    /// ```
    pub fn get_header(&self, name: &str) -> Option<String> {
        // `HashMap<String, String>` can be queried with a `&str` key via the
        // `Borrow` impl, so no temporary owned key is needed; `cloned()`
        // copies the value out for the caller.
        self.headers.get(name).cloned()
    }

    /// Get a variable from the URI.
    ///
    /// # Panics
    ///
    /// Panics when `name` was not captured from the route, or when the
    /// captured text cannot be parsed as `T` (see the `FromUri` impls).
    ///
    /// # Examples
    ///
    /// ```rust
    /// use canteen::{Request, Response};
    /// use canteen::utils;
    ///
    /// // Given the route "/hello/<str:name>"
    /// fn handler(req: &Request) -> Response {
    ///     let name: String = req.get("name");
    ///     utils::make_response(format!("<b>Hello, {}!</b>", name), "text/html", 200)
    /// }
    /// ```
    pub fn get<T: FromUri>(&self, name: &str) -> T {
        if !self.params.contains_key(name) {
            panic!("invalid route parameter {:?}", name);
        }
        FromUri::from_uri(&self.params[name])
    }

    /// Get a raw JSON payload from the request.
    ///
    /// Returns `None` when the payload is not valid UTF-8 or not valid JSON.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use canteen::{Request, Response};
    /// use canteen::utils;
    ///
    /// // Given the POST route "/hello"
    /// fn handler(req: &Request) -> Response {
    ///     let data = req.get_json();
    ///
    ///     match data {
    ///         Some(val) => utils::make_response(format!("We got: {}", val), "text/plain", 200),
    ///         None => utils::make_response("We got nothing :(", "text/plain", 200),
    ///     }
    /// }
    /// ```
    pub fn get_json(&self) -> Option<json::Json> {
        match String::from_utf8(self.payload.clone()) {
            Err(_) => None,
            Ok(payload) => {
                match json::Json::from_str(&payload) {
                    Ok(data) => Some(data),
                    Err(_) => None,
                }
            }
        }
    }

    /// Get a composed JSON payload from the request.
    ///
    /// # Panics
    ///
    /// Panics when the payload is not valid UTF-8.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use canteen::{Request, Response};
    ///
    /// #[derive(RustcDecodable)]
    /// struct Foo {
    ///     item: i32,
    /// }
    ///
    /// // Given the POST route "/hello"
    /// fn handler(req: &Request) -> Response {
    ///     let data: Foo = req.get_json_obj();
    ///
    ///     match data {
    ///         Ok(foo) => utils::make_response(format!("We got: {}!", data.item), "text/plain", 200),
    ///         Err(_) => utils::make_response("We got nothing :(", "text/plain", 200),
    ///     }
    /// }
    /// ```
    pub fn get_json_obj<T: Decodable>(&self) -> Result<T, json::DecoderError> {
        let data = String::from_utf8(self.payload.clone()).unwrap();
        json::decode(&data)
    }

    /// Parse a raw HTTP request string into this Request.
    fn parse(&mut self, rqstr: &str) {
        // First split: request line vs. the rest (headers + body).
        let mut buf: Vec<&str> = rqstr.splitn(2, "\r\n").collect();
        let ask: Vec<&str> = buf[0].splitn(3, ' ').collect();
        self.method = match ask[0] {
            "GET" => Method::Get,
            "PUT" | "PATCH" => Method::Put,
            "POST" => Method::Post,
            "DELETE" => Method::Delete,
            _ => Method::NoImpl,
        };
        self.path = String::from(ask[1]);
        // Consume one header line per iteration; a blank line separates the
        // headers from the payload.
        loop {
            buf = buf[1].splitn(2, "\r\n").collect();
            if buf[0] == "" {
                if buf.len() == 1 || buf[1] == "" {
                    // no payload
                    break;
                }
                self.payload.extend(buf[1].as_bytes());
                break;
            }
            let hdr: Vec<&str> = buf[0].splitn(2, ": ").collect();
            if hdr.len() == 2 {
                self.headers.insert(String::from(hdr[0]), String::from(hdr[1]));
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Minimal decodable payload used by the JSON round-trip test.
    #[derive(RustcDecodable)]
    struct Foo {
        item: i32,
    }

    #[test]
    fn test_fromuri_trait_i32() {
        let pos = String::from("1234");
        assert_eq!(1234, FromUri::from_uri(&pos));
        let neg = String::from("-4321");
        assert_eq!(-4321, FromUri::from_uri(&neg));
    }

    #[test]
    fn test_fromuri_trait_u32() {
        let orig = String::from("1234");
        assert_eq!(1234, FromUri::from_uri(&orig));
    }

    #[test]
    fn test_fromuri_trait_string() {
        let orig = String::from("foobar");
        assert_eq!("foobar", <String as FromUri>::from_uri(&orig));
    }

    #[test]
    fn test_fromuri_trait_float() {
        let pos = String::from("123.45");
        assert_eq!(123.45f32, FromUri::from_uri(&pos));
        let neg = String::from("-54.321");
        assert_eq!(-54.321f32, FromUri::from_uri(&neg));
    }

    #[test]
    fn test_get_fromuri_i32() {
        // `Request::get` parses a stored route parameter through FromUri.
        let mut req = Request::new();
        req.params.insert(String::from("test"), String::from("1234"));
        assert_eq!(1234, req.get("test"));
    }

    #[test]
    fn test_get_json() {
        // Raw JSON payloads come back as a dynamically-typed Json value.
        let mut req = Request::new();
        req.payload.extend_from_slice("{ \"item\": 123 }".as_bytes());
        let data = req.get_json().unwrap();
        assert_eq!(true, data.is_object());
        let obj = data.as_object().unwrap();
        let val = obj.get("item").unwrap();
        assert_eq!(true, val.is_u64());
        assert_eq!(123u64, val.as_u64().unwrap());
    }

    #[test]
    fn test_get_json_obj() {
        // Typed decoding into a RustcDecodable struct.
        let mut req = Request::new();
        req.payload.extend_from_slice("{ \"item\": 123 }".as_bytes());
        let data: Foo = req.get_json_obj().unwrap();
        assert_eq!(123, data.item);
    }
}
Add a previously missing function to get HTTP headers from the request.
// Copyright (c) 2016
// Jeff Nettleton
//
// Licensed under the MIT license (http://opensource.org/licenses/MIT). This
// file may not be copied, modified, or distributed except according to those
// terms
use std::collections::HashMap;
use rustc_serialize::{json, Decodable};
/// This enum represents the various types of HTTP requests.
#[derive(PartialEq, Eq, Hash, Debug, Copy, Clone)]
pub enum Method {
    Get,
    Put,
    Post,
    Delete,
    // Fallback for request methods this server does not implement.
    NoImpl,
}
/// A trait that allows for extracting variables from URIs.
pub trait FromUri {
    /// A function to parse a string into the correct type.
    ///
    /// Implementations may panic when `data` cannot be parsed (see the
    /// numeric impls below).
    fn from_uri(data: &str) -> Self;
}
impl FromUri for String {
    /// String captures pass through unchanged as an owned copy.
    fn from_uri(data: &str) -> String {
        data.to_string()
    }
}
impl FromUri for i32 {
    /// Parse the matched segment as a signed 32-bit integer.
    ///
    /// Panics when the segment is not a valid `i32`.
    fn from_uri(data: &str) -> i32 {
        data.parse::<i32>()
            .unwrap_or_else(|e| panic!("matched integer can't be parsed: {:?}", e))
    }
}
impl FromUri for u32 {
    /// Parse the matched segment as an unsigned 32-bit integer.
    ///
    /// Panics when the segment is not a valid `u32`.
    fn from_uri(data: &str) -> u32 {
        data.parse::<u32>()
            .unwrap_or_else(|e| panic!("matched integer can't be parsed: {:?}", e))
    }
}
impl FromUri for f32 {
    /// Parse the matched segment as a 32-bit float.
    ///
    /// Panics when the segment is not a valid `f32`.
    fn from_uri(data: &str) -> f32 {
        data.parse::<f32>()
            .unwrap_or_else(|e| panic!("matched float can't be parsed: {:?}", e))
    }
}
/// This struct represents a request from an HTTP client.
#[derive(Debug)]
pub struct Request {
    pub method: Method,
    pub path: String,
    // Raw request body bytes (empty when the request carried no payload).
    pub payload: Vec<u8>,
    // Route parameters captured from the URI.
    pub params: HashMap<String, String>,
    // Parsed request headers, keyed by header name; exposed via get_header().
    headers: HashMap<String, String>,
}
impl Request {
    /// Create a new, empty Request.
    pub fn new() -> Request {
        Request {
            method: Method::NoImpl,
            path: String::new(),
            headers: HashMap::new(),
            params: HashMap::new(),
            // Preallocate a reasonable buffer for typical request bodies.
            payload: Vec::with_capacity(2048),
        }
    }

    /// Create a Request from an HTTP request string.
    pub fn from_str(rqstr: &str) -> Request {
        let mut req = Request::new();
        req.parse(rqstr);
        req
    }

    /// Get an HTTP header contained in the Request.
    ///
    /// Returns `None` when the header was not present in the request.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use canteen::{Request, Response};
    /// use canteen::utils;
    ///
    /// // Given the route "/hello"
    /// fn handler(req: &Request) -> Response {
    ///     let browser = req.get_header("User-Agent");
    ///
    ///     match browser {
    ///         Some(ua) => utils::make_response(format!("You're using {}!", ua), "text/plain", 200),
    ///         None => utils::make_response("Bad browser, no user agent!", "text/plain", 200),
    ///     }
    /// }
    /// ```
    pub fn get_header(&self, name: &str) -> Option<String> {
        // `HashMap<String, String>` can be queried with a `&str` key via the
        // `Borrow` impl, so the previous `String::from(name)` allocation was
        // unnecessary; `cloned()` replaces the manual Some/None match.
        self.headers.get(name).cloned()
    }

    /// Get a variable from the URI.
    ///
    /// # Panics
    ///
    /// Panics when `name` was not captured from the route, or when the
    /// captured text cannot be parsed as `T` (see the `FromUri` impls).
    ///
    /// # Examples
    ///
    /// ```rust
    /// use canteen::{Request, Response};
    /// use canteen::utils;
    ///
    /// // Given the route "/hello/<str:name>"
    /// fn handler(req: &Request) -> Response {
    ///     let name: String = req.get("name");
    ///     utils::make_response(format!("<b>Hello, {}!</b>", name), "text/html", 200)
    /// }
    /// ```
    pub fn get<T: FromUri>(&self, name: &str) -> T {
        if !self.params.contains_key(name) {
            panic!("invalid route parameter {:?}", name);
        }
        FromUri::from_uri(&self.params[name])
    }

    /// Get a raw JSON payload from the request.
    ///
    /// Returns `None` when the payload is not valid UTF-8 or not valid JSON.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use canteen::{Request, Response};
    /// use canteen::utils;
    ///
    /// // Given the POST route "/hello"
    /// fn handler(req: &Request) -> Response {
    ///     let data = req.get_json();
    ///
    ///     match data {
    ///         Some(val) => utils::make_response(format!("We got: {}", val), "text/plain", 200),
    ///         None => utils::make_response("We got nothing :(", "text/plain", 200),
    ///     }
    /// }
    /// ```
    pub fn get_json(&self) -> Option<json::Json> {
        match String::from_utf8(self.payload.clone()) {
            Err(_) => None,
            Ok(payload) => {
                match json::Json::from_str(&payload) {
                    Ok(data) => Some(data),
                    Err(_) => None,
                }
            }
        }
    }

    /// Get a composed JSON payload from the request.
    ///
    /// # Panics
    ///
    /// Panics when the payload is not valid UTF-8.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use canteen::{Request, Response};
    ///
    /// #[derive(RustcDecodable)]
    /// struct Foo {
    ///     item: i32,
    /// }
    ///
    /// // Given the POST route "/hello"
    /// fn handler(req: &Request) -> Response {
    ///     let data: Foo = req.get_json_obj();
    ///
    ///     match data {
    ///         Ok(foo) => utils::make_response(format!("We got: {}!", data.item), "text/plain", 200),
    ///         Err(_) => utils::make_response("We got nothing :(", "text/plain", 200),
    ///     }
    /// }
    /// ```
    pub fn get_json_obj<T: Decodable>(&self) -> Result<T, json::DecoderError> {
        let data = String::from_utf8(self.payload.clone()).unwrap();
        json::decode(&data)
    }

    /// Parse a raw HTTP request string into this Request.
    fn parse(&mut self, rqstr: &str) {
        // First split: request line vs. the rest (headers + body).
        let mut buf: Vec<&str> = rqstr.splitn(2, "\r\n").collect();
        let ask: Vec<&str> = buf[0].splitn(3, ' ').collect();
        self.method = match ask[0] {
            "GET" => Method::Get,
            "PUT" | "PATCH" => Method::Put,
            "POST" => Method::Post,
            "DELETE" => Method::Delete,
            _ => Method::NoImpl,
        };
        self.path = String::from(ask[1]);
        // Consume one header line per iteration; a blank line separates the
        // headers from the payload.
        loop {
            buf = buf[1].splitn(2, "\r\n").collect();
            if buf[0] == "" {
                if buf.len() == 1 || buf[1] == "" {
                    // no payload
                    break;
                }
                self.payload.extend(buf[1].as_bytes());
                break;
            }
            let hdr: Vec<&str> = buf[0].splitn(2, ": ").collect();
            if hdr.len() == 2 {
                self.headers.insert(String::from(hdr[0]), String::from(hdr[1]));
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Minimal decodable payload used by the JSON round-trip test.
    #[derive(RustcDecodable)]
    struct Foo {
        item: i32,
    }

    #[test]
    fn test_fromuri_trait_i32() {
        let pos = String::from("1234");
        assert_eq!(1234, FromUri::from_uri(&pos));
        let neg = String::from("-4321");
        assert_eq!(-4321, FromUri::from_uri(&neg));
    }

    #[test]
    fn test_fromuri_trait_u32() {
        let orig = String::from("1234");
        assert_eq!(1234, FromUri::from_uri(&orig));
    }

    #[test]
    fn test_fromuri_trait_string() {
        let orig = String::from("foobar");
        assert_eq!("foobar", <String as FromUri>::from_uri(&orig));
    }

    #[test]
    fn test_fromuri_trait_float() {
        let pos = String::from("123.45");
        assert_eq!(123.45f32, FromUri::from_uri(&pos));
        let neg = String::from("-54.321");
        assert_eq!(-54.321f32, FromUri::from_uri(&neg));
    }

    #[test]
    fn test_get_fromuri_i32() {
        // `Request::get` parses a stored route parameter through FromUri.
        let mut req = Request::new();
        req.params.insert(String::from("test"), String::from("1234"));
        assert_eq!(1234, req.get("test"));
    }

    #[test]
    fn test_get_json() {
        // Raw JSON payloads come back as a dynamically-typed Json value.
        let mut req = Request::new();
        req.payload.extend_from_slice("{ \"item\": 123 }".as_bytes());
        let data = req.get_json().unwrap();
        assert_eq!(true, data.is_object());
        let obj = data.as_object().unwrap();
        let val = obj.get("item").unwrap();
        assert_eq!(true, val.is_u64());
        assert_eq!(123u64, val.as_u64().unwrap());
    }

    #[test]
    fn test_get_json_obj() {
        // Typed decoding into a RustcDecodable struct.
        let mut req = Request::new();
        req.payload.extend_from_slice("{ \"item\": 123 }".as_bytes());
        let data: Foo = req.get_json_obj().unwrap();
        assert_eq!(123, data.item);
    }
}
|
//
// Copyright 2014 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
extern crate libc;
use std::collections::BTreeMap;
use std::ffi::{CStr, CString};
use std::fs;
use std::io;
use std::ops::Deref;
use std::path::Path;
use std::slice;
use std::str::from_utf8;
use self::libc::{c_void, size_t};
use rocksdb_ffi::{self, DBCFHandle, error_message};
use rocksdb_options::Options;
/// Handle to an open RocksDB database plus its column-family handles.
pub struct DB {
    inner: rocksdb_ffi::DBInstance,
    // Column-family handles, keyed by family name; freed in Drop.
    cfs: BTreeMap<String, DBCFHandle>,
}

// NOTE(review): asserted manually because the handle types are raw FFI
// pointers; soundness relies on the RocksDB C API being callable from
// multiple threads — confirm against the C API docs.
unsafe impl Send for DB {}
unsafe impl Sync for DB {}

/// A batch of write operations, applied together via `DB::write`.
pub struct WriteBatch {
    inner: rocksdb_ffi::DBWriteBatch,
}

/// Options controlling reads (snapshot pinning, cache fill — see impl below).
pub struct ReadOptions {
    inner: rocksdb_ffi::DBReadOptions,
}

/// A point-in-time, read-only view of the database.
pub struct Snapshot<'a> {
    db: &'a DB,
    inner: rocksdb_ffi::DBSnapshot,
}

/// Cursor over the database's key/value pairs.
pub struct DBIterator<'a> {
    db: &'a DB,
    inner: rocksdb_ffi::DBIterator,
    direction: Direction,
    // True immediately after a seek; the first next() must not advance.
    just_seeked: bool,
}

/// Direction of iteration.
// NOTE(review): lowercase variant names are non-idiomatic Rust, but renaming
// them would break existing callers of this public enum.
pub enum Direction {
    forward,
    reverse,
}
impl<'a> Iterator for DBIterator<'a> {
    type Item = (Box<[u8]>, Box<[u8]>);

    /// Advance the native iterator (unless freshly seeked) and copy the
    /// current key/value pair into owned buffers.
    fn next(&mut self) -> Option<(Box<[u8]>, Box<[u8]>)> {
        let native_iter = self.inner;
        if !self.just_seeked {
            // Initial call to next() after seeking should not move the iterator
            // or the first item will not be returned
            match self.direction {
                Direction::forward => unsafe { rocksdb_ffi::rocksdb_iter_next(native_iter) },
                Direction::reverse => unsafe { rocksdb_ffi::rocksdb_iter_prev(native_iter) },
            }
        } else {
            self.just_seeked = false;
        }
        if unsafe { rocksdb_ffi::rocksdb_iter_valid(native_iter) } {
            // The C API reports key/value lengths through out-parameters.
            let mut key_len: size_t = 0;
            let key_len_ptr: *mut size_t = &mut key_len;
            let mut val_len: size_t = 0;
            let val_len_ptr: *mut size_t = &mut val_len;
            let key_ptr = unsafe {
                rocksdb_ffi::rocksdb_iter_key(native_iter, key_len_ptr)
            };
            let key = unsafe {
                slice::from_raw_parts(key_ptr, key_len as usize)
            };
            let val_ptr = unsafe {
                rocksdb_ffi::rocksdb_iter_value(native_iter, val_len_ptr)
            };
            let val = unsafe {
                slice::from_raw_parts(val_ptr, val_len as usize)
            };
            // Copy out of the iterator-owned buffers before the next advance
            // can invalidate them.
            Some((key.to_vec().into_boxed_slice(),
                  val.to_vec().into_boxed_slice()))
        } else {
            None
        }
    }
}
/// Where iteration begins: at the first key, at the last key, or seeked to a
/// specific key with a given direction of travel.
pub enum IteratorMode<'a> {
    Start,
    End,
    From(&'a [u8], Direction),
}
impl<'a> DBIterator<'a> {
    /// Build an iterator over the default column family, seeked per `mode`.
    fn new<'b>(db: &'a DB, readopts: &'b ReadOptions, mode: IteratorMode) -> DBIterator<'a> {
        unsafe {
            let iterator = rocksdb_ffi::rocksdb_create_iterator(db.inner,
                                                                readopts.inner);
            let mut rv = DBIterator {
                db: db,
                inner: iterator,
                direction: Direction::forward, // blown away by set_mode()
                just_seeked: false,
            };
            rv.set_mode(mode);
            rv
        }
    }

    /// Re-seek the iterator (first key, last key, or a specific key) and set
    /// the direction used by subsequent `next()` calls.
    pub fn set_mode(&mut self, mode: IteratorMode) {
        unsafe {
            match mode {
                IteratorMode::Start => {
                    rocksdb_ffi::rocksdb_iter_seek_to_first(self.inner);
                    self.direction = Direction::forward;
                },
                IteratorMode::End => {
                    rocksdb_ffi::rocksdb_iter_seek_to_last(self.inner);
                    self.direction = Direction::reverse;
                },
                IteratorMode::From(key, dir) => {
                    rocksdb_ffi::rocksdb_iter_seek(self.inner,
                                                   key.as_ptr(),
                                                   key.len() as size_t);
                    self.direction = dir;
                }
            };
            // Ensure the first next() yields the sought item itself.
            self.just_seeked = true;
        }
    }

    /// Build an iterator over the given column family, seeked per `mode`.
    fn new_cf(db: &'a DB,
              cf_handle: DBCFHandle,
              readopts: &ReadOptions,
              mode: IteratorMode)
              -> Result<DBIterator<'a>, String> {
        unsafe {
            let iterator =
                rocksdb_ffi::rocksdb_create_iterator_cf(db.inner,
                                                        readopts.inner,
                                                        cf_handle);
            let mut rv = DBIterator {
                db: db,
                inner: iterator,
                direction: Direction::forward, // blown away by set_mode()
                just_seeked: false,
            };
            rv.set_mode(mode);
            Ok(rv)
        }
    }
}
impl<'a> Drop for DBIterator<'a> {
    fn drop(&mut self) {
        // Free the native iterator handle.
        unsafe {
            rocksdb_ffi::rocksdb_iter_destroy(self.inner);
        }
    }
}
impl <'a> Snapshot<'a> {
    /// Take a point-in-time snapshot of `db`; released when dropped.
    pub fn new(db: &DB) -> Snapshot {
        let snapshot = unsafe {
            rocksdb_ffi::rocksdb_create_snapshot(db.inner)
        };
        Snapshot {
            db: db,
            inner: snapshot,
        }
    }

    /// Iterate over the database as it looked when the snapshot was taken.
    pub fn iterator(&self, mode: IteratorMode) -> DBIterator {
        let mut readopts = ReadOptions::new();
        readopts.set_snapshot(self);
        DBIterator::new(self.db, &readopts, mode)
    }
}
impl <'a> Drop for Snapshot<'a> {
    fn drop(&mut self) {
        // Return the snapshot handle to the database that issued it.
        unsafe {
            rocksdb_ffi::rocksdb_release_snapshot(self.db.inner, self.inner);
        }
    }
}
// This is for the DB and write batches to share the same API
pub trait Writable {
    /// Store `value` under `key` in the default column family.
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
    /// Store `value` under `key` in column family `cf`.
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String>;
    /// Merge `value` into `key` via RocksDB's merge operation.
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
    /// Merge `value` into `key` within column family `cf`.
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String>;
    /// Remove `key` from the default column family.
    fn delete(&self, key: &[u8]) -> Result<(), String>;
    /// Remove `key` from column family `cf`.
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String>;
}
impl DB {
    /// Open (creating if missing) a database at `path` with default options.
    pub fn open_default(path: &str) -> Result<DB, String> {
        let mut opts = Options::new();
        opts.create_if_missing(true);
        DB::open(&opts, path)
    }

    /// Open a database at `path` with only the default column family.
    pub fn open(opts: &Options, path: &str) -> Result<DB, String> {
        DB::open_cf(opts, path, &[])
    }

    /// Open a database along with the named column families.
    ///
    /// The "default" column family is always opened even when not listed.
    pub fn open_cf(opts: &Options,
                   path: &str,
                   cfs: &[&str])
                   -> Result<DB, String> {
        let cpath = match CString::new(path.as_bytes()) {
            Ok(c) => c,
            Err(_) => return Err("Failed to convert path to CString when \
                                  opening rocksdb"
                                     .to_string()),
        };
        let cpath_ptr = cpath.as_ptr();
        let ospath = Path::new(path);
        // NOTE(review): `e` below is unused (the OS error is discarded).
        match fs::create_dir_all(&ospath) {
            Err(e) =>
                return Err("Failed to create rocksdb directory.".to_string()),
            Ok(_) => (),
        }
        // Out-parameter through which the C API reports error strings.
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        let db: rocksdb_ffi::DBInstance;
        let mut cfMap = BTreeMap::new();
        if cfs.len() == 0 {
            unsafe {
                db = rocksdb_ffi::rocksdb_open(opts.inner, cpath_ptr as *const _, err_ptr);
            }
        } else {
            let mut cfs_v = cfs.to_vec();
            // Always open the default column family
            if !cfs_v.contains(&"default") {
                cfs_v.push("default");
            }
            // We need to store our CStrings in an intermediate vector
            // so that their pointers remain valid.
            let c_cfs: Vec<CString> = cfs_v.iter()
                                           .map(|cf| {
                                               CString::new(cf.as_bytes())
                                                   .unwrap()
                                           })
                                           .collect();
            let cfnames: Vec<*const _> = c_cfs.iter()
                                              .map(|cf| cf.as_ptr())
                                              .collect();
            // These handles will be populated by DB.
            let mut cfhandles: Vec<rocksdb_ffi::DBCFHandle> =
                cfs_v.iter()
                     .map(|_| rocksdb_ffi::DBCFHandle(0 as *mut c_void))
                     .collect();
            // TODO(tyler) allow options to be passed in.
            let cfopts: Vec<rocksdb_ffi::DBOptions> =
                cfs_v.iter()
                     .map(|_| unsafe { rocksdb_ffi::rocksdb_options_create() })
                     .collect();
            // Prepare to ship to C.
            let copts: *const rocksdb_ffi::DBOptions = cfopts.as_ptr();
            let handles: *const rocksdb_ffi::DBCFHandle = cfhandles.as_ptr();
            let nfam = cfs_v.len();
            unsafe {
                db = rocksdb_ffi::rocksdb_open_column_families(opts.inner, cpath_ptr as *const _,
                                                               nfam as libc::c_int,
                                                               cfnames.as_ptr() as *const _,
                                                               copts, handles, err_ptr);
            }
            // A null handle means the C side failed to open that family.
            for handle in cfhandles.iter() {
                if handle.0.is_null() {
                    return Err("Received null column family handle from DB."
                                   .to_string());
                }
            }
            for (n, h) in cfs_v.iter().zip(cfhandles) {
                cfMap.insert(n.to_string(), h);
            }
        }
        if !err.is_null() {
            return Err(error_message(err));
        }
        if db.0.is_null() {
            return Err("Could not initialize database.".to_string());
        }
        Ok(DB {
            inner: db,
            cfs: cfMap,
        })
    }

    /// Delete the database files at `path`.
    pub fn destroy(opts: &Options, path: &str) -> Result<(), String> {
        let cpath = CString::new(path.as_bytes()).unwrap();
        let cpath_ptr = cpath.as_ptr();
        // NOTE(review): `ospath` is unused here.
        let ospath = Path::new(path);
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        unsafe {
            rocksdb_ffi::rocksdb_destroy_db(opts.inner, cpath_ptr as *const _, err_ptr);
        }
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }

    /// Attempt to repair a damaged database at `path`.
    // NOTE(review): takes `opts` by value, unlike destroy/open which borrow.
    pub fn repair(opts: Options, path: &str) -> Result<(), String> {
        let cpath = CString::new(path.as_bytes()).unwrap();
        let cpath_ptr = cpath.as_ptr();
        // NOTE(review): `ospath` is unused here.
        let ospath = Path::new(path);
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        unsafe {
            rocksdb_ffi::rocksdb_repair_db(opts.inner, cpath_ptr as *const _, err_ptr);
        }
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }

    /// Apply all operations queued in `batch` to the database.
    pub fn write(&self, batch: WriteBatch) -> Result<(), String> {
        let writeopts = unsafe { rocksdb_ffi::rocksdb_writeoptions_create() };
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        unsafe {
            rocksdb_ffi::rocksdb_write(self.inner,
                                       writeopts.clone(),
                                       batch.inner,
                                       err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
        }
        if !err.is_null() {
            return Err(error_message(err));
        }
        return Ok(());
    }

    /// Read the value stored at `key`, or `Ok(None)` if absent.
    pub fn get(&self, key: &[u8]) -> Result<Option<DBVector>, String> {
        unsafe {
            let readopts = rocksdb_ffi::rocksdb_readoptions_create();
            if readopts.0.is_null() {
                return Err("Unable to create rocksdb read options. This is \
                            a fairly trivial call, and its failure may be \
                            indicative of a mis-compiled or mis-loaded \
                            rocksdb library."
                               .to_string());
            }
            // The value length is returned through this out-parameter.
            let val_len: size_t = 0;
            let val_len_ptr = &val_len as *const size_t;
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            let val =
                rocksdb_ffi::rocksdb_get(self.inner,
                                         readopts.clone(),
                                         key.as_ptr(),
                                         key.len() as size_t,
                                         val_len_ptr,
                                         err_ptr) as *mut u8;
            rocksdb_ffi::rocksdb_readoptions_destroy(readopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            // Null means "not found"; otherwise DBVector takes ownership.
            match val.is_null() {
                true => Ok(None),
                false => {
                    Ok(Some(DBVector::from_c(val, val_len)))
                }
            }
        }
    }

    /// Read the value stored at `key` in column family `cf`.
    pub fn get_cf(&self,
                  cf: DBCFHandle,
                  key: &[u8])
                  -> Result<Option<DBVector>, String> {
        unsafe {
            let readopts = rocksdb_ffi::rocksdb_readoptions_create();
            if readopts.0.is_null() {
                return Err("Unable to create rocksdb read options. This is \
                            a fairly trivial call, and its failure may be \
                            indicative of a mis-compiled or mis-loaded \
                            rocksdb library."
                               .to_string());
            }
            let val_len: size_t = 0;
            let val_len_ptr = &val_len as *const size_t;
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            let val =
                rocksdb_ffi::rocksdb_get_cf(self.inner,
                                            readopts.clone(),
                                            cf,
                                            key.as_ptr(),
                                            key.len() as size_t,
                                            val_len_ptr,
                                            err_ptr) as *mut u8;
            rocksdb_ffi::rocksdb_readoptions_destroy(readopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            match val.is_null() {
                true => Ok(None),
                false => {
                    Ok(Some(DBVector::from_c(val, val_len)))
                }
            }
        }
    }

    /// Create a new column family and register its handle in `self.cfs`.
    pub fn create_cf(&mut self,
                     name: &str,
                     opts: &Options)
                     -> Result<DBCFHandle, String> {
        let cname = match CString::new(name.as_bytes()) {
            Ok(c) => c,
            Err(_) => return Err("Failed to convert path to CString when \
                                  opening rocksdb"
                                     .to_string()),
        };
        let cname_ptr = cname.as_ptr();
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        let cf_handler = unsafe {
            let cf_handler =
                rocksdb_ffi::rocksdb_create_column_family(self.inner,
                                                          opts.inner,
                                                          cname_ptr as *const _,
                                                          err_ptr);
            self.cfs.insert(name.to_string(), cf_handler);
            cf_handler
        };
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(cf_handler)
    }

    /// Drop a column family previously opened or created on this DB.
    pub fn drop_cf(&mut self, name: &str) -> Result<(), String> {
        let cf = self.cfs.get(name);
        if cf.is_none() {
            return Err(format!("Invalid column family: {}", name).to_string());
        }
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        unsafe {
            rocksdb_ffi::rocksdb_drop_column_family(self.inner,
                                                    *cf.unwrap(),
                                                    err_ptr);
        }
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }

    /// Look up a column-family handle by name.
    pub fn cf_handle(&self, name: &str) -> Option<&DBCFHandle> {
        self.cfs.get(name)
    }

    /// Iterate the default column family, starting per `mode`.
    pub fn iterator(&self, mode: IteratorMode) -> DBIterator {
        let opts = ReadOptions::new();
        DBIterator::new(&self, &opts, mode)
    }

    /// Iterate the given column family, starting per `mode`.
    pub fn iterator_cf(&self, cf_handle: DBCFHandle, mode: IteratorMode) -> Result<DBIterator, String> {
        let opts = ReadOptions::new();
        DBIterator::new_cf(&self, cf_handle, &opts, mode)
    }

    /// Take a point-in-time snapshot of this database.
    pub fn snapshot(&self) -> Snapshot {
        Snapshot::new(self)
    }
}
// Writable for DB: each call creates default write options, performs the
// operation immediately, destroys the options, and surfaces any C-side error.
impl Writable for DB {
    /// Store `value` under `key` in the default column family.
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_put(self.inner,
                                     writeopts.clone(),
                                     key.as_ptr(),
                                     key.len() as size_t,
                                     value.as_ptr(),
                                     value.len() as size_t,
                                     err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }

    /// Store `value` under `key` in column family `cf`.
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_put_cf(self.inner,
                                        writeopts.clone(),
                                        cf,
                                        key.as_ptr(),
                                        key.len() as size_t,
                                        value.as_ptr(),
                                        value.len() as size_t,
                                        err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }

    /// Merge `value` into `key` via RocksDB's merge operation.
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_merge(self.inner,
                                       writeopts.clone(),
                                       key.as_ptr(),
                                       key.len() as size_t,
                                       value.as_ptr(),
                                       value.len() as size_t,
                                       err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }

    /// Merge `value` into `key` within column family `cf`.
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_merge_cf(self.inner,
                                          writeopts.clone(),
                                          cf,
                                          key.as_ptr(),
                                          key.len() as size_t,
                                          value.as_ptr(),
                                          value.len() as size_t,
                                          err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }

    /// Remove `key` from the default column family.
    fn delete(&self, key: &[u8]) -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_delete(self.inner,
                                        writeopts.clone(),
                                        key.as_ptr(),
                                        key.len() as size_t,
                                        err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }

    /// Remove `key` from column family `cf`.
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_delete_cf(self.inner,
                                           writeopts.clone(),
                                           cf,
                                           key.as_ptr(),
                                           key.len() as size_t,
                                           err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }
}
impl WriteBatch {
    /// Create a new, empty batch; fill it via the `Writable` impl and apply
    /// it with `DB::write`.
    pub fn new() -> WriteBatch {
        WriteBatch {
            inner: unsafe { rocksdb_ffi::rocksdb_writebatch_create() },
        }
    }
}
impl Drop for WriteBatch {
    fn drop(&mut self) {
        // Free the native batch handle.
        unsafe { rocksdb_ffi::rocksdb_writebatch_destroy(self.inner) }
    }
}
impl Drop for DB {
    fn drop(&mut self) {
        unsafe {
            // Column-family handles must be destroyed before closing the DB.
            for (_, cf) in self.cfs.iter() {
                rocksdb_ffi::rocksdb_column_family_handle_destroy(*cf);
            }
            rocksdb_ffi::rocksdb_close(self.inner);
        }
    }
}
// Writable for WriteBatch: operations are only queued here; nothing touches
// the database until `DB::write` applies the batch, so these always succeed.
impl Writable for WriteBatch {
    /// Queue a put into the default column family.
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_put(self.inner,
                                                key.as_ptr(),
                                                key.len() as size_t,
                                                value.as_ptr(),
                                                value.len() as size_t);
            Ok(())
        }
    }

    /// Queue a put into column family `cf`.
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_put_cf(self.inner,
                                                   cf,
                                                   key.as_ptr(),
                                                   key.len() as size_t,
                                                   value.as_ptr(),
                                                   value.len() as size_t);
            Ok(())
        }
    }

    /// Queue a merge into the default column family.
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_merge(self.inner,
                                                  key.as_ptr(),
                                                  key.len() as size_t,
                                                  value.as_ptr(),
                                                  value.len() as size_t);
            Ok(())
        }
    }

    /// Queue a merge into column family `cf`.
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_merge_cf(self.inner,
                                                     cf,
                                                     key.as_ptr(),
                                                     key.len() as size_t,
                                                     value.as_ptr(),
                                                     value.len() as size_t);
            Ok(())
        }
    }

    /// Queue a delete from the default column family.
    fn delete(&self, key: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_delete(self.inner,
                                                   key.as_ptr(),
                                                   key.len() as size_t);
            Ok(())
        }
    }

    /// Queue a delete from column family `cf`.
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_delete_cf(self.inner,
                                                      cf,
                                                      key.as_ptr(),
                                                      key.len() as size_t);
            Ok(())
        }
    }
}
impl Drop for ReadOptions {
    fn drop(&mut self) {
        // Free the native read-options handle.
        unsafe { rocksdb_ffi::rocksdb_readoptions_destroy(self.inner) }
    }
}
impl ReadOptions {
    /// Allocate a fresh native read-options handle.
    fn new() -> ReadOptions {
        unsafe {
            ReadOptions { inner: rocksdb_ffi::rocksdb_readoptions_create() }
        }
    }
    // TODO add snapshot setting here
    // TODO add snapshot wrapper structs with proper destructors;
    // that struct needs an "iterator" impl too.

    /// Toggle whether reads populate RocksDB's block cache.
    fn fill_cache(&mut self, v: bool) {
        unsafe {
            rocksdb_ffi::rocksdb_readoptions_set_fill_cache(self.inner, v);
        }
    }

    /// Pin subsequent reads to the given snapshot's view.
    fn set_snapshot(&mut self, snapshot: &Snapshot) {
        unsafe {
            rocksdb_ffi::rocksdb_readoptions_set_snapshot(self.inner,
                                                          snapshot.inner);
        }
    }
}
/// An owned byte buffer returned by RocksDB reads; freed via `libc::free`
/// on drop (see the Drop impl).
pub struct DBVector {
    base: *mut u8,
    len: usize,
}
impl Deref for DBVector {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        // View the owned buffer as a byte slice for the vector's lifetime.
        unsafe { slice::from_raw_parts(self.base, self.len) }
    }
}
impl Drop for DBVector {
    fn drop(&mut self) {
        // The buffer came from the C allocator, so it must be freed with
        // libc::free rather than Rust's allocator.
        unsafe {
            libc::free(self.base as *mut libc::c_void);
        }
    }
}
impl DBVector {
    /// Wrap a buffer returned by the RocksDB C API.
    ///
    /// The returned `DBVector` takes ownership of `val` and frees it with
    /// `libc::free` on drop, so `val` must point to `val_len` bytes allocated
    /// by the C allocator and must not be freed elsewhere.
    pub fn from_c(val: *mut u8, val_len: size_t) -> DBVector {
        // Constructing the struct performs no unsafe operation, so the
        // `unsafe` block the original wrapped this in was redundant
        // (it triggered the `unused_unsafe` lint).
        DBVector {
            base: val,
            len: val_len as usize,
        }
    }

    /// View the buffer as UTF-8, returning `None` if it is not valid UTF-8.
    pub fn to_utf8<'a>(&'a self) -> Option<&'a str> {
        from_utf8(self.deref()).ok()
    }
}
#[test]
fn external() {
    // Round-trip put/get/delete against a real on-disk database, then
    // destroy it. The inner scope closes the DB before destroy().
    let path = "_rust_rocksdb_externaltest";
    {
        let mut db = DB::open_default(path).unwrap();
        let p = db.put(b"k1", b"v1111");
        assert!(p.is_ok());
        let r: Result<Option<DBVector>, String> = db.get(b"k1");
        assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        assert!(db.delete(b"k1").is_ok());
        assert!(db.get(b"k1").unwrap().is_none());
    }
    let opts = Options::new();
    let result = DB::destroy(&opts, path);
    assert!(result.is_ok());
}
#[test]
fn errors_do_stuff() {
    let path = "_rust_rocksdb_error";
    let mut db = DB::open_default(path).unwrap();
    let opts = Options::new();
    // The DB will still be open when we try to destroy and the lock should fail
    match DB::destroy(&opts, path) {
        Err(ref s) => assert!(s ==
                              "IO error: lock _rust_rocksdb_error/LOCK: No \
                               locks available"),
        Ok(_) => panic!("should fail"),
    }
}
#[test]
fn writebatch_works() {
    let path = "_rust_rocksdb_writebacktest";
    {
        let mut db = DB::open_default(path).unwrap();
        {
            // test put
            let mut batch = WriteBatch::new();
            assert!(db.get(b"k1").unwrap().is_none());
            batch.put(b"k1", b"v1111");
            // Queued but not yet applied — the DB must not see it.
            assert!(db.get(b"k1").unwrap().is_none());
            let p = db.write(batch);
            assert!(p.is_ok());
            let r: Result<Option<DBVector>, String> = db.get(b"k1");
            assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        }
        {
            // test delete
            let mut batch = WriteBatch::new();
            batch.delete(b"k1");
            let p = db.write(batch);
            assert!(p.is_ok());
            assert!(db.get(b"k1").unwrap().is_none());
        }
    }
    let opts = Options::new();
    assert!(DB::destroy(&opts, path).is_ok());
}
/// Smoke-tests forward iteration over three inserted keys.
#[test]
fn iterator_test() {
    let path = "_rust_rocksdb_iteratortest";
    {
        // `put` and `iterator` take `&self`, so neither binding needs
        // `mut` (fixes `unused_mut` warnings).
        let db = DB::open_default(path).unwrap();
        let p = db.put(b"k1", b"v1111");
        assert!(p.is_ok());
        let p = db.put(b"k2", b"v2222");
        assert!(p.is_ok());
        let p = db.put(b"k3", b"v3333");
        assert!(p.is_ok());
        let iter = db.iterator(IteratorMode::Start);
        for (k, v) in iter {
            println!("Hello {}: {}",
                     from_utf8(&*k).unwrap(),
                     from_utf8(&*v).unwrap());
        }
    }
    let opts = Options::new();
    assert!(DB::destroy(&opts, path).is_ok());
}
Add a DB::put_opt method for use with caller-supplied WriteOptions.
//
// Copyright 2014 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
extern crate libc;
use std::collections::BTreeMap;
use std::ffi::{CStr, CString};
use std::fs;
use std::io;
use std::ops::Deref;
use std::path::Path;
use std::slice;
use std::str::from_utf8;
use self::libc::{c_void, size_t};
use rocksdb_ffi::{self, DBCFHandle, error_message};
use rocksdb_options::{Options,WriteOptions};
/// Handle to an open RocksDB database plus its column family handles.
pub struct DB {
    inner: rocksdb_ffi::DBInstance,
    // Column family name -> handle, populated on open/create.
    cfs: BTreeMap<String, DBCFHandle>,
}
// SAFETY(review): these declare the raw handle shareable across threads.
// The RocksDB C API is generally documented as thread-safe for reads and
// writes, but confirm that every method exposed here qualifies.
unsafe impl Send for DB {}
unsafe impl Sync for DB {}
/// A set of writes queued in memory and applied atomically by `DB::write`.
pub struct WriteBatch {
    inner: rocksdb_ffi::DBWriteBatch,
}
/// Owned wrapper around C read options; freed in its `Drop` impl.
pub struct ReadOptions {
    inner: rocksdb_ffi::DBReadOptions,
}
/// Point-in-time view of the database; released on drop. The borrow on
/// the `DB` keeps it alive for the snapshot's lifetime.
pub struct Snapshot<'a> {
    db: &'a DB,
    inner: rocksdb_ffi::DBSnapshot,
}
/// Wrapper over a raw RocksDB iterator; borrows the `DB` so the handle
/// cannot outlive it.
pub struct DBIterator<'a> {
    db: &'a DB,
    inner: rocksdb_ffi::DBIterator,
    // Travel direction used by `Iterator::next`.
    direction: Direction,
    // True right after a seek: `next()` must yield the current entry
    // without advancing first.
    just_seeked: bool,
}
/// Direction of travel for a `DBIterator`.
// NOTE(review): the lowercase variant names violate Rust's CamelCase
// convention, but renaming them would break every existing caller, so
// they are left untouched here.
pub enum Direction {
    forward,
    reverse,
}
impl<'a> Iterator for DBIterator<'a> {
    // Each item is an owned (key, value) pair copied out of the C buffers.
    type Item = (Box<[u8]>, Box<[u8]>);
    fn next(&mut self) -> Option<(Box<[u8]>, Box<[u8]>)> {
        let native_iter = self.inner;
        // After a seek the iterator already points at the first entry,
        // so only advance on subsequent calls.
        if !self.just_seeked {
            match self.direction {
                Direction::forward => unsafe { rocksdb_ffi::rocksdb_iter_next(native_iter) },
                Direction::reverse => unsafe { rocksdb_ffi::rocksdb_iter_prev(native_iter) },
            }
        } else {
            self.just_seeked = false;
        }
        if unsafe { rocksdb_ffi::rocksdb_iter_valid(native_iter) } {
            // Lengths come back through out-parameters.
            let mut key_len: size_t = 0;
            let key_len_ptr: *mut size_t = &mut key_len;
            let mut val_len: size_t = 0;
            let val_len_ptr: *mut size_t = &mut val_len;
            let key_ptr = unsafe {
                rocksdb_ffi::rocksdb_iter_key(native_iter, key_len_ptr)
            };
            let key = unsafe {
                slice::from_raw_parts(key_ptr, key_len as usize)
            };
            let val_ptr = unsafe {
                rocksdb_ffi::rocksdb_iter_value(native_iter, val_len_ptr)
            };
            let val = unsafe {
                slice::from_raw_parts(val_ptr, val_len as usize)
            };
            // Copy out of the C-owned buffers, which are only valid
            // until the iterator is advanced or destroyed.
            Some((key.to_vec().into_boxed_slice(),
                  val.to_vec().into_boxed_slice()))
        } else {
            None
        }
    }
}
/// Initial position for an iterator: the first key, the last key, or a
/// seek to `key` travelling in the given direction.
pub enum IteratorMode<'a> {
    Start,
    End,
    From(&'a [u8], Direction),
}
impl<'a> DBIterator<'a> {
    // Creates an iterator over the whole DB; `set_mode` performs the
    // initial seek and fixes up `direction`/`just_seeked`.
    fn new<'b>(db: &'a DB, readopts: &'b ReadOptions, mode: IteratorMode) -> DBIterator<'a> {
        unsafe {
            let iterator = rocksdb_ffi::rocksdb_create_iterator(db.inner,
                                                                readopts.inner);
            let mut rv = DBIterator {
                db: db,
                inner: iterator,
                direction: Direction::forward, // blown away by set_mode()
                just_seeked: false,
            };
            rv.set_mode(mode);
            rv
        }
    }
    /// Re-seeks the iterator according to `mode` and records the new
    /// direction of travel for subsequent `next()` calls.
    pub fn set_mode(&mut self, mode: IteratorMode) {
        unsafe {
            match mode {
                IteratorMode::Start => {
                    rocksdb_ffi::rocksdb_iter_seek_to_first(self.inner);
                    self.direction = Direction::forward;
                },
                IteratorMode::End => {
                    rocksdb_ffi::rocksdb_iter_seek_to_last(self.inner);
                    self.direction = Direction::reverse;
                },
                IteratorMode::From(key, dir) => {
                    rocksdb_ffi::rocksdb_iter_seek(self.inner,
                                                   key.as_ptr(),
                                                   key.len() as size_t);
                    self.direction = dir;
                }
            };
            // Tell `next()` not to advance past the freshly-seeked entry.
            self.just_seeked = true;
        }
    }
    // Column-family variant of `new`.
    fn new_cf(db: &'a DB,
              cf_handle: DBCFHandle,
              readopts: &ReadOptions,
              mode: IteratorMode)
              -> Result<DBIterator<'a>, String> {
        unsafe {
            let iterator =
                rocksdb_ffi::rocksdb_create_iterator_cf(db.inner,
                                                        readopts.inner,
                                                        cf_handle);
            let mut rv = DBIterator {
                db: db,
                inner: iterator,
                direction: Direction::forward, // blown away by set_mode()
                just_seeked: false,
            };
            rv.set_mode(mode);
            Ok(rv)
        }
    }
}
impl<'a> Drop for DBIterator<'a> {
    fn drop(&mut self) {
        // Releases the C-side iterator and any buffers it pins.
        unsafe {
            rocksdb_ffi::rocksdb_iter_destroy(self.inner);
        }
    }
}
impl<'a> Snapshot<'a> {
    /// Captures the database's current state; the stored borrow keeps
    /// the `DB` alive until the snapshot is dropped.
    pub fn new(db: &DB) -> Snapshot {
        let snapshot = unsafe {
            rocksdb_ffi::rocksdb_create_snapshot(db.inner)
        };
        Snapshot {
            db: db,
            inner: snapshot,
        }
    }
    /// Iterates over the snapshot's frozen view instead of the live DB.
    pub fn iterator(&self, mode: IteratorMode) -> DBIterator {
        let mut readopts = ReadOptions::new();
        readopts.set_snapshot(self);
        DBIterator::new(self.db, &readopts, mode)
    }
}
impl<'a> Drop for Snapshot<'a> {
    fn drop(&mut self) {
        // Snapshots must be released against the DB that created them.
        unsafe {
            rocksdb_ffi::rocksdb_release_snapshot(self.db.inner, self.inner);
        }
    }
}
// This is for the DB and write batches to share the same API
/// Common write operations. Implemented by `DB` (applied immediately)
/// and `WriteBatch` (queued until `DB::write`). The `_cf` variants
/// target a specific column family.
pub trait Writable {
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String>;
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String>;
    fn delete(&self, key: &[u8]) -> Result<(), String>;
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String>;
}
impl DB {
/// Opens (creating if necessary) a database at `path` with default options.
pub fn open_default(path: &str) -> Result<DB, String> {
    let mut default_opts = Options::new();
    default_opts.create_if_missing(true);
    DB::open(&default_opts, path)
}
/// Opens a database at `path` with only the default column family.
pub fn open(opts: &Options, path: &str) -> Result<DB, String> {
    DB::open_cf(opts, path, &[])
}
/// Opens a database with the given column families.
///
/// The `"default"` column family is always opened in addition to any
/// names in `cfs`; its handle (and the others) are cached on the
/// returned `DB`. Errors are reported as strings, matching the C API.
pub fn open_cf(opts: &Options,
               path: &str,
               cfs: &[&str])
               -> Result<DB, String> {
    let cpath = match CString::new(path.as_bytes()) {
        Ok(c) => c,
        Err(_) => {
            return Err("Failed to convert path to CString when \
                        opening rocksdb"
                           .to_string())
        }
    };
    let cpath_ptr = cpath.as_ptr();
    let ospath = Path::new(path);
    // The io::Error was previously bound as `e` but never used
    // (unused-variable warning); the original message is preserved.
    if fs::create_dir_all(&ospath).is_err() {
        return Err("Failed to create rocksdb directory.".to_string());
    }
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    let db: rocksdb_ffi::DBInstance;
    // Renamed from `cfMap` to satisfy Rust's snake_case convention.
    let mut cf_map = BTreeMap::new();
    if cfs.is_empty() {
        unsafe {
            db = rocksdb_ffi::rocksdb_open(opts.inner, cpath_ptr as *const _, err_ptr);
        }
    } else {
        let mut cfs_v = cfs.to_vec();
        // Always open the default column family
        if !cfs_v.contains(&"default") {
            cfs_v.push("default");
        }
        // We need to store our CStrings in an intermediate vector
        // so that their pointers remain valid.
        let c_cfs: Vec<CString> = cfs_v.iter()
                                       .map(|cf| CString::new(cf.as_bytes()).unwrap())
                                       .collect();
        let cfnames: Vec<*const _> = c_cfs.iter()
                                          .map(|cf| cf.as_ptr())
                                          .collect();
        // These handles will be populated by DB.
        // NOTE(review): C writes through `handles`, a *const pointer
        // derived from this Vec — confirm the ffi declaration.
        let mut cfhandles: Vec<rocksdb_ffi::DBCFHandle> =
            cfs_v.iter()
                 .map(|_| rocksdb_ffi::DBCFHandle(0 as *mut c_void))
                 .collect();
        // TODO(tyler) allow options to be passed in.
        let cfopts: Vec<rocksdb_ffi::DBOptions> =
            cfs_v.iter()
                 .map(|_| unsafe { rocksdb_ffi::rocksdb_options_create() })
                 .collect();
        // Prepare to ship to C.
        let copts: *const rocksdb_ffi::DBOptions = cfopts.as_ptr();
        let handles: *const rocksdb_ffi::DBCFHandle = cfhandles.as_ptr();
        let nfam = cfs_v.len();
        unsafe {
            db = rocksdb_ffi::rocksdb_open_column_families(opts.inner, cpath_ptr as *const _,
                                                           nfam as libc::c_int,
                                                           cfnames.as_ptr() as *const _,
                                                           copts, handles, err_ptr);
        }
        for handle in cfhandles.iter() {
            if handle.0.is_null() {
                return Err("Received null column family handle from DB."
                               .to_string());
            }
        }
        for (n, h) in cfs_v.iter().zip(cfhandles) {
            cf_map.insert(n.to_string(), h);
        }
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    if db.0.is_null() {
        return Err("Could not initialize database.".to_string());
    }
    Ok(DB {
        inner: db,
        cfs: cf_map,
    })
}
/// Destroys the on-disk database at `path`. Fails if the DB is open.
pub fn destroy(opts: &Options, path: &str) -> Result<(), String> {
    let cpath = CString::new(path.as_bytes()).unwrap();
    let cpath_ptr = cpath.as_ptr();
    // (removed an unused `ospath` local that triggered a warning)
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_destroy_db(opts.inner, cpath_ptr as *const _, err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(())
}
/// Attempts to repair a damaged database at `path`.
pub fn repair(opts: Options, path: &str) -> Result<(), String> {
    let cpath = CString::new(path.as_bytes()).unwrap();
    let cpath_ptr = cpath.as_ptr();
    // (removed an unused `ospath` local that triggered a warning)
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_repair_db(opts.inner, cpath_ptr as *const _, err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(())
}
/// Atomically applies a `WriteBatch` using default write options.
pub fn write(&self, batch: WriteBatch) -> Result<(), String> {
    let writeopts = unsafe { rocksdb_ffi::rocksdb_writeoptions_create() };
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_write(self.inner,
                                   writeopts.clone(),
                                   batch.inner,
                                   err_ptr);
        // The temporary write options must be freed regardless of the
        // outcome, so destroy them before inspecting `err`.
        rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    // Idiomatic tail expression instead of `return Ok(());`.
    Ok(())
}
/// Fetches the value stored under `key`, or `Ok(None)` if absent.
pub fn get(&self, key: &[u8]) -> Result<Option<DBVector>, String> {
    unsafe {
        let readopts = rocksdb_ffi::rocksdb_readoptions_create();
        if readopts.0.is_null() {
            return Err("Unable to create rocksdb read options. This is \
                        a fairly trivial call, and its failure may be \
                        indicative of a mis-compiled or mis-loaded \
                        rocksdb library."
                           .to_string());
        }
        // The value length comes back through an out-parameter.
        // NOTE(review): the C side writes through this `*const size_t`
        // — presumably matching the ffi declaration; confirm.
        let val_len: size_t = 0;
        let val_len_ptr = &val_len as *const size_t;
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        let val =
            rocksdb_ffi::rocksdb_get(self.inner,
                                     readopts.clone(),
                                     key.as_ptr(),
                                     key.len() as size_t,
                                     val_len_ptr,
                                     err_ptr) as *mut u8;
        rocksdb_ffi::rocksdb_readoptions_destroy(readopts);
        if !err.is_null() {
            return Err(error_message(err));
        }
        // A null pointer from C means "key not found".
        match val.is_null() {
            true => Ok(None),
            false => {
                // `DBVector` takes ownership and frees the buffer on drop.
                Ok(Some(DBVector::from_c(val, val_len)))
            }
        }
    }
}
/// Like `get`, but reads from the given column family.
pub fn get_cf(&self,
              cf: DBCFHandle,
              key: &[u8])
              -> Result<Option<DBVector>, String> {
    unsafe {
        let readopts = rocksdb_ffi::rocksdb_readoptions_create();
        if readopts.0.is_null() {
            return Err("Unable to create rocksdb read options. This is \
                        a fairly trivial call, and its failure may be \
                        indicative of a mis-compiled or mis-loaded \
                        rocksdb library."
                           .to_string());
        }
        // Out-parameter for the value length (see note on `get`).
        let val_len: size_t = 0;
        let val_len_ptr = &val_len as *const size_t;
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        let val =
            rocksdb_ffi::rocksdb_get_cf(self.inner,
                                        readopts.clone(),
                                        cf,
                                        key.as_ptr(),
                                        key.len() as size_t,
                                        val_len_ptr,
                                        err_ptr) as *mut u8;
        rocksdb_ffi::rocksdb_readoptions_destroy(readopts);
        if !err.is_null() {
            return Err(error_message(err));
        }
        // Null pointer means "key not found" in this column family.
        match val.is_null() {
            true => Ok(None),
            false => {
                Ok(Some(DBVector::from_c(val, val_len)))
            }
        }
    }
}
/// Creates a new column family and caches its handle in `self.cfs`.
pub fn create_cf(&mut self,
                 name: &str,
                 opts: &Options)
                 -> Result<DBCFHandle, String> {
    let cname = match CString::new(name.as_bytes()) {
        Ok(c) => c,
        Err(_) => return Err("Failed to convert path to CString when \
                              opening rocksdb"
                                 .to_string()),
    };
    let cname_ptr = cname.as_ptr();
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    let cf_handler = unsafe {
        let cf_handler =
            rocksdb_ffi::rocksdb_create_column_family(self.inner,
                                                      opts.inner,
                                                      cname_ptr as *const _,
                                                      err_ptr);
        // NOTE(review): the handle is inserted into the cache *before*
        // the error check below, so a failed create still caches a
        // possibly-invalid handle — confirm whether that is intended.
        self.cfs.insert(name.to_string(), cf_handler);
        cf_handler
    };
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(cf_handler)
}
/// Drops the named column family, erroring if it was never opened or
/// created on this handle.
pub fn drop_cf(&mut self, name: &str) -> Result<(), String> {
    let cf = self.cfs.get(name);
    if cf.is_none() {
        // `format!` already yields a String; the previous
        // `.to_string()` on it was redundant (clippy).
        return Err(format!("Invalid column family: {}", name));
    }
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_drop_column_family(self.inner,
                                                *cf.unwrap(),
                                                err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(())
}
/// Looks up the cached handle for a previously opened/created column family.
pub fn cf_handle(&self, name: &str) -> Option<&DBCFHandle> {
    self.cfs.get(name)
}
/// Returns an iterator over the whole database, positioned per `mode`,
/// using freshly-created default read options.
pub fn iterator(&self, mode: IteratorMode) -> DBIterator {
    let default_readopts = ReadOptions::new();
    DBIterator::new(self, &default_readopts, mode)
}
/// Like `iterator`, but scoped to a single column family.
pub fn iterator_cf(&self, cf_handle: DBCFHandle, mode: IteratorMode) -> Result<DBIterator, String> {
    let opts = ReadOptions::new();
    DBIterator::new_cf(&self, cf_handle, &opts, mode)
}
/// Takes a consistent point-in-time snapshot; released when dropped.
pub fn snapshot(&self) -> Snapshot {
    Snapshot::new(self)
}
/// Stores `value` under `key` using caller-supplied `WriteOptions`
/// (e.g. to disable the WAL). `Writable::put` delegates here with
/// defaults.
///
/// Made `pub`: the method was added "for use with WriteOptions" but was
/// private, so no external caller could actually supply options. This
/// widening is backward-compatible.
pub fn put_opt(&self, key: &[u8], value: &[u8], writeopts: &WriteOptions) -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_put(self.inner,
                                 writeopts.inner,
                                 key.as_ptr(),
                                 key.len() as size_t,
                                 value.as_ptr(),
                                 value.len() as size_t,
                                 err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
}
// `Writable` for `DB`: except for `put` (which reuses `put_opt`), each
// operation creates temporary default write options, performs the FFI
// call, frees the options, then checks the C error out-parameter.
impl Writable for DB {
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        self.put_opt(key, value, &WriteOptions::new())
    }
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_put_cf(self.inner,
                                        writeopts.clone(),
                                        cf,
                                        key.as_ptr(),
                                        key.len() as size_t,
                                        value.as_ptr(),
                                        value.len() as size_t,
                                        err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }
    // Applies the database's configured merge operator to `key`.
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_merge(self.inner,
                                       writeopts.clone(),
                                       key.as_ptr(),
                                       key.len() as size_t,
                                       value.as_ptr(),
                                       value.len() as size_t,
                                       err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_merge_cf(self.inner,
                                          writeopts.clone(),
                                          cf,
                                          key.as_ptr(),
                                          key.len() as size_t,
                                          value.as_ptr(),
                                          value.len() as size_t,
                                          err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }
    fn delete(&self, key: &[u8]) -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_delete(self.inner,
                                        writeopts.clone(),
                                        key.as_ptr(),
                                        key.len() as size_t,
                                        err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String> {
        unsafe {
            let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
            let mut err: *const i8 = 0 as *const i8;
            let err_ptr: *mut *const i8 = &mut err;
            rocksdb_ffi::rocksdb_delete_cf(self.inner,
                                           writeopts.clone(),
                                           cf,
                                           key.as_ptr(),
                                           key.len() as size_t,
                                           err_ptr);
            rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
            if !err.is_null() {
                return Err(error_message(err));
            }
            Ok(())
        }
    }
}
impl WriteBatch {
    /// Creates an empty batch; operations queue up until `DB::write`.
    pub fn new() -> WriteBatch {
        WriteBatch {
            inner: unsafe { rocksdb_ffi::rocksdb_writebatch_create() },
        }
    }
}
impl Drop for WriteBatch {
    fn drop(&mut self) {
        // Frees the C-side batch (including any unwritten operations).
        unsafe { rocksdb_ffi::rocksdb_writebatch_destroy(self.inner) }
    }
}
impl Drop for DB {
    fn drop(&mut self) {
        unsafe {
            // Column family handles must be destroyed before the DB
            // itself is closed.
            for (_, cf) in self.cfs.iter() {
                rocksdb_ffi::rocksdb_column_family_handle_destroy(*cf);
            }
            rocksdb_ffi::rocksdb_close(self.inner);
        }
    }
}
// `Writable` for `WriteBatch`: operations are only queued in memory and
// the C calls report no errors, hence the unconditional `Ok(())`s.
impl Writable for WriteBatch {
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_put(self.inner,
                                                key.as_ptr(),
                                                key.len() as size_t,
                                                value.as_ptr(),
                                                value.len() as size_t);
            Ok(())
        }
    }
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_put_cf(self.inner,
                                                   cf,
                                                   key.as_ptr(),
                                                   key.len() as size_t,
                                                   value.as_ptr(),
                                                   value.len() as size_t);
            Ok(())
        }
    }
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_merge(self.inner,
                                                  key.as_ptr(),
                                                  key.len() as size_t,
                                                  value.as_ptr(),
                                                  value.len() as size_t);
            Ok(())
        }
    }
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_merge_cf(self.inner,
                                                     cf,
                                                     key.as_ptr(),
                                                     key.len() as size_t,
                                                     value.as_ptr(),
                                                     value.len() as size_t);
            Ok(())
        }
    }
    fn delete(&self, key: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_delete(self.inner,
                                                   key.as_ptr(),
                                                   key.len() as size_t);
            Ok(())
        }
    }
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_delete_cf(self.inner,
                                                      cf,
                                                      key.as_ptr(),
                                                      key.len() as size_t);
            Ok(())
        }
    }
}
impl Drop for ReadOptions {
    fn drop(&mut self) {
        // Frees the C-side read options object.
        unsafe { rocksdb_ffi::rocksdb_readoptions_destroy(self.inner) }
    }
}
impl ReadOptions {
    /// Allocates default read options on the C side.
    fn new() -> ReadOptions {
        unsafe {
            ReadOptions { inner: rocksdb_ffi::rocksdb_readoptions_create() }
        }
    }
    // TODO add snapshot setting here
    // TODO add snapshot wrapper structs with proper destructors;
    // that struct needs an "iterator" impl too.
    /// Toggles whether reads with these options populate the block cache.
    fn fill_cache(&mut self, v: bool) {
        unsafe {
            rocksdb_ffi::rocksdb_readoptions_set_fill_cache(self.inner, v);
        }
    }
    /// Pins reads with these options to the snapshot's point-in-time view.
    fn set_snapshot(&mut self, snapshot: &Snapshot) {
        unsafe {
            rocksdb_ffi::rocksdb_readoptions_set_snapshot(self.inner,
                                                          snapshot.inner);
        }
    }
}
/// Owned byte buffer handed back by the RocksDB C API (e.g. from `get`).
/// The underlying allocation is freed with `libc::free` on drop.
pub struct DBVector {
    // Raw pointer to the C-allocated buffer.
    base: *mut u8,
    // Number of valid bytes at `base`.
    len: usize,
}
impl Deref for DBVector {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        // Valid for `self.len` bytes for as long as `self` owns `base`.
        unsafe { slice::from_raw_parts(self.base, self.len) }
    }
}
impl Drop for DBVector {
    fn drop(&mut self) {
        // The buffer came from the C library, so it is released with
        // libc::free rather than a Rust allocator.
        unsafe {
            libc::free(self.base as *mut libc::c_void);
        }
    }
}
impl DBVector {
    /// Wraps a buffer returned by the RocksDB C API.
    ///
    /// Ownership of `val` transfers to the returned `DBVector`, which
    /// frees it with `libc::free` on drop; `val` must therefore point to
    /// a live malloc'd allocation of at least `val_len` bytes.
    pub fn from_c(val: *mut u8, val_len: size_t) -> DBVector {
        // Constructing the struct is itself safe; the previous `unsafe`
        // block wrapping it was unnecessary (rustc `unused_unsafe`).
        DBVector {
            base: val,
            len: val_len as usize,
        }
    }
    /// Interprets the buffer as UTF-8, returning `None` if invalid.
    /// (Lifetime is now elided — identical signature after elision.)
    pub fn to_utf8(&self) -> Option<&str> {
        from_utf8(self.deref()).ok()
    }
}
/// Round-trips one key through put/get/delete, then destroys the DB.
#[test]
fn external() {
    let path = "_rust_rocksdb_externaltest";
    // Scope the DB so its lock is released before `destroy` runs.
    {
        // `put`/`get`/`delete` all take `&self`, so the binding does
        // not need `mut` (fixes an `unused_mut` warning).
        let db = DB::open_default(path).unwrap();
        let p = db.put(b"k1", b"v1111");
        assert!(p.is_ok());
        let r: Result<Option<DBVector>, String> = db.get(b"k1");
        assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        assert!(db.delete(b"k1").is_ok());
        assert!(db.get(b"k1").unwrap().is_none());
    }
    let opts = Options::new();
    let result = DB::destroy(&opts, path);
    assert!(result.is_ok());
}
/// Destroying a DB that is still open must fail on its file lock.
#[test]
fn errors_do_stuff() {
    let path = "_rust_rocksdb_error";
    // Bound as `_db` to keep the DB (and its lock) alive for the whole
    // test without tripping unused-variable / `unused_mut` warnings.
    let _db = DB::open_default(path).unwrap();
    let opts = Options::new();
    // The DB will still be open when we try to destroy and the lock should fail
    match DB::destroy(&opts, path) {
        Err(ref s) => assert!(s ==
                              "IO error: lock _rust_rocksdb_error/LOCK: No \
                               locks available"),
        Ok(_) => panic!("should fail"),
    }
}
/// Batch operations are invisible until `db.write` applies them atomically.
#[test]
fn writebatch_works() {
    let path = "_rust_rocksdb_writebacktest";
    {
        // All methods used here take `&self` (and `write` takes the
        // batch by value), so no binding needs `mut` — fixes
        // `unused_mut` warnings.
        let db = DB::open_default(path).unwrap();
        {
            // test put
            let batch = WriteBatch::new();
            assert!(db.get(b"k1").unwrap().is_none());
            batch.put(b"k1", b"v1111");
            // Still queued in the batch, so not visible yet.
            assert!(db.get(b"k1").unwrap().is_none());
            let p = db.write(batch);
            assert!(p.is_ok());
            let r: Result<Option<DBVector>, String> = db.get(b"k1");
            assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        }
        {
            // test delete
            let batch = WriteBatch::new();
            batch.delete(b"k1");
            let p = db.write(batch);
            assert!(p.is_ok());
            assert!(db.get(b"k1").unwrap().is_none());
        }
    }
    let opts = Options::new();
    assert!(DB::destroy(&opts, path).is_ok());
}
/// Smoke-tests forward iteration over three inserted keys.
#[test]
fn iterator_test() {
    let path = "_rust_rocksdb_iteratortest";
    {
        // `put` and `iterator` take `&self`, so neither binding needs
        // `mut` (fixes `unused_mut` warnings).
        let db = DB::open_default(path).unwrap();
        let p = db.put(b"k1", b"v1111");
        assert!(p.is_ok());
        let p = db.put(b"k2", b"v2222");
        assert!(p.is_ok());
        let p = db.put(b"k3", b"v3333");
        assert!(p.is_ok());
        let iter = db.iterator(IteratorMode::Start);
        for (k, v) in iter {
            println!("Hello {}: {}",
                     from_utf8(&*k).unwrap(),
                     from_utf8(&*v).unwrap());
        }
    }
    let opts = Options::new();
    assert!(DB::destroy(&opts, path).is_ok());
}
|
use std::collections::BTreeMap;
use config::{RouteHost, RoutePath};
/// Map host port to a route of arbitrary type
///
/// Returns destination route and relative path
pub fn route<'x, D>(host: &str, path: &'x str,
    table: &'x BTreeMap<RouteHost, BTreeMap<RoutePath, D>>)
    -> Option<(&'x D, &'x str, &'x str)>
{
    // TODO(tailhook) transform into range iteration when `btree_range` is
    // stable
    //
    // Iterating in reverse (descending) key order means the first match
    // found is the most specific entry in the table.
    for (route_host, sub_table) in table.iter().rev() {
        if route_host.matches(host) {
            for (route_path, result) in sub_table.iter().rev() {
                if path_match(&route_path, path) {
                    // Longest match is the last in reversed iteration
                    let prefix = route_path.as_ref().map(|x| &x[..]).unwrap_or("");
                    // (destination, matched prefix, remaining suffix)
                    return Some((result, prefix, &path[prefix.len()..]));
                }
            }
            // Only the first matching host table is consulted; a path
            // miss there does not fall through to later hosts.
            return None;
        }
    }
    return None;
}
/// Checks whether `value` matches the optional path `pattern`.
///
/// `None` matches every path. `Some(prefix)` matches when `value`
/// equals the prefix or continues it with `/` (a sub-path) or `?`
/// (a query string) — so `/static` matches `/static` and `/static/x`
/// but not `/staticfile`.
fn path_match<S: AsRef<str>>(pattern: &Option<S>, value: &str) -> bool {
    match *pattern {
        Some(ref prefix) => {
            let prefix = prefix.as_ref();
            if !value.starts_with(prefix) {
                return false;
            }
            let rest = &value[prefix.len()..];
            rest.is_empty() || rest.starts_with("/") || rest.starts_with("?")
        }
        None => true,
    }
}
/// Returns host with trimmed whitespace and without port number if exists
/// Returns the host with surrounding whitespace removed and any
/// trailing `:port` suffix stripped.
pub fn parse_host(host_header: &str) -> &str {
    // NOTE(review): this cuts at the *first* colon, which would also
    // truncate a bare IPv6 literal such as `[::1]:80` — confirm callers
    // never pass those.
    let without_port = match host_header.find(':') {
        Some(idx) => &host_header[..idx],
        None => host_header,
    };
    without_port.trim()
}
#[cfg(test)]
mod test {
    use config::{RouteHost, RoutePath};
    use super::route;
    // Exact host matching: other hosts must not match at all.
    #[test]
    fn route_host() {
        let table = vec![
            (RouteHost::Exact("example.com".into()), vec![
                (None, 1),
            ].into_iter().collect()),
        ].into_iter().collect();
        assert_eq!(route("example.com", "/hello", &table),
                   Some((&1, "", "/hello")));
        assert_eq!(route("example.com", "/", &table),
                   Some((&1, "", "/")));
        assert_eq!(route("example.org", "/hello", &table), None);
        assert_eq!(route("example.org", "/", &table), None);
    }
    // Mixed exact/suffix hosts: the most specific host table wins, and a
    // path miss in that table does NOT fall back to a less specific host
    // (hence the `None` results for www.example.com below).
    #[test]
    fn route_host_suffix() {
        // Routing table
        // example.com: 1
        // *.example.com: 2
        // *.example.com/static: 3
        // www.example.com/static/favicon.ico: 4
        // xxx.example.com: 5
        let table = vec![
            (RouteHost::Exact("example.com".into()), vec![
                (None, 1),
            ].into_iter().collect()),
            (RouteHost::Suffix(".example.com".into()), vec![
                (None, 2),
                (Some("/static".into()), 3),
            ].into_iter().collect()),
            (RouteHost::Exact("www.example.com".into()), vec![
                (Some("/static/favicon.ico".into()), 4),
            ].into_iter().collect()),
            (RouteHost::Exact("xxx.example.com".into()), vec![
                (None, 5),
            ].into_iter().collect()),
        ].into_iter().collect();
        assert_eq!(route("test.example.com", "/hello", &table),
                   Some((&2, "", "/hello")));
        assert_eq!(route("www.example.com", "/", &table), None);
        assert_eq!(route("www.example.com", "/static/i", &table), None);
        assert_eq!(route("www.example.com", "/static/favicon.ico", &table),
                   Some((&4, "/static/favicon.ico", "")));
        assert_eq!(route("xxx.example.com", "/hello", &table),
                   Some((&5, "", "/hello")));
        assert_eq!(route("example.org", "/", &table), None);
        assert_eq!(route("example.com", "/hello", &table),
                   Some((&1, "", "/hello")));
        assert_eq!(route("city.example.com", "/static", &table),
                   Some((&3, "/static", "")));
    }
    /*
    #[test]
    fn route_path() {
    let table = vec![
    (Route { host: "ex.com".into(), path: Some("/one".into()) }, 1),
    (Route { host: "ex.com".into(), path: None }, 0),
    (Route { host: "ex.com".into(), path: Some("/two".into()) }, 2),
    ].into_iter().collect();
    assert_eq!(route("ex.com", "/one", &table),
    Some((&1, "")));
    assert_eq!(route("ex.com", "/one/end", &table),
    Some((&1, "/end")));
    assert_eq!(route("ex.com", "/two", &table),
    Some((&2, "")));
    assert_eq!(route("ex.com","/two/some", &table),
    Some((&2, "/some")));
    assert_eq!(route("ex.com", "/three", &table),
    Some((&0, "/three")));
    assert_eq!(route("ex.com", "/", &table),
    Some((&0, "/")));
    assert_eq!(route("ex.org", "/one", &table), None);
    assert_eq!(route("subdomain.ex.org", "/two", &table), None);
    assert_eq!(route("example.org", "/", &table), None);
    assert_eq!(route("example.org", "/two", &table), None);
    }
    */
}
Add a regression test for another bad routing case
use std::collections::BTreeMap;
use config::{RouteHost, RoutePath};
/// Map host port to a route of arbitrary type
///
/// Returns destination route and relative path
pub fn route<'x, D>(host: &str, path: &'x str,
    table: &'x BTreeMap<RouteHost, BTreeMap<RoutePath, D>>)
    -> Option<(&'x D, &'x str, &'x str)>
{
    // TODO(tailhook) transform into range iteration when `btree_range` is
    // stable
    //
    // Reverse (descending) iteration makes the first hit the most
    // specific matching entry, for both hosts and paths.
    for (route_host, sub_table) in table.iter().rev() {
        if route_host.matches(host) {
            for (route_path, result) in sub_table.iter().rev() {
                if path_match(&route_path, path) {
                    // Longest match is the last in reversed iteration
                    let prefix = route_path.as_ref().map(|x| &x[..]).unwrap_or("");
                    // (destination, matched prefix, remaining suffix)
                    return Some((result, prefix, &path[prefix.len()..]));
                }
            }
            // A path miss in the chosen host table does not fall through
            // to later (less specific) host tables.
            return None;
        }
    }
    return None;
}
/// True when `value` matches the optional path `pattern`: `None`
/// matches everything, `Some(prefix)` matches the prefix itself or the
/// prefix continued by `/` (sub-path) or `?` (query string).
fn path_match<S: AsRef<str>>(pattern: &Option<S>, value: &str) -> bool {
    pattern.as_ref().map_or(true, |prefix| {
        let prefix = prefix.as_ref();
        // After stripping the prefix, the next byte (if any) must start
        // a sub-path or a query string.
        value.starts_with(prefix) &&
            value[prefix.len()..]
                .chars()
                .next()
                .map_or(true, |c| c == '/' || c == '?')
    })
}
/// Returns host with trimmed whitespace and without port number if exists
/// Returns the host with surrounding whitespace trimmed and any
/// trailing `:port` suffix removed.
pub fn parse_host(host_header: &str) -> &str {
    // Take everything before the first colon (the whole string when no
    // colon is present), then trim whitespace.
    // NOTE(review): a bare IPv6 literal like `[::1]:80` would also be
    // cut at its first colon — confirm callers never pass those.
    host_header.splitn(2, ':').next().unwrap_or(host_header).trim()
}
#[cfg(test)]
mod test {
    use config::{RouteHost, RoutePath};
    use super::route;
    // Exact host matching: other hosts must not match at all.
    #[test]
    fn route_host() {
        let table = vec![
            (RouteHost::Exact("example.com".into()), vec![
                (None, 1),
            ].into_iter().collect()),
        ].into_iter().collect();
        assert_eq!(route("example.com", "/hello", &table),
                   Some((&1, "", "/hello")));
        assert_eq!(route("example.com", "/", &table),
                   Some((&1, "", "/")));
        assert_eq!(route("example.org", "/hello", &table), None);
        assert_eq!(route("example.org", "/", &table), None);
    }
    #[test]
    fn route_host_suffix() {
        // Routing table
        // example.com: 1
        // *.example.com: 2
        // *.example.com/static: 3
        // www.example.com/static/favicon.ico: 4
        // xxx.example.com: 5
        // *.aaa.example.com: 6
        let table = vec![
            (RouteHost::Exact("example.com".into()), vec![
                (None, 1),
            ].into_iter().collect()),
            (RouteHost::Suffix(".example.com".into()), vec![
                (None, 2),
                (Some("/static".into()), 3),
            ].into_iter().collect()),
            (RouteHost::Exact("www.example.com".into()), vec![
                (Some("/static/favicon.ico".into()), 4),
            ].into_iter().collect()),
            (RouteHost::Exact("xxx.example.com".into()), vec![
                (None, 5),
            ].into_iter().collect()),
            // NOTE(review): this Suffix pattern keeps a literal "*." —
            // unlike ".example.com" above. The assertion for
            // xxx.aaa.example.com below expects it to match, so either
            // Suffix strips the "*." or this is the "bad routing case"
            // the commit message refers to — confirm Suffix semantics.
            (RouteHost::Suffix("*.aaa.example.com".into()), vec![
                (None, 6),
            ].into_iter().collect()),
        ].into_iter().collect();
        assert_eq!(route("test.example.com", "/hello", &table),
                   Some((&2, "", "/hello")));
        assert_eq!(route("www.example.com", "/", &table), None);
        assert_eq!(route("www.example.com", "/static/i", &table), None);
        assert_eq!(route("www.example.com", "/static/favicon.ico", &table),
                   Some((&4, "/static/favicon.ico", "")));
        assert_eq!(route("xxx.example.com", "/hello", &table),
                   Some((&5, "", "/hello")));
        assert_eq!(route("example.org", "/", &table), None);
        assert_eq!(route("example.com", "/hello", &table),
                   Some((&1, "", "/hello")));
        assert_eq!(route("xxx.aaa.example.com", "/hello", &table),
                   Some((&6, "", "/hello")));
        assert_eq!(route("city.example.com", "/static", &table),
                   Some((&3, "/static", "")));
    }
    /*
    #[test]
    fn route_path() {
    let table = vec![
    (Route { host: "ex.com".into(), path: Some("/one".into()) }, 1),
    (Route { host: "ex.com".into(), path: None }, 0),
    (Route { host: "ex.com".into(), path: Some("/two".into()) }, 2),
    ].into_iter().collect();
    assert_eq!(route("ex.com", "/one", &table),
    Some((&1, "")));
    assert_eq!(route("ex.com", "/one/end", &table),
    Some((&1, "/end")));
    assert_eq!(route("ex.com", "/two", &table),
    Some((&2, "")));
    assert_eq!(route("ex.com","/two/some", &table),
    Some((&2, "/some")));
    assert_eq!(route("ex.com", "/three", &table),
    Some((&0, "/three")));
    assert_eq!(route("ex.com", "/", &table),
    Some((&0, "/")));
    assert_eq!(route("ex.org", "/one", &table), None);
    assert_eq!(route("subdomain.ex.org", "/two", &table), None);
    assert_eq!(route("example.org", "/", &table), None);
    assert_eq!(route("example.org", "/two", &table), None);
    }
    */
}
|
use bytes::{Buf, Bytes};
use futures::stream::{self, Stream};
use std::io::{self, BufRead, Cursor, Read};
use std::marker::PhantomData;
use tokio_postgres::Error;
/// Blocking `Read`/`BufRead` adapter over an async `COPY OUT` stream.
pub struct CopyOutReader<'a> {
    // Blocking iterator over the stream's byte chunks.
    it: stream::Wait<tokio_postgres::CopyOut>,
    // Chunk currently being consumed.
    cur: Cursor<Bytes>,
    // Marker making the reader behave as a `&'a mut` borrow —
    // presumably of the owning client; see the no-op Drop impl.
    _p: PhantomData<&'a mut ()>,
}
// no-op impl to extend borrow until drop
impl<'a> Drop for CopyOutReader<'a> {
    // Intentionally empty: a Drop impl keeps the `'a` borrow alive for
    // the reader's full scope.
    fn drop(&mut self) {}
}
impl<'a> CopyOutReader<'a> {
    /// Builds the reader, eagerly pulling the first chunk so protocol
    /// errors surface at construction time; an exhausted stream yields
    /// an empty first chunk (immediate EOF).
    // `new` intentionally returns `Result<Self, Error>`; suppress the
    // lint rather than rename a pub(crate) constructor (this mirrors
    // the fix already applied to the later copy of this file).
    #[allow(clippy::new_ret_no_self)]
    pub(crate) fn new(stream: tokio_postgres::CopyOut) -> Result<CopyOutReader<'a>, Error> {
        let mut it = stream.wait();
        let cur = match it.next() {
            Some(Ok(cur)) => cur,
            Some(Err(e)) => return Err(e),
            None => Bytes::new(),
        };
        Ok(CopyOutReader {
            it,
            cur: Cursor::new(cur),
            _p: PhantomData,
        })
    }
}
impl<'a> Read for CopyOutReader<'a> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Delegate to BufRead: copy as much of the current chunk as
        // fits into `buf`, then mark those bytes consumed.
        let b = self.fill_buf()?;
        let len = usize::min(buf.len(), b.len());
        buf[..len].copy_from_slice(&b[..len]);
        self.consume(len);
        Ok(len)
    }
}
impl<'a> BufRead for CopyOutReader<'a> {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        // Pull the next chunk only once the current one is exhausted.
        if self.cur.remaining() == 0 {
            match self.it.next() {
                Some(Ok(cur)) => self.cur = Cursor::new(cur),
                Some(Err(e)) => return Err(io::Error::new(io::ErrorKind::Other, e)),
                // Stream finished: keep the empty cursor so the empty
                // slice below signals EOF.
                None => {}
            };
        }
        Ok(Buf::bytes(&self.cur))
    }
    fn consume(&mut self, amt: usize) {
        self.cur.advance(amt);
    }
}
Fix clippy warning (`clippy::new_ret_no_self` on `CopyOutReader::new`)
use bytes::{Buf, Bytes};
use futures::stream::{self, Stream};
use std::io::{self, BufRead, Cursor, Read};
use std::marker::PhantomData;
use tokio_postgres::Error;
/// Blocking `Read`/`BufRead` adapter over an async `COPY OUT` stream.
pub struct CopyOutReader<'a> {
    // Blocking iterator over the stream's byte chunks.
    it: stream::Wait<tokio_postgres::CopyOut>,
    // Chunk currently being consumed.
    cur: Cursor<Bytes>,
    // Marker making the reader behave as a `&'a mut` borrow —
    // presumably of the owning client; see the no-op Drop impl.
    _p: PhantomData<&'a mut ()>,
}
// no-op impl to extend borrow until drop
impl<'a> Drop for CopyOutReader<'a> {
    // Intentionally empty: a Drop impl keeps the `'a` borrow alive for
    // the reader's full scope.
    fn drop(&mut self) {}
}
impl<'a> CopyOutReader<'a> {
    // `new` intentionally returns `Result<Self, Error>`, hence the lint
    // suppression instead of renaming a pub(crate) constructor.
    #[allow(clippy::new_ret_no_self)]
    /// Builds the reader, eagerly pulling the first chunk so protocol
    /// errors surface at construction time; an exhausted stream yields
    /// an empty first chunk (immediate EOF).
    pub(crate) fn new(stream: tokio_postgres::CopyOut) -> Result<CopyOutReader<'a>, Error> {
        let mut it = stream.wait();
        let cur = match it.next() {
            Some(Ok(cur)) => cur,
            Some(Err(e)) => return Err(e),
            None => Bytes::new(),
        };
        Ok(CopyOutReader {
            it,
            cur: Cursor::new(cur),
            _p: PhantomData,
        })
    }
}
impl<'a> Read for CopyOutReader<'a> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Delegate to BufRead: copy as much of the current chunk as
        // fits into `buf`, then mark those bytes consumed.
        let b = self.fill_buf()?;
        let len = usize::min(buf.len(), b.len());
        buf[..len].copy_from_slice(&b[..len]);
        self.consume(len);
        Ok(len)
    }
}
impl<'a> BufRead for CopyOutReader<'a> {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        // Pull the next chunk only once the current one is exhausted.
        if self.cur.remaining() == 0 {
            match self.it.next() {
                Some(Ok(cur)) => self.cur = Cursor::new(cur),
                Some(Err(e)) => return Err(io::Error::new(io::ErrorKind::Other, e)),
                // Stream finished: keep the empty cursor so the empty
                // slice below signals EOF.
                None => {}
            };
        }
        Ok(Buf::bytes(&self.cur))
    }
    fn consume(&mut self, amt: usize) {
        self.cur.advance(amt);
    }
}
|
use core::cell::UnsafeCell;
use core::default::Default;
use core::fmt;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;
use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering};
/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contained data of the lock.
///
/// An [`RwLockUpgradeableGuard`](RwLockUpgradeableGuard) can be upgraded to a
/// writable guard through the [`RwLockUpgradeableGuard::upgrade`](RwLockUpgradeableGuard::upgrade)
/// [`RwLockUpgradeableGuard::try_upgrade`](RwLockUpgradeableGuard::try_upgrade) functions.
/// Writable or upgradeable guards can be downgraded through their respective `downgrade`
/// functions.
///
/// Based on Facebook's
/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T: ?Sized> {
    // Lock word: bit 0 = writer held, bit 1 = upgradeable guard held,
    // bits 2.. = reader count (each reader adds `READER` == 4).
    lock: AtomicUsize,
    // The protected value; only ever accessed through a held guard.
    data: UnsafeCell<T>,
}
// Lock-word bit layout: WRITER occupies bit 0, UPGRADED bit 1, and the
// reader count lives in the remaining high bits (incremented in units of
// READER == 4 by `try_read`).
const READER: usize = 4;
const UPGRADED: usize = 2;
const WRITER: usize = 1;

/// A guard from which the protected data can be read
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
#[derive(Debug)]
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
    // Lock word of the owning `RwLock`; decremented by READER on drop.
    lock: &'a AtomicUsize,
    // Pointer into the owning lock's `UnsafeCell`.
    data: NonNull<T>,
}

/// A guard to which the protected data can be written
///
/// When the guard falls out of scope it will release the lock.
// NOTE(review): `NonNull<T>` is covariant over `T`; consider adding a
// `PhantomData<&'a mut T>` field to make this guard invariant — confirm
// against the intended variance of the public API.
#[derive(Debug)]
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: NonNull<T>,
}

/// A guard from which the protected data can be read, and can be upgraded
/// to a writable guard if needed
///
/// No writers or other upgradeable guards can exist while this is in scope. New reader
/// creation is prevented (to alleviate writer starvation) but there may be existing readers
/// when the lock is acquired.
///
/// When the guard falls out of scope it will release the lock.
// NOTE(review): same covariance concern as `RwLockWriteGuard` above.
#[derive(Debug)]
pub struct RwLockUpgradeableGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: NonNull<T>,
}

// Same unsafe impls as `std::sync::RwLock`
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
impl<T> RwLock<T> {
    /// Creates a new, unlocked spinlock wrapping the supplied data.
    ///
    /// May be used statically:
    ///
    /// ```
    /// use spin;
    ///
    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
    ///
    /// fn demo() {
    ///     let lock = RW_LOCK.read();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline]
    pub const fn new(user_data: T) -> RwLock<T> {
        RwLock {
            data: UnsafeCell::new(user_data),
            lock: AtomicUsize::new(0),
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
    #[inline]
    pub fn into_inner(self) -> T {
        // Owning `self` by value statically proves no guards are
        // outstanding, so the data can be extracted without touching the
        // lock word.
        self.data.into_inner()
    }
}
impl<T: ?Sized> RwLock<T> {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to the ordering of whether contentious readers or writers will
    /// acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.read();
    ///     // The lock is now locked and the data can be read
    ///     println!("{}", *data);
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<T> {
        // Spin with a CPU hint between attempts until shared access is granted.
        loop {
            match self.try_read() {
                Some(guard) => return guard,
                None => cpu_relax(),
            }
        }
    }

    /// Attempt to acquire this lock with shared read access.
    ///
    /// This function will never block and will return immediately if `read`
    /// would otherwise succeed. Returns `Some` of an RAII guard which will
    /// release the shared access of this thread when dropped, or `None` if the
    /// access could not be granted. This method does not provide any
    /// guarantees with respect to the ordering of whether contentious readers
    /// or writers will acquire the lock first.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_read() {
    ///         Some(data) => {
    ///             // The lock is now locked and the data can be read
    ///             println!("{}", *data);
    ///             // The lock is dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
        // Optimistically register a reader; `value` is the word *before* the add.
        let value = self.lock.fetch_add(READER, Ordering::Acquire);
        // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
        // This helps reduce writer starvation.
        if value & (WRITER | UPGRADED) != 0 {
            // Lock is taken, undo.
            self.lock.fetch_sub(READER, Ordering::Release);
            None
        } else {
            Some(RwLockReadGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
            })
        }
    }

    /// Force decrement the reader count.
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
    /// live, or if called more times than `read` has been called, but can be
    /// useful in FFI contexts where the caller doesn't know how to deal with
    /// RAII. The underlying atomic operation uses `Ordering::Release`.
    #[inline]
    pub unsafe fn force_read_decrement(&self) {
        // Sanity check: some reader (or the UPGRADED bit) must be recorded.
        debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }

    /// Force unlock exclusive write access.
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
    /// live, or if called when there are current readers, but can be useful in
    /// FFI contexts where the caller doesn't know how to deal with RAII. The
    /// underlying atomic operation uses `Ordering::Release`.
    #[inline]
    pub unsafe fn force_write_unlock(&self) {
        // Sanity check: no readers may be recorded while a writer is forced out.
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }

    /// Lock this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.write();
    ///     // The lock is now locked and the data can be written
    ///     *data += 1;
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<T> {
        loop {
            // Use compare_exchange_weak as a slight optimisation instead of just calling try_write which
            // uses compare_exchange (strong) internally.
            // The CAS succeeds only when the entire word is 0: no writer, no
            // upgradeable guard, and no readers.
            if self
                .lock
                .compare_exchange_weak(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                return RwLockWriteGuard {
                    lock: &self.lock,
                    data: unsafe { NonNull::new_unchecked(self.data.get()) },
                };
            } else {
                cpu_relax();
            }
        }
    }

    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// This function does not ever block, and it will return `None` if a call
    /// to `write` would otherwise block. If successful, an RAII guard is
    /// returned.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_write() {
    ///         Some(mut data) => {
    ///             // The lock is now locked and the data can be written
    ///             *data += 1;
    ///             // The lock is implicitly dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
        if self
            .lock
            .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            Some(RwLockWriteGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
            })
        } else {
            None
        }
    }

    /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
    /// Upgrades can be done through the [`RwLockUpgradeableGuard::upgrade`](RwLockUpgradeableGuard::upgrade) method.
    #[inline]
    pub fn upgradeable_read(&self) -> RwLockUpgradeableGuard<T> {
        loop {
            match self.try_upgradeable_read() {
                Some(guard) => return guard,
                None => cpu_relax(),
            }
        }
    }

    /// Tries to obtain an upgradeable lock guard.
    #[inline]
    pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradeableGuard<T>> {
        // Succeeds only if neither a writer nor another upgradeable guard was
        // already present; existing plain readers are allowed to remain.
        if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
            Some(RwLockUpgradeableGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
            })
        } else {
            // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock.
            // When they unlock, they will clear the bit.
            None
        }
    }
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
    /// Formats the protected value if a read lock can be taken without
    /// blocking; otherwise prints a `<locked>` placeholder.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(guard) = self.try_read() {
            write!(f, "RwLock {{ data: ")?;
            (&*guard).fmt(f)?;
            write!(f, "}}")
        } else {
            write!(f, "RwLock {{ <locked> }}")
        }
    }
}

impl<T: ?Sized + Default> Default for RwLock<T> {
    /// Creates an unlocked `RwLock` holding `T::default()`.
    fn default() -> RwLock<T> {
        RwLock::new(T::default())
    }
}
impl<'rwlock, T: ?Sized> RwLockUpgradeableGuard<'rwlock, T> {
    /// Upgrades an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    /// let writable = upgradeable.upgrade();
    /// ```
    #[inline]
    pub fn upgrade(self) -> RwLockWriteGuard<'rwlock, T> {
        loop {
            // Use compare_exchange_weak as a slight optimisation instead of just calling try_upgrade which
            // uses compare_exchange (strong) internally.
            // The CAS succeeds only when the whole word is exactly UPGRADED,
            // i.e. every remaining plain reader has released its guard.
            if self
                .lock
                .compare_exchange_weak(UPGRADED, WRITER, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                // Upgrade successful
                let out = RwLockWriteGuard {
                    lock: self.lock,
                    data: self.data,
                };
                // Forget the old guard so its destructor doesn't run
                // (the UPGRADED bit was already swapped away by the CAS).
                mem::forget(self);
                return out;
            }
            cpu_relax();
        }
    }

    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    ///
    /// match upgradeable.try_upgrade() {
    ///     Ok(writable) => /* upgrade successful - use writable lock guard */ (),
    ///     Err(upgradeable) => /* upgrade unsuccessful */ (),
    /// };
    /// ```
    #[inline]
    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
        if self
            .lock
            .compare_exchange(UPGRADED, WRITER, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            // Upgrade successful
            let out = Ok(RwLockWriteGuard {
                lock: &self.lock,
                data: self.data,
            });
            // Forget the old guard so its destructor doesn't run
            mem::forget(self);
            out
        } else {
            Err(self)
        }
    }

    #[inline]
    /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(1);
    ///
    /// let upgradeable = mylock.upgradeable_read();
    /// assert!(mylock.try_read().is_none());
    /// assert_eq!(*upgradeable, 1);
    ///
    /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
    /// assert!(mylock.try_read().is_some());
    /// assert_eq!(*readable, 1);
    /// ```
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.lock.fetch_add(READER, Ordering::Acquire);
        RwLockReadGuard {
            lock: &self.lock,
            data: self.data,
        }
        // Dropping self removes the UPGRADED bit
    }
}

impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
    /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let mut writable = mylock.write();
    /// *writable = 1;
    ///
    /// let readable = writable.downgrade(); // This is guaranteed not to spin
    /// # let readable_2 = mylock.try_read().unwrap();
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.lock.fetch_add(READER, Ordering::Acquire);
        RwLockReadGuard {
            lock: &self.lock,
            data: self.data,
        }
        // Dropping self removes the WRITER bit
    }
}
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: the live guard witnesses a held read lock, so the pointee
        // is valid and not being written.
        unsafe { self.data.as_ref() }
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockUpgradeableGuard<'rwlock, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: the upgradeable guard excludes writers, so shared access
        // to the pointee is sound.
        unsafe { self.data.as_ref() }
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: the write guard holds exclusive access.
        unsafe { self.data.as_ref() }
    }
}

impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: the write guard holds exclusive access, so handing out a
        // unique reference is sound.
        unsafe { self.data.as_mut() }
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
    fn drop(&mut self) {
        // A reader must still be recorded in the count bits.
        debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockUpgradeableGuard<'rwlock, T> {
    fn drop(&mut self) {
        debug_assert_eq!(
            self.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
            UPGRADED
        );
        // Clear only the UPGRADED bit; any plain readers keep their count.
        self.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
    fn drop(&mut self) {
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & WRITER, WRITER);
        // Writer is responsible for clearing both WRITER and UPGRADED bits.
        // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }
}
// Unit tests for the spinning reader-writer lock (require `std`).
#[cfg(test)]
mod tests {
    use std::prelude::v1::*;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    use super::*;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop((l.read(), l.read()));
        drop(l.write());
    }

    // TODO: needs RNG
    //#[test]
    //fn frob() {
    //    static R: RwLock = RwLock::new();
    //    const N: usize = 10;
    //    const M: usize = 1000;
    //
    //    let (tx, rx) = channel::<()>();
    //    for _ in 0..N {
    //        let tx = tx.clone();
    //        thread::spawn(move|| {
    //            let mut rng = rand::thread_rng();
    //            for _ in 0..M {
    //                if rng.gen_weighted_bool(N) {
    //                    drop(R.write());
    //                } else {
    //                    drop(R.read());
    //                }
    //            }
    //            drop(tx);
    //        });
    //    }
    //    drop(tx);
    //    let _ = rx.recv();
    //    unsafe { R.destroy(); }
    //}

    // The writer transiently sets the value to -1 mid-update; concurrent
    // readers must never observe a negative value.
    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();
        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });
        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }
        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }
        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    // The lock must remain usable after a panicking thread released it
    // during unwinding (via the Unwinder's Drop impl).
    #[test]
    fn test_rw_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    #[test]
    fn test_rwlock_try_write() {
        use std::mem::drop;
        let lock = RwLock::new(0isize);
        let read_guard = lock.read();
        let write_result = lock.try_write();
        match write_result {
            None => (),
            Some(_) => assert!(
                false,
                "try_write should not succeed while read_guard is in scope"
            ),
        }
        drop(read_guard);
    }

    #[test]
    fn test_rw_try_read() {
        let m = RwLock::new(0);
        mem::forget(m.write());
        assert!(m.try_read().is_none());
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    // into_inner must transfer ownership without dropping the value early.
    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_force_read_decrement() {
        let m = RwLock::new(());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
            m.force_read_decrement();
        }
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
        }
        assert!(m.try_write().is_some());
    }

    #[test]
    fn test_force_write_unlock() {
        let m = RwLock::new(());
        ::std::mem::forget(m.write());
        assert!(m.try_read().is_none());
        unsafe {
            m.force_write_unlock();
        }
        assert!(m.try_read().is_some());
    }

    #[test]
    fn test_upgrade_downgrade() {
        let m = RwLock::new(());
        {
            let _r = m.read();
            let upg = m.try_upgradeable_read().unwrap();
            assert!(m.try_read().is_none());
            assert!(m.try_write().is_none());
            assert!(upg.try_upgrade().is_err());
        }
        {
            let w = m.write();
            assert!(m.try_upgradeable_read().is_none());
            let _r = w.downgrade();
            assert!(m.try_upgradeable_read().is_some());
            assert!(m.try_read().is_some());
            assert!(m.try_write().is_none());
        }
        {
            let _u = m.upgradeable_read();
            assert!(m.try_upgradeable_read().is_none());
        }
        assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
    }
}
Make the upgradeable and writable guards invariant over `T` by storing a `PhantomData<&'a mut T>` marker field in each guard.
use core::cell::UnsafeCell;
use core::default::Default;
use core::fmt;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;
use core::marker::PhantomData;
use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering};
/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contained data of the lock.
///
/// An [`RwLockUpgradeableGuard`](RwLockUpgradeableGuard) can be upgraded to a
/// writable guard through the [`RwLockUpgradeableGuard::upgrade`](RwLockUpgradeableGuard::upgrade)
/// [`RwLockUpgradeableGuard::try_upgrade`](RwLockUpgradeableGuard::try_upgrade) functions.
/// Writable or upgradeable guards can be downgraded through their respective `downgrade`
/// functions.
///
/// Based on Facebook's
/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T: ?Sized> {
    // Lock word: bit 0 = writer held, bit 1 = upgradeable guard held,
    // bits 2.. = reader count (each reader adds `READER` == 4).
    lock: AtomicUsize,
    // The protected value; only ever accessed through a held guard.
    data: UnsafeCell<T>,
}
// Lock-word bit layout: WRITER occupies bit 0, UPGRADED bit 1, and the
// reader count lives in the remaining high bits (incremented in units of
// READER == 4 by `try_read`).
const READER: usize = 4;
const UPGRADED: usize = 2;
const WRITER: usize = 1;

/// A guard from which the protected data can be read
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
#[derive(Debug)]
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
    // Lock word of the owning `RwLock`; decremented by READER on drop.
    lock: &'a AtomicUsize,
    // Pointer into the owning lock's `UnsafeCell`.
    data: NonNull<T>,
}

/// A guard to which the protected data can be written
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: NonNull<T>,
    // `&'a mut T` makes this guard invariant over `T`, matching the
    // exclusive access it hands out (`NonNull<T>` alone would be covariant).
    #[doc(hidden)]
    _invariant: PhantomData<&'a mut T>,
}

/// A guard from which the protected data can be read, and can be upgraded
/// to a writable guard if needed
///
/// No writers or other upgradeable guards can exist while this is in scope. New reader
/// creation is prevented (to alleviate writer starvation) but there may be existing readers
/// when the lock is acquired.
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct RwLockUpgradeableGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: NonNull<T>,
    // Invariant over `T` for the same reason as `RwLockWriteGuard`: this
    // guard can be upgraded to exclusive access.
    #[doc(hidden)]
    _invariant: PhantomData<&'a mut T>,
}

// Same unsafe impls as `std::sync::RwLock`
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
impl<T> RwLock<T> {
    /// Creates a new, unlocked spinlock wrapping the supplied data.
    ///
    /// May be used statically:
    ///
    /// ```
    /// use spin;
    ///
    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
    ///
    /// fn demo() {
    ///     let lock = RW_LOCK.read();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline]
    pub const fn new(user_data: T) -> RwLock<T> {
        RwLock {
            data: UnsafeCell::new(user_data),
            lock: AtomicUsize::new(0),
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
    #[inline]
    pub fn into_inner(self) -> T {
        // Owning `self` by value statically proves no guards are
        // outstanding, so the data can be extracted without touching the
        // lock word.
        self.data.into_inner()
    }
}
impl<T: ?Sized> RwLock<T> {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to the ordering of whether contentious readers or writers will
    /// acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.read();
    ///     // The lock is now locked and the data can be read
    ///     println!("{}", *data);
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<T> {
        // Spin with a CPU hint between attempts until shared access is granted.
        loop {
            match self.try_read() {
                Some(guard) => return guard,
                None => cpu_relax(),
            }
        }
    }

    /// Attempt to acquire this lock with shared read access.
    ///
    /// This function will never block and will return immediately if `read`
    /// would otherwise succeed. Returns `Some` of an RAII guard which will
    /// release the shared access of this thread when dropped, or `None` if the
    /// access could not be granted. This method does not provide any
    /// guarantees with respect to the ordering of whether contentious readers
    /// or writers will acquire the lock first.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_read() {
    ///         Some(data) => {
    ///             // The lock is now locked and the data can be read
    ///             println!("{}", *data);
    ///             // The lock is dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
        // Optimistically register a reader; `value` is the word *before* the add.
        let value = self.lock.fetch_add(READER, Ordering::Acquire);
        // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
        // This helps reduce writer starvation.
        if value & (WRITER | UPGRADED) != 0 {
            // Lock is taken, undo.
            self.lock.fetch_sub(READER, Ordering::Release);
            None
        } else {
            Some(RwLockReadGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
            })
        }
    }

    /// Force decrement the reader count.
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
    /// live, or if called more times than `read` has been called, but can be
    /// useful in FFI contexts where the caller doesn't know how to deal with
    /// RAII. The underlying atomic operation uses `Ordering::Release`.
    #[inline]
    pub unsafe fn force_read_decrement(&self) {
        // Sanity check: some reader (or the UPGRADED bit) must be recorded.
        debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }

    /// Force unlock exclusive write access.
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
    /// live, or if called when there are current readers, but can be useful in
    /// FFI contexts where the caller doesn't know how to deal with RAII. The
    /// underlying atomic operation uses `Ordering::Release`.
    #[inline]
    pub unsafe fn force_write_unlock(&self) {
        // Sanity check: no readers may be recorded while a writer is forced out.
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }

    /// Lock this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.write();
    ///     // The lock is now locked and the data can be written
    ///     *data += 1;
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<T> {
        loop {
            // Use compare_exchange_weak as a slight optimisation instead of just calling try_write which
            // uses compare_exchange (strong) internally.
            // The CAS succeeds only when the entire word is 0: no writer, no
            // upgradeable guard, and no readers.
            if self
                .lock
                .compare_exchange_weak(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                return RwLockWriteGuard {
                    lock: &self.lock,
                    data: unsafe { NonNull::new_unchecked(self.data.get()) },
                    _invariant: PhantomData,
                };
            } else {
                cpu_relax();
            }
        }
    }

    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// This function does not ever block, and it will return `None` if a call
    /// to `write` would otherwise block. If successful, an RAII guard is
    /// returned.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_write() {
    ///         Some(mut data) => {
    ///             // The lock is now locked and the data can be written
    ///             *data += 1;
    ///             // The lock is implicitly dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
        if self
            .lock
            .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            Some(RwLockWriteGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
                _invariant: PhantomData,
            })
        } else {
            None
        }
    }

    /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
    /// Upgrades can be done through the [`RwLockUpgradeableGuard::upgrade`](RwLockUpgradeableGuard::upgrade) method.
    #[inline]
    pub fn upgradeable_read(&self) -> RwLockUpgradeableGuard<T> {
        loop {
            match self.try_upgradeable_read() {
                Some(guard) => return guard,
                None => cpu_relax(),
            }
        }
    }

    /// Tries to obtain an upgradeable lock guard.
    #[inline]
    pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradeableGuard<T>> {
        // Succeeds only if neither a writer nor another upgradeable guard was
        // already present; existing plain readers are allowed to remain.
        if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
            Some(RwLockUpgradeableGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
                _invariant: PhantomData,
            })
        } else {
            // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock.
            // When they unlock, they will clear the bit.
            None
        }
    }
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
    /// Formats the protected value if a read lock can be taken without
    /// blocking; otherwise prints a `<locked>` placeholder.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(guard) = self.try_read() {
            write!(f, "RwLock {{ data: ")?;
            (&*guard).fmt(f)?;
            write!(f, "}}")
        } else {
            write!(f, "RwLock {{ <locked> }}")
        }
    }
}

impl<T: ?Sized + Default> Default for RwLock<T> {
    /// Creates an unlocked `RwLock` holding `T::default()`.
    fn default() -> RwLock<T> {
        RwLock::new(T::default())
    }
}
impl<'rwlock, T: ?Sized> RwLockUpgradeableGuard<'rwlock, T> {
    /// Upgrades an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    /// let writable = upgradeable.upgrade();
    /// ```
    #[inline]
    pub fn upgrade(self) -> RwLockWriteGuard<'rwlock, T> {
        loop {
            // Use compare_exchange_weak as a slight optimisation instead of just calling try_upgrade which
            // uses compare_exchange (strong) internally.
            // The CAS succeeds only when the whole word is exactly UPGRADED,
            // i.e. every remaining plain reader has released its guard.
            if self
                .lock
                .compare_exchange_weak(UPGRADED, WRITER, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                // Upgrade successful
                let out = RwLockWriteGuard {
                    lock: self.lock,
                    data: self.data,
                    _invariant: PhantomData,
                };
                // Forget the old guard so its destructor doesn't run
                // (the UPGRADED bit was already swapped away by the CAS).
                mem::forget(self);
                return out;
            }
            cpu_relax();
        }
    }

    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    ///
    /// match upgradeable.try_upgrade() {
    ///     Ok(writable) => /* upgrade successful - use writable lock guard */ (),
    ///     Err(upgradeable) => /* upgrade unsuccessful */ (),
    /// };
    /// ```
    #[inline]
    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
        if self
            .lock
            .compare_exchange(UPGRADED, WRITER, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            // Upgrade successful
            let out = Ok(RwLockWriteGuard {
                lock: &self.lock,
                data: self.data,
                _invariant: PhantomData,
            });
            // Forget the old guard so its destructor doesn't run
            mem::forget(self);
            out
        } else {
            Err(self)
        }
    }

    #[inline]
    /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(1);
    ///
    /// let upgradeable = mylock.upgradeable_read();
    /// assert!(mylock.try_read().is_none());
    /// assert_eq!(*upgradeable, 1);
    ///
    /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
    /// assert!(mylock.try_read().is_some());
    /// assert_eq!(*readable, 1);
    /// ```
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.lock.fetch_add(READER, Ordering::Acquire);
        RwLockReadGuard {
            lock: &self.lock,
            data: self.data,
        }
        // Dropping self removes the UPGRADED bit
    }
}

impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
    /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let mut writable = mylock.write();
    /// *writable = 1;
    ///
    /// let readable = writable.downgrade(); // This is guaranteed not to spin
    /// # let readable_2 = mylock.try_read().unwrap();
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.lock.fetch_add(READER, Ordering::Acquire);
        RwLockReadGuard {
            lock: &self.lock,
            data: self.data,
        }
        // Dropping self removes the WRITER bit
    }
}
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { self.data.as_ref() }
}
}
impl<'rwlock, T: ?Sized> Deref for RwLockUpgradeableGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { self.data.as_ref() }
}
}
impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { self.data.as_ref() }
}
}
impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { self.data.as_mut() }
}
}
impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
    fn drop(&mut self) {
        // Sanity check: masking off the WRITER/UPGRADED flag bits, at least
        // one READER reservation (ours) must still be counted.
        debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
        // Release our reader reservation.
        self.lock.fetch_sub(READER, Ordering::Release);
    }
}
impl<'rwlock, T: ?Sized> Drop for RwLockUpgradeableGuard<'rwlock, T> {
    fn drop(&mut self) {
        // Sanity check: while this guard is alive the UPGRADED bit must be
        // set and the WRITER bit clear.
        debug_assert_eq!(
            self.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
            UPGRADED
        );
        // Clear the UPGRADED bit, letting another upgradeable reader (or a
        // writer) acquire the lock.
        self.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
    }
}
impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
    fn drop(&mut self) {
        // Sanity check: the WRITER bit must still be set by us.
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & WRITER, WRITER);
        // Writer is responsible for clearing both WRITER and UPGRADED bits.
        // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }
}
// Unit tests for the spin RwLock and its guard types.
#[cfg(test)]
mod tests {
    use std::prelude::v1::*;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;
    use super::*;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop((l.read(), l.read()));
        drop(l.write());
    }

    // TODO: needs RNG
    //#[test]
    //fn frob() {
    //    static R: RwLock = RwLock::new();
    //    const N: usize = 10;
    //    const M: usize = 1000;
    //
    //    let (tx, rx) = channel::<()>();
    //    for _ in 0..N {
    //        let tx = tx.clone();
    //        thread::spawn(move|| {
    //            let mut rng = rand::thread_rng();
    //            for _ in 0..M {
    //                if rng.gen_weighted_bool(N) {
    //                    drop(R.write());
    //                } else {
    //                    drop(R.read());
    //                }
    //            }
    //            drop(tx);
    //        });
    //    }
    //    drop(tx);
    //    let _ = rx.recv();
    //    unsafe { R.destroy(); }
    //}

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();
        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });
        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }
        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }
        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    #[test]
    fn test_rwlock_try_write() {
        use std::mem::drop;
        let lock = RwLock::new(0isize);
        let read_guard = lock.read();
        let write_result = lock.try_write();
        match write_result {
            None => (),
            Some(_) => assert!(
                false,
                "try_write should not succeed while read_guard is in scope"
            ),
        }
        drop(read_guard);
    }

    #[test]
    fn test_rw_try_read() {
        let m = RwLock::new(0);
        // Fully-qualified path (as in test_force_read_decrement below); the
        // `mem` module itself is not imported in this test module, so a bare
        // `mem::forget` would not resolve.
        ::std::mem::forget(m.write());
        assert!(m.try_read().is_none());
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_force_read_decrement() {
        let m = RwLock::new(());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
            m.force_read_decrement();
        }
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
        }
        assert!(m.try_write().is_some());
    }

    #[test]
    fn test_force_write_unlock() {
        let m = RwLock::new(());
        ::std::mem::forget(m.write());
        assert!(m.try_read().is_none());
        unsafe {
            m.force_write_unlock();
        }
        assert!(m.try_read().is_some());
    }

    #[test]
    fn test_upgrade_downgrade() {
        let m = RwLock::new(());
        {
            let _r = m.read();
            let upg = m.try_upgradeable_read().unwrap();
            assert!(m.try_read().is_none());
            assert!(m.try_write().is_none());
            assert!(upg.try_upgrade().is_err());
        }
        {
            let w = m.write();
            assert!(m.try_upgradeable_read().is_none());
            let _r = w.downgrade();
            assert!(m.try_upgradeable_read().is_some());
            assert!(m.try_read().is_some());
            assert!(m.try_write().is_none());
        }
        {
            let _u = m.upgradeable_read();
            assert!(m.try_upgradeable_read().is_none());
        }
        assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
    }
}
|
use crate::utils;
use dirs;
use std::fs;
use std::io::ErrorKind;
use std::process::Command;
/// Installs the base environment: shell utilities, git plus personal repos,
/// vim plus plugins and configuration, node, and the Rust toolchain.
/// Intended to be idempotent: already-installed pieces are skipped.
pub fn setup() {
    // Environment (Unix)
    utils::install_app("1password");
    utils::install_package("ripgrep");
    // Version control (Git)
    utils::install_package("git");
    let home_path = match dirs::home_dir() {
        Some(directory) => directory,
        None => panic!("Cannot find the home directory."),
    };
    let repos_path = home_path.join("repos");
    utils::create_dir(&repos_path);
    // BUG FIX: `utils::create_dir` was just called (presumably ensuring the
    // directory exists — confirm against utils), so matching `read_dir` on
    // Ok/Err made the clone branch unreachable and the repos were never
    // cloned. Decide based on whether the directory has any entries instead.
    let repos_present = fs::read_dir(&repos_path)
        .map(|mut entries| entries.next().is_some())
        .unwrap_or(false);
    if repos_present {
        println!("The repos are already installed.");
    } else {
        utils::clone_repo(&repos_path, "https://github.com/trevordmiller/scripts");
        utils::clone_repo(&repos_path, "https://github.com/trevordmiller/study");
    }
    // Editor (Vim)
    utils::install_package("vim");
    let editor_plugins_path = home_path
        .join(".vim")
        .join("pack")
        .join("plugins")
        .join("start");
    utils::create_dir(&editor_plugins_path);
    let editor_configuration_path = home_path.join(".vimrc");
    utils::create_file(&editor_configuration_path);
    // Same emptiness check as for the repos above: the plugin directory was
    // just created, so only treat the plugins as installed when the
    // directory already has entries.
    let plugins_present = fs::read_dir(&editor_plugins_path)
        .map(|mut entries| entries.next().is_some())
        .unwrap_or(false);
    if plugins_present {
        println!("The editor plugins are already installed.");
    } else {
        utils::clone_repo(&editor_plugins_path, "https://github.com/tpope/vim-sensible");
        utils::clone_repo(&editor_plugins_path, "https://github.com/tpope/vim-sleuth");
        utils::clone_repo(&editor_plugins_path, "https://github.com/sheerun/vim-polyglot");
        utils::clone_repo(&editor_plugins_path, "https://github.com/octref/RootIgnore");
        utils::clone_repo(&editor_plugins_path, "https://github.com/dense-analysis/ale");
        utils::clone_repo(&editor_plugins_path, "https://github.com/arcticicestudio/nord-vim");
        println!("Adding editor configuration.");
        match fs::write(&editor_configuration_path, "set grepprg=rg\\ --vimgrep\nset grepformat=%f:%l:%c:%m\ncolorscheme nord") {
            Ok(_) => (),
            Err(error) => panic!("There was a problem: {:?}", error),
        }
    }
    // Application programming (JavaScript)
    utils::install_package("node");
    // Systems programming (Rust)
    utils::install_package("rustup-init");
    // `which rustup` prints nothing when rustup is absent, so an empty
    // stdout means the toolchain still needs to be initialized.
    let rustup_path_check = match Command::new("which").arg("rustup").output() {
        Ok(output) => output.stdout,
        Err(error) => panic!("There was a problem: {:?}", error),
    };
    if rustup_path_check.is_empty() {
        match Command::new("rustup-init").output() {
            Ok(_) => (),
            Err(error) => match error.kind() {
                ErrorKind::NotFound => panic!("The rustup-init command is missing."),
                other_error => panic!("There was a problem: {:?}", other_error),
            },
        }
    } else {
        println!("The rustup-init has already been run.")
    }
}
/// Upgrades everything `setup` installed: homebrew packages, the pinned
/// apps, every vim plugin repo, and the Rust toolchain.
pub fn upgrade() {
    // Environment (Unix)
    println!("Upgrading package manager.");
    if let Err(error) = Command::new("brew").arg("update").output() {
        panic!("There was a problem: {:?}", error);
    }
    utils::upgrade_app("1password");
    utils::upgrade_package("ripgrep");
    // Version control (Git)
    utils::upgrade_package("git");
    // Editor (Vim)
    utils::upgrade_package("vim");
    println!("Upgrading editor plugins.");
    let home_path = dirs::home_dir()
        .unwrap_or_else(|| panic!("Cannot find the home directory."));
    let editor_plugins_path = home_path
        .join(".vim")
        .join("pack")
        .join("plugins")
        .join("start");
    // Run `git pull` inside every plugin checkout.
    let plugin_entries = match fs::read_dir(&editor_plugins_path) {
        Ok(entries) => entries,
        Err(error) => panic!("There was a problem: {:?}", error),
    };
    for entry in plugin_entries {
        let plugin_dir = match entry {
            Ok(entry) => entry.path(),
            Err(error) => panic!("There was a problem: {:?}", error),
        };
        if let Err(error) = Command::new("git")
            .current_dir(&plugin_dir)
            .arg("pull")
            .output()
        {
            panic!("There was a problem: {:?}", error);
        }
    }
    // Application programming (JavaScript)
    utils::upgrade_package("node");
    // Systems programming (Rust)
    utils::upgrade_package("rustup-init");
    println!("Upgrading systems programming toolchain.");
    if let Err(error) = Command::new("rustup").arg("update").output() {
        match error.kind() {
            ErrorKind::NotFound => panic!("The rustup command is missing."),
            other_error => panic!("There was a problem: {:?}", other_error),
        }
    }
}
/// Finishes a work session: cleans package-manager caches, verifies every
/// repo under ~/repos is committed and pushed (panicking otherwise), and
/// kills editor/node/rls processes.
pub fn end() {
    // Environment (Unix)
    println!("Removing package manager artifacts.");
    if let Err(error) = Command::new("brew").arg("cleanup").output() {
        panic!("There was a problem: {:?}", error);
    }
    // Version control (Git)
    println!("Making sure all repos are clean.");
    let home_path = dirs::home_dir()
        .unwrap_or_else(|| panic!("Cannot find the home directory."));
    let repos_path = home_path.join("repos");
    let repo_entries = match fs::read_dir(&repos_path) {
        Ok(entries) => entries,
        Err(error) => panic!("There was a problem: {:?}", error),
    };
    let mut all_repos_clean = true;
    for entry in repo_entries {
        let repo_path = match &entry {
            Ok(entry) => entry.path(),
            Err(error) => panic!("There was a problem: {:?}", error),
        };
        // Uncommitted changes show up in porcelain status output.
        let status_check = match Command::new("git")
            .current_dir(&repo_path)
            .arg("status")
            .arg("--porcelain")
            .output()
        {
            Ok(output) => output.stdout,
            Err(error) => panic!("There was a problem: {:?}", error),
        };
        // Commits not yet on the upstream branch show up in `log @{u}..`.
        let unpushed_check = match Command::new("git")
            .current_dir(&repo_path)
            .arg("log")
            .arg("@{u}..")
            .output()
        {
            Ok(output) => output.stdout,
            Err(error) => panic!("There was a problem: {:?}", error),
        };
        if !status_check.is_empty() || !unpushed_check.is_empty() {
            all_repos_clean = false;
            eprintln!("A dirty repo was found: {}", &repo_path.display());
        }
    }
    if !all_repos_clean {
        panic!("Repos are dirty.");
    }
    // Editor (Vim)
    println!("Quitting editor processes.");
    if let Err(error) = Command::new("killall").arg("vim").output() {
        panic!("There was a problem: {:?}", error);
    }
    // Application programming (JavaScript)
    println!("Quitting application programming processes.");
    if let Err(error) = Command::new("killall").arg("node").output() {
        panic!("There was a problem: {:?}", error);
    }
    // Systems programming (Rust)
    println!("Quitting systems programming processes.");
    if let Err(error) = Command::new("killall").arg("rls").output() {
        panic!("There was a problem: {:?}", error);
    }
}
Remove OS-specific app
use crate::utils;
use dirs;
use std::fs;
use std::io::ErrorKind;
use std::process::Command;
/// Installs the base environment: shell utilities, git plus personal repos,
/// vim plus plugins and configuration, node, and the Rust toolchain.
/// Intended to be idempotent: already-installed pieces are skipped.
pub fn setup() {
    // Environment (Unix)
    utils::install_package("ripgrep");
    // Version control (Git)
    utils::install_package("git");
    let home_path = match dirs::home_dir() {
        Some(directory) => directory,
        None => panic!("Cannot find the home directory."),
    };
    let repos_path = home_path.join("repos");
    utils::create_dir(&repos_path);
    // BUG FIX: `utils::create_dir` was just called (presumably ensuring the
    // directory exists — confirm against utils), so matching `read_dir` on
    // Ok/Err made the clone branch unreachable and the repos were never
    // cloned. Decide based on whether the directory has any entries instead.
    let repos_present = fs::read_dir(&repos_path)
        .map(|mut entries| entries.next().is_some())
        .unwrap_or(false);
    if repos_present {
        println!("The repos are already installed.");
    } else {
        utils::clone_repo(&repos_path, "https://github.com/trevordmiller/scripts");
        utils::clone_repo(&repos_path, "https://github.com/trevordmiller/study");
    }
    // Editor (Vim)
    utils::install_package("vim");
    let editor_plugins_path = home_path
        .join(".vim")
        .join("pack")
        .join("plugins")
        .join("start");
    utils::create_dir(&editor_plugins_path);
    let editor_configuration_path = home_path.join(".vimrc");
    utils::create_file(&editor_configuration_path);
    // Same emptiness check as for the repos above: the plugin directory was
    // just created, so only treat the plugins as installed when the
    // directory already has entries.
    let plugins_present = fs::read_dir(&editor_plugins_path)
        .map(|mut entries| entries.next().is_some())
        .unwrap_or(false);
    if plugins_present {
        println!("The editor plugins are already installed.");
    } else {
        utils::clone_repo(&editor_plugins_path, "https://github.com/tpope/vim-sensible");
        utils::clone_repo(&editor_plugins_path, "https://github.com/tpope/vim-sleuth");
        utils::clone_repo(&editor_plugins_path, "https://github.com/sheerun/vim-polyglot");
        utils::clone_repo(&editor_plugins_path, "https://github.com/octref/RootIgnore");
        utils::clone_repo(&editor_plugins_path, "https://github.com/dense-analysis/ale");
        utils::clone_repo(&editor_plugins_path, "https://github.com/arcticicestudio/nord-vim");
        println!("Adding editor configuration.");
        match fs::write(&editor_configuration_path, "set grepprg=rg\\ --vimgrep\nset grepformat=%f:%l:%c:%m\ncolorscheme nord") {
            Ok(_) => (),
            Err(error) => panic!("There was a problem: {:?}", error),
        }
    }
    // Application programming (JavaScript)
    utils::install_package("node");
    // Systems programming (Rust)
    utils::install_package("rustup-init");
    // `which rustup` prints nothing when rustup is absent, so an empty
    // stdout means the toolchain still needs to be initialized.
    let rustup_path_check = match Command::new("which").arg("rustup").output() {
        Ok(output) => output.stdout,
        Err(error) => panic!("There was a problem: {:?}", error),
    };
    if rustup_path_check.is_empty() {
        match Command::new("rustup-init").output() {
            Ok(_) => (),
            Err(error) => match error.kind() {
                ErrorKind::NotFound => panic!("The rustup-init command is missing."),
                other_error => panic!("There was a problem: {:?}", other_error),
            },
        }
    } else {
        println!("The rustup-init has already been run.")
    }
}
/// Upgrades everything `setup` installed: homebrew packages, every vim
/// plugin repo, and the Rust toolchain.
pub fn upgrade() {
    // Environment (Unix)
    println!("Upgrading package manager.");
    if let Err(error) = Command::new("brew").arg("update").output() {
        panic!("There was a problem: {:?}", error);
    }
    utils::upgrade_package("ripgrep");
    // Version control (Git)
    utils::upgrade_package("git");
    // Editor (Vim)
    utils::upgrade_package("vim");
    println!("Upgrading editor plugins.");
    let home_path = dirs::home_dir()
        .unwrap_or_else(|| panic!("Cannot find the home directory."));
    let editor_plugins_path = home_path
        .join(".vim")
        .join("pack")
        .join("plugins")
        .join("start");
    // Run `git pull` inside every plugin checkout.
    let plugin_entries = match fs::read_dir(&editor_plugins_path) {
        Ok(entries) => entries,
        Err(error) => panic!("There was a problem: {:?}", error),
    };
    for entry in plugin_entries {
        let plugin_dir = match entry {
            Ok(entry) => entry.path(),
            Err(error) => panic!("There was a problem: {:?}", error),
        };
        if let Err(error) = Command::new("git")
            .current_dir(&plugin_dir)
            .arg("pull")
            .output()
        {
            panic!("There was a problem: {:?}", error);
        }
    }
    // Application programming (JavaScript)
    utils::upgrade_package("node");
    // Systems programming (Rust)
    utils::upgrade_package("rustup-init");
    println!("Upgrading systems programming toolchain.");
    if let Err(error) = Command::new("rustup").arg("update").output() {
        match error.kind() {
            ErrorKind::NotFound => panic!("The rustup command is missing."),
            other_error => panic!("There was a problem: {:?}", other_error),
        }
    }
}
/// Finishes a work session: cleans package-manager caches, verifies every
/// repo under ~/repos is committed and pushed (panicking otherwise), and
/// kills editor/node/rls processes.
pub fn end() {
    // Environment (Unix)
    println!("Removing package manager artifacts.");
    if let Err(error) = Command::new("brew").arg("cleanup").output() {
        panic!("There was a problem: {:?}", error);
    }
    // Version control (Git)
    println!("Making sure all repos are clean.");
    let home_path = dirs::home_dir()
        .unwrap_or_else(|| panic!("Cannot find the home directory."));
    let repos_path = home_path.join("repos");
    let repo_entries = match fs::read_dir(&repos_path) {
        Ok(entries) => entries,
        Err(error) => panic!("There was a problem: {:?}", error),
    };
    let mut all_repos_clean = true;
    for entry in repo_entries {
        let repo_path = match &entry {
            Ok(entry) => entry.path(),
            Err(error) => panic!("There was a problem: {:?}", error),
        };
        // Uncommitted changes show up in porcelain status output.
        let status_check = match Command::new("git")
            .current_dir(&repo_path)
            .arg("status")
            .arg("--porcelain")
            .output()
        {
            Ok(output) => output.stdout,
            Err(error) => panic!("There was a problem: {:?}", error),
        };
        // Commits not yet on the upstream branch show up in `log @{u}..`.
        let unpushed_check = match Command::new("git")
            .current_dir(&repo_path)
            .arg("log")
            .arg("@{u}..")
            .output()
        {
            Ok(output) => output.stdout,
            Err(error) => panic!("There was a problem: {:?}", error),
        };
        if !status_check.is_empty() || !unpushed_check.is_empty() {
            all_repos_clean = false;
            eprintln!("A dirty repo was found: {}", &repo_path.display());
        }
    }
    if !all_repos_clean {
        panic!("Repos are dirty.");
    }
    // Editor (Vim)
    println!("Quitting editor processes.");
    if let Err(error) = Command::new("killall").arg("vim").output() {
        panic!("There was a problem: {:?}", error);
    }
    // Application programming (JavaScript)
    println!("Quitting application programming processes.");
    if let Err(error) = Command::new("killall").arg("node").output() {
        panic!("There was a problem: {:?}", error);
    }
    // Systems programming (Rust)
    println!("Quitting systems programming processes.");
    if let Err(error) = Command::new("killall").arg("rls").output() {
        panic!("There was a problem: {:?}", error);
    }
}
|
use std::io;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicUsize, Ordering};
use futures::{future, Future, BoxFuture, Stream};
use tokio_proto::streaming::{Message, Body};
use tokio_service::Service;
use bytes::{BufMut, Bytes, BytesMut};
use admin;
use codec::PacketHeader;
use job::Job;
use packet::PacketMagic;
use queues::{HandleJobStorage, JobQueuePriority, SharedJobStorage};
use worker::{SharedWorkers, Worker, Wake};
use constants::*;
// Packet body as it flows through the streaming protocol: BytesMut chunks.
pub type GearmanBody = Body<BytesMut, io::Error>;
// A decoded packet header plus its (possibly streamed) body.
pub type GearmanMessage = Message<PacketHeader, GearmanBody>;

/// Builds a RES-magic response message of the given packet type, with
/// `psize` derived from the payload length.
fn new_res(ptype: u32, data: BytesMut) -> GearmanMessage {
    Message::WithBody(PacketHeader {
                          magic: PacketMagic::RES,
                          ptype: ptype,
                          psize: data.len() as u32,
                      },
                      Body::from(data))
}
/// Per-connection service state for the gearman server.
pub struct GearmanService {
    // Unique id of this client connection.
    pub conn_id: usize,
    // Server-wide shared job queues.
    pub queues: SharedJobStorage,
    // Registry of all workers, used for wakeups and sleeping.
    pub workers: SharedWorkers,
    // State of the worker speaking on this connection.
    pub worker: Arc<Mutex<Worker>>,
    // Monotonic counter used to mint job handles (H:NNNNNNNNNN).
    pub job_count: Arc<AtomicUsize>,
}
/// Splits off and returns the next NUL-terminated field from `buf`,
/// consuming the terminator as well. Errors when no NUL byte remains.
fn next_field(buf: &mut BytesMut) -> Result<Bytes, io::Error> {
    match buf[..].iter().position(|b| *b == b'\0') {
        Some(null_pos) => {
            let value = buf.split_to(null_pos);
            // Discard the NUL separator itself.
            buf.split_to(1);
            Ok(value.freeze())
        }
        None => Err(io::Error::new(io::ErrorKind::Other, "Can't find null")),
    }
}
impl GearmanService {
    /// Things that don't require a body should use this
    ///
    /// Builds the full response for body-less administrative packets
    /// (ADMIN_VERSION, ADMIN_STATUS); panics on any other ptype, so callers
    /// must pre-filter.
    fn response_from_header(&self,
                            header: &PacketHeader)
                            -> Message<PacketHeader, Body<BytesMut, io::Error>> {
        match header.ptype {
            ADMIN_VERSION => {
                // Static version string; psize must match the body length.
                let resp_str = "OK some-rustygear-version\n";
                let mut resp_body = BytesMut::with_capacity(resp_str.len());
                resp_body.put(&resp_str[..]);
                let resp_body = Body::from(resp_body);
                Message::WithBody(PacketHeader {
                                      magic: PacketMagic::TEXT,
                                      ptype: header.ptype,
                                      psize: resp_str.len() as u32,
                                  },
                                  resp_body)
            }
            ADMIN_STATUS => admin::admin_command_status(self.queues.clone(), self.workers.clone()),
            _ => {
                panic!("response_from_header called with invalid ptype: {}",
                       header.ptype)
            }
        }
    }

    /// Creates the per-connection service around the server-wide shared
    /// state (queues, worker registry, job counter).
    pub fn new(conn_id: usize,
               queues: SharedJobStorage,
               workers: SharedWorkers,
               job_count: Arc<AtomicUsize>)
               -> GearmanService {
        GearmanService {
            conn_id: conn_id,
            queues: queues.clone(),
            worker: Arc::new(Mutex::new((Worker::new()))),
            workers: workers.clone(),
            job_count: job_count.clone(),
        }
    }

    /// An empty TEXT/ADMIN_RESPONSE message, used where the protocol expects
    /// no reply payload.
    fn no_response() -> GearmanMessage {
        Message::WithBody(PacketHeader {
                              magic: PacketMagic::TEXT,
                              ptype: ADMIN_RESPONSE,
                              psize: 0,
                          },
                          Body::from(BytesMut::new()))
    }

    /// CAN_DO: registers the body (a function name) as something this
    /// connection's worker can run, then wakes the worker for queued jobs.
    fn handle_can_do(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        let workers = self.workers.clone();
        let conn_id = self.conn_id;
        trace!("handle_can_do");
        // The entire packet body is the function name.
        body.concat2()
            .and_then(move |fname| {
                let fname = fname.freeze();
                debug!("CAN_DO fname = {:?}", fname);
                let mut worker = worker.lock().unwrap();
                worker.can_do(fname);
                workers.clone().wakeup(&mut worker, conn_id);
                future::finished(Self::no_response())
            })
            .boxed()
    }

    /// CANT_DO: unregisters the body (a function name) for this worker.
    fn handle_cant_do(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        body.concat2()
            .and_then(move |fname| {
                let fname = fname.freeze();
                debug!("CANT_DO fname = {:?}", fname);
                let mut worker = worker.lock().unwrap();
                worker.cant_do(&fname);
                future::finished(Self::no_response())
            })
            .boxed()
    }

    /// GRAB_JOB_ALL: tries to assign a queued job to this worker. Replies
    /// JOB_ASSIGN_ALL with handle\0fname\0unique\0reducer\0data, or NO_JOB
    /// when nothing is queued for the worker's functions.
    fn handle_grab_job_all(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let worker = self.worker.clone();
        trace!("handle_grab_job_all");
        body.concat2()
            .and_then(move |_| {
                let mut worker = worker.lock().unwrap();
                // Reborrow so `get_job` can take `&mut Worker` through the guard.
                let ref mut worker = worker;
                if queues.get_job(worker) {
                    match worker.job() {
                        Some(ref j) => {
                            // 4 = the four NUL separators appended below.
                            let mut data = BytesMut::with_capacity(4 + j.handle.len() +
                                                                   j.fname.len() +
                                                                   j.unique.len() +
                                                                   j.data.len());
                            data.extend(&j.handle);
                            data.put_slice(b"\0");
                            data.extend(&j.fname);
                            data.put_slice(b"\0");
                            data.extend(&j.unique);
                            data.put_slice(b"\0");
                            // reducer not implemented
                            data.put_slice(b"\0");
                            data.extend(&j.data);
                            return future::finished(new_res(JOB_ASSIGN_ALL, data)).boxed();
                        }
                        None => {}
                    }
                };
                future::finished(new_res(NO_JOB, BytesMut::new())).boxed()
            })
            .boxed()
    }

    /// PRE_SLEEP: parks this worker in the registry until work arrives.
    fn handle_pre_sleep(&self) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        let ref mut w = worker.lock().unwrap();
        self.workers.clone().sleep(w, self.conn_id);
        future::finished(Self::no_response()).boxed()
    }

    /// SUBMIT_JOB*: the body is fname\0unique\0data. Coalesces onto an
    /// existing job with the same `unique` when possible; otherwise mints a
    /// new H:NNNNNNNNNN handle and enqueues the job at `priority`. `wait`
    /// (set by the *_BG packet arms) attaches the submitting conn_id to the
    /// job. Always replies JOB_CREATED with the handle.
    fn handle_submit_job(&self,
                         priority: JobQueuePriority,
                         wait: bool,
                         body: GearmanBody)
                         -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let conn_id = match wait {
            true => Some(self.conn_id),
            false => None,
        };
        let mut workers = self.workers.clone();
        let job_count = self.job_count.clone();
        body.concat2()
            .and_then(move |mut fields| {
                let fname = next_field(&mut fields).unwrap();
                let unique = next_field(&mut fields).unwrap();
                // Whatever remains after fname and unique is the opaque payload.
                let data = fields.freeze();
                let mut add = false;
                let handle = match queues.coalesce_unique(&unique, conn_id) {
                    Some(handle) => handle,
                    None => {
                        // New unique: wake any worker registered for fname.
                        workers.queue_wake(&fname);
                        // H:091234567890
                        let mut handle = BytesMut::with_capacity(12);
                        let job_num = job_count.fetch_add(1, Ordering::Relaxed);
                        debug!("job_num = {}", job_num);
                        handle.extend(format!("H:{:010}", job_num).as_bytes());
                        add = true;
                        handle.freeze()
                    }
                };
                if add {
                    let job = Arc::new(Job::new(fname, unique, data, handle.clone()));
                    info!("Created job {:?}", job);
                    queues.add_job(job.clone(), priority, conn_id);
                }
                future::finished(new_res(JOB_CREATED, BytesMut::from(handle)))
            })
            .boxed()
    }
}
impl Service for GearmanService {
    type Request = GearmanMessage;
    type Response = GearmanMessage;
    type Error = io::Error;
    type Future = BoxFuture<Self::Response, Self::Error>;

    /// Dispatches one decoded packet to the matching handler; unknown or
    /// unimplemented packet types resolve to an error future.
    fn call(&self, req: Self::Request) -> Self::Future {
        debug!("Got a req {:?}", req);
        match req {
            // Admin text commands arrive without a body.
            Message::WithoutBody(header) => {
                match header.ptype {
                    ADMIN_VERSION | ADMIN_STATUS => {
                        future::ok(self.response_from_header(&header)).boxed()
                    }
                    _ => {
                        future::err(io::Error::new(io::ErrorKind::Other,
                                                   format!("Bodyless packet type = {}",
                                                           header.ptype)))
                            .boxed()
                    }
                }
            }
            Message::WithBody(header, body) => {
                match header.ptype {
                    SUBMIT_JOB => self.handle_submit_job(PRIORITY_NORMAL, false, body),
                    SUBMIT_JOB_HIGH => self.handle_submit_job(PRIORITY_HIGH, false, body),
                    SUBMIT_JOB_LOW => self.handle_submit_job(PRIORITY_LOW, false, body),
                    SUBMIT_JOB_BG => self.handle_submit_job(PRIORITY_NORMAL, true, body),
                    SUBMIT_JOB_HIGH_BG => self.handle_submit_job(PRIORITY_HIGH, true, body),
                    SUBMIT_JOB_LOW_BG => self.handle_submit_job(PRIORITY_LOW, true, body),
                    PRE_SLEEP => self.handle_pre_sleep(),
                    CAN_DO => self.handle_can_do(body),
                    CANT_DO => self.handle_cant_do(body),
                    // Not yet implemented:
                    /*
                    GRAB_JOB => self.handle_grab_job(),
                    GRAB_JOB_UNIQ => self.handle_grab_job_uniq(),*/
                    GRAB_JOB_ALL => self.handle_grab_job_all(body),/*
                    WORK_COMPLETE => self.handle_work_complete(),
                    WORK_STATUS | WORK_DATA | WORK_WARNING => self.handle_work_update(),
                    ECHO_REQ => self.handle_echo_req(&req),*/
                    _ => {
                        error!("Unimplemented: {:?} processing packet", header);
                        future::err(io::Error::new(io::ErrorKind::Other,
                                                   format!("Invalid packet type {}", header.ptype)))
                            .boxed()
                    }
                }
            }
        }
    }
}
Finish the GRAB_JOB handlers
use std::io;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicUsize, Ordering};
use futures::{future, Future, BoxFuture, Stream};
use tokio_proto::streaming::{Message, Body};
use tokio_service::Service;
use bytes::{BufMut, Bytes, BytesMut};
use admin;
use codec::PacketHeader;
use job::Job;
use packet::PacketMagic;
use queues::{HandleJobStorage, JobQueuePriority, SharedJobStorage};
use worker::{SharedWorkers, Worker, Wake};
use constants::*;
// Packet body as it flows through the streaming protocol: BytesMut chunks.
pub type GearmanBody = Body<BytesMut, io::Error>;
// A decoded packet header plus its (possibly streamed) body.
pub type GearmanMessage = Message<PacketHeader, GearmanBody>;

/// Builds a RES-magic response message of the given packet type, with
/// `psize` derived from the payload length.
fn new_res(ptype: u32, data: BytesMut) -> GearmanMessage {
    Message::WithBody(PacketHeader {
                          magic: PacketMagic::RES,
                          ptype: ptype,
                          psize: data.len() as u32,
                      },
                      Body::from(data))
}
/// Per-connection service state for the gearman server.
pub struct GearmanService {
    // Unique id of this client connection.
    pub conn_id: usize,
    // Server-wide shared job queues.
    pub queues: SharedJobStorage,
    // Registry of all workers, used for wakeups and sleeping.
    pub workers: SharedWorkers,
    // State of the worker speaking on this connection.
    pub worker: Arc<Mutex<Worker>>,
    // Monotonic counter used to mint job handles (H:NNNNNNNNNN).
    pub job_count: Arc<AtomicUsize>,
}
/// Splits off and returns the next NUL-terminated field from `buf`,
/// consuming the terminator as well. Errors when no NUL byte remains.
fn next_field(buf: &mut BytesMut) -> Result<Bytes, io::Error> {
    match buf[..].iter().position(|b| *b == b'\0') {
        Some(null_pos) => {
            let value = buf.split_to(null_pos);
            // Discard the NUL separator itself.
            buf.split_to(1);
            Ok(value.freeze())
        }
        None => Err(io::Error::new(io::ErrorKind::Other, "Can't find null")),
    }
}
impl GearmanService {
    /// Things that don't require a body should use this
    ///
    /// Builds the full response for body-less administrative packets
    /// (ADMIN_VERSION, ADMIN_STATUS); panics on any other ptype, so callers
    /// must pre-filter.
    fn response_from_header(&self,
                            header: &PacketHeader)
                            -> Message<PacketHeader, Body<BytesMut, io::Error>> {
        match header.ptype {
            ADMIN_VERSION => {
                // Static version string; psize must match the body length.
                let resp_str = "OK some-rustygear-version\n";
                let mut resp_body = BytesMut::with_capacity(resp_str.len());
                resp_body.put(&resp_str[..]);
                let resp_body = Body::from(resp_body);
                Message::WithBody(PacketHeader {
                                      magic: PacketMagic::TEXT,
                                      ptype: header.ptype,
                                      psize: resp_str.len() as u32,
                                  },
                                  resp_body)
            }
            ADMIN_STATUS => admin::admin_command_status(self.queues.clone(), self.workers.clone()),
            _ => {
                panic!("response_from_header called with invalid ptype: {}",
                       header.ptype)
            }
        }
    }

    /// Creates the per-connection service around the server-wide shared
    /// state (queues, worker registry, job counter).
    pub fn new(conn_id: usize,
               queues: SharedJobStorage,
               workers: SharedWorkers,
               job_count: Arc<AtomicUsize>)
               -> GearmanService {
        GearmanService {
            conn_id: conn_id,
            queues: queues.clone(),
            worker: Arc::new(Mutex::new((Worker::new()))),
            workers: workers.clone(),
            job_count: job_count.clone(),
        }
    }

    /// An empty TEXT/ADMIN_RESPONSE message, used where the protocol expects
    /// no reply payload.
    fn no_response() -> GearmanMessage {
        Message::WithBody(PacketHeader {
                              magic: PacketMagic::TEXT,
                              ptype: ADMIN_RESPONSE,
                              psize: 0,
                          },
                          Body::from(BytesMut::new()))
    }

    /// CAN_DO: registers the body (a function name) as something this
    /// connection's worker can run, then wakes the worker for queued jobs.
    fn handle_can_do(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        let workers = self.workers.clone();
        let conn_id = self.conn_id;
        trace!("handle_can_do");
        // The entire packet body is the function name.
        body.concat2()
            .and_then(move |fname| {
                let fname = fname.freeze();
                debug!("CAN_DO fname = {:?}", fname);
                let mut worker = worker.lock().unwrap();
                worker.can_do(fname);
                workers.clone().wakeup(&mut worker, conn_id);
                future::finished(Self::no_response())
            })
            .boxed()
    }

    /// CANT_DO: unregisters the body (a function name) for this worker.
    fn handle_cant_do(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        body.concat2()
            .and_then(move |fname| {
                let fname = fname.freeze();
                debug!("CANT_DO fname = {:?}", fname);
                let mut worker = worker.lock().unwrap();
                worker.cant_do(&fname);
                future::finished(Self::no_response())
            })
            .boxed()
    }

    /// GRAB_JOB_ALL: tries to assign a queued job to this worker. Replies
    /// JOB_ASSIGN_ALL with handle\0fname\0unique\0reducer\0data, or NO_JOB
    /// when nothing is queued for the worker's functions.
    fn handle_grab_job_all(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let worker = self.worker.clone();
        trace!("handle_grab_job_all");
        body.concat2()
            .and_then(move |_| {
                let mut worker = worker.lock().unwrap();
                // Reborrow so `get_job` can take `&mut Worker` through the guard.
                let ref mut worker = worker;
                if queues.get_job(worker) {
                    match worker.job() {
                        Some(ref j) => {
                            // 4 = the four NUL separators appended below.
                            let mut data = BytesMut::with_capacity(4 + j.handle.len() +
                                                                   j.fname.len() +
                                                                   j.unique.len() +
                                                                   j.data.len());
                            data.extend(&j.handle);
                            data.put_slice(b"\0");
                            data.extend(&j.fname);
                            data.put_slice(b"\0");
                            data.extend(&j.unique);
                            data.put_slice(b"\0");
                            // reducer not implemented
                            data.put_slice(b"\0");
                            data.extend(&j.data);
                            return future::finished(new_res(JOB_ASSIGN_ALL, data)).boxed();
                        }
                        None => {}
                    }
                };
                future::finished(new_res(NO_JOB, BytesMut::new())).boxed()
            })
            .boxed()
    }

    /// GRAB_JOB_UNIQ: like GRAB_JOB_ALL but without the reducer field.
    /// Replies JOB_ASSIGN_UNIQ with handle\0fname\0unique\0data, or NO_JOB.
    fn handle_grab_job_uniq(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let worker = self.worker.clone();
        trace!("handle_grab_job_uniq");
        body.concat2()
            .and_then(move |_| {
                let mut worker = worker.lock().unwrap();
                // Reborrow so `get_job` can take `&mut Worker` through the guard.
                let ref mut worker = worker;
                if queues.get_job(worker) {
                    match worker.job() {
                        Some(ref j) => {
                            // 3 = the three NUL separators appended below.
                            let mut data = BytesMut::with_capacity(3 + j.handle.len() +
                                                                   j.fname.len() +
                                                                   j.unique.len() +
                                                                   j.data.len());
                            data.extend(&j.handle);
                            data.put_slice(b"\0");
                            data.extend(&j.fname);
                            data.put_slice(b"\0");
                            data.extend(&j.unique);
                            data.put_slice(b"\0");
                            data.extend(&j.data);
                            return future::finished(new_res(JOB_ASSIGN_UNIQ, data)).boxed();
                        }
                        None => {}
                    }
                };
                future::finished(new_res(NO_JOB, BytesMut::new())).boxed()
            })
            .boxed()
    }

    /// GRAB_JOB: the minimal assignment reply. Replies JOB_ASSIGN with
    /// handle\0fname\0data, or NO_JOB.
    fn handle_grab_job(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let worker = self.worker.clone();
        trace!("handle_grab_job");
        body.concat2()
            .and_then(move |_| {
                let mut worker = worker.lock().unwrap();
                // Reborrow so `get_job` can take `&mut Worker` through the guard.
                let ref mut worker = worker;
                if queues.get_job(worker) {
                    match worker.job() {
                        Some(ref j) => {
                            // 2 = the two NUL separators appended below.
                            let mut data = BytesMut::with_capacity(2 + j.handle.len() +
                                                                   j.fname.len() +
                                                                   j.data.len());
                            data.extend(&j.handle);
                            data.put_slice(b"\0");
                            data.extend(&j.fname);
                            data.put_slice(b"\0");
                            data.extend(&j.data);
                            return future::finished(new_res(JOB_ASSIGN, data)).boxed();
                        }
                        None => {}
                    }
                };
                future::finished(new_res(NO_JOB, BytesMut::new())).boxed()
            })
            .boxed()
    }

    /// PRE_SLEEP: parks this worker in the registry until work arrives.
    fn handle_pre_sleep(&self) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        let ref mut w = worker.lock().unwrap();
        self.workers.clone().sleep(w, self.conn_id);
        future::finished(Self::no_response()).boxed()
    }

    /// SUBMIT_JOB*: the body is fname\0unique\0data. Coalesces onto an
    /// existing job with the same `unique` when possible; otherwise mints a
    /// new H:NNNNNNNNNN handle and enqueues the job at `priority`. `wait`
    /// (set by the *_BG packet arms) attaches the submitting conn_id to the
    /// job. Always replies JOB_CREATED with the handle.
    fn handle_submit_job(&self,
                         priority: JobQueuePriority,
                         wait: bool,
                         body: GearmanBody)
                         -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let conn_id = match wait {
            true => Some(self.conn_id),
            false => None,
        };
        let mut workers = self.workers.clone();
        let job_count = self.job_count.clone();
        body.concat2()
            .and_then(move |mut fields| {
                let fname = next_field(&mut fields).unwrap();
                let unique = next_field(&mut fields).unwrap();
                // Whatever remains after fname and unique is the opaque payload.
                let data = fields.freeze();
                let mut add = false;
                let handle = match queues.coalesce_unique(&unique, conn_id) {
                    Some(handle) => handle,
                    None => {
                        // New unique: wake any worker registered for fname.
                        workers.queue_wake(&fname);
                        // H:091234567890
                        let mut handle = BytesMut::with_capacity(12);
                        let job_num = job_count.fetch_add(1, Ordering::Relaxed);
                        debug!("job_num = {}", job_num);
                        handle.extend(format!("H:{:010}", job_num).as_bytes());
                        add = true;
                        handle.freeze()
                    }
                };
                if add {
                    let job = Arc::new(Job::new(fname, unique, data, handle.clone()));
                    info!("Created job {:?}", job);
                    queues.add_job(job.clone(), priority, conn_id);
                }
                future::finished(new_res(JOB_CREATED, BytesMut::from(handle)))
            })
            .boxed()
    }
}
impl Service for GearmanService {
    type Request = GearmanMessage;
    type Response = GearmanMessage;
    type Error = io::Error;
    type Future = BoxFuture<Self::Response, Self::Error>;

    /// Dispatches one decoded packet to the matching handler; unknown or
    /// unimplemented packet types resolve to an error future.
    fn call(&self, req: Self::Request) -> Self::Future {
        debug!("Got a req {:?}", req);
        match req {
            // Admin text commands arrive without a body.
            Message::WithoutBody(header) => {
                match header.ptype {
                    ADMIN_VERSION | ADMIN_STATUS => {
                        future::ok(self.response_from_header(&header)).boxed()
                    }
                    _ => {
                        future::err(io::Error::new(io::ErrorKind::Other,
                                                   format!("Bodyless packet type = {}",
                                                           header.ptype)))
                            .boxed()
                    }
                }
            }
            Message::WithBody(header, body) => {
                match header.ptype {
                    SUBMIT_JOB => self.handle_submit_job(PRIORITY_NORMAL, false, body),
                    SUBMIT_JOB_HIGH => self.handle_submit_job(PRIORITY_HIGH, false, body),
                    SUBMIT_JOB_LOW => self.handle_submit_job(PRIORITY_LOW, false, body),
                    SUBMIT_JOB_BG => self.handle_submit_job(PRIORITY_NORMAL, true, body),
                    SUBMIT_JOB_HIGH_BG => self.handle_submit_job(PRIORITY_HIGH, true, body),
                    SUBMIT_JOB_LOW_BG => self.handle_submit_job(PRIORITY_LOW, true, body),
                    PRE_SLEEP => self.handle_pre_sleep(),
                    CAN_DO => self.handle_can_do(body),
                    CANT_DO => self.handle_cant_do(body),
                    GRAB_JOB => self.handle_grab_job(body),
                    GRAB_JOB_UNIQ => self.handle_grab_job_uniq(body),
                    GRAB_JOB_ALL => self.handle_grab_job_all(body),
                    // Not yet implemented:
                    /*
                    WORK_COMPLETE => self.handle_work_complete(),
                    WORK_STATUS | WORK_DATA | WORK_WARNING => self.handle_work_update(),
                    ECHO_REQ => self.handle_echo_req(&req),*/
                    _ => {
                        error!("Unimplemented: {:?} processing packet", header);
                        future::err(io::Error::new(io::ErrorKind::Other,
                                                   format!("Invalid packet type {}", header.ptype)))
                            .boxed()
                    }
                }
            }
        }
    }
}
|
use std::collections::HashMap;
use std::io;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicUsize, Ordering};
use futures::{future, Future, BoxFuture, Stream, Sink};
use futures::sync::mpsc::{channel, Sender};
use tokio_core::reactor::Remote;
use tokio_proto::streaming::{Message, Body};
use tokio_service::Service;
use bytes::{BufMut, Bytes, BytesMut};
use admin;
use codec::PacketHeader;
use job::Job;
use packet::PacketMagic;
use queues::{HandleJobStorage, JobQueuePriority, SharedJobStorage};
use worker::{SharedWorkers, Worker, Wake};
use constants::*;
/// Streaming payload of a gearman packet.
pub type GearmanBody = Body<Bytes, io::Error>;
/// A framed gearman packet: parsed header plus streaming body.
pub type GearmanMessage = Message<PacketHeader, GearmanBody>;

/// How many wake-up signals may queue per sleeping connection before
/// senders would block.
const WAKE_BACKLOG_SIZE: usize = 8;
/// Build a RES-magic response message of type `ptype` carrying `data` as
/// its complete body; `psize` is taken from the payload length.
fn new_res(ptype: u32, data: Bytes) -> GearmanMessage {
    let header = PacketHeader {
        magic: PacketMagic::RES,
        ptype: ptype,
        psize: data.len() as u32,
    };
    Message::WithBody(header, Body::from(data))
}
/// Map of job handle -> channels of clients waiting for that job's
/// response body (used for submitters that wait on completion).
type JobBodySenders = Arc<Mutex<HashMap<Bytes, Vec<Sender<Result<Bytes, io::Error>>>>>>;

/// Per-connection protocol service. Server-wide state is shared between
/// connections through the Arc handles below.
pub struct GearmanService {
    /// Unique id of this client/worker connection.
    pub conn_id: usize,
    /// Server-wide job queues.
    pub queues: SharedJobStorage,
    /// Registry of all workers; used for sleep/wakeup bookkeeping.
    pub workers: SharedWorkers,
    /// State of the worker speaking on this connection.
    pub worker: Arc<Mutex<Worker>>,
    /// Monotonic counter used to mint job handles ("H:<n>").
    pub job_count: Arc<AtomicUsize>,
    /// Wake-up channels keyed by conn_id for sleeping connections.
    pub connections: Arc<Mutex<HashMap<usize, Sender<()>>>>,
    // Channels of clients waiting on job response bodies.
    job_body_senders: JobBodySenders,
    // Reactor handle used to spawn send tasks off this future.
    remote: Remote,
}
/// Split the next NUL-terminated field off the front of `buf`.
///
/// On success the returned `Bytes` holds the field without the terminator
/// and `buf` is advanced past it; errors when no NUL byte is present.
fn next_field(buf: &mut Bytes) -> Result<Bytes, io::Error> {
    let null_pos = match buf[..].iter().position(|b| *b == b'\0') {
        Some(pos) => pos,
        None => return Err(io::Error::new(io::ErrorKind::Other, "Can't find null")),
    };
    let field = buf.split_to(null_pos);
    buf.split_to(1); // discard the NUL terminator itself
    Ok(field)
}
impl GearmanService {
    /// Things that don't require a body should use this
    ///
    /// Builds a full response for header-only admin (text protocol) packets.
    /// Panics on any other ptype — callers must pre-filter.
    fn response_from_header(&self,
                            header: &PacketHeader)
                            -> Message<PacketHeader, Body<Bytes, io::Error>> {
        match header.ptype {
            ADMIN_VERSION => {
                let resp_str = "OK some-rustygear-version\n";
                let mut resp_body = BytesMut::with_capacity(resp_str.len());
                resp_body.put(&resp_str[..]);
                let resp_body = Body::from(resp_body.freeze());
                Message::WithBody(PacketHeader {
                                      magic: PacketMagic::TEXT,
                                      ptype: header.ptype,
                                      psize: resp_str.len() as u32,
                                  },
                                  resp_body)
            }
            ADMIN_STATUS => admin::admin_command_status(self.queues.clone(), self.workers.clone()),
            _ => {
                panic!("response_from_header called with invalid ptype: {}",
                       header.ptype)
            }
        }
    }

    /// Construct the per-connection service over the shared server state.
    pub fn new(conn_id: usize,
               queues: SharedJobStorage,
               workers: SharedWorkers,
               job_count: Arc<AtomicUsize>,
               connections: Arc<Mutex<HashMap<usize, Sender<()>>>>,
               job_body_senders: JobBodySenders,
               remote: Remote)
               -> GearmanService {
        GearmanService {
            conn_id: conn_id,
            queues: queues,
            worker: Arc::new(Mutex::new((Worker::new()))),
            workers: workers,
            job_count: job_count,
            connections: connections,
            job_body_senders: job_body_senders,
            remote: remote,
        }
    }

    /// Zero-length ADMIN_RESPONSE used when a packet needs no reply payload.
    fn no_response() -> GearmanMessage {
        Message::WithBody(PacketHeader {
                              magic: PacketMagic::TEXT,
                              ptype: ADMIN_RESPONSE,
                              psize: 0,
                          },
                          Body::from(Bytes::new()))
    }

    /// CAN_DO: register the body (a function name) as handled by this
    /// worker, then run wakeup bookkeeping in case work is already queued.
    fn handle_can_do(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        let workers = self.workers.clone();
        let conn_id = self.conn_id;
        trace!("handle_can_do");
        body.concat2()
            .and_then(move |fname| {
                debug!("CAN_DO fname = {:?}", fname);
                let mut worker = worker.lock().unwrap();
                worker.can_do(fname);
                workers.clone().wakeup(&mut worker, conn_id);
                future::finished(Self::no_response())
            })
            .boxed()
    }

    /// CANT_DO: unregister the function named by the body for this worker.
    fn handle_cant_do(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        body.concat2()
            .and_then(move |fname| {
                debug!("CANT_DO fname = {:?}", fname);
                let mut worker = worker.lock().unwrap();
                worker.cant_do(&fname);
                future::finished(Self::no_response())
            })
            .boxed()
    }

    /// GRAB_JOB_ALL: try to assign a queued job; reply JOB_ASSIGN_ALL with
    /// handle\0fname\0unique\0reducer\0data (reducer always empty), or NO_JOB.
    fn handle_grab_job_all(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let worker = self.worker.clone();
        trace!("handle_grab_job_all");
        body.concat2()
            .and_then(move |_| {
                let mut worker = worker.lock().unwrap();
                let ref mut worker = worker;
                if queues.get_job(worker) {
                    match worker.job() {
                        Some(ref j) => {
                            // 4 = number of NUL separators in the payload.
                            let mut data = BytesMut::with_capacity(4 + j.handle.len() +
                                                                   j.fname.len() +
                                                                   j.unique.len() +
                                                                   j.data.len());
                            data.extend(&j.handle);
                            data.put_u8(b'\0');
                            data.extend(&j.fname);
                            data.put_u8(b'\0');
                            data.extend(&j.unique);
                            data.put_u8(b'\0');
                            // reducer not implemented
                            data.put_u8(b'\0');
                            data.extend(&j.data);
                            return future::finished(new_res(JOB_ASSIGN_ALL, data.freeze())).boxed();
                        }
                        None => {}
                    }
                };
                future::finished(new_res(NO_JOB, Bytes::new())).boxed()
            })
            .boxed()
    }

    /// GRAB_JOB_UNIQ: like handle_grab_job_all but without the reducer
    /// field — payload is handle\0fname\0unique\0data.
    fn handle_grab_job_uniq(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let worker = self.worker.clone();
        trace!("handle_grab_job_uniq");
        body.concat2()
            .and_then(move |_| {
                let mut worker = worker.lock().unwrap();
                let ref mut worker = worker;
                if queues.get_job(worker) {
                    match worker.job() {
                        Some(ref j) => {
                            let mut data = BytesMut::with_capacity(3 + j.handle.len() +
                                                                   j.fname.len() +
                                                                   j.unique.len() +
                                                                   j.data.len());
                            data.extend(&j.handle);
                            data.put_u8(b'\0');
                            data.extend(&j.fname);
                            data.put_u8(b'\0');
                            data.extend(&j.unique);
                            data.put_u8(b'\0');
                            data.extend(&j.data);
                            return future::finished(new_res(JOB_ASSIGN_UNIQ, data.freeze()))
                                .boxed();
                        }
                        None => {}
                    }
                };
                future::finished(new_res(NO_JOB, Bytes::new())).boxed()
            })
            .boxed()
    }

    /// GRAB_JOB: minimal assignment payload, handle\0fname\0data.
    fn handle_grab_job(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let worker = self.worker.clone();
        trace!("handle_grab_job");
        body.concat2()
            .and_then(move |_| {
                let mut worker = worker.lock().unwrap();
                let ref mut worker = worker;
                if queues.get_job(worker) {
                    match worker.job() {
                        Some(ref j) => {
                            let mut data = BytesMut::with_capacity(2 + j.handle.len() +
                                                                   j.fname.len() +
                                                                   j.data.len());
                            data.extend(&j.handle);
                            data.put_u8(b'\0');
                            data.extend(&j.fname);
                            data.put_u8(b'\0');
                            data.extend(&j.data);
                            return future::finished(new_res(JOB_ASSIGN, data.freeze())).boxed();
                        }
                        None => {}
                    }
                };
                future::finished(new_res(NO_JOB, Bytes::new())).boxed()
            })
            .boxed()
    }

    /// PRE_SLEEP: park this worker connection until there is work. The
    /// returned future resolves with a NOOP only after a wake-up signal
    /// arrives on the channel registered in `connections`.
    fn handle_pre_sleep(&self) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        let ref mut w = worker.lock().unwrap();
        self.workers.clone().sleep(w, self.conn_id);
        // When we get woke, send a NOOP
        let (tx, rx) = channel(WAKE_BACKLOG_SIZE);
        {
            let mut connections = self.connections.lock().unwrap();
            connections.insert(self.conn_id, tx);
        }
        let resp = new_res(NOOP, Bytes::new());
        // Only the first wake-up matters (take(1)).
        // If there are more, they are pointless until the NOOP is queued, and once it is queued,
        // the connections hashmap will have dropped the sender, and this future resolving
        // should drop the receiver and all of its backed up items.
        let connections = self.connections.clone();
        let conn_id = self.conn_id;
        rx.take(1)
            .for_each(move |_| {
                {
                    let mut connections = connections.lock().unwrap();
                    connections.remove(&conn_id);
                }
                Ok(())
            })
            .map_err(move |_| io::Error::new(io::ErrorKind::Other, "receiver error"))
            .map(move |_| resp)
            .boxed()
    }

    /// SUBMIT_JOB*: create (or coalesce by unique id) a job, wake any
    /// workers registered for its function, and answer JOB_CREATED whose
    /// streaming body starts with the handle. When `wait` is true the
    /// response body's sender is stored so a later WORK_COMPLETE can
    /// stream the result back to this client.
    fn handle_submit_job(&self,
                         priority: JobQueuePriority,
                         wait: bool,
                         body: GearmanBody)
                         -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let conn_id = match wait {
            true => Some(self.conn_id),
            false => None,
        };
        let mut workers = self.workers.clone();
        let job_count = self.job_count.clone();
        let connections = self.connections.clone();
        let remote = self.remote.clone();
        let (tx, response_body) = Body::pair();
        let job_body_senders = self.job_body_senders.clone();
        let ret = body.concat2()
            .and_then(move |mut fields| {
                // Packet layout: fname\0unique\0<opaque job data>.
                let fname = next_field(&mut fields).unwrap();
                let unique = next_field(&mut fields).unwrap();
                let mut add = false;
                let handle = match queues.coalesce_unique(&unique, conn_id) {
                    // An equivalent job already exists; reuse its handle.
                    Some(handle) => handle,
                    None => {
                        {
                            // Signal every sleeping worker registered for fname.
                            let mut connections = connections.lock().unwrap();
                            for wake in workers.queue_wake(&fname) {
                                match connections.get_mut(&wake) {
                                    None => {
                                        debug!("No connection found to wake up for conn_id = {}",
                                               wake);
                                    }
                                    Some(tx) => {
                                        let tx = tx.clone();
                                        // Send from a reactor task so we never block
                                        // while holding the connections lock.
                                        remote.spawn(move |handle| {
                                            handle.spawn(tx.send(()).then(|res| {
                                                match res {
                                                    Ok(_) => {}
                                                    Err(e) => error!("Send Error! {:?}", e),
                                                }
                                                Ok(())
                                            }));
                                            Ok(())
                                        });
                                    }
                                }
                            }
                        }
                        // Handles look like "H:" + zero-padded 10-digit counter
                        // (12 bytes total).
                        let mut handle = BytesMut::with_capacity(12);
                        let job_num = job_count.fetch_add(1, Ordering::Relaxed);
                        debug!("job_num = {}", job_num);
                        handle.extend(format!("H:{:010}", job_num).as_bytes());
                        add = true;
                        handle.freeze()
                    }
                };
                if add {
                    let job = Arc::new(Job::new(fname, unique, fields, handle.clone()));
                    info!("Created job {:?}", job);
                    queues.add_job(job.clone(), priority, conn_id);
                }
                // First body chunk of the JOB_CREATED response is the handle.
                tx.send(Ok(handle.clone())).then(move |tx| match tx {
                    Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
                    Ok(tx) => {
                        // If we don't store any senders, the sender will be dropped and the rx
                        // stream should end thus releasing the waiter immediately.
                        let psize = handle.len() as u32;
                        if wait {
                            let mut job_body_senders = job_body_senders.lock().unwrap();
                            job_body_senders.entry(handle).or_insert(Vec::new()).push(tx);
                        }
                        Ok(Message::WithBody(PacketHeader {
                                                 magic: PacketMagic::RES,
                                                 ptype: JOB_CREATED,
                                                 psize: psize,
                                             },
                                             response_body))
                    }
                })
            })
            .boxed();
        ret
    }
}
/// tokio-service entry point: dispatches each framed gearman packet to the
/// matching handler method.
impl Service for GearmanService {
    type Request = GearmanMessage;
    type Response = GearmanMessage;
    type Error = io::Error;
    type Future = BoxFuture<Self::Response, Self::Error>;
    fn call(&self, req: Self::Request) -> Self::Future {
        debug!("Got a req {:?}", req);
        match req {
            // Header-only packets: just the admin (text protocol) queries.
            Message::WithoutBody(header) => {
                match header.ptype {
                    ADMIN_VERSION | ADMIN_STATUS => {
                        future::ok(self.response_from_header(&header)).boxed()
                    }
                    _ => {
                        future::err(io::Error::new(io::ErrorKind::Other,
                                                   format!("Bodyless packet type = {}",
                                                           header.ptype)))
                            .boxed()
                    }
                }
            }
            Message::WithBody(header, body) => {
                match header.ptype {
                    // NOTE(review): the `wait` flag here is false for foreground
                    // submits and true for *_BG, which looks inverted relative to
                    // gearman semantics (foreground clients wait for the result).
                    // Do not flip without also implementing WORK_COMPLETE (still
                    // commented out below), or waiting clients can never be
                    // released.
                    SUBMIT_JOB => self.handle_submit_job(PRIORITY_NORMAL, false, body),
                    SUBMIT_JOB_HIGH => self.handle_submit_job(PRIORITY_HIGH, false, body),
                    SUBMIT_JOB_LOW => self.handle_submit_job(PRIORITY_LOW, false, body),
                    SUBMIT_JOB_BG => self.handle_submit_job(PRIORITY_NORMAL, true, body),
                    SUBMIT_JOB_HIGH_BG => self.handle_submit_job(PRIORITY_HIGH, true, body),
                    SUBMIT_JOB_LOW_BG => self.handle_submit_job(PRIORITY_LOW, true, body),
                    PRE_SLEEP => self.handle_pre_sleep(),
                    CAN_DO => self.handle_can_do(body),
                    CANT_DO => self.handle_cant_do(body),
                    GRAB_JOB => self.handle_grab_job(body),
                    GRAB_JOB_UNIQ => self.handle_grab_job_uniq(body),
                    GRAB_JOB_ALL => self.handle_grab_job_all(body),/*
                    WORK_COMPLETE => self.handle_work_complete(),
                    WORK_STATUS | WORK_DATA | WORK_WARNING => self.handle_work_update(),
                    ECHO_REQ => self.handle_echo_req(&req),*/
                    _ => {
                        error!("Unimplemented: {:?} processing packet", header);
                        future::err(io::Error::new(io::ErrorKind::Other,
                                                   format!("Invalid packet type {}", header.ptype)))
                            .boxed()
                    }
                }
            }
        }
    }
}
Sends bodies
use std::collections::HashMap;
use std::io;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicUsize, Ordering};
use futures::{future, Future, BoxFuture, Stream, Sink};
use futures::sync::mpsc::{channel, Sender};
use tokio_core::reactor::Remote;
use tokio_proto::streaming::{Message, Body};
use tokio_service::Service;
use bytes::{BufMut, Bytes, BytesMut};
use admin;
use codec::PacketHeader;
use job::Job;
use packet::PacketMagic;
use queues::{HandleJobStorage, JobQueuePriority, SharedJobStorage};
use worker::{SharedWorkers, Worker, Wake};
use constants::*;
/// Streaming payload of a gearman packet.
pub type GearmanBody = Body<Bytes, io::Error>;
/// A framed gearman packet: parsed header plus streaming body.
pub type GearmanMessage = Message<PacketHeader, GearmanBody>;

/// How many wake-up signals may queue per sleeping connection before
/// senders would block.
const WAKE_BACKLOG_SIZE: usize = 8;
/// Build a RES-magic response message of type `ptype` carrying `data` as
/// its complete body; `psize` is taken from the payload length.
fn new_res(ptype: u32, data: Bytes) -> GearmanMessage {
    let header = PacketHeader {
        magic: PacketMagic::RES,
        ptype: ptype,
        psize: data.len() as u32,
    };
    Message::WithBody(header, Body::from(data))
}
/// Map of job handle -> channels of clients waiting for that job's
/// response body (foreground submitters).
type JobBodySenders = Arc<Mutex<HashMap<Bytes, Vec<Sender<Result<Bytes, io::Error>>>>>>;

/// Per-connection protocol service. Server-wide state is shared between
/// connections through the Arc handles below.
pub struct GearmanService {
    /// Unique id of this client/worker connection.
    pub conn_id: usize,
    /// Server-wide job queues.
    pub queues: SharedJobStorage,
    /// Registry of all workers; used for sleep/wakeup bookkeeping.
    pub workers: SharedWorkers,
    /// State of the worker speaking on this connection.
    pub worker: Arc<Mutex<Worker>>,
    /// Monotonic counter used to mint job handles ("H:<n>").
    pub job_count: Arc<AtomicUsize>,
    /// Wake-up channels keyed by conn_id for sleeping connections.
    pub connections: Arc<Mutex<HashMap<usize, Sender<()>>>>,
    // Channels of clients waiting on job response bodies.
    job_body_senders: JobBodySenders,
    // Reactor handle used to spawn send tasks off this future.
    remote: Remote,
}
/// Split the next NUL-terminated field off the front of `buf`.
///
/// On success the returned `Bytes` holds the field without the terminator
/// and `buf` is advanced past it; errors when no NUL byte is present.
fn next_field(buf: &mut Bytes) -> Result<Bytes, io::Error> {
    let null_pos = match buf[..].iter().position(|b| *b == b'\0') {
        Some(pos) => pos,
        None => return Err(io::Error::new(io::ErrorKind::Other, "Can't find null")),
    };
    let field = buf.split_to(null_pos);
    buf.split_to(1); // discard the NUL terminator itself
    Ok(field)
}
impl GearmanService {
    /// Things that don't require a body should use this
    ///
    /// Builds a full response for header-only admin (text protocol) packets.
    /// Panics on any other ptype — callers must pre-filter.
    fn response_from_header(&self,
                            header: &PacketHeader)
                            -> Message<PacketHeader, Body<Bytes, io::Error>> {
        match header.ptype {
            ADMIN_VERSION => {
                let resp_str = "OK some-rustygear-version\n";
                let mut resp_body = BytesMut::with_capacity(resp_str.len());
                resp_body.put(&resp_str[..]);
                let resp_body = Body::from(resp_body.freeze());
                Message::WithBody(PacketHeader {
                                      magic: PacketMagic::TEXT,
                                      ptype: header.ptype,
                                      psize: resp_str.len() as u32,
                                  },
                                  resp_body)
            }
            ADMIN_STATUS => admin::admin_command_status(self.queues.clone(), self.workers.clone()),
            _ => {
                panic!("response_from_header called with invalid ptype: {}",
                       header.ptype)
            }
        }
    }

    /// Construct the per-connection service over the shared server state.
    pub fn new(conn_id: usize,
               queues: SharedJobStorage,
               workers: SharedWorkers,
               job_count: Arc<AtomicUsize>,
               connections: Arc<Mutex<HashMap<usize, Sender<()>>>>,
               job_body_senders: JobBodySenders,
               remote: Remote)
               -> GearmanService {
        GearmanService {
            conn_id: conn_id,
            queues: queues,
            worker: Arc::new(Mutex::new((Worker::new()))),
            workers: workers,
            job_count: job_count,
            connections: connections,
            job_body_senders: job_body_senders,
            remote: remote,
        }
    }

    /// Zero-length ADMIN_RESPONSE used when a packet needs no reply payload.
    fn no_response() -> GearmanMessage {
        Message::WithBody(PacketHeader {
                              magic: PacketMagic::TEXT,
                              ptype: ADMIN_RESPONSE,
                              psize: 0,
                          },
                          Body::from(Bytes::new()))
    }

    /// CAN_DO: register the body (a function name) as handled by this
    /// worker, then run wakeup bookkeeping in case work is already queued.
    fn handle_can_do(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        let workers = self.workers.clone();
        let conn_id = self.conn_id;
        trace!("handle_can_do");
        body.concat2()
            .and_then(move |fname| {
                debug!("CAN_DO fname = {:?}", fname);
                let mut worker = worker.lock().unwrap();
                worker.can_do(fname);
                workers.clone().wakeup(&mut worker, conn_id);
                future::finished(Self::no_response())
            })
            .boxed()
    }

    /// CANT_DO: unregister the function named by the body for this worker.
    fn handle_cant_do(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        body.concat2()
            .and_then(move |fname| {
                debug!("CANT_DO fname = {:?}", fname);
                let mut worker = worker.lock().unwrap();
                worker.cant_do(&fname);
                future::finished(Self::no_response())
            })
            .boxed()
    }

    /// GRAB_JOB_ALL: try to assign a queued job; reply JOB_ASSIGN_ALL with
    /// handle\0fname\0unique\0reducer\0data (reducer always empty), or NO_JOB.
    fn handle_grab_job_all(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let worker = self.worker.clone();
        trace!("handle_grab_job_all");
        body.concat2()
            .and_then(move |_| {
                let mut worker = worker.lock().unwrap();
                let ref mut worker = worker;
                if queues.get_job(worker) {
                    match worker.job() {
                        Some(ref j) => {
                            // 4 = number of NUL separators in the payload.
                            let mut data = BytesMut::with_capacity(4 + j.handle.len() +
                                                                   j.fname.len() +
                                                                   j.unique.len() +
                                                                   j.data.len());
                            data.extend(&j.handle);
                            data.put_u8(b'\0');
                            data.extend(&j.fname);
                            data.put_u8(b'\0');
                            data.extend(&j.unique);
                            data.put_u8(b'\0');
                            // reducer not implemented
                            data.put_u8(b'\0');
                            data.extend(&j.data);
                            return future::finished(new_res(JOB_ASSIGN_ALL, data.freeze())).boxed();
                        }
                        None => {}
                    }
                };
                future::finished(new_res(NO_JOB, Bytes::new())).boxed()
            })
            .boxed()
    }

    /// GRAB_JOB_UNIQ: like handle_grab_job_all but without the reducer
    /// field — payload is handle\0fname\0unique\0data.
    fn handle_grab_job_uniq(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let worker = self.worker.clone();
        trace!("handle_grab_job_uniq");
        body.concat2()
            .and_then(move |_| {
                let mut worker = worker.lock().unwrap();
                let ref mut worker = worker;
                if queues.get_job(worker) {
                    match worker.job() {
                        Some(ref j) => {
                            let mut data = BytesMut::with_capacity(3 + j.handle.len() +
                                                                   j.fname.len() +
                                                                   j.unique.len() +
                                                                   j.data.len());
                            data.extend(&j.handle);
                            data.put_u8(b'\0');
                            data.extend(&j.fname);
                            data.put_u8(b'\0');
                            data.extend(&j.unique);
                            data.put_u8(b'\0');
                            data.extend(&j.data);
                            return future::finished(new_res(JOB_ASSIGN_UNIQ, data.freeze()))
                                .boxed();
                        }
                        None => {}
                    }
                };
                future::finished(new_res(NO_JOB, Bytes::new())).boxed()
            })
            .boxed()
    }

    /// GRAB_JOB: minimal assignment payload, handle\0fname\0data.
    fn handle_grab_job(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let worker = self.worker.clone();
        trace!("handle_grab_job");
        body.concat2()
            .and_then(move |_| {
                let mut worker = worker.lock().unwrap();
                let ref mut worker = worker;
                if queues.get_job(worker) {
                    match worker.job() {
                        Some(ref j) => {
                            let mut data = BytesMut::with_capacity(2 + j.handle.len() +
                                                                   j.fname.len() +
                                                                   j.data.len());
                            data.extend(&j.handle);
                            data.put_u8(b'\0');
                            data.extend(&j.fname);
                            data.put_u8(b'\0');
                            data.extend(&j.data);
                            return future::finished(new_res(JOB_ASSIGN, data.freeze())).boxed();
                        }
                        None => {}
                    }
                };
                future::finished(new_res(NO_JOB, Bytes::new())).boxed()
            })
            .boxed()
    }

    /// PRE_SLEEP: park this worker connection until there is work. The
    /// returned future resolves with a NOOP only after a wake-up signal
    /// arrives on the channel registered in `connections`.
    fn handle_pre_sleep(&self) -> BoxFuture<GearmanMessage, io::Error> {
        let worker = self.worker.clone();
        let ref mut w = worker.lock().unwrap();
        self.workers.clone().sleep(w, self.conn_id);
        // When we get woke, send a NOOP
        let (tx, rx) = channel(WAKE_BACKLOG_SIZE);
        {
            let mut connections = self.connections.lock().unwrap();
            connections.insert(self.conn_id, tx);
        }
        let resp = new_res(NOOP, Bytes::new());
        // Only the first wake-up matters (take(1)).
        // If there are more, they are pointless until the NOOP is queued, and once it is queued,
        // the connections hashmap will have dropped the sender, and this future resolving
        // should drop the receiver and all of its backed up items.
        let connections = self.connections.clone();
        let conn_id = self.conn_id;
        rx.take(1)
            .for_each(move |_| {
                {
                    let mut connections = connections.lock().unwrap();
                    connections.remove(&conn_id);
                }
                Ok(())
            })
            .map_err(move |_| io::Error::new(io::ErrorKind::Other, "receiver error"))
            .map(move |_| resp)
            .boxed()
    }

    /// SUBMIT_JOB*: create (or coalesce by unique id) a job, wake any
    /// workers registered for its function, and answer JOB_CREATED whose
    /// streaming body starts with the handle. When `wait` is true the
    /// response body's sender is stored so a later WORK_COMPLETE can
    /// stream the result back to this client.
    fn handle_submit_job(&self,
                         priority: JobQueuePriority,
                         wait: bool,
                         body: GearmanBody)
                         -> BoxFuture<GearmanMessage, io::Error> {
        let mut queues = self.queues.clone();
        let conn_id = match wait {
            true => Some(self.conn_id),
            false => None,
        };
        let mut workers = self.workers.clone();
        let job_count = self.job_count.clone();
        let connections = self.connections.clone();
        let remote = self.remote.clone();
        let (tx, response_body) = Body::pair();
        let job_body_senders = self.job_body_senders.clone();
        let ret = body.concat2()
            .and_then(move |mut fields| {
                // Packet layout: fname\0unique\0<opaque job data>.
                let fname = next_field(&mut fields).unwrap();
                let unique = next_field(&mut fields).unwrap();
                let mut add = false;
                let handle = match queues.coalesce_unique(&unique, conn_id) {
                    // An equivalent job already exists; reuse its handle.
                    Some(handle) => handle,
                    None => {
                        {
                            // Signal every sleeping worker registered for fname.
                            let mut connections = connections.lock().unwrap();
                            for wake in workers.queue_wake(&fname) {
                                match connections.get_mut(&wake) {
                                    None => {
                                        debug!("No connection found to wake up for conn_id = {}",
                                               wake);
                                    }
                                    Some(tx) => {
                                        let tx = tx.clone();
                                        // Send from a reactor task so we never block
                                        // while holding the connections lock.
                                        remote.spawn(move |handle| {
                                            handle.spawn(tx.send(()).then(|res| {
                                                match res {
                                                    Ok(_) => {}
                                                    Err(e) => error!("Send Error! {:?}", e),
                                                }
                                                Ok(())
                                            }));
                                            Ok(())
                                        });
                                    }
                                }
                            }
                        }
                        // Handles look like "H:" + zero-padded 10-digit counter
                        // (12 bytes total).
                        let mut handle = BytesMut::with_capacity(12);
                        let job_num = job_count.fetch_add(1, Ordering::Relaxed);
                        debug!("job_num = {}", job_num);
                        handle.extend(format!("H:{:010}", job_num).as_bytes());
                        add = true;
                        handle.freeze()
                    }
                };
                if add {
                    let job = Arc::new(Job::new(fname, unique, fields, handle.clone()));
                    info!("Created job {:?}", job);
                    queues.add_job(job.clone(), priority, conn_id);
                }
                // First body chunk of the JOB_CREATED response is the handle.
                tx.send(Ok(handle.clone())).then(move |tx| match tx {
                    Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
                    Ok(tx) => {
                        // If we don't store any senders, the sender will be dropped and the rx
                        // stream should end thus releasing the waiter immediately.
                        let psize = handle.len() as u32;
                        if wait {
                            let mut job_body_senders = job_body_senders.lock().unwrap();
                            debug!("I just inserting {:?} into {:?}", &handle, &*job_body_senders);
                            job_body_senders.entry(handle).or_insert(Vec::new()).push(tx);
                            debug!("I just inserted into {:?}", &*job_body_senders);
                        }
                        Ok(Message::WithBody(PacketHeader {
                                                 magic: PacketMagic::RES,
                                                 ptype: JOB_CREATED,
                                                 psize: psize,
                                             },
                                             response_body))
                    }
                })
            })
            .boxed();
        ret
    }

    /// WORK_COMPLETE: packet body is handle\0<result data>. Scans the
    /// streamed chunks for the first NUL to recover the handle (buffering
    /// chunks that precede it), unassigns the job from this worker, removes
    /// it from the queues, then forwards every remaining chunk to all
    /// clients waiting on that handle. Replies with an empty ADMIN_RESPONSE.
    fn handle_work_complete(&self, body: GearmanBody) -> BoxFuture<GearmanMessage, io::Error> {
        let job_body_senders = self.job_body_senders.clone();
        // Search for handle
        // NOTE(review): `handle` is a frozen (immutable) Bytes that is then
        // `extend`ed — this relies on the bytes crate's Extend impl for Bytes;
        // confirm the crate version actually supports this.
        let mut handle = BytesMut::with_capacity(12).freeze(); // Usual length of handles, 10digits + H:
        let mut found_null = false;
        let prev_chunks = Arc::new(Mutex::new(Vec::new()));
        let body_senders = Arc::new(Mutex::new(None));
        let worker = self.worker.clone();
        let queues = self.queues.clone();
        let remote = self.remote.clone();
        body.for_each(move |mut chunk| {
            if !found_null {
                match chunk[..].iter().position(|b| *b == b'\0') {
                    Some(null_pos) => {
                        let value = chunk.split_to(null_pos);
                        // If there are previous values we need those in the handle too
                        let prev_chunks = prev_chunks.lock().unwrap();
                        for pchunk in prev_chunks.iter() {
                            handle.extend(pchunk);
                        }
                        handle.extend(&value);
                        chunk.split_to(1); // Drop null
                        info!("Job is complete {:?}", handle);
                        let mut worker = worker.lock().unwrap();
                        match worker.job() {
                            Some(ref mut j) => {
                                if j.handle != handle {
                                    error!("WORK_COMPLETE received for inactive job handle: {:?}", handle)
                                }
                                // NOTE(review): elsewhere SharedJobStorage is used via its
                                // own methods (get_job/add_job); locking it directly here
                                // is inconsistent — confirm both paths guard the same data.
                                let mut queues = queues.lock().unwrap();
                                queues.remove_job(&j.unique);
                            }
                            None => {
                                error!("WORK_COMPLETE received but no active jobs"); // TODO: worker id
                            }
                        }
                        worker.unassign_job();
                        // Now send this as first body chunk if there are senders
                        let mut job_body_senders = job_body_senders.lock().unwrap();
                        // We use remove so the senders get dropped and channels shut down after we
                        // fall out of scope. If there are no senders, we'll get a None here anyway
                        let mut body_senders = body_senders.lock().unwrap();
                        debug!("Looking for {:?} in senders: {:?}", handle, &*job_body_senders);
                        *body_senders = job_body_senders.remove(&handle);
                        found_null = true;
                    }
                    None => {
                        // No NUL yet: the handle spans chunks; stash this one.
                        let mut prev_chunks = prev_chunks.lock().unwrap();
                        prev_chunks.push(chunk.clone())
                    }
                }
            }
            // Forward the (post-handle) chunk to every waiting client.
            let body_senders = body_senders.lock().unwrap();
            match *body_senders {
                None => {},
                Some(ref body_senders) => {
                    for sender in body_senders {
                        let sender = sender.clone();
                        let chunk = chunk.clone();
                        debug!("Sending {:?} to a sender: {:?}", chunk, sender);
                        remote.spawn(move |reactor_handle| {
                            reactor_handle.spawn(sender.send(Ok(chunk)).map(|_| {}).map_err(|_| {}));
                            Ok(())
                        });
                    }
                }
            }
            Ok(())
        })
        .map(move |_| { Self::no_response() })
        .boxed()
    }
}
/// tokio-service entry point: dispatches each framed gearman packet to the
/// matching handler method.
impl Service for GearmanService {
    type Request = GearmanMessage;
    type Response = GearmanMessage;
    type Error = io::Error;
    type Future = BoxFuture<Self::Response, Self::Error>;
    fn call(&self, req: Self::Request) -> Self::Future {
        debug!("Got a req {:?}", req);
        match req {
            // Header-only packets: just the admin (text protocol) queries.
            Message::WithoutBody(header) => {
                match header.ptype {
                    ADMIN_VERSION | ADMIN_STATUS => {
                        future::ok(self.response_from_header(&header)).boxed()
                    }
                    _ => {
                        future::err(io::Error::new(io::ErrorKind::Other,
                                                   format!("Bodyless packet type = {}",
                                                           header.ptype)))
                            .boxed()
                    }
                }
            }
            Message::WithBody(header, body) => {
                match header.ptype {
                    // Foreground submits pass wait=true so the submitter's
                    // response body stays open until WORK_COMPLETE; background
                    // (*_BG) submits pass wait=false and return immediately.
                    SUBMIT_JOB => self.handle_submit_job(PRIORITY_NORMAL, true, body),
                    SUBMIT_JOB_HIGH => self.handle_submit_job(PRIORITY_HIGH, true, body),
                    SUBMIT_JOB_LOW => self.handle_submit_job(PRIORITY_LOW, true, body),
                    SUBMIT_JOB_BG => self.handle_submit_job(PRIORITY_NORMAL, false, body),
                    SUBMIT_JOB_HIGH_BG => self.handle_submit_job(PRIORITY_HIGH, false, body),
                    SUBMIT_JOB_LOW_BG => self.handle_submit_job(PRIORITY_LOW, false, body),
                    PRE_SLEEP => self.handle_pre_sleep(),
                    CAN_DO => self.handle_can_do(body),
                    CANT_DO => self.handle_cant_do(body),
                    GRAB_JOB => self.handle_grab_job(body),
                    GRAB_JOB_UNIQ => self.handle_grab_job_uniq(body),
                    GRAB_JOB_ALL => self.handle_grab_job_all(body),
                    WORK_COMPLETE => self.handle_work_complete(body),/*
                    WORK_STATUS | WORK_DATA | WORK_WARNING => self.handle_work_update(),
                    ECHO_REQ => self.handle_echo_req(&req),*/
                    _ => {
                        error!("Unimplemented: {:?} processing packet", header);
                        future::err(io::Error::new(io::ErrorKind::Other,
                                                   format!("Invalid packet type {}", header.ptype)))
                            .boxed()
                    }
                }
            }
        }
    }
}
|
use std::fmt;
use std::net::IpAddr;
use SdpLine;
use error::SdpParserError;
use network::{parse_nettype, parse_addrtype, parse_unicast_addr};
/// Every SDP attribute name this parser recognizes (the part before the
/// ':' in an "a=" line).
#[derive(Clone)]
pub enum SdpAttributeType {
    // TODO consolidate these into groups
    BundleOnly,
    Candidate,
    EndOfCandidates,
    Extmap,
    Fingerprint,
    Fmtp,
    Group,
    IceLite,
    IceMismatch,
    IceOptions,
    IcePwd,
    IceUfrag,
    Identity,
    ImageAttr,
    Inactive,
    Label,
    MaxMessageSize,
    MaxPtime,
    Mid,
    Msid,
    MsidSemantic,
    Ptime,
    Rid,
    Recvonly,
    Rtcp,
    RtcpFb,
    RtcpMux,
    RtcpRsize,
    Rtpmap,
    Sctpmap,
    SctpPort,
    Sendonly,
    Sendrecv,
    Setup,
    Simulcast,
    Ssrc,
    SsrcGroup,
}
impl fmt::Display for SdpAttributeType {
    /// Writes the human-readable name of the attribute type.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
                        SdpAttributeType::BundleOnly => "Bundle-Only",
                        SdpAttributeType::Candidate => "Candidate",
                        SdpAttributeType::EndOfCandidates => "End-Of-Candidates",
                        SdpAttributeType::Extmap => "Extmap",
                        SdpAttributeType::Fingerprint => "Fingerprint",
                        SdpAttributeType::Fmtp => "Fmtp",
                        SdpAttributeType::Group => "Group",
                        SdpAttributeType::IceLite => "Ice-Lite",
                        SdpAttributeType::IceMismatch => "Ice-Mismatch",
                        SdpAttributeType::IceOptions => "Ice-Options",
                        SdpAttributeType::IcePwd => "Ice-Pwd",
                        SdpAttributeType::IceUfrag => "Ice-Ufrag",
                        SdpAttributeType::Identity => "Identity",
                        SdpAttributeType::ImageAttr => "Imageattr",
                        SdpAttributeType::Inactive => "Inactive",
                        SdpAttributeType::Label => "Label",
                        SdpAttributeType::MaxMessageSize => "Max-Message-Size",
                        SdpAttributeType::MaxPtime => "Max-Ptime",
                        SdpAttributeType::Mid => "Mid",
                        SdpAttributeType::Msid => "Msid",
                        SdpAttributeType::MsidSemantic => "Msid-Semantic",
                        SdpAttributeType::Ptime => "Ptime",
                        SdpAttributeType::Rid => "Rid",
                        SdpAttributeType::Recvonly => "Recvonly",
                        SdpAttributeType::Rtcp => "Rtcp",
                        SdpAttributeType::RtcpFb => "Rtcp-Fb",
                        SdpAttributeType::RtcpMux => "Rtcp-Mux",
                        SdpAttributeType::RtcpRsize => "Rtcp-Rsize",
                        SdpAttributeType::Rtpmap => "Rtpmap",
                        SdpAttributeType::Sctpmap => "Sctpmap",
                        SdpAttributeType::SctpPort => "Sctp-Port",
                        SdpAttributeType::Sendonly => "Sendonly",
                        SdpAttributeType::Sendrecv => "Sendrecv",
                        SdpAttributeType::Setup => "Setup",
                        SdpAttributeType::Simulcast => "Simulcast",
                        SdpAttributeType::Ssrc => "Ssrc",
                        SdpAttributeType::SsrcGroup => "Ssrc-Group",
                    })
    }
}
/// Transport token of a candidate attribute (field 3 of a=candidate).
#[derive(Clone)]
pub enum SdpAttributeCandidateTransport {
    Udp,
    Tcp,
}
/// The "typ" value of a candidate: host, server/peer reflexive, or relayed.
#[derive(Clone)]
pub enum SdpAttributeCandidateType {
    Host,
    Srflx,
    Prflx,
    Relay,
}
/// Optional "tcptype" value of a TCP candidate.
#[derive(Clone)]
pub enum SdpAttributeCandidateTcpType {
    Active,
    Passive,
    Simultaneous,
}
/// Parsed a=candidate attribute value.
#[derive(Clone)]
pub struct SdpAttributeCandidate {
    pub foundation: String,
    pub component: u32,
    pub transport: SdpAttributeCandidateTransport,
    pub priority: u64,
    pub address: IpAddr,
    pub port: u32,
    pub c_type: SdpAttributeCandidateType,
    // Optional extensions, absent unless present in the line:
    /// Related address (raddr).
    pub raddr: Option<IpAddr>,
    /// Related port (rport).
    pub rport: Option<u32>,
    /// TCP candidate type (tcptype).
    pub tcp_type: Option<SdpAttributeCandidateTcpType>,
}
impl SdpAttributeCandidate {
    /// Build a candidate from the mandatory fields; the optional
    /// raddr/rport/tcptype extensions start out unset.
    pub fn new(foundation: String,
               component: u32,
               transport: SdpAttributeCandidateTransport,
               priority: u64,
               address: IpAddr,
               port: u32,
               c_type: SdpAttributeCandidateType)
               -> SdpAttributeCandidate {
        SdpAttributeCandidate {
            raddr: None,
            rport: None,
            tcp_type: None,
            foundation,
            component,
            transport,
            priority,
            address,
            port,
            c_type,
        }
    }

    /// Record the related address (raddr) extension.
    fn set_remote_address(&mut self, ip: IpAddr) {
        self.raddr = Some(ip);
    }

    /// Record the related port (rport) extension.
    fn set_remote_port(&mut self, p: u32) {
        self.rport = Some(p);
    }

    /// Record the tcptype extension.
    fn set_tcp_type(&mut self, t: SdpAttributeCandidateTcpType) {
        self.tcp_type = Some(t);
    }
}
/// One simulcast stream id; a leading '~' in the source marks it paused.
#[derive(Clone)]
pub struct SdpAttributeSimulcastId {
    pub id: String,
    pub paused: bool,
}
impl SdpAttributeSimulcastId {
    /// Parse one id token; a leading '~' means the stream is paused and is
    /// stripped from the stored id.
    pub fn new(idstr: String) -> SdpAttributeSimulcastId {
        let paused = idstr.starts_with('~');
        let id = if paused {
            idstr[1..].to_string()
        } else {
            idstr
        };
        SdpAttributeSimulcastId { id, paused }
    }
}
/// A comma-separated group of alternative simulcast ids.
#[derive(Clone)]
pub struct SdpAttributeSimulcastAlternatives {
    pub ids: Vec<SdpAttributeSimulcastId>,
}
impl SdpAttributeSimulcastAlternatives {
    /// Parse a comma-separated list of simulcast ids.
    pub fn new(idlist: String) -> SdpAttributeSimulcastAlternatives {
        let ids = idlist
            .split(',')
            .map(|id| SdpAttributeSimulcastId::new(id.to_string()))
            .collect();
        SdpAttributeSimulcastAlternatives { ids }
    }
}
/// Parsed a=simulcast attribute: send and receive direction id lists.
#[derive(Clone)]
pub struct SdpAttributeSimulcast {
    pub send: Vec<SdpAttributeSimulcastAlternatives>,
    pub receive: Vec<SdpAttributeSimulcastAlternatives>,
}
impl SdpAttributeSimulcast {
fn parse_ids(&mut self, direction: SdpAttributeDirection, idlist: String) {
let list = idlist
.split(';')
.map(|x| x.to_string())
.map(SdpAttributeSimulcastAlternatives::new)
.collect();
// TODO prevent over-writing existing values
match direction {
SdpAttributeDirection::Recvonly => self.receive = list,
SdpAttributeDirection::Sendonly => self.send = list,
_ => (),
}
}
}
/// Parsed a=rtcp attribute: a port and an optional unicast address.
#[derive(Clone)]
pub struct SdpAttributeRtcp {
    pub port: u32,
    pub unicast_addr: Option<IpAddr>,
}
impl SdpAttributeRtcp {
    /// Create an rtcp attribute for `port`; the address starts unset.
    pub fn new(port: u32) -> SdpAttributeRtcp {
        SdpAttributeRtcp {
            unicast_addr: None,
            port,
        }
    }

    /// Record the optional unicast address.
    fn set_addr(&mut self, addr: IpAddr) {
        self.unicast_addr = Some(addr);
    }
}
/// Parsed a=rtcp-fb attribute.
#[derive(Clone)]
pub struct SdpAttributeRtcpFb {
    pub payload_type: u32,
    // TODO parse this and use an enum instead?
    pub feedback_type: String,
}
/// Media direction, shared by several attributes (e.g. extmap, simulcast).
#[derive(Clone)]
pub enum SdpAttributeDirection {
    Recvonly,
    Sendonly,
    Sendrecv,
}
/// Parsed a=extmap attribute: extension id, optional direction, and URI.
#[derive(Clone)]
pub struct SdpAttributeExtmap {
    pub id: u32,
    pub direction: Option<SdpAttributeDirection>,
    pub url: String,
}
/// Parsed a=fmtp attribute: payload type plus its raw parameter tokens.
#[derive(Clone)]
pub struct SdpAttributeFmtp {
    pub payload_type: u32,
    pub tokens: Vec<String>,
}
/// Parsed a=fingerprint attribute.
#[derive(Clone)]
pub struct SdpAttributeFingerprint {
    // TODO turn the supported hash algorithms into an enum?
    pub hash_algorithm: String,
    pub fingerprint: String,
}
/// Parsed a=sctpmap attribute.
#[derive(Clone)]
pub struct SdpAttributeSctpmap {
    pub port: u32,
    pub channels: u32,
}
/// Semantics token of an a=group attribute (LS, FID, SRF, ANAT, FEC, DDP,
/// BUNDLE).
#[derive(Clone)]
pub enum SdpAttributeGroupSemantic {
    LipSynchronization,
    FlowIdentification,
    SingleReservationFlow,
    AlternateNetworkAddressType,
    ForwardErrorCorrection,
    DecodingDependency,
    Bundle,
}
/// Parsed a=group attribute: semantics plus the grouped mid tags.
#[derive(Clone)]
pub struct SdpAttributeGroup {
    pub semantics: SdpAttributeGroupSemantic,
    pub tags: Vec<String>,
}
/// Parsed a=msid attribute: stream id and optional application data.
#[derive(Clone)]
pub struct SdpAttributeMsid {
    pub id: String,
    pub appdata: Option<String>,
}
/// Parsed a=rtpmap attribute; frequency and channels are optional parts
/// of the encoding description.
#[derive(Clone)]
pub struct SdpAttributeRtpmap {
    pub payload_type: u32,
    pub codec_name: String,
    pub frequency: Option<u32>,
    pub channels: Option<u32>,
}
impl SdpAttributeRtpmap {
    /// Create an rtpmap entry; frequency and channel count start unset.
    pub fn new(payload_type: u32, codec_name: String) -> SdpAttributeRtpmap {
        SdpAttributeRtpmap {
            frequency: None,
            channels: None,
            payload_type,
            codec_name,
        }
    }

    /// Record the clock rate.
    fn set_frequency(&mut self, f: u32) {
        self.frequency = Some(f);
    }

    /// Record the channel count.
    fn set_channels(&mut self, c: u32) {
        self.channels = Some(c);
    }
}
/// Parsed a=setup attribute value (DTLS connection role).
#[derive(Clone)]
pub enum SdpAttributeSetup {
    Active,
    Actpass,
    Holdconn,
    Passive,
}
/// Parsed a=ssrc attribute: the SSRC id plus an optional
/// "attribute[:value]" pair.
#[derive(Clone)]
pub struct SdpAttributeSsrc {
    pub id: u32,
    pub attribute: Option<String>,
    pub value: Option<String>,
}
impl SdpAttributeSsrc {
    /// Create an ssrc entry for `id`; attribute/value start unset.
    pub fn new(id: u32) -> SdpAttributeSsrc {
        SdpAttributeSsrc {
            id,
            attribute: None,
            value: None,
        }
    }

    /// Parse an "attribute" or "attribute:value" token into the optional
    /// fields. A single `splitn` pass replaces the original double scan
    /// (`find(':') == None` plus `splitn`): the first segment always
    /// exists and becomes `attribute`; `value` is set only when a ':'
    /// was present, exactly as before.
    fn set_attribute(&mut self, a: &str) {
        let mut parts = a.splitn(2, ':');
        self.attribute = parts.next().map(|s| s.to_string());
        self.value = parts.next().map(|s| s.to_string());
    }
}
/// Typed payload of a parsed attribute, one variant per structured
/// attribute kind plus generic string/int/vector fallbacks.
#[derive(Clone)]
pub enum SdpAttributeValue {
    Str(String),
    Int(u64),
    Vector(Vec<String>),
    Candidate(SdpAttributeCandidate),
    Extmap(SdpAttributeExtmap),
    Fingerprint(SdpAttributeFingerprint),
    Fmtp(SdpAttributeFmtp),
    Group(SdpAttributeGroup),
    Msid(SdpAttributeMsid),
    Rtpmap(SdpAttributeRtpmap),
    Rtcp(SdpAttributeRtcp),
    Rtcpfb(SdpAttributeRtcpFb),
    Sctpmap(SdpAttributeSctpmap),
    Setup(SdpAttributeSetup),
    Simulcast(SdpAttributeSimulcast),
    Ssrc(SdpAttributeSsrc),
}
/// One parsed "a=" line: its type and, for attributes that carry one, the
/// typed value.
#[derive(Clone)]
pub struct SdpAttribute {
    pub name: SdpAttributeType,
    pub value: Option<SdpAttributeValue>,
}
impl SdpAttribute {
pub fn new(name: SdpAttributeType) -> SdpAttribute {
SdpAttribute { name, value: None }
}
pub fn parse_value(&mut self, v: &str) -> Result<(), SdpParserError> {
match self.name {
SdpAttributeType::BundleOnly |
SdpAttributeType::EndOfCandidates |
SdpAttributeType::IceLite |
SdpAttributeType::IceMismatch |
SdpAttributeType::Inactive |
SdpAttributeType::Recvonly |
SdpAttributeType::RtcpMux |
SdpAttributeType::RtcpRsize |
SdpAttributeType::Sendonly |
SdpAttributeType::Sendrecv => {
if !v.is_empty() {
return Err(SdpParserError::Line{
message: "This attribute is not allowed to have a value".to_string(),
line: v.to_string()})
}
},
SdpAttributeType::MaxMessageSize |
SdpAttributeType::MaxPtime |
SdpAttributeType::Ptime => {
self.value = Some(SdpAttributeValue::Int(v.parse::<u64>()?))
},
SdpAttributeType::IcePwd |
SdpAttributeType::IceUfrag |
SdpAttributeType::Identity |
SdpAttributeType::ImageAttr | // TODO implemente if needed
SdpAttributeType::Label |
SdpAttributeType::Mid |
SdpAttributeType::MsidSemantic | // mmusic-msid-16 doesnt have this
SdpAttributeType::Rid |
SdpAttributeType::SsrcGroup => { // not in JSEP any more...
if v.is_empty() {
return Err(SdpParserError::Line{
message: "This attribute is required to have a value".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Str(v.to_string()))
},
SdpAttributeType::Candidate => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() < 8 {
return Err(SdpParserError::Line{
message: "Candidate needs to have minimum eigth tokens".to_string(),
line: v.to_string()})
}
let component = tokens[1].parse::<u32>()?;
let transport = match tokens[2].to_lowercase().as_ref() {
"udp" => SdpAttributeCandidateTransport::Udp,
"tcp" => SdpAttributeCandidateTransport::Tcp,
_ => return Err(SdpParserError::Line{
message: "Unknonw candidate transport value".to_string(),
line: v.to_string()})
};
let priority = tokens[3].parse::<u64>()?;
let address = parse_unicast_addr(tokens[4])?;
let port = tokens[5].parse::<u32>()?;
if port > 65535 {
return Err(SdpParserError::Line{
message: "ICE candidate port can only be a bit 16bit number".to_string(),
line: v.to_string()})
}
match tokens[6].to_lowercase().as_ref() {
"typ" => (),
_ => return Err(SdpParserError::Line{
message: "Candidate attribute token must be 'typ'".to_string(),
line: v.to_string()})
};
let cand_type = match tokens[7].to_lowercase().as_ref() {
"host" => SdpAttributeCandidateType::Host,
"srflx" => SdpAttributeCandidateType::Srflx,
"prflx" => SdpAttributeCandidateType::Prflx,
"relay" => SdpAttributeCandidateType::Relay,
_ => return Err(SdpParserError::Line{
message: "Unknow candidate type value".to_string(),
line: v.to_string()})
};
let mut cand = SdpAttributeCandidate::new(tokens[0].to_string(),
component,
transport,
priority,
address,
port,
cand_type);
if tokens.len() > 8 {
let mut index = 8;
while tokens.len() > index + 1 {
match tokens[index].to_lowercase().as_ref() {
"raddr" => {
let addr = parse_unicast_addr(tokens[index + 1])?;
cand.set_remote_address(addr);
index += 2;
},
"rport" => {
let port = tokens[index + 1].parse::<u32>()?;
if port > 65535 {
return Err(SdpParserError::Line{
message: "ICE candidate rport can only be a bit 16bit number".to_string(),
line: v.to_string()})
}
cand.set_remote_port(port);
index += 2;
},
"tcptype" => {
cand.set_tcp_type(match tokens[index + 1].to_lowercase().as_ref() {
"active" => SdpAttributeCandidateTcpType::Active,
"passive" => SdpAttributeCandidateTcpType::Passive,
"so" => SdpAttributeCandidateTcpType::Simultaneous,
_ => return Err(SdpParserError::Line{
message: "Unknown tcptype value in candidate line".to_string(),
line: v.to_string()})
});
index += 2;
},
_ => return Err(SdpParserError::Unsupported{
message: "Uknown candidate extension name".to_string(),
line: v.to_string()})
};
}
}
self.value = Some(SdpAttributeValue::Candidate(cand))
},
SdpAttributeType::Extmap => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() != 2 {
return Err(SdpParserError::Line{
message: "Extmap needs to have two tokens".to_string(),
line: v.to_string()})
}
let id: u32;
let mut direction: Option<SdpAttributeDirection> = None;
if tokens[0].find('/') == None {
id = tokens[0].parse::<u32>()?;
} else {
let id_dir: Vec<&str> = tokens[0].splitn(2, '/').collect();
id = id_dir[0].parse::<u32>()?;
direction = Some(match id_dir[1].to_lowercase().as_ref() {
"recvonly" => SdpAttributeDirection::Recvonly,
"sendonly" => SdpAttributeDirection::Sendonly,
"sendrecv" => SdpAttributeDirection::Sendrecv,
_ => return Err(SdpParserError::Line{
message: "Unsupported direction in extmap value".to_string(),
line: v.to_string()}),
})
}
self.value = Some(SdpAttributeValue::Extmap(
SdpAttributeExtmap {
id,
direction,
url: tokens[1].to_string()
}
))
},
SdpAttributeType::Fingerprint => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() != 2 {
return Err(SdpParserError::Line{
message: "Fingerprint needs to have two tokens".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Fingerprint(
SdpAttributeFingerprint {
hash_algorithm: tokens[0].to_string(),
fingerprint: tokens[1].to_string()
}
))
},
SdpAttributeType::Fmtp => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() != 2 {
return Err(SdpParserError::Line{
message: "Fmtp needs to have two tokens".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Fmtp(
SdpAttributeFmtp {
// TODO check for dynamic PT range
payload_type: tokens[0].parse::<u32>()?,
// TODO this should probably be slit into known tokens
// plus a list of unknown tokens
tokens: v.split(';').map(|x| x.to_string()).collect()
}
))
},
SdpAttributeType::Group => {
let mut tokens = v.split_whitespace();
let semantics = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Group attribute is missing semantics token".to_string(),
line: v.to_string()}),
Some(x) => match x.to_uppercase().as_ref() {
"LS" => SdpAttributeGroupSemantic::LipSynchronization,
"FID" => SdpAttributeGroupSemantic::FlowIdentification,
"SRF" => SdpAttributeGroupSemantic::SingleReservationFlow,
"ANAT" => SdpAttributeGroupSemantic::AlternateNetworkAddressType,
"FEC" => SdpAttributeGroupSemantic::ForwardErrorCorrection,
"DDP" => SdpAttributeGroupSemantic::DecodingDependency,
"BUNDLE" => SdpAttributeGroupSemantic::Bundle,
_ => return Err(SdpParserError::Line{
message: "Unsupported group semantics".to_string(),
line: v.to_string()}),
}
};
self.value = Some(SdpAttributeValue::Group(
SdpAttributeGroup {
semantics,
tags: tokens.map(|x| x.to_string()).collect()
}
))
},
SdpAttributeType::IceOptions => {
if v.is_empty() {
return Err(SdpParserError::Line{
message: "ice-options is required to have a value".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Vector (
v.split_whitespace().map(|x| x.to_string()).collect()))
},
SdpAttributeType::Msid => {
let mut tokens = v.split_whitespace();
let id = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Msid attribute is missing msid-id token".to_string(),
line: v.to_string()}),
Some(x) => x.to_string()
};
let appdata = match tokens.next() {
None => None,
Some(x) => Some(x.to_string())
};
self.value = Some(SdpAttributeValue::Msid(
SdpAttributeMsid {
id,
appdata
}
))
},
SdpAttributeType::Rtcp => {
let mut tokens = v.split_whitespace();
let port = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Rtcp attribute is missing port number".to_string(),
line: v.to_string()}),
Some(x) => x.parse::<u32>()?
};
if port > 65535 {
return Err(SdpParserError::Line{
message: "Rtcp port can only be a bit 16bit number".to_string(),
line: v.to_string()})
};
let mut rtcp = SdpAttributeRtcp::new(port);
match tokens.next() {
None => (),
Some(x) => {
parse_nettype(x)?;
match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Rtcp attribute is missing address type token".to_string(),
line: v.to_string()}),
Some(x) => {
let addrtype = parse_addrtype(x)?;
let addr = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Rtcp attribute is missing ip address token".to_string(),
line: v.to_string()}),
Some(x) => {
let addr = parse_unicast_addr(x)?;
if !addrtype.same_protocol(&addr) {
return Err(SdpParserError::Line {
message: "Failed to parse unicast address attribute.\
addrtype does not match address."
.to_string(),
line: x.to_string()
});
}
addr
},
};
rtcp.set_addr(addr);
},
};
},
};
self.value = Some(SdpAttributeValue::Rtcp(rtcp))
},
SdpAttributeType::RtcpFb => {
let tokens: Vec<&str> = v.splitn(2, ' ').collect();
self.value = Some(SdpAttributeValue::Rtcpfb(
SdpAttributeRtcpFb {
// TODO limit this to dymaic PTs
payload_type: tokens[0].parse::<u32>()?,
feedback_type: tokens[1].to_string()
}
));
},
SdpAttributeType::Rtpmap => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() != 2 {
return Err(SdpParserError::Line{
message: "Rtpmap needs to have two tokens".to_string(),
line: v.to_string()})
}
// TODO limit this to dymaic PTs
let payload_type: u32 = tokens[0].parse::<u32>()?;
let split: Vec<&str> = tokens[1].split('/').collect();
if split.len() > 3 {
return Err(SdpParserError::Line{
message: "Rtpmap codec token can max 3 subtokens".to_string(),
line: v.to_string()})
}
let mut rtpmap = SdpAttributeRtpmap::new(payload_type,
split[0].to_string());
if split.len() > 1 {
rtpmap.set_frequency(split[1].parse::<u32>()?);
}
if split.len() > 2 {
rtpmap.set_channels(split[2].parse::<u32>()?);
}
self.value = Some(SdpAttributeValue::Rtpmap(rtpmap))
},
SdpAttributeType::Sctpmap => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() != 3 {
return Err(SdpParserError::Line{
message: "Sctpmap needs to have three tokens".to_string(),
line: v.to_string()})
}
let port = tokens[0].parse::<u32>()?;
if port > 65535 {
return Err(SdpParserError::Line{
message: "Sctpmap port can only be a bit 16bit number".to_string(),
line: v.to_string()})
}
if tokens[1].to_lowercase() != "webrtc-datachannel" {
return Err(SdpParserError::Line{
message: "Unsupported sctpmap type token".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Sctpmap(
SdpAttributeSctpmap {
port,
channels: tokens[2].parse::<u32>()?
}
));
},
SdpAttributeType::SctpPort => {
let port = v.parse::<u64>()?;
if port > 65535 {
return Err(SdpParserError::Line{
message: "Sctpport port can only be a bit 16bit number".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Int(port))
}
SdpAttributeType::Simulcast => {
let mut tokens = v.split_whitespace();
let mut token = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Simulcast attribute is missing send/recv value".to_string(),
line: v.to_string()}),
Some(x) => x,
};
let mut sc = SdpAttributeSimulcast {
send: Vec::new(),
receive: Vec::new()
};
loop {
let sendrecv = match token.to_lowercase().as_ref() {
"send" => SdpAttributeDirection::Sendonly,
"recv" => SdpAttributeDirection::Recvonly,
_ => return Err(SdpParserError::Line{
message: "Unsupported send/recv value in simulcast attribute".to_string(),
line: v.to_string()}),
};
match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Simulcast attribute is missing id list".to_string(),
line: v.to_string()}),
Some(x) => sc.parse_ids(sendrecv, x.to_string()),
};
token = match tokens.next() {
None => { break; },
Some(x) => x,
};
}
self.value = Some(SdpAttributeValue::Simulcast(sc))
},
SdpAttributeType::Setup => {
self.value = Some(SdpAttributeValue::Setup(
match v.to_lowercase().as_ref() {
"active" => SdpAttributeSetup::Active,
"actpass" => SdpAttributeSetup::Actpass,
"holdconn" => SdpAttributeSetup::Holdconn,
"passive" => SdpAttributeSetup::Passive,
_ => return Err(SdpParserError::Line{
message: "Unsupported setup value".to_string(),
line: v.to_string()}),
}
))
},
SdpAttributeType::Ssrc => {
let mut tokens = v.split_whitespace();
let ssrc_id = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Ssrc attribute is missing ssrc-id value".to_string(),
line: v.to_string()}),
Some(x) => x.parse::<u32>()?
};
let mut ssrc = SdpAttributeSsrc::new(ssrc_id);
match tokens.next() {
None => (),
Some(x) => ssrc.set_attribute(x),
};
self.value = Some(SdpAttributeValue::Ssrc(ssrc))
},
}
Ok(())
}
}
pub fn parse_attribute(value: &str) -> Result<SdpLine, SdpParserError> {
let name: &str;
let mut val: &str = "";
if value.find(':') == None {
name = value;
} else {
let v: Vec<&str> = value.splitn(2, ':').collect();
name = v[0];
val = v[1];
}
let attrtype = match name.to_lowercase().as_ref() {
"bundle-only" => SdpAttributeType::BundleOnly,
"candidate" => SdpAttributeType::Candidate,
"end-of-candidates" => SdpAttributeType::EndOfCandidates,
"extmap" => SdpAttributeType::Extmap,
"fingerprint" => SdpAttributeType::Fingerprint,
"fmtp" => SdpAttributeType::Fmtp,
"group" => SdpAttributeType::Group,
"ice-lite" => SdpAttributeType::IceLite,
"ice-mismatch" => SdpAttributeType::IceMismatch,
"ice-options" => SdpAttributeType::IceOptions,
"ice-pwd" => SdpAttributeType::IcePwd,
"ice-ufrag" => SdpAttributeType::IceUfrag,
"identity" => SdpAttributeType::Identity,
"imageattr" => SdpAttributeType::ImageAttr,
"inactive" => SdpAttributeType::Inactive,
"label" => SdpAttributeType::Label,
"max-message-size" => SdpAttributeType::MaxMessageSize,
"maxptime" => SdpAttributeType::MaxPtime,
"mid" => SdpAttributeType::Mid,
"msid" => SdpAttributeType::Msid,
"msid-semantic" => SdpAttributeType::MsidSemantic,
"ptime" => SdpAttributeType::Ptime,
"rid" => SdpAttributeType::Rid,
"recvonly" => SdpAttributeType::Recvonly,
"rtcp" => SdpAttributeType::Rtcp,
"rtcp-fb" => SdpAttributeType::RtcpFb,
"rtcp-mux" => SdpAttributeType::RtcpMux,
"rtcp-rsize" => SdpAttributeType::RtcpRsize,
"rtpmap" => SdpAttributeType::Rtpmap,
"sctpmap" => SdpAttributeType::Sctpmap,
"sctp-port" => SdpAttributeType::SctpPort,
"sendonly" => SdpAttributeType::Sendonly,
"sendrecv" => SdpAttributeType::Sendrecv,
"setup" => SdpAttributeType::Setup,
"simulcast" => SdpAttributeType::Simulcast,
"ssrc" => SdpAttributeType::Ssrc,
"ssrc-group" => SdpAttributeType::SsrcGroup,
_ => {
return Err(SdpParserError::Unsupported {
message: "unsupported attribute value".to_string(),
line: name.to_string(),
})
}
};
let mut attr = SdpAttribute::new(attrtype);
attr.parse_value(val.trim())?;
/*
println!("attribute: {}, {}",
a.name, a.value.some());
*/
Ok(SdpLine::Attribute(attr))
}
#[test]
fn test_parse_attribute_candidate() {
assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ host").is_ok());
assert!(parse_attribute("candidate:foo 1 UDP 2122252543 172.16.156.106 49760 typ host")
.is_ok());
assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host").is_ok());
assert!(parse_attribute("candidate:0 1 TCP 2122252543 ::1 49760 typ host").is_ok());
assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ srflx").is_ok());
assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ prflx").is_ok());
assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ relay").is_ok());
assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host tcptype active").is_ok());
assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host tcptype passive").is_ok());
assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host tcptype so").is_ok());
assert!(parse_attribute("candidate:1 1 UDP 1685987071 24.23.204.141 54609 typ srflx raddr 192.168.1.4 rport 61665").is_ok());
assert!(parse_attribute("candidate:1 1 TCP 1685987071 24.23.204.141 54609 typ srflx raddr 192.168.1.4 rport 61665 tcptype passive").is_ok());
assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ").is_err());
assert!(parse_attribute("candidate:0 foo UDP 2122252543 172.16.156.106 49760 typ host")
.is_err());
assert!(parse_attribute("candidate:0 1 FOO 2122252543 172.16.156.106 49760 typ host").is_err());
assert!(parse_attribute("candidate:0 1 UDP foo 172.16.156.106 49760 typ host").is_err());
assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156 49760 typ host").is_err());
assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 70000 typ host").is_err());
assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 type host")
.is_err());
assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ fost").is_err());
// FIXME this should fail without the extra 'foobar' at the end
assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host unsupported foobar").is_err());
assert!(parse_attribute("candidate:1 1 UDP 1685987071 24.23.204.141 54609 typ srflx raddr 192.168.1 rport 61665").is_err());
assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host tcptype foobar").is_err());
assert!(parse_attribute("candidate:1 1 UDP 1685987071 24.23.204.141 54609 typ srflx raddr 192.168.1 rport 61665").is_err());
assert!(parse_attribute("candidate:1 1 UDP 1685987071 24.23.204.141 54609 typ srflx raddr 192.168.1.4 rport 70000").is_err());
}
#[test]
fn test_parse_attribute_end_of_candidates() {
    // Flag attribute: valid bare, invalid with any value.
    assert!(parse_attribute("end-of-candidates").is_ok());
    assert!(parse_attribute("end-of-candidates foobar").is_err());
}
#[test]
fn test_parse_attribute_extmap() {
assert!(parse_attribute("extmap:1/sendonly urn:ietf:params:rtp-hdrext:ssrc-audio-level")
.is_ok());
assert!(parse_attribute("extmap:2/sendrecv urn:ietf:params:rtp-hdrext:ssrc-audio-level")
.is_ok());
assert!(parse_attribute("extmap:3 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time")
.is_ok());
assert!(parse_attribute("extmap:a/sendrecv urn:ietf:params:rtp-hdrext:ssrc-audio-level")
.is_err());
assert!(parse_attribute("extmap:4/unsupported urn:ietf:params:rtp-hdrext:ssrc-audio-level")
.is_err());
}
#[test]
fn test_parse_attribute_fingerprint() {
    // "<hash-algorithm> <fingerprint>" two-token form.
    assert!(parse_attribute("fingerprint:sha-256 CD:34:D1:62:16:95:7B:B7:EB:74:E2:39:27:97:EB:0B:23:73:AC:BC:BF:2F:E3:91:CB:57:A9:9D:4A:A2:0B:40").is_ok())
}
#[test]
fn test_parse_attribute_fmtp() {
    // "<payload-type> <parameters>" two-token form.
    assert!(parse_attribute("fmtp:109 maxplaybackrate=48000;stereo=1;useinbandfec=1").is_ok())
}
#[test]
fn test_parse_attribute_group() {
    // Known semantics tokens with optional tags; empty or unknown
    // semantics must fail.
    assert!(parse_attribute("group:LS").is_ok());
    assert!(parse_attribute("group:LS 1 2").is_ok());
    assert!(parse_attribute("group:BUNDLE sdparta_0 sdparta_1 sdparta_2").is_ok());
    assert!(parse_attribute("group:").is_err());
    assert!(parse_attribute("group:NEVER_SUPPORTED_SEMANTICS").is_err());
}
#[test]
fn test_parse_attribute_bundle_only() {
    // Flag attribute: valid bare, invalid with any value.
    assert!(parse_attribute("bundle-only").is_ok());
    assert!(parse_attribute("bundle-only foobar").is_err());
}
#[test]
fn test_parse_attribute_ice_lite() {
    // Flag attribute: valid bare, invalid with any value.
    assert!(parse_attribute("ice-lite").is_ok());
    assert!(parse_attribute("ice-lite foobar").is_err());
}
#[test]
fn test_parse_attribute_ice_mismatch() {
    // Flag attribute: valid bare, invalid with any value.
    assert!(parse_attribute("ice-mismatch").is_ok());
    assert!(parse_attribute("ice-mismatch foobar").is_err());
}
#[test]
fn test_parse_attribute_ice_options() {
    // Requires a non-empty value.
    assert!(parse_attribute("ice-options:trickle").is_ok());
    assert!(parse_attribute("ice-options:").is_err());
}
#[test]
fn test_parse_attribute_ice_pwd() {
    // Requires a non-empty value.
    assert!(parse_attribute("ice-pwd:e3baa26dd2fa5030d881d385f1e36cce").is_ok());
    assert!(parse_attribute("ice-pwd:").is_err());
}
#[test]
fn test_parse_attribute_ice_ufrag() {
    // Requires a non-empty value.
    assert!(parse_attribute("ice-ufrag:58b99ead").is_ok());
    assert!(parse_attribute("ice-ufrag:").is_err());
}
#[test]
fn test_parse_attribute_identity() {
    // Requires a non-empty value.
    assert!(parse_attribute("identity:eyJpZHAiOnsiZG9tYWluIjoiZXhhbXBsZS5vcmciLCJwcm90b2NvbCI6ImJvZ3VzIn0sImFzc2VydGlvbiI6IntcImlkZW50aXR5XCI6XCJib2JAZXhhbXBsZS5vcmdcIixcImNvbnRlbnRzXCI6XCJhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3l6XCIsXCJzaWduYXR1cmVcIjpcIjAxMDIwMzA0MDUwNlwifSJ9").is_ok());
    assert!(parse_attribute("identity:").is_err());
}
#[test]
fn test_parse_attribute_imageattr() {
    // Stored as an opaque non-empty string (no deep parsing yet).
    assert!(parse_attribute("imageattr:120 send * recv *").is_ok());
    assert!(parse_attribute("imageattr:97 send [x=800,y=640,sar=1.1,q=0.6] [x=480,y=320] recv [x=330,y=250]").is_ok());
    assert!(parse_attribute("imageattr:97 recv [x=800,y=640,sar=1.1] send [x=330,y=250]").is_ok());
    assert!(parse_attribute("imageattr:97 send [x=[480:16:800],y=[320:16:640],par=[1.2-1.3],q=0.6] [x=[176:8:208],y=[144:8:176],par=[1.2-1.3]] recv *").is_ok());
    assert!(parse_attribute("imageattr:").is_err());
}
#[test]
fn test_parse_attribute_inactive() {
    // Flag attribute: valid bare, invalid with any value.
    assert!(parse_attribute("inactive").is_ok());
    assert!(parse_attribute("inactive foobar").is_err());
}
#[test]
fn test_parse_attribute_label() {
    // Any non-empty string value is accepted.
    assert!(parse_attribute("label:1").is_ok());
    assert!(parse_attribute("label:foobar").is_ok());
    assert!(parse_attribute("label:foobar barfoo").is_ok());
    assert!(parse_attribute("label:").is_err());
}
#[test]
fn test_parse_attribute_maxptime() {
    // Requires an unsigned integer value.
    assert!(parse_attribute("maxptime:60").is_ok());
    assert!(parse_attribute("maxptime:").is_err());
}
#[test]
fn test_parse_attribute_mid() {
    // Any non-empty string value is accepted.
    assert!(parse_attribute("mid:sdparta_0").is_ok());
    assert!(parse_attribute("mid:sdparta_0 sdparta_1 sdparta_2").is_ok());
    assert!(parse_attribute("mid:").is_err());
}
#[test]
fn test_parse_attribute_msid() {
    // "<id> [appdata]" — id is mandatory, appdata optional.
    assert!(parse_attribute("msid:{5a990edd-0568-ac40-8d97-310fc33f3411}").is_ok());
    assert!(parse_attribute("msid:{5a990edd-0568-ac40-8d97-310fc33f3411} {218cfa1c-617d-2249-9997-60929ce4c405}").is_ok());
    assert!(parse_attribute("msid:").is_err());
}
#[test]
fn test_parse_attribute_msid_semantics() {
    // Stored as an opaque non-empty string.
    assert!(parse_attribute("msid-semantic:WMS *").is_ok())
}
#[test]
fn test_parse_attribute_ptime() {
    // Requires an unsigned integer value.
    assert!(parse_attribute("ptime:30").is_ok());
    assert!(parse_attribute("ptime:").is_err());
}
#[test]
fn test_parse_attribute_rid() {
    // Any non-empty string value is accepted.
    assert!(parse_attribute("rid:foo send").is_ok());
    assert!(parse_attribute("rid:foo").is_ok());
    assert!(parse_attribute("rid:").is_err());
}
#[test]
fn test_parse_attribute_recvonly() {
    // Flag attribute: valid bare, invalid with any value.
    assert!(parse_attribute("recvonly").is_ok());
    assert!(parse_attribute("recvonly foobar").is_err());
}
#[test]
fn test_parse_attribute_sendonly() {
    // Flag attribute: valid bare, invalid with any value.
    assert!(parse_attribute("sendonly").is_ok());
    assert!(parse_attribute("sendonly foobar").is_err());
}
#[test]
fn test_parse_attribute_sendrecv() {
    // Flag attribute: valid bare, invalid with any value.
    assert!(parse_attribute("sendrecv").is_ok());
    assert!(parse_attribute("sendrecv foobar").is_err());
}
#[test]
fn test_parse_attribute_setup() {
    // Only the four defined roles are accepted.
    assert!(parse_attribute("setup:active").is_ok());
    assert!(parse_attribute("setup:passive").is_ok());
    assert!(parse_attribute("setup:actpass").is_ok());
    assert!(parse_attribute("setup:holdconn").is_ok());
    assert!(parse_attribute("setup:").is_err());
    assert!(parse_attribute("setup:foobar").is_err());
}
#[test]
fn test_parse_attribute_rtcp() {
    // "<port> [<nettype> <addrtype> <addr>]"; once the address part starts
    // all three tokens are required and must be consistent.
    assert!(parse_attribute("rtcp:5000").is_ok());
    assert!(parse_attribute("rtcp:9 IN IP4 0.0.0.0").is_ok());
    assert!(parse_attribute("rtcp:").is_err());
    assert!(parse_attribute("rtcp:70000").is_err());
    assert!(parse_attribute("rtcp:9 IN").is_err());
    assert!(parse_attribute("rtcp:9 IN IP4").is_err());
    assert!(parse_attribute("rtcp:9 IN IP4 ::1").is_err());
}
#[test]
fn test_parse_attribute_rtcp_fb() {
    // "<payload-type> <feedback-type>" form.
    assert!(parse_attribute("rtcp-fb:101 ccm fir").is_ok())
}
#[test]
fn test_parse_attribute_rtcp_mux() {
    // Flag attribute: valid bare, invalid with any value.
    assert!(parse_attribute("rtcp-mux").is_ok());
    assert!(parse_attribute("rtcp-mux foobar").is_err());
}
#[test]
fn test_parse_attribute_rtcp_rsize() {
    // Flag attribute: valid bare, invalid with any value.
    assert!(parse_attribute("rtcp-rsize").is_ok());
    assert!(parse_attribute("rtcp-rsize foobar").is_err());
}
#[test]
fn test_parse_attribute_rtpmap() {
    // "<payload-type> <codec>/<frequency>/<channels>" form.
    assert!(parse_attribute("rtpmap:109 opus/48000/2").is_ok())
}
#[test]
fn test_parse_attribute_sctpmap() {
    // "<port> webrtc-datachannel <channels>"; port must fit in 16 bits.
    assert!(parse_attribute("sctpmap:5000 webrtc-datachannel 256").is_ok());
    assert!(parse_attribute("sctpmap:70000 webrtc-datachannel 256").is_err());
    assert!(parse_attribute("sctpmap:5000 unsupported 256").is_err());
    assert!(parse_attribute("sctpmap:5000 webrtc-datachannel 2a").is_err());
}
#[test]
fn test_parse_attribute_sctp_port() {
    // Port value must be numeric and fit in 16 bits.
    assert!(parse_attribute("sctp-port:5000").is_ok());
    assert!(parse_attribute("sctp-port:").is_err());
    assert!(parse_attribute("sctp-port:70000").is_err());
}
#[test]
fn test_parse_attribute_max_message_size() {
    // Parsed as u64, so values beyond 32 bits are accepted.
    assert!(parse_attribute("max-message-size:1").is_ok());
    assert!(parse_attribute("max-message-size:100000").is_ok());
    assert!(parse_attribute("max-message-size:4294967297").is_ok());
    assert!(parse_attribute("max-message-size:0").is_ok());
    assert!(parse_attribute("max-message-size:").is_err());
    assert!(parse_attribute("max-message-size:abc").is_err());
}
#[test]
fn test_parse_attribute_simulcast() {
    // Alternating "send <ids>" / "recv <ids>" sections; id lists use ';'
    // for streams, ',' for alternatives and '~' for paused ids.
    assert!(parse_attribute("simulcast:send 1").is_ok());
    assert!(parse_attribute("simulcast:recv test").is_ok());
    assert!(parse_attribute("simulcast:recv ~test").is_ok());
    assert!(parse_attribute("simulcast:recv test;foo").is_ok());
    assert!(parse_attribute("simulcast:recv foo,bar").is_ok());
    assert!(parse_attribute("simulcast:recv foo,bar;test").is_ok());
    assert!(parse_attribute("simulcast:recv 1;4,5 send 6;7").is_ok());
    assert!(parse_attribute("simulcast:send 1,2,3;~4,~5 recv 6;~7,~8").is_ok());
    // old draft 03 notation used by Firefox 55
    assert!(parse_attribute("simulcast: send rid=foo;bar").is_ok());
    assert!(parse_attribute("simulcast:").is_err());
    assert!(parse_attribute("simulcast:send").is_err());
    assert!(parse_attribute("simulcast:foobar 1").is_err());
    assert!(parse_attribute("simulcast:send 1 foobar 2").is_err());
}
#[test]
fn test_parse_attribute_ssrc() {
assert!(parse_attribute("ssrc:2655508255").is_ok());
assert!(parse_attribute("ssrc:2655508255 foo").is_ok());
assert!(parse_attribute("ssrc:2655508255 cname:{735484ea-4f6c-f74a-bd66-7425f8476c2e}")
.is_ok());
assert!(parse_attribute("ssrc:").is_err());
assert!(parse_attribute("ssrc:foo").is_err());
}
#[test]
fn test_parse_attribute_ssrc_group() {
    // Stored as an opaque non-empty string.
    assert!(parse_attribute("ssrc-group:FID 3156517279 2673335628").is_ok())
}
#[test]
fn test_parse_unknown_attribute() {
    // Unrecognized attribute names yield an Unsupported error.
    assert!(parse_attribute("unknown").is_err())
}
// added remote-candidates attribute parser
use std::fmt;
use std::net::IpAddr;
use SdpLine;
use error::SdpParserError;
use network::{parse_nettype, parse_addrtype, parse_unicast_addr};
/// All SDP attribute names known to this parser; `parse_attribute()` maps
/// the lowercased textual name to one of these variants.
#[derive(Clone)]
pub enum SdpAttributeType {
    // TODO consolidate these into groups
    BundleOnly,
    Candidate,
    EndOfCandidates,
    Extmap,
    Fingerprint,
    Fmtp,
    Group,
    IceLite,
    IceMismatch,
    IceOptions,
    IcePwd,
    IceUfrag,
    Identity,
    ImageAttr,
    Inactive,
    Label,
    MaxMessageSize,
    MaxPtime,
    Mid,
    Msid,
    MsidSemantic,
    Ptime,
    Rid,
    Recvonly,
    RemoteCandidate,
    Rtcp,
    RtcpFb,
    RtcpMux,
    RtcpRsize,
    Rtpmap,
    Sctpmap,
    SctpPort,
    Sendonly,
    Sendrecv,
    Setup,
    Simulcast,
    Ssrc,
    SsrcGroup,
}
impl fmt::Display for SdpAttributeType {
    /// Writes a human-readable, hyphenated name for the attribute type,
    /// matching the convention used by every other multi-word variant
    /// (e.g. "Bundle-Only", "End-Of-Candidates").
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let printable = match *self {
            SdpAttributeType::BundleOnly => "Bundle-Only",
            SdpAttributeType::Candidate => "Candidate",
            SdpAttributeType::EndOfCandidates => "End-Of-Candidates",
            SdpAttributeType::Extmap => "Extmap",
            SdpAttributeType::Fingerprint => "Fingerprint",
            SdpAttributeType::Fmtp => "Fmtp",
            SdpAttributeType::IceLite => "Ice-Lite",
            SdpAttributeType::Group => "Group",
            SdpAttributeType::IceMismatch => "Ice-Mismatch",
            SdpAttributeType::IceOptions => "Ice-Options",
            SdpAttributeType::IcePwd => "Ice-Pwd",
            SdpAttributeType::IceUfrag => "Ice-Ufrag",
            SdpAttributeType::Identity => "Identity",
            SdpAttributeType::ImageAttr => "Imageattr",
            SdpAttributeType::Inactive => "Inactive",
            SdpAttributeType::Label => "Label",
            SdpAttributeType::MaxMessageSize => "Max-Message-Size",
            SdpAttributeType::MaxPtime => "Max-Ptime",
            SdpAttributeType::Mid => "Mid",
            SdpAttributeType::Msid => "Msid",
            SdpAttributeType::MsidSemantic => "Msid-Semantic",
            SdpAttributeType::Ptime => "Ptime",
            SdpAttributeType::Rid => "Rid",
            SdpAttributeType::Recvonly => "Recvonly",
            // Hyphenated for consistency with the other multi-word names
            // (was "RemoteCandidate").
            SdpAttributeType::RemoteCandidate => "Remote-Candidate",
            SdpAttributeType::Rtcp => "Rtcp",
            SdpAttributeType::RtcpFb => "Rtcp-Fb",
            SdpAttributeType::RtcpMux => "Rtcp-Mux",
            SdpAttributeType::RtcpRsize => "Rtcp-Rsize",
            SdpAttributeType::Rtpmap => "Rtpmap",
            SdpAttributeType::Sctpmap => "Sctpmap",
            SdpAttributeType::SctpPort => "Sctp-Port",
            SdpAttributeType::Sendonly => "Sendonly",
            SdpAttributeType::Sendrecv => "Sendrecv",
            SdpAttributeType::Setup => "Setup",
            SdpAttributeType::Simulcast => "Simulcast",
            SdpAttributeType::Ssrc => "Ssrc",
            SdpAttributeType::SsrcGroup => "Ssrc-Group",
        };
        write!(f, "{}", printable)
    }
}
/// Transport protocol token of an ICE candidate ("UDP" or "TCP").
#[derive(Clone)]
pub enum SdpAttributeCandidateTransport {
    Udp,
    Tcp,
}
/// ICE candidate type ("host", "srflx", "prflx" or "relay").
#[derive(Clone)]
pub enum SdpAttributeCandidateType {
    Host,
    Srflx,
    Prflx,
    Relay,
}
/// TCP candidate type from the "tcptype" extension
/// ("active", "passive" or "so").
#[derive(Clone)]
pub enum SdpAttributeCandidateTcpType {
    Active,
    Passive,
    Simultaneous,
}
/// Parsed "candidate" attribute: the eight mandatory fields plus the
/// optional raddr/rport/tcptype extensions.
#[derive(Clone)]
pub struct SdpAttributeCandidate {
    pub foundation: String,
    pub component: u32,
    pub transport: SdpAttributeCandidateTransport,
    pub priority: u64,
    pub address: IpAddr,
    pub port: u32,
    pub c_type: SdpAttributeCandidateType,
    pub raddr: Option<IpAddr>,    // "raddr" extension
    pub rport: Option<u32>,       // "rport" extension
    pub tcp_type: Option<SdpAttributeCandidateTcpType>, // "tcptype" extension
}
impl SdpAttributeCandidate {
    /// Creates a candidate from the eight mandatory candidate-attribute
    /// fields; the optional raddr/rport/tcptype extensions start out unset.
    pub fn new(foundation: String,
               component: u32,
               transport: SdpAttributeCandidateTransport,
               priority: u64,
               address: IpAddr,
               port: u32,
               c_type: SdpAttributeCandidateType)
               -> SdpAttributeCandidate {
        SdpAttributeCandidate {
            raddr: None,
            rport: None,
            tcp_type: None,
            foundation,
            component,
            transport,
            priority,
            address,
            port,
            c_type,
        }
    }

    /// Stores the "raddr" extension value.
    fn set_remote_address(&mut self, addr: IpAddr) {
        self.raddr = Some(addr);
    }

    /// Stores the "rport" extension value.
    fn set_remote_port(&mut self, port: u32) {
        self.rport = Some(port);
    }

    /// Stores the "tcptype" extension value.
    fn set_tcp_type(&mut self, tcp_type: SdpAttributeCandidateTcpType) {
        self.tcp_type = Some(tcp_type);
    }
}
/// Parsed "remote-candidates" entry: component id plus transport address.
#[derive(Clone)]
pub struct SdpAttributeRemoteCandidate {
    pub component: u32,
    pub address: IpAddr,
    pub port: u32,
}
/// A single simulcast stream id; `paused` is set when the id carried a
/// leading '~' in the attribute value.
#[derive(Clone)]
pub struct SdpAttributeSimulcastId {
    pub id: String,
    pub paused: bool,
}
impl SdpAttributeSimulcastId {
    /// Creates an id from one token of a simulcast id list; a leading '~'
    /// marks the stream as paused and is stripped from the stored id.
    pub fn new(idstr: String) -> SdpAttributeSimulcastId {
        let paused = idstr.starts_with('~');
        let id = if paused {
            idstr[1..].to_string()
        } else {
            idstr
        };
        SdpAttributeSimulcastId { id, paused }
    }
}
/// One comma-separated group of alternative simulcast stream ids.
#[derive(Clone)]
pub struct SdpAttributeSimulcastAlternatives {
    pub ids: Vec<SdpAttributeSimulcastId>,
}
impl SdpAttributeSimulcastAlternatives {
    /// Splits a comma-separated token into its alternative stream ids.
    pub fn new(idlist: String) -> SdpAttributeSimulcastAlternatives {
        let mut ids = Vec::new();
        for token in idlist.split(',') {
            ids.push(SdpAttributeSimulcastId::new(token.to_string()));
        }
        SdpAttributeSimulcastAlternatives { ids }
    }
}
/// Parsed "simulcast" attribute: the send and receive id lists.
#[derive(Clone)]
pub struct SdpAttributeSimulcast {
    pub send: Vec<SdpAttributeSimulcastAlternatives>,
    pub receive: Vec<SdpAttributeSimulcastAlternatives>,
}
impl SdpAttributeSimulcast {
    /// Parses a ';'-separated id list and assigns it to the send or
    /// receive side depending on `direction`; Sendrecv is ignored.
    fn parse_ids(&mut self, direction: SdpAttributeDirection, idlist: String) {
        let mut list = Vec::new();
        for alternatives in idlist.split(';') {
            list.push(SdpAttributeSimulcastAlternatives::new(alternatives.to_string()));
        }
        // TODO prevent over-writing existing values
        match direction {
            SdpAttributeDirection::Recvonly => self.receive = list,
            SdpAttributeDirection::Sendonly => self.send = list,
            _ => (),
        }
    }
}
/// Parsed "rtcp" attribute: a port plus an optional unicast address.
#[derive(Clone)]
pub struct SdpAttributeRtcp {
    pub port: u32,
    pub unicast_addr: Option<IpAddr>,
}
impl SdpAttributeRtcp {
    /// Creates an rtcp value with the mandatory port and no address yet.
    pub fn new(p: u32) -> SdpAttributeRtcp {
        SdpAttributeRtcp {
            port: p,
            unicast_addr: None,
        }
    }

    /// Stores the optional unicast address following the port.
    fn set_addr(&mut self, a: IpAddr) {
        self.unicast_addr = Some(a);
    }
}
/// Parsed "rtcp-fb" attribute: payload type plus the raw feedback string.
#[derive(Clone)]
pub struct SdpAttributeRtcpFb {
    pub payload_type: u32,
    // TODO parse this and use an enum instead?
    pub feedback_type: String,
}
/// Media direction as used in extmap and simulcast values.
#[derive(Clone)]
pub enum SdpAttributeDirection {
    Recvonly,
    Sendonly,
    Sendrecv,
}
/// Parsed "extmap" attribute: id, optional direction suffix, and URL.
#[derive(Clone)]
pub struct SdpAttributeExtmap {
    pub id: u32,
    pub direction: Option<SdpAttributeDirection>,
    pub url: String,
}
/// Parsed "fmtp" attribute: payload type plus its ';'-separated tokens.
#[derive(Clone)]
pub struct SdpAttributeFmtp {
    pub payload_type: u32,
    pub tokens: Vec<String>,
}
/// Parsed "fingerprint" attribute: hash algorithm plus fingerprint string.
#[derive(Clone)]
pub struct SdpAttributeFingerprint {
    // TODO turn the supported hash algorithms into an enum?
    pub hash_algorithm: String,
    pub fingerprint: String,
}
#[derive(Clone)]
/// Parsed "a=sctpmap" value: port plus channel count.
pub struct SdpAttributeSctpmap {
    pub port: u32,
    pub channels: u32,
}
#[derive(Clone)]
/// Semantics token of an "a=group" attribute; comments show the SDP
/// token each variant is parsed from.
pub enum SdpAttributeGroupSemantic {
    LipSynchronization,          // "LS"
    FlowIdentification,          // "FID"
    SingleReservationFlow,       // "SRF"
    AlternateNetworkAddressType, // "ANAT"
    ForwardErrorCorrection,      // "FEC"
    DecodingDependency,          // "DDP"
    Bundle,                      // "BUNDLE"
}
#[derive(Clone)]
/// Parsed "a=group" value: semantics plus the remaining tag tokens.
pub struct SdpAttributeGroup {
    pub semantics: SdpAttributeGroupSemantic,
    pub tags: Vec<String>,
}
#[derive(Clone)]
/// Parsed "a=msid" value: id plus optional appdata token.
pub struct SdpAttributeMsid {
    pub id: String,
    pub appdata: Option<String>,
}
#[derive(Clone)]
/// Parsed "a=rtpmap" value: "payload_type codec[/frequency[/channels]]".
pub struct SdpAttributeRtpmap {
    pub payload_type: u32,
    pub codec_name: String,
    pub frequency: Option<u32>,
    pub channels: Option<u32>,
}
impl SdpAttributeRtpmap {
    /// Create an rtpmap entry; frequency and channel count start unset
    /// and are filled in by the setters below while parsing.
    pub fn new(payload_type: u32, codec_name: String) -> SdpAttributeRtpmap {
        SdpAttributeRtpmap {
            channels: None,
            frequency: None,
            payload_type,
            codec_name,
        }
    }
    /// Record the codec clock rate.
    fn set_frequency(&mut self, freq: u32) {
        self.frequency = Some(freq);
    }
    /// Record the codec channel count.
    fn set_channels(&mut self, count: u32) {
        self.channels = Some(count);
    }
}
#[derive(Clone)]
/// Parsed "a=setup" value; variants match the lowercase SDP tokens
/// "active", "actpass", "holdconn" and "passive".
pub enum SdpAttributeSetup {
    Active,
    Actpass,
    Holdconn,
    Passive,
}
#[derive(Clone)]
/// Parsed "a=ssrc" value: numeric id plus optional "attribute[:value]".
pub struct SdpAttributeSsrc {
    pub id: u32,
    pub attribute: Option<String>,
    pub value: Option<String>,
}
impl SdpAttributeSsrc {
    /// Create an ssrc entry with only the numeric ssrc-id set.
    pub fn new(id: u32) -> SdpAttributeSsrc {
        SdpAttributeSsrc {
            attribute: None,
            value: None,
            id,
        }
    }
    /// Store the trailing "attribute" or "attribute:value" token of an
    /// ssrc line; without a ':' only the attribute name is recorded.
    fn set_attribute(&mut self, a: &str) {
        match a.find(':') {
            None => self.attribute = Some(a.to_string()),
            Some(pos) => {
                self.attribute = Some(a[..pos].to_string());
                self.value = Some(a[pos + 1..].to_string());
            }
        }
    }
}
#[derive(Clone)]
/// Typed payload of a parsed attribute; the variant chosen depends on
/// the SdpAttributeType of the owning SdpAttribute (see parse_value()).
pub enum SdpAttributeValue {
    Str(String),
    Int(u64),
    Vector(Vec<String>),
    Candidate(SdpAttributeCandidate),
    Extmap(SdpAttributeExtmap),
    Fingerprint(SdpAttributeFingerprint),
    Fmtp(SdpAttributeFmtp),
    Group(SdpAttributeGroup),
    Msid(SdpAttributeMsid),
    RemoteCandidate(SdpAttributeRemoteCandidate),
    Rtpmap(SdpAttributeRtpmap),
    Rtcp(SdpAttributeRtcp),
    Rtcpfb(SdpAttributeRtcpFb),
    Sctpmap(SdpAttributeSctpmap),
    Setup(SdpAttributeSetup),
    Simulcast(SdpAttributeSimulcast),
    Ssrc(SdpAttributeSsrc),
}
#[derive(Clone)]
/// One parsed SDP "a=" line: attribute type plus its optional value.
pub struct SdpAttribute {
    pub name: SdpAttributeType,
    // None for property (flag) attributes and before parse_value() ran.
    pub value: Option<SdpAttributeValue>,
}
impl SdpAttribute {
pub fn new(name: SdpAttributeType) -> SdpAttribute {
SdpAttribute { name, value: None }
}
pub fn parse_value(&mut self, v: &str) -> Result<(), SdpParserError> {
match self.name {
SdpAttributeType::BundleOnly |
SdpAttributeType::EndOfCandidates |
SdpAttributeType::IceLite |
SdpAttributeType::IceMismatch |
SdpAttributeType::Inactive |
SdpAttributeType::Recvonly |
SdpAttributeType::RtcpMux |
SdpAttributeType::RtcpRsize |
SdpAttributeType::Sendonly |
SdpAttributeType::Sendrecv => {
if !v.is_empty() {
return Err(SdpParserError::Line{
message: "This attribute is not allowed to have a value".to_string(),
line: v.to_string()})
}
},
SdpAttributeType::MaxMessageSize |
SdpAttributeType::MaxPtime |
SdpAttributeType::Ptime => {
self.value = Some(SdpAttributeValue::Int(v.parse::<u64>()?))
},
SdpAttributeType::IcePwd |
SdpAttributeType::IceUfrag |
SdpAttributeType::Identity |
SdpAttributeType::ImageAttr | // TODO implemente if needed
SdpAttributeType::Label |
SdpAttributeType::Mid |
SdpAttributeType::MsidSemantic | // mmusic-msid-16 doesnt have this
SdpAttributeType::Rid |
SdpAttributeType::SsrcGroup => { // not in JSEP any more...
if v.is_empty() {
return Err(SdpParserError::Line{
message: "This attribute is required to have a value".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Str(v.to_string()))
},
SdpAttributeType::Candidate => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() < 8 {
return Err(SdpParserError::Line{
message: "Candidate needs to have minimum eigth tokens".to_string(),
line: v.to_string()})
}
let component = tokens[1].parse::<u32>()?;
let transport = match tokens[2].to_lowercase().as_ref() {
"udp" => SdpAttributeCandidateTransport::Udp,
"tcp" => SdpAttributeCandidateTransport::Tcp,
_ => return Err(SdpParserError::Line{
message: "Unknonw candidate transport value".to_string(),
line: v.to_string()})
};
let priority = tokens[3].parse::<u64>()?;
let address = parse_unicast_addr(tokens[4])?;
let port = tokens[5].parse::<u32>()?;
if port > 65535 {
return Err(SdpParserError::Line{
message: "ICE candidate port can only be a bit 16bit number".to_string(),
line: v.to_string()})
}
match tokens[6].to_lowercase().as_ref() {
"typ" => (),
_ => return Err(SdpParserError::Line{
message: "Candidate attribute token must be 'typ'".to_string(),
line: v.to_string()})
};
let cand_type = match tokens[7].to_lowercase().as_ref() {
"host" => SdpAttributeCandidateType::Host,
"srflx" => SdpAttributeCandidateType::Srflx,
"prflx" => SdpAttributeCandidateType::Prflx,
"relay" => SdpAttributeCandidateType::Relay,
_ => return Err(SdpParserError::Line{
message: "Unknow candidate type value".to_string(),
line: v.to_string()})
};
let mut cand = SdpAttributeCandidate::new(tokens[0].to_string(),
component,
transport,
priority,
address,
port,
cand_type);
if tokens.len() > 8 {
let mut index = 8;
while tokens.len() > index + 1 {
match tokens[index].to_lowercase().as_ref() {
"raddr" => {
let addr = parse_unicast_addr(tokens[index + 1])?;
cand.set_remote_address(addr);
index += 2;
},
"rport" => {
let port = tokens[index + 1].parse::<u32>()?;
if port > 65535 {
return Err(SdpParserError::Line{
message: "ICE candidate rport can only be a bit 16bit number".to_string(),
line: v.to_string()})
}
cand.set_remote_port(port);
index += 2;
},
"tcptype" => {
cand.set_tcp_type(match tokens[index + 1].to_lowercase().as_ref() {
"active" => SdpAttributeCandidateTcpType::Active,
"passive" => SdpAttributeCandidateTcpType::Passive,
"so" => SdpAttributeCandidateTcpType::Simultaneous,
_ => return Err(SdpParserError::Line{
message: "Unknown tcptype value in candidate line".to_string(),
line: v.to_string()})
});
index += 2;
},
_ => return Err(SdpParserError::Unsupported{
message: "Uknown candidate extension name".to_string(),
line: v.to_string()})
};
}
}
self.value = Some(SdpAttributeValue::Candidate(cand))
},
SdpAttributeType::Extmap => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() != 2 {
return Err(SdpParserError::Line{
message: "Extmap needs to have two tokens".to_string(),
line: v.to_string()})
}
let id: u32;
let mut direction: Option<SdpAttributeDirection> = None;
if tokens[0].find('/') == None {
id = tokens[0].parse::<u32>()?;
} else {
let id_dir: Vec<&str> = tokens[0].splitn(2, '/').collect();
id = id_dir[0].parse::<u32>()?;
direction = Some(match id_dir[1].to_lowercase().as_ref() {
"recvonly" => SdpAttributeDirection::Recvonly,
"sendonly" => SdpAttributeDirection::Sendonly,
"sendrecv" => SdpAttributeDirection::Sendrecv,
_ => return Err(SdpParserError::Line{
message: "Unsupported direction in extmap value".to_string(),
line: v.to_string()}),
})
}
self.value = Some(SdpAttributeValue::Extmap(
SdpAttributeExtmap {
id,
direction,
url: tokens[1].to_string()
}
))
},
SdpAttributeType::Fingerprint => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() != 2 {
return Err(SdpParserError::Line{
message: "Fingerprint needs to have two tokens".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Fingerprint(
SdpAttributeFingerprint {
hash_algorithm: tokens[0].to_string(),
fingerprint: tokens[1].to_string()
}
))
},
SdpAttributeType::Fmtp => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() != 2 {
return Err(SdpParserError::Line{
message: "Fmtp needs to have two tokens".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Fmtp(
SdpAttributeFmtp {
// TODO check for dynamic PT range
payload_type: tokens[0].parse::<u32>()?,
// TODO this should probably be slit into known tokens
// plus a list of unknown tokens
tokens: v.split(';').map(|x| x.to_string()).collect()
}
))
},
SdpAttributeType::Group => {
let mut tokens = v.split_whitespace();
let semantics = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Group attribute is missing semantics token".to_string(),
line: v.to_string()}),
Some(x) => match x.to_uppercase().as_ref() {
"LS" => SdpAttributeGroupSemantic::LipSynchronization,
"FID" => SdpAttributeGroupSemantic::FlowIdentification,
"SRF" => SdpAttributeGroupSemantic::SingleReservationFlow,
"ANAT" => SdpAttributeGroupSemantic::AlternateNetworkAddressType,
"FEC" => SdpAttributeGroupSemantic::ForwardErrorCorrection,
"DDP" => SdpAttributeGroupSemantic::DecodingDependency,
"BUNDLE" => SdpAttributeGroupSemantic::Bundle,
_ => return Err(SdpParserError::Line{
message: "Unsupported group semantics".to_string(),
line: v.to_string()}),
}
};
self.value = Some(SdpAttributeValue::Group(
SdpAttributeGroup {
semantics,
tags: tokens.map(|x| x.to_string()).collect()
}
))
},
SdpAttributeType::IceOptions => {
if v.is_empty() {
return Err(SdpParserError::Line{
message: "ice-options is required to have a value".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Vector (
v.split_whitespace().map(|x| x.to_string()).collect()))
},
SdpAttributeType::Msid => {
let mut tokens = v.split_whitespace();
let id = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Msid attribute is missing msid-id token".to_string(),
line: v.to_string()}),
Some(x) => x.to_string()
};
let appdata = match tokens.next() {
None => None,
Some(x) => Some(x.to_string())
};
self.value = Some(SdpAttributeValue::Msid(
SdpAttributeMsid {
id,
appdata
}
))
},
SdpAttributeType::RemoteCandidate => {
let mut tokens = v.split_whitespace();
let component = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Remote-candidate attribute is missing component ID".to_string(),
line: v.to_string()}),
Some(x) => x.parse::<u32>()?
};
let address = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Remote-candidate attribute is missing connection address".to_string(),
line: v.to_string()}),
Some(x) => parse_unicast_addr(x)?
};
let port = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Remote-candidate attribute is missing port number".to_string(),
line: v.to_string()}),
Some(x) => x.parse::<u32>()?
};
if port > 65535 {
return Err(SdpParserError::Line{
message: "Remote-candidate port can only be a bit 16bit number".to_string(),
line: v.to_string()})
};
self.value = Some(SdpAttributeValue::RemoteCandidate(
SdpAttributeRemoteCandidate {
component,
address,
port
}
))
}
SdpAttributeType::Rtcp => {
let mut tokens = v.split_whitespace();
let port = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Rtcp attribute is missing port number".to_string(),
line: v.to_string()}),
Some(x) => x.parse::<u32>()?
};
if port > 65535 {
return Err(SdpParserError::Line{
message: "Rtcp port can only be a bit 16bit number".to_string(),
line: v.to_string()})
};
let mut rtcp = SdpAttributeRtcp::new(port);
match tokens.next() {
None => (),
Some(x) => {
parse_nettype(x)?;
match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Rtcp attribute is missing address type token".to_string(),
line: v.to_string()}),
Some(x) => {
let addrtype = parse_addrtype(x)?;
let addr = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Rtcp attribute is missing ip address token".to_string(),
line: v.to_string()}),
Some(x) => {
let addr = parse_unicast_addr(x)?;
if !addrtype.same_protocol(&addr) {
return Err(SdpParserError::Line {
message: "Failed to parse unicast address attribute.\
addrtype does not match address."
.to_string(),
line: x.to_string()
});
}
addr
},
};
rtcp.set_addr(addr);
},
};
},
};
self.value = Some(SdpAttributeValue::Rtcp(rtcp))
},
SdpAttributeType::RtcpFb => {
let tokens: Vec<&str> = v.splitn(2, ' ').collect();
self.value = Some(SdpAttributeValue::Rtcpfb(
SdpAttributeRtcpFb {
// TODO limit this to dymaic PTs
payload_type: tokens[0].parse::<u32>()?,
feedback_type: tokens[1].to_string()
}
));
},
SdpAttributeType::Rtpmap => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() != 2 {
return Err(SdpParserError::Line{
message: "Rtpmap needs to have two tokens".to_string(),
line: v.to_string()})
}
// TODO limit this to dymaic PTs
let payload_type: u32 = tokens[0].parse::<u32>()?;
let split: Vec<&str> = tokens[1].split('/').collect();
if split.len() > 3 {
return Err(SdpParserError::Line{
message: "Rtpmap codec token can max 3 subtokens".to_string(),
line: v.to_string()})
}
let mut rtpmap = SdpAttributeRtpmap::new(payload_type,
split[0].to_string());
if split.len() > 1 {
rtpmap.set_frequency(split[1].parse::<u32>()?);
}
if split.len() > 2 {
rtpmap.set_channels(split[2].parse::<u32>()?);
}
self.value = Some(SdpAttributeValue::Rtpmap(rtpmap))
},
SdpAttributeType::Sctpmap => {
let tokens: Vec<&str> = v.split_whitespace().collect();
if tokens.len() != 3 {
return Err(SdpParserError::Line{
message: "Sctpmap needs to have three tokens".to_string(),
line: v.to_string()})
}
let port = tokens[0].parse::<u32>()?;
if port > 65535 {
return Err(SdpParserError::Line{
message: "Sctpmap port can only be a bit 16bit number".to_string(),
line: v.to_string()})
}
if tokens[1].to_lowercase() != "webrtc-datachannel" {
return Err(SdpParserError::Line{
message: "Unsupported sctpmap type token".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Sctpmap(
SdpAttributeSctpmap {
port,
channels: tokens[2].parse::<u32>()?
}
));
},
SdpAttributeType::SctpPort => {
let port = v.parse::<u64>()?;
if port > 65535 {
return Err(SdpParserError::Line{
message: "Sctpport port can only be a bit 16bit number".to_string(),
line: v.to_string()})
}
self.value = Some(SdpAttributeValue::Int(port))
}
SdpAttributeType::Simulcast => {
let mut tokens = v.split_whitespace();
let mut token = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Simulcast attribute is missing send/recv value".to_string(),
line: v.to_string()}),
Some(x) => x,
};
let mut sc = SdpAttributeSimulcast {
send: Vec::new(),
receive: Vec::new()
};
loop {
let sendrecv = match token.to_lowercase().as_ref() {
"send" => SdpAttributeDirection::Sendonly,
"recv" => SdpAttributeDirection::Recvonly,
_ => return Err(SdpParserError::Line{
message: "Unsupported send/recv value in simulcast attribute".to_string(),
line: v.to_string()}),
};
match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Simulcast attribute is missing id list".to_string(),
line: v.to_string()}),
Some(x) => sc.parse_ids(sendrecv, x.to_string()),
};
token = match tokens.next() {
None => { break; },
Some(x) => x,
};
}
self.value = Some(SdpAttributeValue::Simulcast(sc))
},
SdpAttributeType::Setup => {
self.value = Some(SdpAttributeValue::Setup(
match v.to_lowercase().as_ref() {
"active" => SdpAttributeSetup::Active,
"actpass" => SdpAttributeSetup::Actpass,
"holdconn" => SdpAttributeSetup::Holdconn,
"passive" => SdpAttributeSetup::Passive,
_ => return Err(SdpParserError::Line{
message: "Unsupported setup value".to_string(),
line: v.to_string()}),
}
))
},
SdpAttributeType::Ssrc => {
let mut tokens = v.split_whitespace();
let ssrc_id = match tokens.next() {
None => return Err(SdpParserError::Line{
message: "Ssrc attribute is missing ssrc-id value".to_string(),
line: v.to_string()}),
Some(x) => x.parse::<u32>()?
};
let mut ssrc = SdpAttributeSsrc::new(ssrc_id);
match tokens.next() {
None => (),
Some(x) => ssrc.set_attribute(x),
};
self.value = Some(SdpAttributeValue::Ssrc(ssrc))
},
}
Ok(())
}
}
pub fn parse_attribute(value: &str) -> Result<SdpLine, SdpParserError> {
let name: &str;
let mut val: &str = "";
if value.find(':') == None {
name = value;
} else {
let v: Vec<&str> = value.splitn(2, ':').collect();
name = v[0];
val = v[1];
}
let attrtype = match name.to_lowercase().as_ref() {
"bundle-only" => SdpAttributeType::BundleOnly,
"candidate" => SdpAttributeType::Candidate,
"end-of-candidates" => SdpAttributeType::EndOfCandidates,
"extmap" => SdpAttributeType::Extmap,
"fingerprint" => SdpAttributeType::Fingerprint,
"fmtp" => SdpAttributeType::Fmtp,
"group" => SdpAttributeType::Group,
"ice-lite" => SdpAttributeType::IceLite,
"ice-mismatch" => SdpAttributeType::IceMismatch,
"ice-options" => SdpAttributeType::IceOptions,
"ice-pwd" => SdpAttributeType::IcePwd,
"ice-ufrag" => SdpAttributeType::IceUfrag,
"identity" => SdpAttributeType::Identity,
"imageattr" => SdpAttributeType::ImageAttr,
"inactive" => SdpAttributeType::Inactive,
"label" => SdpAttributeType::Label,
"max-message-size" => SdpAttributeType::MaxMessageSize,
"maxptime" => SdpAttributeType::MaxPtime,
"mid" => SdpAttributeType::Mid,
"msid" => SdpAttributeType::Msid,
"msid-semantic" => SdpAttributeType::MsidSemantic,
"ptime" => SdpAttributeType::Ptime,
"rid" => SdpAttributeType::Rid,
"recvonly" => SdpAttributeType::Recvonly,
"remote-candidates" => SdpAttributeType::RemoteCandidate,
"rtcp" => SdpAttributeType::Rtcp,
"rtcp-fb" => SdpAttributeType::RtcpFb,
"rtcp-mux" => SdpAttributeType::RtcpMux,
"rtcp-rsize" => SdpAttributeType::RtcpRsize,
"rtpmap" => SdpAttributeType::Rtpmap,
"sctpmap" => SdpAttributeType::Sctpmap,
"sctp-port" => SdpAttributeType::SctpPort,
"sendonly" => SdpAttributeType::Sendonly,
"sendrecv" => SdpAttributeType::Sendrecv,
"setup" => SdpAttributeType::Setup,
"simulcast" => SdpAttributeType::Simulcast,
"ssrc" => SdpAttributeType::Ssrc,
"ssrc-group" => SdpAttributeType::SsrcGroup,
_ => {
return Err(SdpParserError::Unsupported {
message: "unsupported attribute value".to_string(),
line: name.to_string(),
})
}
};
let mut attr = SdpAttribute::new(attrtype);
attr.parse_value(val.trim())?;
/*
println!("attribute: {}, {}",
a.name, a.value.some());
*/
Ok(SdpLine::Attribute(attr))
}
// Exercises the ICE candidate grammar: transports, candidate types,
// tcptype and the raddr/rport extensions, plus malformed inputs.
#[test]
fn test_parse_attribute_candidate() {
    assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ host").is_ok());
    assert!(parse_attribute("candidate:foo 1 UDP 2122252543 172.16.156.106 49760 typ host")
                .is_ok());
    assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host").is_ok());
    assert!(parse_attribute("candidate:0 1 TCP 2122252543 ::1 49760 typ host").is_ok());
    assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ srflx").is_ok());
    assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ prflx").is_ok());
    assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ relay").is_ok());
    assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host tcptype active").is_ok());
    assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host tcptype passive").is_ok());
    assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host tcptype so").is_ok());
    assert!(parse_attribute("candidate:1 1 UDP 1685987071 24.23.204.141 54609 typ srflx raddr 192.168.1.4 rport 61665").is_ok());
    assert!(parse_attribute("candidate:1 1 TCP 1685987071 24.23.204.141 54609 typ srflx raddr 192.168.1.4 rport 61665 tcptype passive").is_ok());
    assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ").is_err());
    assert!(parse_attribute("candidate:0 foo UDP 2122252543 172.16.156.106 49760 typ host")
                .is_err());
    assert!(parse_attribute("candidate:0 1 FOO 2122252543 172.16.156.106 49760 typ host").is_err());
    assert!(parse_attribute("candidate:0 1 UDP foo 172.16.156.106 49760 typ host").is_err());
    assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156 49760 typ host").is_err());
    assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 70000 typ host").is_err());
    assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 type host")
                .is_err());
    assert!(parse_attribute("candidate:0 1 UDP 2122252543 172.16.156.106 49760 typ fost").is_err());
    // FIXME this should fail without the extra 'foobar' at the end
    assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host unsupported foobar").is_err());
    assert!(parse_attribute("candidate:1 1 UDP 1685987071 24.23.204.141 54609 typ srflx raddr 192.168.1 rport 61665").is_err());
    assert!(parse_attribute("candidate:0 1 TCP 2122252543 172.16.156.106 49760 typ host tcptype foobar").is_err());
    assert!(parse_attribute("candidate:1 1 UDP 1685987071 24.23.204.141 54609 typ srflx raddr 192.168.1 rport 61665").is_err());
    assert!(parse_attribute("candidate:1 1 UDP 1685987071 24.23.204.141 54609 typ srflx raddr 192.168.1.4 rport 70000").is_err());
}
// Flag attributes (no value allowed) and the extmap/fingerprint/fmtp/group
// value grammars.
#[test]
fn test_parse_attribute_end_of_candidates() {
    assert!(parse_attribute("end-of-candidates").is_ok());
    assert!(parse_attribute("end-of-candidates foobar").is_err());
}
#[test]
fn test_parse_attribute_extmap() {
    assert!(parse_attribute("extmap:1/sendonly urn:ietf:params:rtp-hdrext:ssrc-audio-level")
                .is_ok());
    assert!(parse_attribute("extmap:2/sendrecv urn:ietf:params:rtp-hdrext:ssrc-audio-level")
                .is_ok());
    assert!(parse_attribute("extmap:3 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time")
                .is_ok());
    assert!(parse_attribute("extmap:a/sendrecv urn:ietf:params:rtp-hdrext:ssrc-audio-level")
                .is_err());
    assert!(parse_attribute("extmap:4/unsupported urn:ietf:params:rtp-hdrext:ssrc-audio-level")
                .is_err());
}
#[test]
fn test_parse_attribute_fingerprint() {
    assert!(parse_attribute("fingerprint:sha-256 CD:34:D1:62:16:95:7B:B7:EB:74:E2:39:27:97:EB:0B:23:73:AC:BC:BF:2F:E3:91:CB:57:A9:9D:4A:A2:0B:40").is_ok())
}
#[test]
fn test_parse_attribute_fmtp() {
    assert!(parse_attribute("fmtp:109 maxplaybackrate=48000;stereo=1;useinbandfec=1").is_ok())
}
#[test]
fn test_parse_attribute_group() {
    assert!(parse_attribute("group:LS").is_ok());
    assert!(parse_attribute("group:LS 1 2").is_ok());
    assert!(parse_attribute("group:BUNDLE sdparta_0 sdparta_1 sdparta_2").is_ok());
    assert!(parse_attribute("group:").is_err());
    assert!(parse_attribute("group:NEVER_SUPPORTED_SEMANTICS").is_err());
}
// Flag attributes plus simple string/integer-valued attributes
// (ice-*, identity, imageattr, label, maxptime, mid, msid, ptime, rid).
#[test]
fn test_parse_attribute_bundle_only() {
    assert!(parse_attribute("bundle-only").is_ok());
    assert!(parse_attribute("bundle-only foobar").is_err());
}
#[test]
fn test_parse_attribute_ice_lite() {
    assert!(parse_attribute("ice-lite").is_ok());
    assert!(parse_attribute("ice-lite foobar").is_err());
}
#[test]
fn test_parse_attribute_ice_mismatch() {
    assert!(parse_attribute("ice-mismatch").is_ok());
    assert!(parse_attribute("ice-mismatch foobar").is_err());
}
#[test]
fn test_parse_attribute_ice_options() {
    assert!(parse_attribute("ice-options:trickle").is_ok());
    assert!(parse_attribute("ice-options:").is_err());
}
#[test]
fn test_parse_attribute_ice_pwd() {
    assert!(parse_attribute("ice-pwd:e3baa26dd2fa5030d881d385f1e36cce").is_ok());
    assert!(parse_attribute("ice-pwd:").is_err());
}
#[test]
fn test_parse_attribute_ice_ufrag() {
    assert!(parse_attribute("ice-ufrag:58b99ead").is_ok());
    assert!(parse_attribute("ice-ufrag:").is_err());
}
#[test]
fn test_parse_attribute_identity() {
    assert!(parse_attribute("identity:eyJpZHAiOnsiZG9tYWluIjoiZXhhbXBsZS5vcmciLCJwcm90b2NvbCI6ImJvZ3VzIn0sImFzc2VydGlvbiI6IntcImlkZW50aXR5XCI6XCJib2JAZXhhbXBsZS5vcmdcIixcImNvbnRlbnRzXCI6XCJhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3l6XCIsXCJzaWduYXR1cmVcIjpcIjAxMDIwMzA0MDUwNlwifSJ9").is_ok());
    assert!(parse_attribute("identity:").is_err());
}
#[test]
fn test_parse_attribute_imageattr() {
    // imageattr is only stored as an opaque string (see parse_value TODO).
    assert!(parse_attribute("imageattr:120 send * recv *").is_ok());
    assert!(parse_attribute("imageattr:97 send [x=800,y=640,sar=1.1,q=0.6] [x=480,y=320] recv [x=330,y=250]").is_ok());
    assert!(parse_attribute("imageattr:97 recv [x=800,y=640,sar=1.1] send [x=330,y=250]").is_ok());
    assert!(parse_attribute("imageattr:97 send [x=[480:16:800],y=[320:16:640],par=[1.2-1.3],q=0.6] [x=[176:8:208],y=[144:8:176],par=[1.2-1.3]] recv *").is_ok());
    assert!(parse_attribute("imageattr:").is_err());
}
#[test]
fn test_parse_attribute_inactive() {
    assert!(parse_attribute("inactive").is_ok());
    assert!(parse_attribute("inactive foobar").is_err());
}
#[test]
fn test_parse_attribute_label() {
    assert!(parse_attribute("label:1").is_ok());
    assert!(parse_attribute("label:foobar").is_ok());
    assert!(parse_attribute("label:foobar barfoo").is_ok());
    assert!(parse_attribute("label:").is_err());
}
#[test]
fn test_parse_attribute_maxptime() {
    assert!(parse_attribute("maxptime:60").is_ok());
    assert!(parse_attribute("maxptime:").is_err());
}
#[test]
fn test_parse_attribute_mid() {
    assert!(parse_attribute("mid:sdparta_0").is_ok());
    assert!(parse_attribute("mid:sdparta_0 sdparta_1 sdparta_2").is_ok());
    assert!(parse_attribute("mid:").is_err());
}
#[test]
fn test_parse_attribute_msid() {
    assert!(parse_attribute("msid:{5a990edd-0568-ac40-8d97-310fc33f3411}").is_ok());
    assert!(parse_attribute("msid:{5a990edd-0568-ac40-8d97-310fc33f3411} {218cfa1c-617d-2249-9997-60929ce4c405}").is_ok());
    assert!(parse_attribute("msid:").is_err());
}
#[test]
fn test_parse_attribute_msid_semantics() {
    assert!(parse_attribute("msid-semantic:WMS *").is_ok())
}
#[test]
fn test_parse_attribute_ptime() {
    assert!(parse_attribute("ptime:30").is_ok());
    assert!(parse_attribute("ptime:").is_err());
}
#[test]
fn test_parse_attribute_rid() {
    assert!(parse_attribute("rid:foo send").is_ok());
    assert!(parse_attribute("rid:foo").is_ok());
    assert!(parse_attribute("rid:").is_err());
}
// Direction flags, remote-candidates, setup and rtcp value grammars.
#[test]
fn test_parse_attribute_recvonly() {
    assert!(parse_attribute("recvonly").is_ok());
    assert!(parse_attribute("recvonly foobar").is_err());
}
#[test]
fn test_parse_attribute_remote_candidate() {
    assert!(parse_attribute("remote-candidates:0 10.0.0.1 5555").is_ok());
    assert!(parse_attribute("remote-candidates:12345 ::1 5555").is_ok());
    assert!(parse_attribute("remote-candidates:abc 10.0.0.1 5555").is_err());
    assert!(parse_attribute("remote-candidates:0 10.a.0.1 5555").is_err());
    assert!(parse_attribute("remote-candidates:0 10.0.0.1 70000").is_err());
    assert!(parse_attribute("remote-candidates:0 10.0.0.1").is_err());
    assert!(parse_attribute("remote-candidates:0").is_err());
    assert!(parse_attribute("remote-candidates:").is_err());
}
#[test]
fn test_parse_attribute_sendonly() {
    assert!(parse_attribute("sendonly").is_ok());
    assert!(parse_attribute("sendonly foobar").is_err());
}
#[test]
fn test_parse_attribute_sendrecv() {
    assert!(parse_attribute("sendrecv").is_ok());
    assert!(parse_attribute("sendrecv foobar").is_err());
}
#[test]
fn test_parse_attribute_setup() {
    assert!(parse_attribute("setup:active").is_ok());
    assert!(parse_attribute("setup:passive").is_ok());
    assert!(parse_attribute("setup:actpass").is_ok());
    assert!(parse_attribute("setup:holdconn").is_ok());
    assert!(parse_attribute("setup:").is_err());
    assert!(parse_attribute("setup:foobar").is_err());
}
#[test]
fn test_parse_attribute_rtcp() {
    assert!(parse_attribute("rtcp:5000").is_ok());
    assert!(parse_attribute("rtcp:9 IN IP4 0.0.0.0").is_ok());
    assert!(parse_attribute("rtcp:").is_err());
    assert!(parse_attribute("rtcp:70000").is_err());
    assert!(parse_attribute("rtcp:9 IN").is_err());
    assert!(parse_attribute("rtcp:9 IN IP4").is_err());
    // addrtype IP4 must match the actual address family of the address.
    assert!(parse_attribute("rtcp:9 IN IP4 ::1").is_err());
}
// rtcp-fb/rtpmap/sctpmap/sctp-port, integer-valued attributes,
// simulcast, ssrc and the unknown-attribute error path.
#[test]
fn test_parse_attribute_rtcp_fb() {
    assert!(parse_attribute("rtcp-fb:101 ccm fir").is_ok())
}
#[test]
fn test_parse_attribute_rtcp_mux() {
    assert!(parse_attribute("rtcp-mux").is_ok());
    assert!(parse_attribute("rtcp-mux foobar").is_err());
}
#[test]
fn test_parse_attribute_rtcp_rsize() {
    assert!(parse_attribute("rtcp-rsize").is_ok());
    assert!(parse_attribute("rtcp-rsize foobar").is_err());
}
#[test]
fn test_parse_attribute_rtpmap() {
    assert!(parse_attribute("rtpmap:109 opus/48000/2").is_ok())
}
#[test]
fn test_parse_attribute_sctpmap() {
    assert!(parse_attribute("sctpmap:5000 webrtc-datachannel 256").is_ok());
    assert!(parse_attribute("sctpmap:70000 webrtc-datachannel 256").is_err());
    assert!(parse_attribute("sctpmap:5000 unsupported 256").is_err());
    assert!(parse_attribute("sctpmap:5000 webrtc-datachannel 2a").is_err());
}
#[test]
fn test_parse_attribute_sctp_port() {
    assert!(parse_attribute("sctp-port:5000").is_ok());
    assert!(parse_attribute("sctp-port:").is_err());
    assert!(parse_attribute("sctp-port:70000").is_err());
}
#[test]
fn test_parse_attribute_max_message_size() {
    assert!(parse_attribute("max-message-size:1").is_ok());
    assert!(parse_attribute("max-message-size:100000").is_ok());
    // Values above u32 range are accepted (stored as u64).
    assert!(parse_attribute("max-message-size:4294967297").is_ok());
    assert!(parse_attribute("max-message-size:0").is_ok());
    assert!(parse_attribute("max-message-size:").is_err());
    assert!(parse_attribute("max-message-size:abc").is_err());
}
#[test]
fn test_parse_attribute_simulcast() {
    assert!(parse_attribute("simulcast:send 1").is_ok());
    assert!(parse_attribute("simulcast:recv test").is_ok());
    assert!(parse_attribute("simulcast:recv ~test").is_ok());
    assert!(parse_attribute("simulcast:recv test;foo").is_ok());
    assert!(parse_attribute("simulcast:recv foo,bar").is_ok());
    assert!(parse_attribute("simulcast:recv foo,bar;test").is_ok());
    assert!(parse_attribute("simulcast:recv 1;4,5 send 6;7").is_ok());
    assert!(parse_attribute("simulcast:send 1,2,3;~4,~5 recv 6;~7,~8").is_ok());
    // old draft 03 notation used by Firefox 55
    assert!(parse_attribute("simulcast: send rid=foo;bar").is_ok());
    assert!(parse_attribute("simulcast:").is_err());
    assert!(parse_attribute("simulcast:send").is_err());
    assert!(parse_attribute("simulcast:foobar 1").is_err());
    assert!(parse_attribute("simulcast:send 1 foobar 2").is_err());
}
#[test]
fn test_parse_attribute_ssrc() {
    assert!(parse_attribute("ssrc:2655508255").is_ok());
    assert!(parse_attribute("ssrc:2655508255 foo").is_ok());
    assert!(parse_attribute("ssrc:2655508255 cname:{735484ea-4f6c-f74a-bd66-7425f8476c2e}")
                .is_ok());
    assert!(parse_attribute("ssrc:").is_err());
    assert!(parse_attribute("ssrc:foo").is_err());
}
#[test]
fn test_parse_attribute_ssrc_group() {
    assert!(parse_attribute("ssrc-group:FID 3156517279 2673335628").is_ok())
}
#[test]
fn test_parse_unknown_attribute() {
    assert!(parse_attribute("unknown").is_err())
}
|
//!
//! A session runs a filesystem implementation while it is being mounted
//! to a specific mount point. A session begins by mounting the filesystem
//! and ends by unmounting it. While the filesystem is mounted, the session
//! loop receives, dispatches and replies to kernel requests for filesystem
//! operations under its mount point.
//!
use std::task;
use std::libc::{EAGAIN, EINTR, ENODEV, ENOENT};
use channel, channel::Channel;
use Filesystem;
use request::Request;
/// The session data structure
///
/// Ties a filesystem implementation to a mounted kernel channel.
/// NOTE(review): protocol/lifecycle fields below appear to be managed by
/// request dispatching (request module, not visible here) — confirm there.
pub struct Session<FS> {
    filesystem: FS,        // user-provided filesystem operation handlers
    mountpoint: Path,      // where the filesystem is mounted
    ch: Channel,           // kernel channel; dropping it unmounts (see Drop)
    proto_major: uint,     // FUSE protocol major version (starts at 0)
    proto_minor: uint,     // FUSE protocol minor version (starts at 0)
    initialized: bool,     // starts false
    destroyed: bool,       // starts false
}
impl<FS: Filesystem+Send> Session<FS> {
    /// Create a new session by mounting the given filesystem to the given mountpoint.
    /// Fails the task (via expect) if the kernel channel cannot be mounted;
    /// the underlying mount error code is not surfaced to the caller.
    pub fn new (filesystem: FS, mountpoint: &Path, options: &[&[u8]]) -> Session<FS> {
        info!("Mounting {}", mountpoint.display());
        let ch = Channel::new(mountpoint, options).expect("unable to mount filesystem");
        Session {
            filesystem: filesystem,
            mountpoint: mountpoint.clone(),
            ch: ch,
            proto_major: 0,
            proto_minor: 0,
            initialized: false,
            destroyed: false,
        }
    }
    /// Run the session loop that receives, dispatches and replies to kernel requests.
    /// Make sure to run it on a new single threaded scheduler since the I/O in the
    /// session loop can block.
    pub fn run (&mut self) {
        // Reuse a single request buffer across iterations of the loop.
        let mut req = Request::new();
        loop {
            match req.read(self) {
                Err(ENOENT) => continue, // Operation interrupted. Accordingly to FUSE, this is safe to retry
                Err(EINTR) => continue, // Interrupted system call, retry
                Err(EAGAIN) => continue, // Explicitly try again
                Err(ENODEV) => break, // Filesystem was unmounted, quit the loop
                Err(err) => fail!("Lost connection to FUSE device. Error {:i}", err),
                Ok(_) => req.dispatch(self),
            }
        }
    }
    /// Run the session loop in a background task, consuming the session.
    /// The returned handle unmounts the filesystem when dropped.
    pub fn spawn (self) -> BackgroundSession {
        BackgroundSession::new(self)
    }
}
#[unsafe_destructor]
impl<FS: Filesystem+Send> Drop for Session<FS> {
    // Log the unmount; no explicit cleanup is needed here.
    fn drop (&mut self) {
        info!("Unmounted {}", self.mountpoint.display());
        // The actual unmounting takes place because self.ch is dropped here
    }
}
/// The background session data structure
///
/// Handle to a session whose loop runs in a background task. Only the
/// mountpoint is retained, so the filesystem can be unmounted (ending the
/// session) when this handle is dropped.
pub struct BackgroundSession {
    mountpoint: Path,
}
impl BackgroundSession {
    /// Create a new background session for the given session by running its
    /// session loop in a background task. If the returned handle is dropped,
    /// the filesystem is unmounted and the given session ends.
    pub fn new<FS: Filesystem+Send> (se: Session<FS>) -> BackgroundSession {
        // Remember the mountpoint before the session moves into the task.
        let mountpoint = se.mountpoint.clone();
        // The background task is started using a new single threaded
        // scheduler since native I/O in the session loop can block
        do task::spawn_sched(task::SingleThreaded) {
            // Move the session into the task and run it until unmount.
            let mut se = se;
            se.run();
        }
        BackgroundSession { mountpoint: mountpoint }
    }
}
impl Drop for BackgroundSession {
    /// Unmount the filesystem, which ends the background session loop.
    fn drop (&mut self) {
        info!("Unmounting {}", self.mountpoint.display());
        // Unmounting the filesystem will eventually end the session loop,
        // drop the session and hence end the background task.
        channel::unmount(&self.mountpoint);
    }
}
Output the error code when failing to mount a filesystem
//!
//! A session runs a filesystem implementation while it is being mounted
//! to a specific mount point. A session begins by mounting the filesystem
//! and ends by unmounting it. While the filesystem is mounted, the session
//! loop receives, dispatches and replies to kernel requests for filesystem
//! operations under its mount point.
//!
use std::task;
use std::libc::{EAGAIN, EINTR, ENODEV, ENOENT};
use channel, channel::Channel;
use Filesystem;
use request::Request;
/// The session data structure
///
/// Owns the user-supplied filesystem implementation and the kernel
/// communication channel for the lifetime of a mount. Dropping the
/// session unmounts the filesystem (via the channel's destructor).
pub struct Session<FS> {
    filesystem: FS,     // user-supplied filesystem implementation
    mountpoint: Path,   // path this session is mounted at
    ch: Channel,        // channel to the kernel FUSE device; dropping it unmounts
    proto_major: uint,  // FUSE protocol major version; 0 until negotiated -- presumably set during request dispatch, confirm
    proto_minor: uint,  // FUSE protocol minor version; 0 until negotiated -- presumably set during request dispatch, confirm
    initialized: bool,  // whether the init handshake completed -- presumably set during request dispatch, confirm
    destroyed: bool,    // whether the filesystem was destroyed -- presumably set during request dispatch, confirm
}
impl<FS: Filesystem+Send> Session<FS> {
    /// Create a new session by mounting the given filesystem to the given mountpoint
    ///
    /// Fails the task -- including the OS error code in the message -- if the
    /// filesystem cannot be mounted.
    pub fn new (filesystem: FS, mountpoint: &Path, options: &[&[u8]]) -> Session<FS> {
        info!("Mounting {}", mountpoint.display());
        // Surface the errno from a failed mount rather than a bare expect().
        let ch = match Channel::new(mountpoint, options) {
            Ok(ch) => ch,
            Err(err) => fail!("Unable to mount filesystem. Error {:i}", err),
        };
        Session {
            filesystem: filesystem,
            mountpoint: mountpoint.clone(),
            ch: ch,
            // Protocol version and state flags start zeroed/false; they are
            // updated elsewhere once the kernel handshake happens.
            proto_major: 0,
            proto_minor: 0,
            initialized: false,
            destroyed: false,
        }
    }
    /// Run the session loop that receives, dispatches and replies to kernel requests.
    /// Make sure to run it on a new single threaded scheduler since the I/O in the
    /// session loop can block.
    pub fn run (&mut self) {
        // One request buffer is reused across all loop iterations.
        let mut req = Request::new();
        loop {
            match req.read(self) {
                Err(ENOENT) => continue, // Operation interrupted. Accordingly to FUSE, this is safe to retry
                Err(EINTR) => continue,  // Interrupted system call, retry
                Err(EAGAIN) => continue, // Explicitly try again
                Err(ENODEV) => break,    // Filesystem was unmounted, quit the loop
                Err(err) => fail!("Lost connection to FUSE device. Error {:i}", err),
                Ok(_) => req.dispatch(self),
            }
        }
    }
    /// Run the session loop in a background task
    pub fn spawn (self) -> BackgroundSession {
        BackgroundSession::new(self)
    }
}
#[unsafe_destructor]
impl<FS: Filesystem+Send> Drop for Session<FS> {
    /// Log the unmount; the unmount itself is performed by dropping `self.ch`.
    fn drop (&mut self) {
        // NOTE(review): logged in the past tense, but the unmount only happens
        // when self.ch is dropped at the end of this destructor.
        info!("Unmounted {}", self.mountpoint.display());
        // The actual unmounting takes place because self.ch is dropped here
    }
}
/// The background session data structure
///
/// Handle to a session whose loop runs in a background task. Only the
/// mountpoint is retained, so the filesystem can be unmounted (ending the
/// session) when this handle is dropped.
pub struct BackgroundSession {
    mountpoint: Path,
}
impl BackgroundSession {
    /// Create a new background session for the given session by running its
    /// session loop in a background task. If the returned handle is dropped,
    /// the filesystem is unmounted and the given session ends.
    pub fn new<FS: Filesystem+Send> (se: Session<FS>) -> BackgroundSession {
        // Remember the mountpoint before the session moves into the task.
        let mountpoint = se.mountpoint.clone();
        // The background task is started using a new single threaded
        // scheduler since native I/O in the session loop can block
        do task::spawn_sched(task::SingleThreaded) {
            // Move the session into the task and run it until unmount.
            let mut se = se;
            se.run();
        }
        BackgroundSession { mountpoint: mountpoint }
    }
}
impl Drop for BackgroundSession {
    /// Unmount the filesystem, which ends the background session loop.
    fn drop (&mut self) {
        info!("Unmounting {}", self.mountpoint.display());
        // Unmounting the filesystem will eventually end the session loop,
        // drop the session and hence end the background task.
        channel::unmount(&self.mountpoint);
    }
}
|
use chrono::*;
use hyper::header::Headers;
use openssl::crypto::hash::hash;
use openssl::crypto::hash::Type::SHA1;
use openssl::crypto::pkey::PKey;
use rustc_serialize::base64::{STANDARD, ToBase64};
use std::ascii::AsciiExt;
use super::http_headers::*;
// #[derive(Clone,Debug)]
/// Builder collecting everything needed to sign a Chef API request.
///
/// The fields are combined into a canonical request string which is hashed
/// and encrypted with the client's private key (see `encrypted_request`).
pub struct Authentication {
    body: Option<String>,    // request body, hashed into X-Ops-Content-Hash
    date: String,            // UTC timestamp used for signing
    headers: Headers,        // accumulated X-Ops-* headers
    key: Option<PKey>,       // client private key used to encrypt the signature
    method: Option<String>,  // HTTP method, stored upper-cased
    path: Option<String>,    // request path, normalized by squeeze_path
    userid: Option<String>,  // client/user id
    version: String,         // signing protocol version ("1.0" or "1.1")
}
impl Authentication {
    /// Create a builder stamped with the current UTC time and the default
    /// "sha1 / version 1.1" X-Ops-Sign header.
    pub fn new() -> Authentication {
        let dt = UTC::now().format("%Y-%m-%dT%H:%M:%SZ").to_string();
        let mut headers = Headers::new();
        headers.set(OpsSign(String::from("algorithm=sha1;version=1.1")));
        Authentication {
            body: None,
            date: dt,
            headers: headers,
            key: None,
            method: None,
            path: None,
            userid: None,
            version: String::from("1.1"),
        }
    }
    /// Set the request body to be hashed into X-Ops-Content-Hash.
    pub fn body(mut self, body: &str) -> Authentication {
        let body = String::from(body);
        self.body = Some(body);
        self
    }
    /// Set the private key used to encrypt the canonical request.
    pub fn key(mut self, key: PKey) -> Authentication {
        self.key = Some(key);
        self
    }
    /// Set the HTTP method (stored upper-cased).
    pub fn method(mut self, method: &str) -> Authentication {
        let method = String::from(method.to_ascii_uppercase());
        self.method = Some(method);
        self
    }
    /// Set the request path (duplicate and trailing slashes removed).
    pub fn path(mut self, path: &str) -> Authentication {
        let path = String::from(path);
        self.path = Some(squeeze_path(path));
        self
    }
    /// Set the user id and mirror it into the X-Ops-UserId header.
    pub fn userid(mut self, id: &str) -> Authentication {
        let userid = String::from(id);
        self.userid = Some(userid.clone());
        self.headers.set(OpsUserId(userid));
        self
    }
    /// Set the signing protocol version ("1.0" or "1.1").
    pub fn version(mut self, version: &str) -> Authentication {
        let version = String::from(version);
        self.version = version;
        self
    }
    /// Copy the creation timestamp into the X-Ops-Timestamp header.
    pub fn set_timestamp(mut self) -> Authentication {
        self.headers.set(OpsTimestamp(self.date.clone()));
        self
    }
    /// SHA1 of the (possibly empty) normalized path, base64-encoded.
    fn hashed_path(&self) -> String {
        hash(SHA1, expand_string(&self.path).as_bytes()).to_base64(STANDARD)
    }
    /// SHA1 of the (possibly empty) body, base64-encoded.
    /// FIXME: this needs, eventually, to deal with IO and not just strings
    fn content_hash(&self) -> String {
        let body = expand_string(&self.body);
        hash(SHA1, body.as_bytes()).to_base64(STANDARD)
    }
    /// User id as used in the canonical request: plain text for protocol
    /// 1.0, SHA1+base64 for anything else.
    fn canonical_user_id(&self) -> String {
        let userid = expand_string(&self.userid);
        if self.version == "1.0" {
            userid
        } else {
            hash(SHA1, userid.as_bytes()).to_base64(STANDARD)
        }
    }
    /// Assemble the canonical request string that gets signed.
    fn canonical_request(&self) -> String {
        format!("Method:{}\nHashed Path:{}\nX-Ops-Content-Hash:{}\nX-Ops-Timestamp:{}\nX-Ops-UserId:{}",
                expand_string(&self.method), self.hashed_path(), self.content_hash(),
                self.date, self.canonical_user_id())
    }
    /// Encrypt the canonical request with the private key and base64-encode
    /// the result. Panics if no key was provided.
    fn encrypted_request(&self) -> String {
        match self.key {
            Some(ref key) => key.private_encrypt(&self.canonical_request().as_bytes()).to_base64(STANDARD),
            None => panic!("No private key provided!")
        }
    }
}
/// Unwrap an optional string into an owned value, yielding `""` for `None`.
fn expand_string(val: &Option<String>) -> String {
    if let Some(ref inner) = *val {
        inner.to_string()
    } else {
        "".to_string()
    }
}
/// Remove duplicate and trailing slashes from a path.
///
/// Splits on '/' and re-joins the non-empty segments with single slashes,
/// so `"/a//b/"` becomes `"/a/b"`. Inputs with no segments at all
/// (e.g. `""`, `"/"`, `"///"`) yield `"/"`.
fn squeeze_path(pth: String) -> String {
    let mut st = String::new();
    // Idiom fix: `!x.is_empty()` instead of comparing against "".
    for p in pth.split('/').filter(|x| !x.is_empty()) {
        st.push('/');
        st.push_str(p)
    }
    // Idiom fix: `is_empty()` instead of `len() == 0`.
    if st.is_empty() {
        String::from("/")
    } else {
        st
    }
}
#[cfg(test)]
mod tests {
use super::Authentication;
use super::squeeze_path;
use http_headers::*;
use hyper::header::Headers;
use openssl::crypto::pkey::PKey;
const PATH: &'static str = "/organizations/clownco";
const BODY: &'static str = "Spec Body";
const USER: &'static str = "spec-user";
const DT: &'static str = "2009-01-01T12:00:00Z";
const PRIVATE_KEY_DATA: &'static str = r"
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA0ueqo76MXuP6XqZBILFziH/9AI7C6PaN5W0dSvkr9yInyGHS
z/IR1+4tqvP2qlfKVKI4CP6BFH251Ft9qMUBuAsnlAVQ1z0exDtIFFOyQCdR7iXm
jBIWMSS4buBwRQXwDK7id1OxtU23qVJv+xwEV0IzaaSJmaGLIbvRBD+qatfUuQJB
MU/04DdJIwvLtZBYdC2219m5dUBQaa4bimL+YN9EcsDzD9h9UxQo5ReK7b3cNMzJ
BKJWLzFBcJuePMzAnLFktr/RufX4wpXe6XJxoVPaHo72GorLkwnQ0HYMTY8rehT4
mDi1FI969LHCFFaFHSAaRnwdXaQkJmSfcxzCYQIDAQABAoIBAQCW3I4sKN5B9jOe
xq/pkeWBq4OvhW8Ys1yW0zFT8t6nHbB1XrwscQygd8gE9BPqj3e0iIEqtdphbPmj
VHqTYbC0FI6QDClifV7noTwTBjeIOlgZ0NSUN0/WgVzIOxUz2mZ2vBZUovKILPqG
TOi7J7RXMoySMdcXpP1f+PgvYNcnKsT72UcWaSXEV8/zo+Zm/qdGPVWwJonri5Mp
DVm5EQSENBiRyt028rU6ElXORNmoQpVjDVqZ1gipzXkifdjGyENw2rt4V/iKYD7V
5iqXOsvP6Cemf4gbrjunAgDG08S00kiUgvVWcdXW+dlsR2nCvH4DOEe3AYYh/aH8
DxEE7FbtAoGBAPcNO8fJ56mNw0ow4Qg38C+Zss/afhBOCfX4O/SZKv/roRn5+gRM
KRJYSVXNnsjPI1plzqR4OCyOrjAhtuvL4a0DinDzf1+fiztyNohwYsW1vYmqn3ti
EN0GhSgE7ppZjqvLQ3f3LUTxynhA0U+k9wflb4irIlViTUlCsOPkrNJDAoGBANqL
Q+vvuGSsmRLU/Cenjy+Mjj6+QENg51dz34o8JKuVKIPKU8pNnyeLa5fat0qD2MHm
OB9opeQOcw0dStodxr6DB3wi83bpjeU6BWUGITNiWEaZEBrQ0aiqNJJKrrHm8fAZ
9o4l4oHc4hI0kYVYYDuxtKuVJrzZiEapTwoOcYiLAoGBAI/EWbeIHZIj9zOjgjEA
LHvm25HtulLOtyk2jd1njQhlHNk7CW2azIPqcLLH99EwCYi/miNH+pijZ2aHGCXb
/bZrSxM0ADmrZKDxdB6uGCyp+GS2sBxjEyEsfCyvwhJ8b3Q100tqwiNO+d5FCglp
HICx2dgUjuRVUliBwOK93nx1AoGAUI8RhIEjOYkeDAESyhNMBr0LGjnLOosX+/as
qiotYkpjWuFULbibOFp+WMW41vDvD9qrSXir3fstkeIAW5KqVkO6mJnRoT3Knnra
zjiKOITCAZQeiaP8BO5o3pxE9TMqb9VCO3ffnPstIoTaN4syPg7tiGo8k1SklVeH
2S8lzq0CgYAKG2fljIYWQvGH628rp4ZcXS4hWmYohOxsnl1YrszbJ+hzR+IQOhGl
YlkUQYXhy9JixmUUKtH+NXkKX7Lyc8XYw5ETr7JBT3ifs+G7HruDjVG78EJVojbd
8uLA+DdQm5mg4vd1GTiSK65q/3EeoBlUaVor3HhLFki+i9qpT8CBsg==
-----END RSA PRIVATE KEY-----
";
// const PUBLIC_KEY_DATA: &'static str = r"
// -----BEGIN PUBLIC KEY-----
// MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ueqo76MXuP6XqZBILFz
// iH/9AI7C6PaN5W0dSvkr9yInyGHSz/IR1+4tqvP2qlfKVKI4CP6BFH251Ft9qMUB
// uAsnlAVQ1z0exDtIFFOyQCdR7iXmjBIWMSS4buBwRQXwDK7id1OxtU23qVJv+xwE
// V0IzaaSJmaGLIbvRBD+qatfUuQJBMU/04DdJIwvLtZBYdC2219m5dUBQaa4bimL+
// YN9EcsDzD9h9UxQo5ReK7b3cNMzJBKJWLzFBcJuePMzAnLFktr/RufX4wpXe6XJx
// oVPaHo72GorLkwnQ0HYMTY8rehT4mDi1FI969LHCFFaFHSAaRnwdXaQkJmSfcxzC
// YQIDAQAB
// -----END PUBLIC KEY-----
// ";
#[test]
fn test_squeeze_path() {
let path = String::from("/any/given/path");
assert_eq!(path, squeeze_path(path.clone()))
}
#[test]
fn test_squeeze_path_with_duplicate_slashes() {
let path = String::from("/any//given/path");
assert_eq!("/any/given/path", squeeze_path(path.clone()))
}
#[test]
fn test_squeeze_path_with_many_duplicate_slashes() {
let path = String::from("/any//old/given//path");
assert_eq!("/any/old/given/path", squeeze_path(path.clone()))
}
#[test]
fn test_squeeze_path_with_trailing_slash() {
let path = String::from("/any/given/path/");
assert_eq!("/any/given/path", squeeze_path(path.clone()))
}
#[test]
fn test_new_authentication() {
let auth = Authentication::new();
assert_eq!(auth.body, None)
}
#[test]
fn test_timestamp() {
let auth = Authentication {
body: None,
date: String::from(DT),
headers: Headers::new(),
key: None,
method: None,
path: None,
userid: None,
version: String::from("1.1"),
}.set_timestamp();
assert_eq!(auth.headers.get::<OpsTimestamp>().unwrap().to_string(), "2009-01-01T12:00:00Z")
}
#[test]
fn test_userid() {
let auth = Authentication::new().userid(USER);
assert_eq!(auth.userid.unwrap(), "spec-user");
assert_eq!(auth.headers.get::<OpsUserId>().unwrap().to_string(), "spec-user")
}
#[test]
fn test_method() {
let auth = Authentication::new().method("get");
assert_eq!(auth.method.unwrap(), "GET")
}
#[test]
fn test_canonical_user_id_v1_0() {
let auth = Authentication::new().userid(USER).version("1.0");
assert_eq!(auth.canonical_user_id(), "spec-user")
}
#[test]
fn test_canonical_user_id_v1_1() {
let auth = Authentication::new().userid(USER);
assert_eq!(auth.canonical_user_id(), "EAF7Wv/hbAudWV5ZkwKz40Z/lO0=")
}
#[test]
fn test_canonical_request() {
let auth = Authentication {
body: Some(String::from(BODY)),
date: String::from(DT),
headers: Headers::new(),
key: None,
method: Some(String::from("POST")),
path: Some(String::from(PATH)),
userid: Some(String::from(USER)),
version: String::from("1.1"),
};
assert_eq!(auth.canonical_request(), "Method:POST\nHashed Path:YtBWDn1blGGuFIuKksdwXzHU9oE=\nX-Ops-Content-Hash:DFteJZPVv6WKdQmMqZUQUumUyRs=\nX-Ops-Timestamp:2009-01-01T12:00:00Z\nX-Ops-UserId:EAF7Wv/hbAudWV5ZkwKz40Z/lO0=")
}
#[test]
fn test_private_key() {
let k0 = PKey::private_key_from_pem(&mut PRIVATE_KEY_DATA.as_bytes()).unwrap();
let auth = Authentication {
body: Some(String::from(BODY)),
date: String::from(DT),
headers: Headers::new(),
key: Some(k0),
method: Some(String::from("POST")),
path: Some(String::from(PATH)),
userid: Some(String::from(USER)),
version: String::from("1.1"),
};
assert_eq!(&auth.encrypted_request(),
"UfZD9dRz6rFu6LbP5Mo1oNHcWYxpNIcUfFCffJS1FQa0GtfU/vkt3/O5HuCM1wIFl/U0f5faH9EWpXWY5NwKR031Myxcabw4t4ZLO69CIh/3qx1XnjcZvt2wc2R9bx/43IWA/r8w8Q6decuu0f6ZlNheJeJhaYPI8piX/aH+uHBH8zTACZu8vMnl5MF3/OIlsZc8cemq6eKYstp8a8KYq9OmkB5IXIX6qVMJHA6fRvQEB/7j281Q7oI/O+lE8AmVyBbwruPb7Mp6s4839eYiOdjbDwFjYtbS3XgAjrHlaD7WFDlbAG7H8Dmvo+wBxmtNkszhzbBnEYtuwQqT8nM/8A==")
}
}
Base64-encode correctly
use chrono::*;
use hyper::header::Headers;
use openssl::crypto::hash::hash;
use openssl::crypto::hash::Type::SHA1;
use openssl::crypto::pkey::PKey;
use rustc_serialize::base64::{ToBase64, Config, Newline, CharacterSet};
use std::ascii::AsciiExt;
use http_headers::*;
// #[derive(Clone,Debug)]
/// Builder collecting everything needed to sign a Chef API request.
///
/// The fields are combined into a canonical request string which is hashed
/// and encrypted with the client's private key (see `encrypted_request`),
/// then emitted as numbered headers via `as_headers`.
pub struct Authentication {
    body: Option<String>,    // request body, hashed into X-Ops-Content-Hash
    date: String,            // UTC timestamp used for signing
    headers: Headers,        // accumulated X-Ops-* headers
    key: Option<PKey>,       // client private key used to encrypt the signature
    method: Option<String>,  // HTTP method, stored upper-cased
    path: Option<String>,    // request path, normalized by squeeze_path
    userid: Option<String>,  // client/user id
    version: String,         // signing protocol version ("1.0" or "1.1")
}
/// Base64 config for Chef auth: standard alphabet, padded, wrapped with a
/// LF every 60 characters -- `as_headers` splits the signature on '\n' into
/// numbered X-Ops-Authorization-N headers, one per 60-char line.
pub static BASE64_AUTH: Config = Config {char_set: CharacterSet::Standard, newline: Newline::LF, pad: true, line_length: Some(60)};
impl Authentication {
    /// Create a builder stamped with the current UTC time and the default
    /// "sha1 / version 1.1" X-Ops-Sign header.
    pub fn new() -> Authentication {
        let dt = UTC::now().format("%Y-%m-%dT%H:%M:%SZ").to_string();
        let mut headers = Headers::new();
        headers.set(OpsSign(String::from("algorithm=sha1;version=1.1")));
        Authentication {
            body: None,
            date: dt,
            headers: headers,
            key: None,
            method: None,
            path: None,
            userid: None,
            version: String::from("1.1"),
        }
    }
    /// Set the request body to be hashed into X-Ops-Content-Hash.
    pub fn body(mut self, body: &str) -> Authentication {
        let body = String::from(body);
        self.body = Some(body);
        self
    }
    /// Set the private key used to encrypt the canonical request.
    pub fn key(mut self, key: PKey) -> Authentication {
        self.key = Some(key);
        self
    }
    /// Set the HTTP method (stored upper-cased).
    pub fn method(mut self, method: &str) -> Authentication {
        let method = String::from(method.to_ascii_uppercase());
        self.method = Some(method);
        self
    }
    /// Set the request path (duplicate and trailing slashes removed).
    pub fn path(mut self, path: &str) -> Authentication {
        let path = String::from(path);
        self.path = Some(squeeze_path(path));
        self
    }
    /// Set the user id and mirror it into the X-Ops-UserId header.
    pub fn userid(mut self, id: &str) -> Authentication {
        let userid = String::from(id);
        self.userid = Some(userid.clone());
        self.headers.set(OpsUserId(userid));
        self
    }
    /// Set the signing protocol version ("1.0" or "1.1").
    pub fn version(mut self, version: &str) -> Authentication {
        let version = String::from(version);
        self.version = version;
        self
    }
    /// Copy the creation timestamp into the X-Ops-Timestamp header.
    pub fn set_timestamp(mut self) -> Authentication {
        self.headers.set(OpsTimestamp(self.date.clone()));
        self
    }
    /// SHA1 of the (possibly empty) normalized path, base64-encoded.
    /// (A SHA1 digest encodes to 28 chars, so the 60-char wrap never fires.)
    fn hashed_path(&self) -> String {
        hash(SHA1, expand_string(&self.path).as_bytes()).to_base64(BASE64_AUTH)
    }
    /// SHA1 of the (possibly empty) body, base64-encoded.
    /// FIXME: this needs, eventually, to deal with IO and not just strings
    fn content_hash(&self) -> String {
        let body = expand_string(&self.body);
        hash(SHA1, body.as_bytes()).to_base64(BASE64_AUTH)
    }
    /// User id as used in the canonical request: plain text for protocol
    /// 1.0, SHA1+base64 for anything else.
    fn canonical_user_id(&self) -> String {
        let userid = expand_string(&self.userid);
        if self.version == "1.0" {
            userid
        } else {
            hash(SHA1, userid.as_bytes()).to_base64(BASE64_AUTH)
        }
    }
    /// Assemble the canonical request string that gets signed.
    fn canonical_request(&self) -> String {
        format!("Method:{}\nHashed Path:{}\nX-Ops-Content-Hash:{}\nX-Ops-Timestamp:{}\nX-Ops-UserId:{}",
                expand_string(&self.method), self.hashed_path(), self.content_hash(),
                self.date, self.canonical_user_id())
    }
    /// Encrypt the canonical request with the private key; the base64 output
    /// is wrapped at 60 chars (BASE64_AUTH) so as_headers can split it.
    /// Panics if no key was provided.
    fn encrypted_request(&self) -> String {
        match self.key {
            Some(ref key) => key.private_encrypt(&self.canonical_request().as_bytes()).to_base64(BASE64_AUTH),
            None => panic!("No private key provided!")
        }
    }
    /// Finalize: timestamp the request, sign it, and emit each 60-char
    /// base64 line of the signature as an x-Ops-Authorization-N header
    /// (N starting at 1).
    pub fn as_headers(self) -> Headers {
        let fin = self.set_timestamp();
        let enc = fin.encrypted_request();
        let mut headers = fin.headers;
        let mut i = 1;
        for h in enc.split('\n') {
            let key = format!("x-Ops-Authorization-{}", i);
            headers.set_raw(key, vec!(h.as_bytes().to_vec()));
            i += 1;
        }
        headers
    }
}
/// Unwrap an optional string into an owned value, yielding `""` for `None`.
fn expand_string(val: &Option<String>) -> String {
    val.as_ref().map_or(String::new(), |s| s.to_string())
}
/// Remove duplicate and trailing slashes from a path.
///
/// Splits on '/' and re-joins the non-empty segments with single slashes,
/// so `"/a//b/"` becomes `"/a/b"`. Inputs with no segments at all
/// (e.g. `""`, `"/"`, `"///"`) yield `"/"`.
fn squeeze_path(pth: String) -> String {
    let mut st = String::new();
    // Idiom fix: `!x.is_empty()` instead of comparing against "".
    for p in pth.split('/').filter(|x| !x.is_empty()) {
        st.push('/');
        st.push_str(p)
    }
    // Idiom fix: `is_empty()` instead of `len() == 0`.
    if st.is_empty() {
        String::from("/")
    } else {
        st
    }
}
#[cfg(test)]
mod tests {
use super::Authentication;
use super::squeeze_path;
use http_headers::*;
use hyper::header::Headers;
use openssl::crypto::pkey::PKey;
const PATH: &'static str = "/organizations/clownco";
const BODY: &'static str = "Spec Body";
const USER: &'static str = "spec-user";
const DT: &'static str = "2009-01-01T12:00:00Z";
const PRIVATE_KEY_DATA: &'static str = r"
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA0ueqo76MXuP6XqZBILFziH/9AI7C6PaN5W0dSvkr9yInyGHS
z/IR1+4tqvP2qlfKVKI4CP6BFH251Ft9qMUBuAsnlAVQ1z0exDtIFFOyQCdR7iXm
jBIWMSS4buBwRQXwDK7id1OxtU23qVJv+xwEV0IzaaSJmaGLIbvRBD+qatfUuQJB
MU/04DdJIwvLtZBYdC2219m5dUBQaa4bimL+YN9EcsDzD9h9UxQo5ReK7b3cNMzJ
BKJWLzFBcJuePMzAnLFktr/RufX4wpXe6XJxoVPaHo72GorLkwnQ0HYMTY8rehT4
mDi1FI969LHCFFaFHSAaRnwdXaQkJmSfcxzCYQIDAQABAoIBAQCW3I4sKN5B9jOe
xq/pkeWBq4OvhW8Ys1yW0zFT8t6nHbB1XrwscQygd8gE9BPqj3e0iIEqtdphbPmj
VHqTYbC0FI6QDClifV7noTwTBjeIOlgZ0NSUN0/WgVzIOxUz2mZ2vBZUovKILPqG
TOi7J7RXMoySMdcXpP1f+PgvYNcnKsT72UcWaSXEV8/zo+Zm/qdGPVWwJonri5Mp
DVm5EQSENBiRyt028rU6ElXORNmoQpVjDVqZ1gipzXkifdjGyENw2rt4V/iKYD7V
5iqXOsvP6Cemf4gbrjunAgDG08S00kiUgvVWcdXW+dlsR2nCvH4DOEe3AYYh/aH8
DxEE7FbtAoGBAPcNO8fJ56mNw0ow4Qg38C+Zss/afhBOCfX4O/SZKv/roRn5+gRM
KRJYSVXNnsjPI1plzqR4OCyOrjAhtuvL4a0DinDzf1+fiztyNohwYsW1vYmqn3ti
EN0GhSgE7ppZjqvLQ3f3LUTxynhA0U+k9wflb4irIlViTUlCsOPkrNJDAoGBANqL
Q+vvuGSsmRLU/Cenjy+Mjj6+QENg51dz34o8JKuVKIPKU8pNnyeLa5fat0qD2MHm
OB9opeQOcw0dStodxr6DB3wi83bpjeU6BWUGITNiWEaZEBrQ0aiqNJJKrrHm8fAZ
9o4l4oHc4hI0kYVYYDuxtKuVJrzZiEapTwoOcYiLAoGBAI/EWbeIHZIj9zOjgjEA
LHvm25HtulLOtyk2jd1njQhlHNk7CW2azIPqcLLH99EwCYi/miNH+pijZ2aHGCXb
/bZrSxM0ADmrZKDxdB6uGCyp+GS2sBxjEyEsfCyvwhJ8b3Q100tqwiNO+d5FCglp
HICx2dgUjuRVUliBwOK93nx1AoGAUI8RhIEjOYkeDAESyhNMBr0LGjnLOosX+/as
qiotYkpjWuFULbibOFp+WMW41vDvD9qrSXir3fstkeIAW5KqVkO6mJnRoT3Knnra
zjiKOITCAZQeiaP8BO5o3pxE9TMqb9VCO3ffnPstIoTaN4syPg7tiGo8k1SklVeH
2S8lzq0CgYAKG2fljIYWQvGH628rp4ZcXS4hWmYohOxsnl1YrszbJ+hzR+IQOhGl
YlkUQYXhy9JixmUUKtH+NXkKX7Lyc8XYw5ETr7JBT3ifs+G7HruDjVG78EJVojbd
8uLA+DdQm5mg4vd1GTiSK65q/3EeoBlUaVor3HhLFki+i9qpT8CBsg==
-----END RSA PRIVATE KEY-----
";
// const PUBLIC_KEY_DATA: &'static str = r"
// -----BEGIN PUBLIC KEY-----
// MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ueqo76MXuP6XqZBILFz
// iH/9AI7C6PaN5W0dSvkr9yInyGHSz/IR1+4tqvP2qlfKVKI4CP6BFH251Ft9qMUB
// uAsnlAVQ1z0exDtIFFOyQCdR7iXmjBIWMSS4buBwRQXwDK7id1OxtU23qVJv+xwE
// V0IzaaSJmaGLIbvRBD+qatfUuQJBMU/04DdJIwvLtZBYdC2219m5dUBQaa4bimL+
// YN9EcsDzD9h9UxQo5ReK7b3cNMzJBKJWLzFBcJuePMzAnLFktr/RufX4wpXe6XJx
// oVPaHo72GorLkwnQ0HYMTY8rehT4mDi1FI969LHCFFaFHSAaRnwdXaQkJmSfcxzC
// YQIDAQAB
// -----END PUBLIC KEY-----
// ";
#[test]
fn test_squeeze_path() {
let path = String::from("/any/given/path");
assert_eq!(path, squeeze_path(path.clone()))
}
#[test]
fn test_squeeze_path_with_duplicate_slashes() {
let path = String::from("/any//given/path");
assert_eq!("/any/given/path", squeeze_path(path.clone()))
}
#[test]
fn test_squeeze_path_with_many_duplicate_slashes() {
let path = String::from("/any//old/given//path");
assert_eq!("/any/old/given/path", squeeze_path(path.clone()))
}
#[test]
fn test_squeeze_path_with_trailing_slash() {
let path = String::from("/any/given/path/");
assert_eq!("/any/given/path", squeeze_path(path.clone()))
}
#[test]
fn test_new_authentication() {
let auth = Authentication::new();
assert_eq!(auth.body, None)
}
#[test]
fn test_timestamp() {
let auth = Authentication {
body: None,
date: String::from(DT),
headers: Headers::new(),
key: None,
method: None,
path: None,
userid: None,
version: String::from("1.1"),
}.set_timestamp();
assert_eq!(auth.headers.get::<OpsTimestamp>().unwrap().to_string(), "2009-01-01T12:00:00Z")
}
#[test]
fn test_userid() {
let auth = Authentication::new().userid(USER);
assert_eq!(auth.userid.unwrap(), "spec-user");
assert_eq!(auth.headers.get::<OpsUserId>().unwrap().to_string(), "spec-user")
}
#[test]
fn test_method() {
let auth = Authentication::new().method("get");
assert_eq!(auth.method.unwrap(), "GET")
}
#[test]
fn test_canonical_user_id_v1_0() {
let auth = Authentication::new().userid(USER).version("1.0");
assert_eq!(auth.canonical_user_id(), "spec-user")
}
#[test]
fn test_canonical_user_id_v1_1() {
let auth = Authentication::new().userid(USER);
assert_eq!(auth.canonical_user_id(), "EAF7Wv/hbAudWV5ZkwKz40Z/lO0=")
}
#[test]
fn test_canonical_request() {
let auth = Authentication {
body: Some(String::from(BODY)),
date: String::from(DT),
headers: Headers::new(),
key: None,
method: Some(String::from("POST")),
path: Some(String::from(PATH)),
userid: Some(String::from(USER)),
version: String::from("1.1"),
};
assert_eq!(auth.canonical_request(), "Method:POST\nHashed Path:YtBWDn1blGGuFIuKksdwXzHU9oE=\nX-Ops-Content-Hash:DFteJZPVv6WKdQmMqZUQUumUyRs=\nX-Ops-Timestamp:2009-01-01T12:00:00Z\nX-Ops-UserId:EAF7Wv/hbAudWV5ZkwKz40Z/lO0=")
}
#[test]
fn test_private_key() {
let k0 = PKey::private_key_from_pem(&mut PRIVATE_KEY_DATA.as_bytes()).unwrap();
let auth = Authentication {
body: Some(String::from(BODY)),
date: String::from(DT),
headers: Headers::new(),
key: Some(k0),
method: Some(String::from("POST")),
path: Some(String::from(PATH)),
userid: Some(String::from(USER)),
version: String::from("1.1"),
};
assert_eq!(&auth.encrypted_request(),
"UfZD9dRz6rFu6LbP5Mo1oNHcWYxpNIcUfFCffJS1FQa0GtfU/vkt3/O5HuCM\n1wIFl/U0f5faH9EWpXWY5NwKR031Myxcabw4t4ZLO69CIh/3qx1XnjcZvt2w\nc2R9bx/43IWA/r8w8Q6decuu0f6ZlNheJeJhaYPI8piX/aH+uHBH8zTACZu8\nvMnl5MF3/OIlsZc8cemq6eKYstp8a8KYq9OmkB5IXIX6qVMJHA6fRvQEB/7j\n281Q7oI/O+lE8AmVyBbwruPb7Mp6s4839eYiOdjbDwFjYtbS3XgAjrHlaD7W\nFDlbAG7H8Dmvo+wBxmtNkszhzbBnEYtuwQqT8nM/8A==")
}
#[test]
fn test_headers() {
let k0 = PKey::private_key_from_pem(&mut PRIVATE_KEY_DATA.as_bytes()).unwrap();
let auth = Authentication {
body: Some(String::from(BODY)),
date: String::from(DT),
headers: Headers::new(),
key: Some(k0),
method: Some(String::from("POST")),
path: Some(String::from(PATH)),
userid: Some(String::from(USER)),
version: String::from("1.1"),
};
let headers = auth.as_headers();
let header = headers.get_raw("x-ops-authorization-1").unwrap();
}
}
|
//! TODO Fill in
use rustwlc::{Geometry, Point, Size, WlcOutput};
use std::fmt::{self, Display, Formatter};
use std::default::Default;
use std::rc::Rc;
use rlua::{self, Table, Lua, UserData, ToLua, Value};
use super::object::{Object, Objectable};
use super::property::Property;
use super::class::{self, Class, ClassBuilder};
pub const SCREENS_HANDLE: &'static str = "__screens";
/// Output information mirrored from a `WlcOutput`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Output {
    pub name: String,
    // NOTE(review): populated from the output *resolution* in
    // `From<WlcOutput>`, i.e. pixels -- confirm the mm_* naming is intended.
    pub mm_width: u32,
    pub mm_height: u32,
    // TODO The XID array?
}
impl From<WlcOutput> for Output {
    /// Build an `Output` snapshot from a live `WlcOutput`.
    /// Panics if the output reports no resolution.
    fn from(output: WlcOutput) -> Output {
        let resolution = output.get_resolution().unwrap();
        Output {
            name: output.get_name(),
            // NOTE(review): resolution (pixels) stored in mm_* fields -- confirm
            mm_width: resolution.w,
            mm_height: resolution.h
        }
    }
}
/// Lua-visible state backing a screen object.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ScreenState {
    // Is this screen still valid and may be used
    pub valid: bool,
    // Screen geometry
    pub geometry: Geometry,
    // Screen workarea
    pub workarea: Geometry,
    // The screen outputs information
    pub outputs: Vec<Output>,
    // Some XID identifying this screen
    pub xid: u32
}
/// Thin wrapper around the Lua table that backs a screen object.
pub struct Screen<'lua>(Table<'lua>);
impl_objectable!(Screen, ScreenState);
impl Display for ScreenState {
    /// Formats as "Screen: <address of this state value>".
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "Screen: {:p}", self)
    }
}
impl <'lua> ToLua<'lua> for Screen<'lua> {
    /// A screen converts to Lua as its backing table.
    fn to_lua(self, lua: &'lua Lua) -> rlua::Result<Value<'lua>> {
        self.0.to_lua(lua)
    }
}
impl UserData for ScreenState {}
impl Default for ScreenState {
    /// A fresh, valid screen with zeroed geometry/workarea and no outputs.
    fn default() -> Self {
        ScreenState {
            valid: true,
            geometry: Geometry::zero(),
            workarea: Geometry::zero(),
            outputs: vec![],
            xid: 0
        }
    }
}
impl <'lua> Screen<'lua> {
    /// Allocate a new screen object using the registered "screen" class.
    fn new(lua: &Lua) -> rlua::Result<Object> {
        let class = class::class_setup(lua, "screen")?;
        Ok(Screen::allocate(lua, class)?.build())
    }
    /// Replace this screen's output list in its stored state.
    fn init_screens(&mut self, outputs: Vec<Output>) -> rlua::Result<()> {
        let mut state = self.state()?;
        state.outputs = outputs;
        self.set_state(state)
    }
    /// Expose the workarea as a Lua table {x, y, width, height}.
    fn get_workarea(&self, lua: &'lua Lua) -> rlua::Result<Table<'lua>> {
        let state = self.state()?;
        let Point { x, y } = state.workarea.origin;
        let Size { w, h } = state.workarea.size;
        // TODO I do this a lot, put it somewhere
        let table = lua.create_table();
        table.set("x", x)?;
        table.set("y", y)?;
        table.set("width", w)?;
        table.set("height", h)?;
        Ok(table)
    }
}
/// Register the "screen" class, create one screen object per wlc output,
/// and publish the screen list under the SCREENS_HANDLE global.
pub fn init(lua: &Lua) -> rlua::Result<Class> {
    let res = property_setup(lua, method_setup(lua, Class::builder(lua, Some(Rc::new(Screen::new)), None, None)?)?)?
        .save_class("screen")?
        .build()?;
    let mut screens: Vec<Screen> = vec![];
    for output in WlcOutput::list() {
        let mut screen = Screen::cast(Screen::new(lua)?)?;
        screen.init_screens(vec![output.into()])?;
        // TODO Move to Screen impl like the others
        screens.push(screen);
    }
    lua.globals().set(SCREENS_HANDLE, screens.to_lua(lua)?)?;
    Ok(res)
}
/// Attach the class methods: signal stub, `__index`, and `__call`
/// (which makes `screen(...)` iterate over screens).
fn method_setup<'lua>(lua: &'lua Lua, builder: ClassBuilder<'lua>) -> rlua::Result<ClassBuilder<'lua>> {
    // TODO Do properly
    use super::dummy;
    builder.method("connect_signal".into(), lua.create_function(dummy))?
           .method("__index".into(), lua.create_function(index))?
           .method("__call".into(), lua.create_function(iterate_over_screens))
}
/// Attach the read-only "workarea" property.
fn property_setup<'lua>(lua: &'lua Lua, builder: ClassBuilder<'lua>) -> rlua::Result<ClassBuilder<'lua>> {
    builder
        .property(Property::new("workarea".into(),
                                None,
                                Some(lua.create_function(get_workarea)),
                                None))
}
/// Property getter: cast the object table back to a screen and return its
/// workarea as a Lua table.
fn get_workarea<'lua>(lua: &'lua Lua, table: Table<'lua>) -> rlua::Result<Table<'lua>> {
    let screen = Screen::cast(table.into())?;
    screen.get_workarea(lua)
}
/// Ok this requires some explanation...
/// Lua gives us the previous value in the loop, with the first one being nil
/// since there was nothing there before.
///
/// To ensure we loop over everything, we take the index of that value in our list,
/// increment it by 1 (starting at 0 if it's the start) and then once it falls outside
/// the bounds it will stop by returning nil.
fn iterate_over_screens<'lua>(lua: &'lua Lua,
                              (_, _, prev): (Value<'lua>, Value<'lua>, Value<'lua>))
                              -> rlua::Result<Value<'lua>> {
    // Reload the screen list from the Lua global on every step.
    let screens: Vec<Screen> = lua.globals().get::<_, Vec<Table>>(SCREENS_HANDLE)?
        .into_iter().map(|t| Screen::cast(t.into()).unwrap())
        .collect();
    let index = match prev {
        Value::Nil => 0,
        Value::Table(ref table) => {
            if let Ok(screen) = Screen::cast(table.clone().into()) {
                // If prev is somehow not in the list, unwrap_or(len) + 1 lands
                // past the end, terminating the iteration below.
                screens.iter().position(|t| t.state().unwrap() == screen.state().unwrap())
                    .unwrap_or(screens.len()) + 1
            } else {
                panic!("Unexpected non-screen table in loop");
            }
        }
        _ => panic!("Unexpected non-screen or nil value in screens loop")
    };
    if index < screens.len() {
        screens[index].get_table().to_lua(lua)
    } else {
        Ok(Value::Nil)
    }
}
/// `__index` metamethod for the screen class.
///
/// Resolution order:
/// * `"primary"`     -> the first screen, if any
/// * an output name  -> the screen containing that output
/// * an integer      -> 1-based screen number (Lua convention)
/// * a screen table  -> returned as-is
/// Anything unresolved falls through to the metatable / default index.
fn index<'lua>(lua: &'lua Lua,
               (obj_table, index): (Table<'lua>, Value<'lua>))
               -> rlua::Result<Value<'lua>> {
    let screens: Vec<Screen> = lua.globals().get::<_, Vec<Table>>(SCREENS_HANDLE)?
        .into_iter().map(|t| Screen::cast(t.into()).unwrap())
        .collect();
    match index {
        Value::String(ref string) => {
            let string = string.to_str()?;
            if string == "primary" {
                // TODO Emit primary changed signal
                if screens.len() > 0 {
                    return screens[0].get_table().clone().to_lua(lua)
                }
            }
            for screen in screens.iter() {
                let screen_state = screen.state()?;
                for output in &screen_state.outputs {
                    if output.name.as_str() == string {
                        return screen.get_table().clone().to_lua(lua)
                    }
                }
            }
        },
        // TODO Might need to do Number instead
        Value::Integer(screen_index) => {
            if screen_index < 1 || screen_index as usize > screens.len() {
                return Err(rlua::Error::RuntimeError(
                    format!("invalid screen number: {} (of {} existing)",
                            screen_index, screens.len())))
            }
            // BUG FIX: screen_index is 1-based (validated above); convert to a
            // 0-based Vec index. Indexing with it directly skipped screen 1
            // and panicked when screen_index == screens.len().
            return screens[(screen_index - 1) as usize].get_table().clone().to_lua(lua)
        },
        Value::Table(ref table) => {
            // If this is a screen, just return it
            if let Ok(screen) = Screen::cast(table.clone().into()) {
                return screen.to_lua(lua)
            }
        },
        // TODO This checks user data like in luaA_toudata in awesome
        _ => {}
    }
    // TODO checkudata
    let meta = obj_table.get_metatable().unwrap();
    meta.get(index.clone()).or_else(|_| super::object::default_index(lua, (obj_table, index)))
}
Removed the screens list so the default rc.lua works
This keeps things working on master: the list is used by the layoutboxes
in the default rc.lua, so fix those before uncommenting it again.
//! TODO Fill in
use rustwlc::{Geometry, Point, Size, WlcOutput};
use std::fmt::{self, Display, Formatter};
use std::default::Default;
use std::rc::Rc;
use rlua::{self, Table, Lua, UserData, ToLua, Value};
use super::object::{Object, Objectable};
use super::property::Property;
use super::class::{self, Class, ClassBuilder};
pub const SCREENS_HANDLE: &'static str = "__screens";
/// Output information mirrored from a `WlcOutput`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Output {
    pub name: String,
    // NOTE(review): populated from the output *resolution* in
    // `From<WlcOutput>`, i.e. pixels -- confirm the mm_* naming is intended.
    pub mm_width: u32,
    pub mm_height: u32,
    // TODO The XID array?
}
impl From<WlcOutput> for Output {
    /// Build an `Output` snapshot from a live `WlcOutput`.
    /// Panics if the output reports no resolution.
    fn from(output: WlcOutput) -> Output {
        let resolution = output.get_resolution().unwrap();
        Output {
            name: output.get_name(),
            // NOTE(review): resolution (pixels) stored in mm_* fields -- confirm
            mm_width: resolution.w,
            mm_height: resolution.h
        }
    }
}
/// Lua-visible state backing a screen object.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ScreenState {
    // Is this screen still valid and may be used
    pub valid: bool,
    // Screen geometry
    pub geometry: Geometry,
    // Screen workarea
    pub workarea: Geometry,
    // The screen outputs information
    pub outputs: Vec<Output>,
    // Some XID identifying this screen
    pub xid: u32
}
/// Thin wrapper around the Lua table that backs a screen object.
pub struct Screen<'lua>(Table<'lua>);
impl_objectable!(Screen, ScreenState);
impl Display for ScreenState {
    /// Formats as "Screen: <address of this state value>".
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "Screen: {:p}", self)
    }
}
impl <'lua> ToLua<'lua> for Screen<'lua> {
    /// A screen converts to Lua as its backing table.
    fn to_lua(self, lua: &'lua Lua) -> rlua::Result<Value<'lua>> {
        self.0.to_lua(lua)
    }
}
impl UserData for ScreenState {}
impl Default for ScreenState {
    /// A fresh, valid screen with zeroed geometry/workarea and no outputs.
    fn default() -> Self {
        ScreenState {
            valid: true,
            geometry: Geometry::zero(),
            workarea: Geometry::zero(),
            outputs: vec![],
            xid: 0
        }
    }
}
impl <'lua> Screen<'lua> {
    /// Allocate a new screen object using the registered "screen" class.
    fn new(lua: &Lua) -> rlua::Result<Object> {
        let class = class::class_setup(lua, "screen")?;
        Ok(Screen::allocate(lua, class)?.build())
    }
    /// Replace this screen's output list in its stored state.
    fn init_screens(&mut self, outputs: Vec<Output>) -> rlua::Result<()> {
        let mut state = self.state()?;
        state.outputs = outputs;
        self.set_state(state)
    }
    /// Expose the workarea as a Lua table {x, y, width, height}.
    fn get_workarea(&self, lua: &'lua Lua) -> rlua::Result<Table<'lua>> {
        let state = self.state()?;
        let Point { x, y } = state.workarea.origin;
        let Size { w, h } = state.workarea.size;
        // TODO I do this a lot, put it somewhere
        let table = lua.create_table();
        table.set("x", x)?;
        table.set("y", y)?;
        table.set("width", w)?;
        table.set("height", h)?;
        Ok(table)
    }
}
/// Register the "screen" class and create one screen object per wlc output.
/// The SCREENS_HANDLE global is currently an empty table (see note below),
/// so the created screens are not yet published to Lua.
pub fn init(lua: &Lua) -> rlua::Result<Class> {
    let res = property_setup(lua, method_setup(lua, Class::builder(lua, Some(Rc::new(Screen::new)), None, None)?)?)?
        .save_class("screen")?
        .build()?;
    let mut screens: Vec<Screen> = vec![];
    for output in WlcOutput::list() {
        let mut screen = Screen::cast(Screen::new(lua)?)?;
        screen.init_screens(vec![output.into()])?;
        // TODO Move to Screen impl like the others
        screens.push(screen);
    }
    // TODO Uncomment
    // This breaks rc.lua because of layoutbox stuff.
    // Please fix that when you uncomment this.
    lua.globals().set(SCREENS_HANDLE, lua.create_table()/*screens.to_lua(lua)?*/)?;
    Ok(res)
}
/// Attach the class methods: signal stub, `__index`, and `__call`
/// (which makes `screen(...)` iterate over screens).
fn method_setup<'lua>(lua: &'lua Lua, builder: ClassBuilder<'lua>) -> rlua::Result<ClassBuilder<'lua>> {
    // TODO Do properly
    use super::dummy;
    builder.method("connect_signal".into(), lua.create_function(dummy))?
           .method("__index".into(), lua.create_function(index))?
           .method("__call".into(), lua.create_function(iterate_over_screens))
}
/// Attach the read-only "workarea" property.
fn property_setup<'lua>(lua: &'lua Lua, builder: ClassBuilder<'lua>) -> rlua::Result<ClassBuilder<'lua>> {
    builder
        .property(Property::new("workarea".into(),
                                None,
                                Some(lua.create_function(get_workarea)),
                                None))
}
/// Property getter: cast the object table back to a screen and return its
/// workarea as a Lua table.
fn get_workarea<'lua>(lua: &'lua Lua, table: Table<'lua>) -> rlua::Result<Table<'lua>> {
    let screen = Screen::cast(table.into())?;
    screen.get_workarea(lua)
}
/// Ok this requires some explanation...
/// Lua gives us the previous value in the loop, with the first one being nil
/// since there was nothing there before.
///
/// To ensure we loop over everything, we take the index of that value in our list,
/// increment it by 1 (starting at 0 if it's the start) and then once it falls outside
/// the bounds it will stop by returning nil.
fn iterate_over_screens<'lua>(lua: &'lua Lua,
                              (_, _, prev): (Value<'lua>, Value<'lua>, Value<'lua>))
                              -> rlua::Result<Value<'lua>> {
    // Reload the screen list from the Lua global on every step.
    let screens: Vec<Screen> = lua.globals().get::<_, Vec<Table>>(SCREENS_HANDLE)?
        .into_iter().map(|t| Screen::cast(t.into()).unwrap())
        .collect();
    let index = match prev {
        Value::Nil => 0,
        Value::Table(ref table) => {
            if let Ok(screen) = Screen::cast(table.clone().into()) {
                // If prev is somehow not in the list, unwrap_or(len) + 1 lands
                // past the end, terminating the iteration below.
                screens.iter().position(|t| t.state().unwrap() == screen.state().unwrap())
                    .unwrap_or(screens.len()) + 1
            } else {
                panic!("Unexpected non-screen table in loop");
            }
        }
        _ => panic!("Unexpected non-screen or nil value in screens loop")
    };
    if index < screens.len() {
        screens[index].get_table().to_lua(lua)
    } else {
        Ok(Value::Nil)
    }
}
/// `__index` metamethod for the screen class.
///
/// Resolves `screen[...]` lookups: by the special string `"primary"` or an
/// output name, by 1-based screen number, or by passing a screen object
/// through unchanged. Anything else falls back to the metatable and then the
/// default object index.
fn index<'lua>(lua: &'lua Lua,
               (obj_table, index): (Table<'lua>, Value<'lua>))
               -> rlua::Result<Value<'lua>> {
    let screens: Vec<Screen> = lua.globals().get::<_, Vec<Table>>(SCREENS_HANDLE)?
        .into_iter().map(|t| Screen::cast(t.into()).unwrap())
        .collect();
    match index {
        Value::String(ref string) => {
            let string = string.to_str()?;
            if string == "primary" {
                // TODO Emit primary changed signal
                if screens.len() > 0 {
                    return screens[0].get_table().clone().to_lua(lua)
                }
            }
            // Otherwise look the screen up by output name.
            for screen in screens.iter() {
                let screen_state = screen.state()?;
                for output in &screen_state.outputs {
                    if output.name.as_str() == string {
                        return screen.get_table().clone().to_lua(lua)
                    }
                }
            }
        },
        // TODO Might need to do Number instead
        Value::Integer(screen_index) => {
            if screen_index < 1 || screen_index as usize > screens.len() {
                return Err(rlua::Error::RuntimeError(
                    format!("invalid screen number: {} (of {} existing)",
                            screen_index, screens.len())))
            }
            // Lua indices are 1-based: subtract one before indexing the Vec.
            // Indexing with `screen_index` directly returned the wrong screen
            // and panicked for `screen_index == screens.len()`.
            return screens[screen_index as usize - 1].get_table().clone().to_lua(lua)
        },
        Value::Table(ref table) => {
            // If this is a screen, just return it
            if let Ok(screen) = Screen::cast(table.clone().into()) {
                return screen.to_lua(lua)
            }
        },
        // TODO This checks user data like in luaA_toudata in awesome
        _ => {}
    }
    // TODO checkudata
    let meta = obj_table.get_metatable().unwrap();
    meta.get(index.clone()).or_else(|_| super::object::default_index(lua, (obj_table, index)))
}
|
// Copyright © 2016, Canal TP and/or its affiliates. All rights reserved.
//
// This file is part of Navitia,
// the software to build cool stuff with public transport.
//
// Hope you'll enjoy and contribute to this project,
// powered by Canal TP (www.canaltp.fr).
// Help us simplify mobility and open public transport:
// a non ending quest to the responsive locomotion way of traveling!
//
// LICENCE: This program is free software; you can redistribute it
// and/or modify it under the terms of the GNU Affero General Public
// License as published by the Free Software Foundation, either
// version 3 of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public
// License along with this program. If not, see
// <http://www.gnu.org/licenses/>.
//
// Stay tuned using
// twitter @navitia
// IRC #navitia on freenode
// https://groups.google.com/d/forum/navitia
// www.navitia.io
extern crate failure;
extern crate mimir;
extern crate mimirsbrunn;
extern crate navitia_model;
#[macro_use]
extern crate slog;
#[macro_use]
extern crate slog_scope;
#[macro_use]
extern crate structopt;
use std::path::PathBuf;
use structopt::StructOpt;
use mimirsbrunn::stops::*;
use navitia_model::objects as navitia;
use navitia_model::collection::Idx;
use failure::ResultExt;
// Command-line arguments for ntfs2mimir. The `///` comments on the fields
// double as the CLI help text generated by StructOpt, so they are kept
// short and user-facing.
#[derive(Debug, StructOpt)]
struct Args {
/// NTFS directory.
#[structopt(short = "i", long = "input", parse(from_os_str), default_value = ".")]
input: PathBuf,
/// Name of the dataset.
#[structopt(short = "d", long = "dataset", default_value = "fr")]
dataset: String,
/// Elasticsearch parameters.
#[structopt(short = "c", long = "connection-string",
default_value = "http://localhost:9200/munin")]
connection_string: String,
/// Deprecated option.
#[structopt(short = "C", long = "city-level")]
city_level: Option<String>,
}
/// Converts an NTFS stop area into a mimir `Stop`.
///
/// `idx` identifies `stop_area` inside `navitia` and is used to collect the
/// related commercial modes, physical modes and contributors (feed
/// publishers). The weight is left at 0 and filled in later by the caller.
fn to_mimir(
    idx: Idx<navitia::StopArea>,
    stop_area: &navitia::StopArea,
    navitia: &navitia_model::PtObjects,
) -> mimir::Stop {
    let commercial_modes = navitia
        .get_corresponding_from_idx(idx)
        .into_iter()
        .map(|cm_idx| mimir::CommercialMode {
            id: format!("commercial_mode:{}", navitia.commercial_modes[cm_idx].id),
            name: navitia.commercial_modes[cm_idx].name.clone(),
        })
        .collect();
    let physical_modes = navitia
        .get_corresponding_from_idx(idx)
        .into_iter()
        .map(|pm_idx| mimir::PhysicalMode {
            id: format!("physical_mode:{}", navitia.physical_modes[pm_idx].id),
            name: navitia.physical_modes[pm_idx].name.clone(),
        })
        .collect();
    let feed_publishers = navitia
        .get_corresponding_from_idx(idx)
        .into_iter()
        .map(|contrib_idx| mimir::FeedPublisher {
            id: navitia.contributors[contrib_idx].id.clone(),
            name: navitia.contributors[contrib_idx].name.clone(),
            // Missing license/website default to an empty string.
            license: navitia.contributors[contrib_idx]
                .license
                .clone()
                .unwrap_or_default(),
            url: navitia.contributors[contrib_idx]
                .website
                .clone()
                .unwrap_or_default(),
        })
        .collect();
    mimir::Stop {
        id: format!("stop_area:{}", stop_area.id),
        label: stop_area.name.clone(),
        name: stop_area.name.clone(),
        coord: mimir::Coord::new(stop_area.coord.lon, stop_area.coord.lat),
        commercial_modes,
        physical_modes,
        administrative_regions: vec![],
        weight: 0.,
        zip_codes: vec![],
        coverages: vec![],
        // `unwrap_or_default` avoids the eager `format!("")` allocation that
        // happened even when a timezone was present.
        timezone: stop_area.timezone.clone().unwrap_or_default(),
        codes: stop_area
            .codes
            .iter()
            .map(|&(ref t, ref v)| mimir::Code {
                name: t.clone(),
                value: v.clone(),
            })
            .collect(),
        properties: stop_area
            .object_properties
            .iter()
            .map(|&(ref k, ref v)| mimir::Property {
                key: k.clone(),
                value: v.clone(),
            })
            .collect(),
        feed_publishers,
    }
}
fn main() {
    // Keep the logger guard alive for the whole program lifetime.
    let _guard = mimir::logger_init();
    info!("Launching ntfs2mimir...");
    let args = Args::from_args();
    if args.city_level.is_some() {
        warn!("city-level option is deprecated, it now has no effect.");
    }
    match run(args) {
        Ok(()) => {}
        Err(err) => {
            // Print the whole causal chain before exiting non-zero.
            for cause in err.causes() {
                eprintln!("{}", cause);
            }
            std::process::exit(1);
        }
    }
}
/// Reads the NTFS dataset, converts stop areas to mimir stops, weights them
/// by their number of stop points, and imports them into Elasticsearch.
fn run(args: Args) -> Result<(), navitia_model::Error> {
let navitia = navitia_model::ntfs::read(&args.input)?;
// Map of stop area id -> number of attached stop points; used as weight.
// The collect target's concrete type is inferred from `set_weights`.
let nb_stop_points = navitia
.stop_areas
.iter()
.map(|(idx, sa)| {
let id = format!("stop_area:{}", sa.id);
let nb_stop_points = navitia
.get_corresponding_from_idx::<_, navitia::StopPoint>(idx)
.len();
(id, nb_stop_points as u32)
})
.collect();
let mut stops: Vec<mimir::Stop> = navitia
.stop_areas
.iter()
.map(|(idx, sa)| to_mimir(idx, sa, &navitia))
.collect();
set_weights(stops.iter_mut(), &nb_stop_points);
import_stops(stops, &args.connection_string, &args.dataset).context(format!(
"Error occurred when importing stops into {} on {}",
args.dataset, args.connection_string
))?;
Ok(())
}
// add test
// Copyright © 2016, Canal TP and/or its affiliates. All rights reserved.
//
// This file is part of Navitia,
// the software to build cool stuff with public transport.
//
// Hope you'll enjoy and contribute to this project,
// powered by Canal TP (www.canaltp.fr).
// Help us simplify mobility and open public transport:
// a non ending quest to the responsive locomotion way of traveling!
//
// LICENCE: This program is free software; you can redistribute it
// and/or modify it under the terms of the GNU Affero General Public
// License as published by the Free Software Foundation, either
// version 3 of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public
// License along with this program. If not, see
// <http://www.gnu.org/licenses/>.
//
// Stay tuned using
// twitter @navitia
// IRC #navitia on freenode
// https://groups.google.com/d/forum/navitia
// www.navitia.io
extern crate failure;
extern crate mimir;
extern crate mimirsbrunn;
extern crate navitia_model;
#[macro_use]
extern crate slog;
#[macro_use]
extern crate slog_scope;
#[macro_use]
extern crate structopt;
use std::path::PathBuf;
use structopt::StructOpt;
use mimirsbrunn::stops::*;
use navitia_model::objects as navitia;
use navitia_model::collection::Idx;
use failure::ResultExt;
// Command-line arguments for ntfs2mimir. The `///` comments on the fields
// double as the CLI help text generated by StructOpt, so they are kept
// short and user-facing.
#[derive(Debug, StructOpt)]
struct Args {
/// NTFS directory.
#[structopt(short = "i", long = "input", parse(from_os_str), default_value = ".")]
input: PathBuf,
/// Name of the dataset.
#[structopt(short = "d", long = "dataset", default_value = "fr")]
dataset: String,
/// Elasticsearch parameters.
#[structopt(short = "c", long = "connection-string",
default_value = "http://localhost:9200/munin")]
connection_string: String,
/// Deprecated option.
#[structopt(short = "C", long = "city-level")]
city_level: Option<String>,
}
/// Converts an NTFS stop area into a mimir `Stop`.
///
/// `idx` identifies `stop_area` inside `navitia` and is used to collect the
/// related commercial modes, physical modes and contributors (feed
/// publishers). The weight is left at 0 and filled in later by the caller.
fn to_mimir(
    idx: Idx<navitia::StopArea>,
    stop_area: &navitia::StopArea,
    navitia: &navitia_model::PtObjects,
) -> mimir::Stop {
    let commercial_modes = navitia
        .get_corresponding_from_idx(idx)
        .into_iter()
        .map(|cm_idx| mimir::CommercialMode {
            id: format!("commercial_mode:{}", navitia.commercial_modes[cm_idx].id),
            name: navitia.commercial_modes[cm_idx].name.clone(),
        })
        .collect();
    let physical_modes = navitia
        .get_corresponding_from_idx(idx)
        .into_iter()
        .map(|pm_idx| mimir::PhysicalMode {
            id: format!("physical_mode:{}", navitia.physical_modes[pm_idx].id),
            name: navitia.physical_modes[pm_idx].name.clone(),
        })
        .collect();
    let feed_publishers = navitia
        .get_corresponding_from_idx(idx)
        .into_iter()
        .map(|contrib_idx| mimir::FeedPublisher {
            id: navitia.contributors[contrib_idx].id.clone(),
            name: navitia.contributors[contrib_idx].name.clone(),
            // Missing license/website default to an empty string.
            license: navitia.contributors[contrib_idx]
                .license
                .clone()
                .unwrap_or_default(),
            url: navitia.contributors[contrib_idx]
                .website
                .clone()
                .unwrap_or_default(),
        })
        .collect();
    mimir::Stop {
        id: format!("stop_area:{}", stop_area.id),
        label: stop_area.name.clone(),
        name: stop_area.name.clone(),
        coord: mimir::Coord::new(stop_area.coord.lon, stop_area.coord.lat),
        commercial_modes,
        physical_modes,
        administrative_regions: vec![],
        weight: 0.,
        zip_codes: vec![],
        coverages: vec![],
        // `unwrap_or_default` avoids the eager `format!("")` allocation that
        // happened even when a timezone was present.
        timezone: stop_area.timezone.clone().unwrap_or_default(),
        codes: stop_area
            .codes
            .iter()
            .map(|&(ref t, ref v)| mimir::Code {
                name: t.clone(),
                value: v.clone(),
            })
            .collect(),
        properties: stop_area
            .object_properties
            .iter()
            .map(|&(ref k, ref v)| mimir::Property {
                key: k.clone(),
                value: v.clone(),
            })
            .collect(),
        feed_publishers,
    }
}
fn main() {
    // Keep the logger guard alive for the whole program lifetime.
    let _guard = mimir::logger_init();
    info!("Launching ntfs2mimir...");
    let args = Args::from_args();
    if args.city_level.is_some() {
        warn!("city-level option is deprecated, it now has no effect.");
    }
    match run(args) {
        Ok(()) => {}
        Err(err) => {
            // Print the whole causal chain before exiting non-zero.
            for cause in err.causes() {
                eprintln!("{}", cause);
            }
            std::process::exit(1);
        }
    }
}
/// Reads the NTFS dataset, converts stop areas to mimir stops, weights them
/// by their number of stop points, and imports them into Elasticsearch.
fn run(args: Args) -> Result<(), navitia_model::Error> {
let navitia = navitia_model::ntfs::read(&args.input)?;
// Map of stop area id -> number of attached stop points; used as weight.
// The collect target's concrete type is inferred from `set_weights`.
let nb_stop_points = navitia
.stop_areas
.iter()
.map(|(idx, sa)| {
let id = format!("stop_area:{}", sa.id);
let nb_stop_points = navitia
.get_corresponding_from_idx::<_, navitia::StopPoint>(idx)
.len();
(id, nb_stop_points as u32)
})
.collect();
let mut stops: Vec<mimir::Stop> = navitia
.stop_areas
.iter()
.map(|(idx, sa)| to_mimir(idx, sa, &navitia))
.collect();
set_weights(stops.iter_mut(), &nb_stop_points);
import_stops(stops, &args.connection_string, &args.dataset).context(format!(
"Error occurred when importing stops into {} on {}",
args.dataset, args.connection_string
))?;
Ok(())
}
#[test]
fn test_bad_connection_string() {
    // Nothing listens on port 4242, so the import must fail with a
    // connection-refused chain. The previous version silently passed when
    // `run` unexpectedly succeeded; `expect_err` makes that a test failure,
    // and `assert_eq!` prints the actual causes on mismatch.
    let args = Args {
        input: PathBuf::from("./tests/fixtures/ntfs"),
        connection_string: "http://localhost:4242".to_string(),
        dataset: "bob".to_string(),
        city_level: None,
    };
    let err = run(args).expect_err("run should fail with an unreachable Elasticsearch");
    let causes = err.causes()
        .into_iter()
        .map(|cause| format!("{}", cause))
        .collect::<Vec<String>>();
    assert_eq!(
        causes,
        vec!["Error occurred when importing stops into bob on http://localhost:4242".to_string(),
             "Error: Connection refused (os error 111) while creating template template_addr".to_string()]
    );
}
#[test]
fn test_bad_file() {
    // A missing NTFS directory must make `run` fail while reading the first
    // file. The previous version silently passed when `run` unexpectedly
    // succeeded; `expect_err` makes that a test failure, and `assert_eq!`
    // prints the actual causes on mismatch.
    let args = Args {
        input: PathBuf::from("./tests/fixtures/not_exist"),
        connection_string: "http://localhost:9200".to_string(),
        dataset: "bob".to_string(),
        city_level: None,
    };
    let err = run(args).expect_err("run should fail on a missing NTFS directory");
    let causes = err.causes()
        .into_iter()
        .map(|cause| format!("{}", cause))
        .collect::<Vec<String>>();
    assert_eq!(
        causes,
        vec![
            "Error reading \"./tests/fixtures/not_exist/contributors.txt\"",
            "No such file or directory (os error 2)",
        ]
    );
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io::Read;
use std::path::{Path as FilePath, PathBuf as FilePathBuf};
use syn;
use crate::bindgen::bitflags;
use crate::bindgen::cargo::{Cargo, PackageRef};
use crate::bindgen::config::{Config, ParseConfig};
use crate::bindgen::error::Error;
use crate::bindgen::ir::{
AnnotationSet, Cfg, Constant, Documentation, Enum, Function, GenericParams, ItemMap,
OpaqueItem, Path, Static, Struct, Type, Typedef, Union,
};
use crate::bindgen::utilities::{SynAbiHelpers, SynItemFnHelpers, SynItemHelpers};
// Crates that are part of the Rust distribution; they are never followed as
// dependencies (see `should_parse_dependency`).
const STD_CRATES: &[&str] = &[
"std",
"std_unicode",
"alloc",
"collections",
"core",
"proc_macro",
];
// Shorthand for the result of the two public parse entry points below.
type ParseResult = Result<Parse, Error>;
/// Parses a single rust source file, not following `mod` or `extern crate`.
pub fn parse_src(src_file: &FilePath, config: &Config) -> ParseResult {
    let mod_name = src_file.file_stem().unwrap().to_str().unwrap();
    // Single-file mode forces dependency parsing on, with defaults elsewhere.
    let mut config = config.clone();
    config.parse = ParseConfig {
        parse_deps: true,
        ..ParseConfig::default()
    };
    let pkg_ref = PackageRef {
        name: mod_name.to_owned(),
        version: None,
    };
    // No `Cargo` instance: there is no workspace to resolve crates against.
    let mut parser = Parser {
        binding_crate_name: mod_name.to_owned(),
        config: &config,
        lib: None,
        parsed_crates: HashSet::new(),
        cache_src: HashMap::new(),
        cache_expanded_crate: HashMap::new(),
        cfg_stack: Vec::new(),
        out: Parse::new(),
    };
    parser.parse_mod(&pkg_ref, src_file)?;
    Ok(parser.out)
}
/// Recursively parses a rust library starting at the root crate's directory.
///
/// Inside a crate, `mod` and `extern crate` declarations are followed
/// and parsed. To find an external crate, the parser uses the `cargo metadata`
/// command to find the location of dependencies.
pub(crate) fn parse_lib(lib: Cargo, config: &Config) -> ParseResult {
    // Grab the name before `lib` is moved into the parser.
    let binding_crate_name = lib.binding_crate_name().to_owned();
    let mut parser = Parser {
        binding_crate_name,
        config,
        lib: Some(lib),
        parsed_crates: HashSet::new(),
        cache_src: HashMap::new(),
        cache_expanded_crate: HashMap::new(),
        cfg_stack: Vec::new(),
        out: Parse::new(),
    };
    let root_crate = parser.lib.as_ref().unwrap().binding_crate_ref();
    parser.parse_crate(&root_crate)?;
    Ok(parser.out)
}
/// Walks a crate (or a single file) and accumulates bindgen items in `out`.
#[derive(Debug, Clone)]
struct Parser<'a> {
/// Name of the crate bindings are being generated for.
binding_crate_name: String,
/// Workspace handle; `None` when parsing a single file (`parse_src`).
lib: Option<Cargo>,
config: &'a Config,
/// Crates already visited, to avoid parsing a dependency twice.
parsed_crates: HashSet<String>,
/// Cache of parsed items per source file path.
cache_src: HashMap<FilePathBuf, Vec<syn::Item>>,
/// Cache of parsed items per macro-expanded crate name.
cache_expanded_crate: HashMap<String, Vec<syn::Item>>,
/// `cfg` conditions currently in effect while descending modules/deps.
cfg_stack: Vec<Cfg>,
out: Parse,
}
impl<'a> Parser<'a> {
/// Decides whether a dependency crate should itself be parsed.
///
/// Order matters: already-parsed crates are skipped first, then the global
/// `parse_deps` switch, then the expand list (which bypasses the
/// include/exclude filters), then the whitelist, then the blacklist.
fn should_parse_dependency(&self, pkg_name: &String) -> bool {
if self.parsed_crates.contains(pkg_name) {
return false;
}
if !self.config.parse.parse_deps {
return false;
}
// Skip any whitelist or blacklist for expand
if self.config.parse.expand.crates.contains(&pkg_name) {
return true;
}
// If we have a whitelist, check it
if let Some(ref include) = self.config.parse.include {
if !include.contains(&pkg_name) {
return false;
}
}
// Check the blacklist
!STD_CRATES.contains(&pkg_name.as_ref()) && !self.config.parse.exclude.contains(&pkg_name)
}
/// Parses a crate and, recursively, the dependencies accepted by
/// `should_parse_dependency`. Requires `self.lib` (only reachable through
/// `parse_lib`).
fn parse_crate(&mut self, pkg: &PackageRef) -> Result<(), Error> {
assert!(self.lib.is_some());
// Mark as visited up front so dependency cycles terminate.
self.parsed_crates.insert(pkg.name.clone());
// Check if we should use cargo expand for this crate
if self.config.parse.expand.crates.contains(&pkg.name) {
self.parse_expand_crate(pkg)?;
} else {
// Parse the crate before the dependencies otherwise the same-named idents we
// want to generate bindings for would be replaced by the ones provided
// by the first dependency containing it.
let crate_src = self.lib.as_ref().unwrap().find_crate_src(pkg);
match crate_src {
Some(crate_src) => self.parse_mod(pkg, crate_src.as_path())?,
None => {
// This should be an error, but is common enough to just elicit a warning
warn!(
"Parsing crate `{}`: can't find lib.rs with `cargo metadata`.",
pkg.name
);
}
}
}
for (dep_pkg, cfg) in self.lib.as_ref().unwrap().dependencies(&pkg) {
if !self.should_parse_dependency(&dep_pkg.name) {
continue;
}
// A cfg-gated dependency contributes its items under that cfg for the
// duration of the recursive parse.
if let Some(ref cfg) = cfg {
self.cfg_stack.push(cfg.clone());
}
self.parse_crate(&dep_pkg)?;
if cfg.is_some() {
self.cfg_stack.pop();
}
}
Ok(())
}
/// Parses a crate from the output of `cargo expand` (macro-expanded source)
/// instead of its on-disk files. Results are cached per crate name.
fn parse_expand_crate(&mut self, pkg: &PackageRef) -> Result<(), Error> {
assert!(self.lib.is_some());
// If you want to expand the crate you run cbindgen on you might end up in an endless
// recursion if the cbindgen generation is triggered from build.rs. Hence don't run the
// expansion if the build was already triggered by cbindgen.
if std::env::var("_CBINDGEN_IS_RUNNING").is_ok() {
return Ok(());
}
let mod_parsed = {
// Expand and parse each crate at most once; later calls hit the cache.
if !self.cache_expanded_crate.contains_key(&pkg.name) {
let s = self
.lib
.as_ref()
.unwrap()
.expand_crate(
pkg,
self.config.parse.expand.all_features,
self.config.parse.expand.default_features,
&self.config.parse.expand.features,
)
.map_err(|x| Error::CargoExpand(pkg.name.clone(), x))?;
let i = syn::parse_file(&s).map_err(|x| Error::ParseSyntaxError {
crate_name: pkg.name.clone(),
// Expanded source has no meaningful on-disk path.
src_path: "".to_owned(),
error: x,
})?;
self.cache_expanded_crate.insert(pkg.name.clone(), i.items);
}
self.cache_expanded_crate.get(&pkg.name).unwrap().clone()
};
self.process_expanded_mod(pkg, &mod_parsed)
}
/// Loads the items of a macro-expanded crate into `self.out` and recurses
/// into inline modules (expanded output contains no file modules).
fn process_expanded_mod(&mut self, pkg: &PackageRef, items: &[syn::Item]) -> Result<(), Error> {
self.out.load_syn_crate_mod(
&self.config,
&self.binding_crate_name,
&pkg.name,
Cfg::join(&self.cfg_stack).as_ref(),
items,
);
for item in items {
if item.has_test_attr() {
continue;
}
if let syn::Item::Mod(ref item) = *item {
// Scope the module's cfg over its contents.
let cfg = Cfg::load(&item.attrs);
if let Some(ref cfg) = cfg {
self.cfg_stack.push(cfg.clone());
}
if let Some((_, ref inline_items)) = item.content {
self.process_expanded_mod(pkg, inline_items)?;
} else {
// `cargo expand` inlines every module, so a bodiless `mod`
// cannot occur here.
unreachable!();
}
if cfg.is_some() {
self.cfg_stack.pop();
}
}
}
Ok(())
}
/// Parses a module file at the crate root (depth 0).
fn parse_mod(&mut self, pkg: &PackageRef, mod_path: &FilePath) -> Result<(), Error> {
self.parse_mod_depth(pkg, mod_path, 0)
}
/// Reads, caches, and processes a single module file.
///
/// `depth` distinguishes the crate root (0) from nested module files; it
/// determines where child modules are searched for under Rust 2018 layout.
fn parse_mod_depth(&mut self, pkg: &PackageRef, mod_path: &FilePath, depth: usize) -> Result<(), Error> {
let mod_parsed = {
let owned_mod_path = mod_path.to_path_buf();
// Each file is read and parsed at most once; the cache is keyed by path.
if !self.cache_src.contains_key(&owned_mod_path) {
let mut s = String::new();
let mut f = File::open(mod_path).map_err(|_| Error::ParseCannotOpenFile {
crate_name: pkg.name.clone(),
src_path: mod_path.to_str().unwrap().to_owned(),
})?;
f.read_to_string(&mut s)
.map_err(|_| Error::ParseCannotOpenFile {
crate_name: pkg.name.clone(),
src_path: mod_path.to_str().unwrap().to_owned(),
})?;
let i = syn::parse_file(&s).map_err(|x| Error::ParseSyntaxError {
crate_name: pkg.name.clone(),
src_path: owned_mod_path.to_string_lossy().into(),
error: x,
})?;
self.cache_src.insert(owned_mod_path.clone(), i.items);
}
self.cache_src.get(&owned_mod_path).unwrap().clone()
};
// Compute module directory according to Rust 2018 rules
let mod_dir_2018;
let mod_dir = if depth == 0 {
// Crate root: children live next to lib.rs/main.rs.
mod_path.parent().unwrap()
} else {
// Non-root file `foo.rs`: children live in the sibling dir `foo/`.
mod_dir_2018 = mod_path.parent().unwrap().join(mod_path.file_stem().unwrap());
&mod_dir_2018
};
self.process_mod(pkg, &mod_dir, &mod_parsed, depth)
}
/// Loads the items of a parsed module into `self.out` and resolves child
/// `mod` declarations: inline modules recurse directly, file modules are
/// looked up as `name.rs`, then `name/mod.rs`, then a `#[path = "..."]`
/// attribute.
fn process_mod(
&mut self,
pkg: &PackageRef,
mod_dir: &FilePath,
items: &[syn::Item],
depth: usize,
) -> Result<(), Error> {
self.out.load_syn_crate_mod(
&self.config,
&self.binding_crate_name,
&pkg.name,
Cfg::join(&self.cfg_stack).as_ref(),
items,
);
for item in items {
if item.has_test_attr() {
continue;
}
if let syn::Item::Mod(ref item) = *item {
let next_mod_name = item.ident.to_string();
// Scope the module's cfg over everything parsed below it.
let cfg = Cfg::load(&item.attrs);
if let Some(ref cfg) = cfg {
self.cfg_stack.push(cfg.clone());
}
if let Some((_, ref inline_items)) = item.content {
// Inline `mod name { ... }`: stay at the same depth but descend
// into a matching subdirectory for any nested file modules.
self.process_mod(pkg, &mod_dir.join(&next_mod_name), inline_items, depth)?;
} else {
let next_mod_path1 = mod_dir.join(next_mod_name.clone() + ".rs");
let next_mod_path2 = mod_dir.join(next_mod_name.clone()).join("mod.rs");
if next_mod_path1.exists() {
self.parse_mod_depth(pkg, next_mod_path1.as_path(), depth + 1)?;
} else if next_mod_path2.exists() {
self.parse_mod_depth(pkg, next_mod_path2.as_path(), depth + 1)?;
} else {
// Last chance to find a module path
let mut path_attr_found = false;
for attr in &item.attrs {
match attr.parse_meta() {
Ok(syn::Meta::NameValue(syn::MetaNameValue {
path, lit, ..
})) => match lit {
syn::Lit::Str(ref path_lit) if path.is_ident("path") => {
path_attr_found = true;
self.parse_mod_depth(pkg, &mod_dir.join(path_lit.value()), depth + 1)?;
break;
}
_ => (),
},
_ => (),
}
}
// This should be an error, but it's common enough to
// just elicit a warning
if !path_attr_found {
warn!(
"Parsing crate `{}`: can't find mod {}`.",
pkg.name, next_mod_name
);
}
}
}
if cfg.is_some() {
self.cfg_stack.pop();
}
}
}
Ok(())
}
}
/// All items collected from the parsed crate(s), grouped by kind.
#[derive(Debug, Clone)]
pub struct Parse {
pub constants: ItemMap<Constant>,
pub globals: ItemMap<Static>,
pub enums: ItemMap<Enum>,
pub structs: ItemMap<Struct>,
pub unions: ItemMap<Union>,
pub opaque_items: ItemMap<OpaqueItem>,
pub typedefs: ItemMap<Typedef>,
pub functions: Vec<Function>,
}
impl Parse {
/// Creates an empty `Parse` with no items collected yet.
pub fn new() -> Parse {
Parse {
constants: ItemMap::new(),
globals: ItemMap::new(),
enums: ItemMap::new(),
structs: ItemMap::new(),
unions: ItemMap::new(),
opaque_items: ItemMap::new(),
typedefs: ItemMap::new(),
functions: Vec::new(),
}
}
/// Registers well-known std types as opaque items so references to them
/// resolve without parsing the standard library.
pub fn add_std_types(&mut self) {
    // (type name, generic parameter names) for each std type kept opaque.
    let std_types: &[(&str, &[&str])] = &[
        ("String", &[]),
        ("Box", &["T"]),
        ("RefCell", &["T"]),
        ("Rc", &["T"]),
        ("Arc", &["T"]),
        ("Result", &["T", "E"]),
        ("Option", &["T"]),
        ("NonNull", &["T"]),
        ("Vec", &["T"]),
        ("HashMap", &["K", "V"]),
        ("BTreeMap", &["K", "V"]),
        ("HashSet", &["T"]),
        ("BTreeSet", &["T"]),
        ("LinkedList", &["T"]),
        ("VecDeque", &["T"]),
    ];
    for &(name, params) in std_types {
        let generic_params = GenericParams(params.iter().map(|&p| Path::new(p)).collect());
        self.opaque_items.try_insert(OpaqueItem::new(
            Path::new(name),
            generic_params,
            None,
            AnnotationSet::new(),
            Documentation::none(),
        ));
    }
}
/// Merges the items collected in `other` into `self` (used to combine
/// per-crate parses into one result).
pub fn extend_with(&mut self, other: &Parse) {
self.constants.extend_with(&other.constants);
self.globals.extend_with(&other.globals);
self.enums.extend_with(&other.enums);
self.structs.extend_with(&other.structs);
self.unions.extend_with(&other.unions);
self.opaque_items.extend_with(&other.opaque_items);
self.typedefs.extend_with(&other.typedefs);
self.functions.extend_from_slice(&other.functions);
}
/// Dispatches every item of one module to the matching `load_syn_*` loader.
///
/// Items carrying a test attribute are skipped. Impl blocks containing
/// associated constants are collected first and processed after the main
/// loop, so the structs they attach to are already loaded.
pub fn load_syn_crate_mod(
&mut self,
config: &Config,
binding_crate_name: &str,
crate_name: &str,
mod_cfg: Option<&Cfg>,
items: &[syn::Item],
) {
let mut impls_with_assoc_consts = Vec::new();
for item in items {
if item.has_test_attr() {
continue;
}
match item {
syn::Item::ForeignMod(ref item) => {
self.load_syn_foreign_mod(
config,
binding_crate_name,
crate_name,
mod_cfg,
item,
);
}
syn::Item::Fn(ref item) => {
self.load_syn_fn(config, binding_crate_name, crate_name, mod_cfg, item);
}
syn::Item::Const(ref item) => {
self.load_syn_const(config, binding_crate_name, crate_name, mod_cfg, item);
}
syn::Item::Static(ref item) => {
self.load_syn_static(config, binding_crate_name, crate_name, mod_cfg, item);
}
syn::Item::Struct(ref item) => {
self.load_syn_struct(config, crate_name, mod_cfg, item);
}
syn::Item::Union(ref item) => {
self.load_syn_union(config, crate_name, mod_cfg, item);
}
syn::Item::Enum(ref item) => {
self.load_syn_enum(config, crate_name, mod_cfg, item);
}
syn::Item::Type(ref item) => {
self.load_syn_ty(crate_name, mod_cfg, item);
}
syn::Item::Impl(ref item_impl) => {
// Defer associated constants to the second pass below.
let has_assoc_const = item_impl.items.iter().any(|item| match item {
syn::ImplItem::Const(_) => true,
_ => false,
});
if has_assoc_const {
impls_with_assoc_consts.push(item_impl);
}
// Methods are only loaded when the impl target is a plain
// identifier (no generics, no paths).
if let syn::Type::Path(ref path) = *item_impl.self_ty {
if let Some(type_name) = path.path.get_ident() {
for method in item_impl.items.iter().filter_map(|item| match item {
syn::ImplItem::Method(method) => Some(method),
_ => None,
}) {
self.load_syn_method(
config,
binding_crate_name,
crate_name,
mod_cfg,
&Path::new(type_name.to_string()),
method,
)
}
}
}
}
syn::Item::Macro(ref item) => {
self.load_builtin_macro(config, crate_name, mod_cfg, item)
}
_ => {}
}
}
for item_impl in impls_with_assoc_consts {
self.load_syn_assoc_consts_from_impl(crate_name, mod_cfg, item_impl)
}
}
/// Extracts the associated `const` items of an impl block and forwards them
/// to `load_syn_assoc_consts` together with the impl's self type.
fn load_syn_assoc_consts_from_impl(
    &mut self,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item_impl: &syn::ItemImpl,
) {
    let associated_constants = item_impl.items.iter().filter_map(|item| {
        if let syn::ImplItem::Const(ref associated_constant) = *item {
            Some(associated_constant)
        } else {
            None
        }
    });
    self.load_syn_assoc_consts(
        crate_name,
        mod_cfg,
        &item_impl.self_ty,
        associated_constants,
    );
}
/// Enters a `extern "C" { }` declaration and loads function declarations.
fn load_syn_foreign_mod(
&mut self,
config: &Config,
binding_crate_name: &str,
crate_name: &str,
mod_cfg: Option<&Cfg>,
item: &syn::ItemForeignMod,
) {
// Only `extern "C"` (not e.g. "stdcall") blocks are bindable.
if !item.abi.is_c() {
info!("Skip {} - (extern block must be extern C).", crate_name);
return;
}
for foreign_item in &item.items {
if let syn::ForeignItem::Fn(ref function) = *foreign_item {
if !config
.parse
.should_generate_top_level_item(crate_name, binding_crate_name)
{
// NOTE(review): this check does not depend on `function`, so
// the `return` below abandons the whole block on the first fn.
info!(
"Skip {}::{} - (fn's outside of the binding crate are not used).",
crate_name, &function.sig.ident
);
return;
}
let path = Path::new(function.sig.ident.to_string());
match Function::load(path, None, &function.sig, true, &function.attrs, mod_cfg) {
Ok(func) => {
info!("Take {}::{}.", crate_name, &function.sig.ident);
self.functions.push(func);
}
Err(msg) => {
error!(
"Cannot use fn {}::{} ({}).",
crate_name, &function.sig.ident, msg
);
}
}
}
}
}
/// Loads a `fn` declaration inside an `impl` block, if the type is a simple identifier
fn load_syn_method(
&mut self,
config: &Config,
binding_crate_name: &str,
crate_name: &str,
mod_cfg: Option<&Cfg>,
self_type: &Path,
item: &syn::ImplItemMethod,
) {
// Methods share the free-function path; `self_type` only affects naming
// in log output.
self.load_fn_declaration(
config,
binding_crate_name,
crate_name,
mod_cfg,
item,
Some(self_type),
&item.sig,
&item.vis,
&item.attrs,
)
}
/// Loads a `fn` declaration
fn load_syn_fn(
&mut self,
config: &Config,
binding_crate_name: &str,
crate_name: &str,
mod_cfg: Option<&Cfg>,
item: &syn::ItemFn,
) {
// Free functions have no self type; everything else matches the method path.
self.load_fn_declaration(
config,
binding_crate_name,
crate_name,
mod_cfg,
item,
None,
&item.sig,
&item.vis,
&item.attrs,
);
}
/// Common loader for free functions and methods.
///
/// A function is taken only when it is `pub`, is `extern "C"` (explicitly or
/// by omitted ABI), and has an exported name (`#[no_mangle]` or
/// `#[export_name]`); every other combination produces a skip warning.
fn load_fn_declaration(
&mut self,
config: &Config,
binding_crate_name: &str,
crate_name: &str,
mod_cfg: Option<&Cfg>,
named_symbol: &dyn SynItemFnHelpers,
self_type: Option<&Path>,
sig: &syn::Signature,
vis: &syn::Visibility,
attrs: &[syn::Attribute],
) {
if !config
.parse
.should_generate_top_level_item(crate_name, binding_crate_name)
{
info!(
"Skip {}::{} - (fn's outside of the binding crate are not used).",
crate_name, &sig.ident
);
return;
}
// Lazily-built "crate::Type::fn" (or "crate::fn") label for log messages.
let loggable_item_name = || {
let mut items = vec![];
items.push(crate_name.to_owned());
if let Some(ref self_type) = self_type {
items.push(self_type.to_string());
}
items.push(sig.ident.to_string());
items.join("::")
};
let is_extern_c = sig.abi.is_omitted() || sig.abi.is_c();
let exported_name = named_symbol.exported_name();
if let syn::Visibility::Public(_) = vis {
match (is_extern_c, exported_name) {
(true, Some(exported_name)) => {
let path = Path::new(exported_name);
match Function::load(path, self_type, &sig, false, &attrs, mod_cfg) {
Ok(func) => {
info!("Take {}.", loggable_item_name());
self.functions.push(func);
}
Err(msg) => {
error!("Cannot use fn {} ({}).", loggable_item_name(), msg);
}
}
}
(true, None) => {
warn!(
"Skipping {} - (not `no_mangle`, and has no `export_name` attribute)",
loggable_item_name()
);
}
(false, Some(_exported_name)) => {
warn!("Skipping {} - (not `extern \"C\"`", loggable_item_name());
}
// Plain pub Rust function: not meant for the bindings, stay silent.
(false, None) => {}
}
} else {
// Not `pub`: warn only when attributes suggest it was meant to be exported.
match (is_extern_c, exported_name) {
(true, Some(..)) => {
warn!(
"Skipping {} - (not `pub` but is `extern \"C\"` and `no_mangle`)",
loggable_item_name()
);
}
(true, None) => {
warn!(
"Skipping {} - (not `pub` but is `extern \"C\"`)",
loggable_item_name()
);
}
(false, Some(..)) => {
warn!(
"Skipping {} - (not `pub` but is `no_mangle`)",
loggable_item_name()
);
}
(false, None) => {}
}
}
}
/// Loads associated `const` declarations
///
/// Each constant is attached to the struct named by the impl's self type
/// when that struct was parsed; otherwise it is stored as a free-standing
/// constant (covers impls on enums and other non-struct items).
fn load_syn_assoc_consts<'a, I>(
    &mut self,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    impl_ty: &syn::Type,
    items: I,
) where
    I: IntoIterator<Item = &'a syn::ImplItemConst>,
{
    let ty = match Type::load(impl_ty) {
        Ok(ty) => ty,
        Err(e) => {
            warn!("Skipping associated constants for {:?}: {:?}", impl_ty, e);
            return;
        }
    };
    let ty = match ty {
        Some(ty) => ty,
        None => return,
    };
    let impl_path = match ty.get_root_path() {
        Some(p) => p,
        None => {
            warn!(
                "Couldn't find path for {:?}, skipping associated constants",
                ty
            );
            return;
        }
    };
    for item in items.into_iter() {
        if let syn::Visibility::Public(_) = item.vis {
        } else {
            warn!("Skip {}::{} - (not `pub`).", crate_name, &item.ident);
            // A private constant only skips itself; the remaining associated
            // constants must still be processed (a `return` here would
            // silently drop every constant after the first private one).
            continue;
        }
        let path = Path::new(item.ident.to_string());
        match Constant::load(
            path,
            mod_cfg,
            &item.ty,
            &item.expr,
            &item.attrs,
            Some(impl_path.clone()),
        ) {
            Ok(constant) => {
                info!("Take {}::{}::{}.", crate_name, impl_path, &item.ident);
                let mut any = false;
                self.structs.for_items_mut(&impl_path, |item| {
                    any = true;
                    item.add_associated_constant(constant.clone());
                });
                // Handle associated constants to other item types that are
                // not structs like enums or such as regular constants.
                if !any && !self.constants.try_insert(constant) {
                    error!(
                        "Conflicting name for constant {}::{}::{}.",
                        crate_name, impl_path, &item.ident,
                    );
                }
            }
            Err(msg) => {
                warn!("Skip {}::{} - ({})", crate_name, &item.ident, msg);
            }
        }
    }
}
/// Loads a `const` declaration
fn load_syn_const(
&mut self,
config: &Config,
binding_crate_name: &str,
crate_name: &str,
mod_cfg: Option<&Cfg>,
item: &syn::ItemConst,
) {
if !config
.parse
.should_generate_top_level_item(crate_name, binding_crate_name)
{
info!(
"Skip {}::{} - (const's outside of the binding crate are not used).",
crate_name, &item.ident
);
return;
}
// Only `pub` constants are exported.
if let syn::Visibility::Public(_) = item.vis {
} else {
warn!("Skip {}::{} - (not `pub`).", crate_name, &item.ident);
return;
}
let path = Path::new(item.ident.to_string());
match Constant::load(path, mod_cfg, &item.ty, &item.expr, &item.attrs, None) {
Ok(constant) => {
info!("Take {}::{}.", crate_name, &item.ident);
// Keep a copy of the path for the error message; `constant` is
// moved into the map by `try_insert`.
let full_name = constant.path.clone();
if !self.constants.try_insert(constant) {
error!("Conflicting name for constant {}", full_name);
}
}
Err(msg) => {
warn!("Skip {}::{} - ({})", crate_name, &item.ident, msg);
}
}
}
/// Loads a `static` declaration
///
/// Only statics that are both `pub` and `#[no_mangle]` are exported; each
/// unmet requirement produces its own warning so the user sees everything
/// that needs fixing.
fn load_syn_static(
    &mut self,
    config: &Config,
    binding_crate_name: &str,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemStatic,
) {
    if !config
        .parse
        .should_generate_top_level_item(crate_name, binding_crate_name)
    {
        info!(
            "Skip {}::{} - (static's outside of the binding crate are not used).",
            crate_name, &item.ident
        );
        return;
    }
    // Evaluate both requirements once instead of re-checking them below,
    // which previously duplicated the visibility check.
    let is_public = if let syn::Visibility::Public(_) = item.vis {
        true
    } else {
        false
    };
    let is_no_mangle = item.is_no_mangle();
    if is_public && is_no_mangle {
        match Static::load(item, mod_cfg) {
            Ok(constant) => {
                info!("Take {}::{}.", crate_name, &item.ident);
                self.globals.try_insert(constant);
            }
            Err(msg) => {
                warn!("Skip {}::{} - ({})", crate_name, &item.ident, msg);
            }
        }
    }
    if !is_public {
        warn!("Skip {}::{} - (not `pub`).", crate_name, &item.ident);
    }
    if !is_no_mangle {
        warn!("Skip {}::{} - (not `no_mangle`).", crate_name, &item.ident);
    }
}
/// Loads a `struct` declaration.
///
/// Structs that cannot be represented are still emitted, but as opaque
/// (forward-declared) types.
fn load_syn_struct(
    &mut self,
    config: &Config,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemStruct,
) {
    match Struct::load(&config.layout, item, mod_cfg) {
        Ok(st) => {
            info!("Take {}::{}.", crate_name, &item.ident);
            self.structs.try_insert(st);
        }
        Err(msg) => {
            info!("Take {}::{} - opaque ({}).", crate_name, &item.ident, msg);
            let path = Path::new(item.ident.to_string());
            self.opaque_items.try_insert(
                OpaqueItem::load(path, &item.generics, &item.attrs, mod_cfg).unwrap(),
            );
        }
    }
}
/// Loads a `union` declaration.
///
/// Unions that cannot be represented fall back to opaque types, mirroring
/// `load_syn_struct`.
fn load_syn_union(
    &mut self,
    config: &Config,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemUnion,
) {
    match Union::load(&config.layout, item, mod_cfg) {
        Ok(st) => {
            info!("Take {}::{}.", crate_name, &item.ident);
            self.unions.try_insert(st);
        }
        Err(msg) => {
            info!("Take {}::{} - opaque ({}).", crate_name, &item.ident, msg);
            let path = Path::new(item.ident.to_string());
            self.opaque_items.try_insert(
                OpaqueItem::load(path, &item.generics, &item.attrs, mod_cfg).unwrap(),
            );
        }
    }
}
/// Loads a `enum` declaration.
///
/// Enums with lifetimes are skipped; unrepresentable enums are emitted as
/// opaque types.
fn load_syn_enum(
    &mut self,
    config: &Config,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemEnum,
) {
    // NOTE(review): only lifetimes are checked here, although the message
    // also mentions generics and where bounds - confirm intended.
    if item.generics.lifetimes().count() > 0 {
        info!(
            "Skip {}::{} - (has generics or lifetimes or where bounds).",
            crate_name, &item.ident
        );
        return;
    }
    match Enum::load(item, mod_cfg, config) {
        Ok(en) => {
            info!("Take {}::{}.", crate_name, &item.ident);
            self.enums.try_insert(en);
        }
        Err(msg) => {
            info!("Take {}::{} - opaque ({}).", crate_name, &item.ident, msg);
            let path = Path::new(item.ident.to_string());
            self.opaque_items.try_insert(
                OpaqueItem::load(path, &item.generics, &item.attrs, mod_cfg).unwrap(),
            );
        }
    }
}
/// Loads a `type` (alias) declaration; falls back to an opaque type when
/// the alias target cannot be represented.
fn load_syn_ty(&mut self, crate_name: &str, mod_cfg: Option<&Cfg>, item: &syn::ItemType) {
    match Typedef::load(item, mod_cfg) {
        Ok(st) => {
            info!("Take {}::{}.", crate_name, &item.ident);
            self.typedefs.try_insert(st);
        }
        Err(msg) => {
            info!("Take {}::{} - opaque ({}).", crate_name, &item.ident, msg);
            let path = Path::new(item.ident.to_string());
            self.opaque_items.try_insert(
                OpaqueItem::load(path, &item.generics, &item.attrs, mod_cfg).unwrap(),
            );
        }
    }
}
/// Expands a `bitflags! { .. }` invocation into a struct plus its
/// associated constants, when bitflags macro expansion is enabled.
fn load_builtin_macro(
    &mut self,
    config: &Config,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemMacro,
) {
    // Identify the macro by its last path segment so both `bitflags!`
    // and `bitflags::bitflags!` match.
    let name = match item.mac.path.segments.last() {
        Some(ref n) => n.ident.to_string(),
        None => return,
    };
    if name != "bitflags" || !config.macro_expansion.bitflags {
        return;
    }
    let bitflags = match bitflags::parse(item.mac.tokens.clone()) {
        Ok(b) => b,
        Err(e) => {
            warn!("Failed to parse bitflags invocation: {:?}", e);
            return;
        }
    };
    let (struct_, impl_) = bitflags.expand();
    self.load_syn_struct(config, crate_name, mod_cfg, &struct_);
    // We know that the expansion will only reference `struct_`, so it's
    // fine to just do it here instead of deferring it like we do with the
    // other calls to this function.
    self.load_syn_assoc_consts_from_impl(crate_name, mod_cfg, &impl_);
}
}
Only keep the `parse_mod` function.
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io::Read;
use std::path::{Path as FilePath, PathBuf as FilePathBuf};
use syn;
use crate::bindgen::bitflags;
use crate::bindgen::cargo::{Cargo, PackageRef};
use crate::bindgen::config::{Config, ParseConfig};
use crate::bindgen::error::Error;
use crate::bindgen::ir::{
AnnotationSet, Cfg, Constant, Documentation, Enum, Function, GenericParams, ItemMap,
OpaqueItem, Path, Static, Struct, Type, Typedef, Union,
};
use crate::bindgen::utilities::{SynAbiHelpers, SynItemFnHelpers, SynItemHelpers};
// Crates shipped with the Rust toolchain; these are never parsed as
// dependencies (see `should_parse_dependency`).
const STD_CRATES: &[&str] = &[
    "std",
    "std_unicode",
    "alloc",
    "collections",
    "core",
    "proc_macro",
];

type ParseResult = Result<Parse, Error>;
/// Parses a single rust source file, not following `mod` or `extern crate`.
pub fn parse_src(src_file: &FilePath, config: &Config) -> ParseResult {
    let mod_name = src_file.file_stem().unwrap().to_str().unwrap();
    // Replace the parse section of the config with defaults; only
    // dependency following stays enabled.
    let mut config = config.clone();
    config.parse = ParseConfig {
        parse_deps: true,
        ..ParseConfig::default()
    };
    let mut context = Parser {
        binding_crate_name: mod_name.to_owned(),
        config: &config,
        // No `cargo metadata` available in single-file mode.
        lib: None,
        parsed_crates: HashSet::new(),
        cache_src: HashMap::new(),
        cache_expanded_crate: HashMap::new(),
        cfg_stack: Vec::new(),
        out: Parse::new(),
    };
    let pkg_ref = PackageRef {
        name: mod_name.to_owned(),
        version: None,
    };
    context.parse_mod(&pkg_ref, src_file, 0)?;
    Ok(context.out)
}
/// Recursively parses a rust library starting at the root crate's directory.
///
/// Inside a crate, `mod` and `extern crate` declarations are followed
/// and parsed. To find an external crate, the parser uses the `cargo metadata`
/// command to find the location of dependencies.
pub(crate) fn parse_lib(lib: Cargo, config: &Config) -> ParseResult {
    let mut context = Parser {
        binding_crate_name: lib.binding_crate_name().to_owned(),
        config,
        lib: Some(lib),
        parsed_crates: HashSet::new(),
        cache_src: HashMap::new(),
        cache_expanded_crate: HashMap::new(),
        cfg_stack: Vec::new(),
        out: Parse::new(),
    };
    // Resolved after construction since `lib` was moved into the parser.
    let binding_crate = context.lib.as_ref().unwrap().binding_crate_ref();
    context.parse_crate(&binding_crate)?;
    Ok(context.out)
}
/// Walks crates/modules and accumulates parsed items into `out`.
#[derive(Debug, Clone)]
struct Parser<'a> {
    /// Name of the crate bindings are generated for.
    binding_crate_name: String,
    /// `None` in single-file mode (`parse_src`); cargo metadata otherwise.
    lib: Option<Cargo>,
    config: &'a Config,
    /// Crates already visited - breaks dependency cycles and repeats.
    parsed_crates: HashSet<String>,
    /// Parsed items cached per source file.
    cache_src: HashMap<FilePathBuf, Vec<syn::Item>>,
    /// Parsed items of `cargo expand` output, cached per crate name.
    cache_expanded_crate: HashMap<String, Vec<syn::Item>>,
    /// Stack of `#[cfg]`s accumulated along the module/dependency chain.
    cfg_stack: Vec<Cfg>,
    out: Parse,
}
impl<'a> Parser<'a> {
/// Decides whether the dependency crate `pkg_name` should be parsed,
/// honoring the expand list, include whitelist and exclude blacklist.
fn should_parse_dependency(&self, pkg_name: &String) -> bool {
    if self.parsed_crates.contains(pkg_name) {
        return false;
    }
    if !self.config.parse.parse_deps {
        return false;
    }
    // Skip any whitelist or blacklist for expand
    if self.config.parse.expand.crates.contains(&pkg_name) {
        return true;
    }
    // If we have a whitelist, check it
    if let Some(ref include) = self.config.parse.include {
        if !include.contains(&pkg_name) {
            return false;
        }
    }
    // Check the blacklist
    !STD_CRATES.contains(&pkg_name.as_ref()) && !self.config.parse.exclude.contains(&pkg_name)
}
/// Parses one crate (expanded or from source), then recurses into its
/// dependencies, tracking any `#[cfg]` carried by the dependency edge.
fn parse_crate(&mut self, pkg: &PackageRef) -> Result<(), Error> {
    assert!(self.lib.is_some());
    self.parsed_crates.insert(pkg.name.clone());
    // Check if we should use cargo expand for this crate
    if self.config.parse.expand.crates.contains(&pkg.name) {
        self.parse_expand_crate(pkg)?;
    } else {
        // Parse the crate before the dependencies otherwise the same-named idents we
        // want to generate bindings for would be replaced by the ones provided
        // by the first dependency containing it.
        let crate_src = self.lib.as_ref().unwrap().find_crate_src(pkg);
        match crate_src {
            Some(crate_src) => self.parse_mod(pkg, crate_src.as_path(), 0)?,
            None => {
                // This should be an error, but is common enough to just elicit a warning
                warn!(
                    "Parsing crate `{}`: can't find lib.rs with `cargo metadata`.",
                    pkg.name
                );
            }
        }
    }
    for (dep_pkg, cfg) in self.lib.as_ref().unwrap().dependencies(&pkg) {
        if !self.should_parse_dependency(&dep_pkg.name) {
            continue;
        }
        // Items from this dependency only apply under its cfg.
        if let Some(ref cfg) = cfg {
            self.cfg_stack.push(cfg.clone());
        }
        self.parse_crate(&dep_pkg)?;
        if cfg.is_some() {
            self.cfg_stack.pop();
        }
    }
    Ok(())
}
/// Parses a crate from `cargo expand` output, caching the parsed items
/// per crate name.
fn parse_expand_crate(&mut self, pkg: &PackageRef) -> Result<(), Error> {
    assert!(self.lib.is_some());
    // If you want to expand the crate you run cbindgen on you might end up in an endless
    // recursion if the cbindgen generation is triggered from build.rs. Hence don't run the
    // expansion if the build was already triggered by cbindgen.
    if std::env::var("_CBINDGEN_IS_RUNNING").is_ok() {
        return Ok(());
    }
    let mod_parsed = {
        if !self.cache_expanded_crate.contains_key(&pkg.name) {
            let s = self
                .lib
                .as_ref()
                .unwrap()
                .expand_crate(
                    pkg,
                    self.config.parse.expand.all_features,
                    self.config.parse.expand.default_features,
                    &self.config.parse.expand.features,
                )
                .map_err(|x| Error::CargoExpand(pkg.name.clone(), x))?;
            let i = syn::parse_file(&s).map_err(|x| Error::ParseSyntaxError {
                crate_name: pkg.name.clone(),
                // Expanded source has no on-disk path.
                src_path: "".to_owned(),
                error: x,
            })?;
            self.cache_expanded_crate.insert(pkg.name.clone(), i.items);
        }
        self.cache_expanded_crate.get(&pkg.name).unwrap().clone()
    };
    self.process_expanded_mod(pkg, &mod_parsed)
}
/// Loads items from an expanded crate and recurses into nested modules.
/// Expanded output has every module inlined, so no files are read here.
fn process_expanded_mod(&mut self, pkg: &PackageRef, items: &[syn::Item]) -> Result<(), Error> {
    self.out.load_syn_crate_mod(
        &self.config,
        &self.binding_crate_name,
        &pkg.name,
        Cfg::join(&self.cfg_stack).as_ref(),
        items,
    );
    for item in items {
        if item.has_test_attr() {
            continue;
        }
        if let syn::Item::Mod(ref item) = *item {
            // The module's `#[cfg]` applies to everything inside it.
            let cfg = Cfg::load(&item.attrs);
            if let Some(ref cfg) = cfg {
                self.cfg_stack.push(cfg.clone());
            }
            if let Some((_, ref inline_items)) = item.content {
                self.process_expanded_mod(pkg, inline_items)?;
            } else {
                // `cargo expand` inlines all modules, so a bodyless
                // `mod foo;` cannot appear in its output.
                unreachable!();
            }
            if cfg.is_some() {
                self.cfg_stack.pop();
            }
        }
    }
    Ok(())
}
/// Reads, parses (with per-file caching) and processes one module file.
/// `depth` is 0 for the crate root and selects how the directory for
/// nested `mod` declarations is derived.
fn parse_mod(&mut self, pkg: &PackageRef, mod_path: &FilePath, depth: usize) -> Result<(), Error> {
    let mod_parsed = {
        let owned_mod_path = mod_path.to_path_buf();
        if !self.cache_src.contains_key(&owned_mod_path) {
            let mut s = String::new();
            let mut f = File::open(mod_path).map_err(|_| Error::ParseCannotOpenFile {
                crate_name: pkg.name.clone(),
                src_path: mod_path.to_str().unwrap().to_owned(),
            })?;
            f.read_to_string(&mut s)
                .map_err(|_| Error::ParseCannotOpenFile {
                    crate_name: pkg.name.clone(),
                    src_path: mod_path.to_str().unwrap().to_owned(),
                })?;
            let i = syn::parse_file(&s).map_err(|x| Error::ParseSyntaxError {
                crate_name: pkg.name.clone(),
                src_path: owned_mod_path.to_string_lossy().into(),
                error: x,
            })?;
            self.cache_src.insert(owned_mod_path.clone(), i.items);
        }
        self.cache_src.get(&owned_mod_path).unwrap().clone()
    };
    // Compute module directory according to Rust 2018 rules
    let mod_dir_2018;
    let mod_dir = if depth == 0 {
        // Crate root: submodules live next to lib.rs / main.rs.
        mod_path.parent().unwrap()
    } else {
        // Non-root `foo.rs`: submodules live in a sibling `foo/` directory.
        mod_dir_2018 = mod_path.parent().unwrap().join(mod_path.file_stem().unwrap());
        &mod_dir_2018
    };
    self.process_mod(pkg, &mod_dir, &mod_parsed, depth)
}
/// Walks the items of a parsed module: loads them into `self.out` and
/// recurses into nested `mod` declarations, whether inline or file-based
/// (`name.rs`, `name/mod.rs`, or an explicit `#[path = "..."]`).
fn process_mod(
    &mut self,
    pkg: &PackageRef,
    mod_dir: &FilePath,
    items: &[syn::Item],
    depth: usize,
) -> Result<(), Error> {
    self.out.load_syn_crate_mod(
        &self.config,
        &self.binding_crate_name,
        &pkg.name,
        Cfg::join(&self.cfg_stack).as_ref(),
        items,
    );
    for item in items {
        if item.has_test_attr() {
            continue;
        }
        if let syn::Item::Mod(ref item) = *item {
            let next_mod_name = item.ident.to_string();
            // The module's `#[cfg]` applies to everything inside it.
            let cfg = Cfg::load(&item.attrs);
            if let Some(ref cfg) = cfg {
                self.cfg_stack.push(cfg.clone());
            }
            if let Some((_, ref inline_items)) = item.content {
                // Inline `mod name { .. }`: same file, nested dir scope.
                self.process_mod(pkg, &mod_dir.join(&next_mod_name), inline_items, depth)?;
            } else {
                // Out-of-line `mod name;`: try `name.rs`, then
                // `name/mod.rs`, then a `#[path = "..."]` attribute.
                let next_mod_path1 = mod_dir.join(next_mod_name.clone() + ".rs");
                let next_mod_path2 = mod_dir.join(next_mod_name.clone()).join("mod.rs");
                if next_mod_path1.exists() {
                    self.parse_mod(pkg, next_mod_path1.as_path(), depth + 1)?;
                } else if next_mod_path2.exists() {
                    self.parse_mod(pkg, next_mod_path2.as_path(), depth + 1)?;
                } else {
                    // Last chance to find a module path
                    let mut path_attr_found = false;
                    for attr in &item.attrs {
                        match attr.parse_meta() {
                            Ok(syn::Meta::NameValue(syn::MetaNameValue {
                                path, lit, ..
                            })) => match lit {
                                syn::Lit::Str(ref path_lit) if path.is_ident("path") => {
                                    path_attr_found = true;
                                    self.parse_mod(pkg, &mod_dir.join(path_lit.value()), depth + 1)?;
                                    break;
                                }
                                _ => (),
                            },
                            _ => (),
                        }
                    }
                    // This should be an error, but it's common enough to
                    // just elicit a warning
                    if !path_attr_found {
                        // Fixed: the module name was missing its opening
                        // backtick in this message.
                        warn!(
                            "Parsing crate `{}`: can't find mod `{}`.",
                            pkg.name, next_mod_name
                        );
                    }
                }
            }
            if cfg.is_some() {
                self.cfg_stack.pop();
            }
        }
    }
    Ok(())
}
}
/// All items collected while parsing, grouped by item kind.
#[derive(Debug, Clone)]
pub struct Parse {
    pub constants: ItemMap<Constant>,
    pub globals: ItemMap<Static>,
    pub enums: ItemMap<Enum>,
    pub structs: ItemMap<Struct>,
    pub unions: ItemMap<Union>,
    pub opaque_items: ItemMap<OpaqueItem>,
    pub typedefs: ItemMap<Typedef>,
    pub functions: Vec<Function>,
}
impl Parse {
/// Creates an empty parse result containing no items of any kind.
pub fn new() -> Parse {
    Parse {
        functions: Vec::new(),
        typedefs: ItemMap::new(),
        opaque_items: ItemMap::new(),
        unions: ItemMap::new(),
        structs: ItemMap::new(),
        enums: ItemMap::new(),
        globals: ItemMap::new(),
        constants: ItemMap::new(),
    }
}
/// Registers opaque stand-ins for common standard-library types so that
/// references to them can be emitted without parsing `std` itself.
pub fn add_std_types(&mut self) {
    // (type name, generic parameter names)
    let std_types: &[(&str, &[&str])] = &[
        ("String", &[]),
        ("Box", &["T"]),
        ("RefCell", &["T"]),
        ("Rc", &["T"]),
        ("Arc", &["T"]),
        ("Result", &["T", "E"]),
        ("Option", &["T"]),
        ("NonNull", &["T"]),
        ("Vec", &["T"]),
        ("HashMap", &["K", "V"]),
        ("BTreeMap", &["K", "V"]),
        ("HashSet", &["T"]),
        ("BTreeSet", &["T"]),
        ("LinkedList", &["T"]),
        ("VecDeque", &["T"]),
    ];
    for &(name, params) in std_types {
        let generic_params = GenericParams(params.iter().map(|p| Path::new(*p)).collect());
        self.opaque_items.try_insert(OpaqueItem::new(
            Path::new(name),
            generic_params,
            None,
            AnnotationSet::new(),
            Documentation::none(),
        ));
    }
}
/// Merges every item collection from `other` into `self`.
pub fn extend_with(&mut self, other: &Parse) {
    self.constants.extend_with(&other.constants);
    self.globals.extend_with(&other.globals);
    self.enums.extend_with(&other.enums);
    self.structs.extend_with(&other.structs);
    self.unions.extend_with(&other.unions);
    self.opaque_items.extend_with(&other.opaque_items);
    self.typedefs.extend_with(&other.typedefs);
    self.functions.extend_from_slice(&other.functions);
}
/// Dispatches every item of a module to the matching loader.
///
/// Associated constants found in `impl` blocks are collected first and
/// loaded after all other items, so the structs they attach to are
/// already registered.
pub fn load_syn_crate_mod(
    &mut self,
    config: &Config,
    binding_crate_name: &str,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    items: &[syn::Item],
) {
    let mut impls_with_assoc_consts = Vec::new();
    for item in items {
        if item.has_test_attr() {
            continue;
        }
        match item {
            syn::Item::ForeignMod(ref item) => {
                self.load_syn_foreign_mod(
                    config,
                    binding_crate_name,
                    crate_name,
                    mod_cfg,
                    item,
                );
            }
            syn::Item::Fn(ref item) => {
                self.load_syn_fn(config, binding_crate_name, crate_name, mod_cfg, item);
            }
            syn::Item::Const(ref item) => {
                self.load_syn_const(config, binding_crate_name, crate_name, mod_cfg, item);
            }
            syn::Item::Static(ref item) => {
                self.load_syn_static(config, binding_crate_name, crate_name, mod_cfg, item);
            }
            syn::Item::Struct(ref item) => {
                self.load_syn_struct(config, crate_name, mod_cfg, item);
            }
            syn::Item::Union(ref item) => {
                self.load_syn_union(config, crate_name, mod_cfg, item);
            }
            syn::Item::Enum(ref item) => {
                self.load_syn_enum(config, crate_name, mod_cfg, item);
            }
            syn::Item::Type(ref item) => {
                self.load_syn_ty(crate_name, mod_cfg, item);
            }
            syn::Item::Impl(ref item_impl) => {
                // Defer associated constants until all items were seen.
                let has_assoc_const = item_impl.items.iter().any(|item| match item {
                    syn::ImplItem::Const(_) => true,
                    _ => false,
                });
                if has_assoc_const {
                    impls_with_assoc_consts.push(item_impl);
                }
                // Methods are loaded only for `impl SimpleIdent { .. }`.
                if let syn::Type::Path(ref path) = *item_impl.self_ty {
                    if let Some(type_name) = path.path.get_ident() {
                        for method in item_impl.items.iter().filter_map(|item| match item {
                            syn::ImplItem::Method(method) => Some(method),
                            _ => None,
                        }) {
                            self.load_syn_method(
                                config,
                                binding_crate_name,
                                crate_name,
                                mod_cfg,
                                &Path::new(type_name.to_string()),
                                method,
                            )
                        }
                    }
                }
            }
            syn::Item::Macro(ref item) => {
                self.load_builtin_macro(config, crate_name, mod_cfg, item)
            }
            _ => {}
        }
    }
    for item_impl in impls_with_assoc_consts {
        self.load_syn_assoc_consts_from_impl(crate_name, mod_cfg, item_impl)
    }
}
/// Collects the associated constants of an `impl` block and forwards them
/// to `load_syn_assoc_consts`.
fn load_syn_assoc_consts_from_impl(
    &mut self,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item_impl: &syn::ItemImpl,
) {
    let mut associated_constants = Vec::new();
    for impl_item in &item_impl.items {
        if let syn::ImplItem::Const(ref associated_constant) = *impl_item {
            associated_constants.push(associated_constant);
        }
    }
    self.load_syn_assoc_consts(
        crate_name,
        mod_cfg,
        &item_impl.self_ty,
        associated_constants,
    );
}
/// Enters a `extern "C" { }` declaration and loads function declarations.
fn load_syn_foreign_mod(
    &mut self,
    config: &Config,
    binding_crate_name: &str,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemForeignMod,
) {
    if !item.abi.is_c() {
        info!("Skip {} - (extern block must be extern C).", crate_name);
        return;
    }
    for foreign_item in &item.items {
        if let syn::ForeignItem::Fn(ref function) = *foreign_item {
            // This check does not depend on the item, so returning (rather
            // than continuing) skips the whole block consistently.
            if !config
                .parse
                .should_generate_top_level_item(crate_name, binding_crate_name)
            {
                info!(
                    "Skip {}::{} - (fn's outside of the binding crate are not used).",
                    crate_name, &function.sig.ident
                );
                return;
            }
            let path = Path::new(function.sig.ident.to_string());
            match Function::load(path, None, &function.sig, true, &function.attrs, mod_cfg) {
                Ok(func) => {
                    info!("Take {}::{}.", crate_name, &function.sig.ident);
                    self.functions.push(func);
                }
                Err(msg) => {
                    error!(
                        "Cannot use fn {}::{} ({}).",
                        crate_name, &function.sig.ident, msg
                    );
                }
            }
        }
    }
}
/// Loads a `fn` declaration inside an `impl` block, if the type is a simple identifier
fn load_syn_method(
    &mut self,
    config: &Config,
    binding_crate_name: &str,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    self_type: &Path,
    item: &syn::ImplItemMethod,
) {
    // Forward to the shared loader with the impl's type attached.
    self.load_fn_declaration(
        config,
        binding_crate_name,
        crate_name,
        mod_cfg,
        item,
        Some(self_type),
        &item.sig,
        &item.vis,
        &item.attrs,
    )
}
/// Loads a free-standing `fn` declaration.
fn load_syn_fn(
    &mut self,
    config: &Config,
    binding_crate_name: &str,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemFn,
) {
    // Forward to the shared loader with no self type.
    self.load_fn_declaration(
        config,
        binding_crate_name,
        crate_name,
        mod_cfg,
        item,
        None,
        &item.sig,
        &item.vis,
        &item.attrs,
    );
}
/// Shared loader for free functions and inherent methods.
///
/// Only `pub` functions with a C-compatible ABI and an exported symbol
/// name are taken; every other combination is reported with a warning
/// explaining which requirement failed.
fn load_fn_declaration(
    &mut self,
    config: &Config,
    binding_crate_name: &str,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    named_symbol: &dyn SynItemFnHelpers,
    self_type: Option<&Path>,
    sig: &syn::Signature,
    vis: &syn::Visibility,
    attrs: &[syn::Attribute],
) {
    if !config
        .parse
        .should_generate_top_level_item(crate_name, binding_crate_name)
    {
        info!(
            "Skip {}::{} - (fn's outside of the binding crate are not used).",
            crate_name, &sig.ident
        );
        return;
    }
    // Builds "crate::Type::fn" (or "crate::fn") lazily for log messages.
    let loggable_item_name = || {
        let mut items = vec![];
        items.push(crate_name.to_owned());
        if let Some(ref self_type) = self_type {
            items.push(self_type.to_string());
        }
        items.push(sig.ident.to_string());
        items.join("::")
    };
    // NOTE(review): `is_omitted` appears to cover an `extern` keyword with
    // no explicit ABI string - confirm against SynAbiHelpers.
    let is_extern_c = sig.abi.is_omitted() || sig.abi.is_c();
    // Symbol name from `#[no_mangle]` / `#[export_name]`, if any.
    let exported_name = named_symbol.exported_name();
    if let syn::Visibility::Public(_) = vis {
        match (is_extern_c, exported_name) {
            (true, Some(exported_name)) => {
                let path = Path::new(exported_name);
                match Function::load(path, self_type, &sig, false, &attrs, mod_cfg) {
                    Ok(func) => {
                        info!("Take {}.", loggable_item_name());
                        self.functions.push(func);
                    }
                    Err(msg) => {
                        error!("Cannot use fn {} ({}).", loggable_item_name(), msg);
                    }
                }
            }
            (true, None) => {
                warn!(
                    "Skipping {} - (not `no_mangle`, and has no `export_name` attribute)",
                    loggable_item_name()
                );
            }
            (false, Some(_exported_name)) => {
                warn!("Skipping {} - (not `extern \"C\"`", loggable_item_name());
            }
            (false, None) => {}
        }
    } else {
        // Not `pub`: explain which other requirements it *did* meet.
        match (is_extern_c, exported_name) {
            (true, Some(..)) => {
                warn!(
                    "Skipping {} - (not `pub` but is `extern \"C\"` and `no_mangle`)",
                    loggable_item_name()
                );
            }
            (true, None) => {
                warn!(
                    "Skipping {} - (not `pub` but is `extern \"C\"`)",
                    loggable_item_name()
                );
            }
            (false, Some(..)) => {
                warn!(
                    "Skipping {} - (not `pub` but is `no_mangle`)",
                    loggable_item_name()
                );
            }
            (false, None) => {}
        }
    }
}
/// Loads associated `const` declarations.
///
/// Public associated constants are attached to the struct they are
/// implemented on; when no matching struct exists they are kept as
/// regular constants.
fn load_syn_assoc_consts<'a, I>(
    &mut self,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    impl_ty: &syn::Type,
    items: I,
) where
    I: IntoIterator<Item = &'a syn::ImplItemConst>,
{
    let ty = match Type::load(impl_ty) {
        Ok(ty) => ty,
        Err(e) => {
            warn!("Skipping associated constants for {:?}: {:?}", impl_ty, e);
            return;
        }
    };
    let ty = match ty {
        Some(ty) => ty,
        None => return,
    };
    let impl_path = match ty.get_root_path() {
        Some(p) => p,
        None => {
            warn!(
                "Couldn't find path for {:?}, skipping associated constants",
                ty
            );
            return;
        }
    };
    for item in items.into_iter() {
        if let syn::Visibility::Public(_) = item.vis {
        } else {
            warn!("Skip {}::{} - (not `pub`).", crate_name, &item.ident);
            // Fixed: this previously `return`ed, silently dropping all
            // remaining constants of the impl block; the per-item "Skip"
            // message indicates only this item should be skipped.
            continue;
        }
        let path = Path::new(item.ident.to_string());
        match Constant::load(
            path,
            mod_cfg,
            &item.ty,
            &item.expr,
            &item.attrs,
            Some(impl_path.clone()),
        ) {
            Ok(constant) => {
                info!("Take {}::{}::{}.", crate_name, impl_path, &item.ident);
                let mut any = false;
                self.structs.for_items_mut(&impl_path, |item| {
                    any = true;
                    item.add_associated_constant(constant.clone());
                });
                // Handle associated constants to other item types that are
                // not structs like enums or such as regular constants.
                if !any && !self.constants.try_insert(constant) {
                    error!(
                        "Conflicting name for constant {}::{}::{}.",
                        crate_name, impl_path, &item.ident,
                    );
                }
            }
            Err(msg) => {
                warn!("Skip {}::{} - ({})", crate_name, &item.ident, msg);
            }
        }
    }
}
/// Loads a free-standing `const` declaration into `self.constants`.
///
/// Only `pub` constants belonging to the binding crate (per the parse
/// config) are taken; everything else is logged and skipped.
fn load_syn_const(
    &mut self,
    config: &Config,
    binding_crate_name: &str,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemConst,
) {
    if !config
        .parse
        .should_generate_top_level_item(crate_name, binding_crate_name)
    {
        info!(
            "Skip {}::{} - (const's outside of the binding crate are not used).",
            crate_name, &item.ident
        );
        return;
    }
    // Empty-then-else pattern: fall through for `pub`, bail otherwise.
    if let syn::Visibility::Public(_) = item.vis {
    } else {
        warn!("Skip {}::{} - (not `pub`).", crate_name, &item.ident);
        return;
    }
    let path = Path::new(item.ident.to_string());
    match Constant::load(path, mod_cfg, &item.ty, &item.expr, &item.attrs, None) {
        Ok(constant) => {
            info!("Take {}::{}.", crate_name, &item.ident);
            // Keep the name around for the error message; `try_insert`
            // consumes the constant.
            let full_name = constant.path.clone();
            if !self.constants.try_insert(constant) {
                error!("Conflicting name for constant {}", full_name);
            }
        }
        Err(msg) => {
            warn!("Skip {}::{} - ({})", crate_name, &item.ident, msg);
        }
    }
}
/// Loads a `static` declaration into `self.globals`.
///
/// Only `pub` + `#[no_mangle]` statics are taken (those have a predictable
/// linker symbol to bind to).
fn load_syn_static(
    &mut self,
    config: &Config,
    binding_crate_name: &str,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemStatic,
) {
    if !config
        .parse
        .should_generate_top_level_item(crate_name, binding_crate_name)
    {
        info!(
            "Skip {}::{} - (static's outside of the binding crate are not used).",
            crate_name, &item.ident
        );
        return;
    }
    if let syn::Visibility::Public(_) = item.vis {
        if item.is_no_mangle() {
            match Static::load(item, mod_cfg) {
                Ok(constant) => {
                    info!("Take {}::{}.", crate_name, &item.ident);
                    self.globals.try_insert(constant);
                }
                Err(msg) => {
                    warn!("Skip {}::{} - ({})", crate_name, &item.ident, msg);
                }
            }
        }
    }
    // TODO
    // The two warnings below only fire for statics that were NOT taken
    // above: a taken static is both `pub` and `no_mangle`, so neither
    // condition matches.
    if let syn::Visibility::Public(_) = item.vis {
    } else {
        warn!("Skip {}::{} - (not `pub`).", crate_name, &item.ident);
    }
    if !item.is_no_mangle() {
        warn!("Skip {}::{} - (not `no_mangle`).", crate_name, &item.ident);
    }
}
/// Loads a `struct` declaration.
///
/// Structs that cannot be represented are still emitted, but as opaque
/// (forward-declared) types.
fn load_syn_struct(
    &mut self,
    config: &Config,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemStruct,
) {
    match Struct::load(&config.layout, item, mod_cfg) {
        Ok(st) => {
            info!("Take {}::{}.", crate_name, &item.ident);
            self.structs.try_insert(st);
        }
        Err(msg) => {
            info!("Take {}::{} - opaque ({}).", crate_name, &item.ident, msg);
            let path = Path::new(item.ident.to_string());
            self.opaque_items.try_insert(
                OpaqueItem::load(path, &item.generics, &item.attrs, mod_cfg).unwrap(),
            );
        }
    }
}
/// Loads a `union` declaration.
///
/// Unions that cannot be represented fall back to opaque types, mirroring
/// `load_syn_struct`.
fn load_syn_union(
    &mut self,
    config: &Config,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemUnion,
) {
    match Union::load(&config.layout, item, mod_cfg) {
        Ok(st) => {
            info!("Take {}::{}.", crate_name, &item.ident);
            self.unions.try_insert(st);
        }
        Err(msg) => {
            info!("Take {}::{} - opaque ({}).", crate_name, &item.ident, msg);
            let path = Path::new(item.ident.to_string());
            self.opaque_items.try_insert(
                OpaqueItem::load(path, &item.generics, &item.attrs, mod_cfg).unwrap(),
            );
        }
    }
}
/// Loads a `enum` declaration.
///
/// Enums with lifetimes are skipped; unrepresentable enums are emitted as
/// opaque types.
fn load_syn_enum(
    &mut self,
    config: &Config,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemEnum,
) {
    // NOTE(review): only lifetimes are checked here, although the message
    // also mentions generics and where bounds - confirm intended.
    if item.generics.lifetimes().count() > 0 {
        info!(
            "Skip {}::{} - (has generics or lifetimes or where bounds).",
            crate_name, &item.ident
        );
        return;
    }
    match Enum::load(item, mod_cfg, config) {
        Ok(en) => {
            info!("Take {}::{}.", crate_name, &item.ident);
            self.enums.try_insert(en);
        }
        Err(msg) => {
            info!("Take {}::{} - opaque ({}).", crate_name, &item.ident, msg);
            let path = Path::new(item.ident.to_string());
            self.opaque_items.try_insert(
                OpaqueItem::load(path, &item.generics, &item.attrs, mod_cfg).unwrap(),
            );
        }
    }
}
/// Loads a `type` (alias) declaration; falls back to an opaque type when
/// the alias target cannot be represented.
fn load_syn_ty(&mut self, crate_name: &str, mod_cfg: Option<&Cfg>, item: &syn::ItemType) {
    match Typedef::load(item, mod_cfg) {
        Ok(st) => {
            info!("Take {}::{}.", crate_name, &item.ident);
            self.typedefs.try_insert(st);
        }
        Err(msg) => {
            info!("Take {}::{} - opaque ({}).", crate_name, &item.ident, msg);
            let path = Path::new(item.ident.to_string());
            self.opaque_items.try_insert(
                OpaqueItem::load(path, &item.generics, &item.attrs, mod_cfg).unwrap(),
            );
        }
    }
}
/// Expands a `bitflags! { .. }` invocation into a struct plus its
/// associated constants, when bitflags macro expansion is enabled.
fn load_builtin_macro(
    &mut self,
    config: &Config,
    crate_name: &str,
    mod_cfg: Option<&Cfg>,
    item: &syn::ItemMacro,
) {
    // Identify the macro by its last path segment so both `bitflags!`
    // and `bitflags::bitflags!` match.
    let name = match item.mac.path.segments.last() {
        Some(ref n) => n.ident.to_string(),
        None => return,
    };
    if name != "bitflags" || !config.macro_expansion.bitflags {
        return;
    }
    let bitflags = match bitflags::parse(item.mac.tokens.clone()) {
        Ok(b) => b,
        Err(e) => {
            warn!("Failed to parse bitflags invocation: {:?}", e);
            return;
        }
    };
    let (struct_, impl_) = bitflags.expand();
    self.load_syn_struct(config, crate_name, mod_cfg, &struct_);
    // We know that the expansion will only reference `struct_`, so it's
    // fine to just do it here instead of deferring it like we do with the
    // other calls to this function.
    self.load_syn_assoc_consts_from_impl(crate_name, mod_cfg, &impl_);
}
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::str::FromStr;
use bindgen::ir::{Enum, Item};
/// The type of identifier to be renamed.
#[derive(Debug, Clone, Copy)]
pub enum IdentifierType<'a> {
    StructMember,
    /// An enum variant, carrying the enum it belongs to so qualified
    /// rename rules can prefix the enum's name.
    EnumVariant(&'a Enum),
    FunctionArg,
    Enum,
}
impl<'a> IdentifierType<'a> {
    /// Returns the Gecko-style prefix for this identifier type:
    /// `"m"` for struct members, `"a"` for function arguments, and the
    /// empty string otherwise.
    fn to_str(&'a self) -> &'static str {
        match *self {
            IdentifierType::StructMember => "m",
            IdentifierType::EnumVariant(..) => "",
            IdentifierType::FunctionArg => "a",
            IdentifierType::Enum => "",
        }
    }
}
/// A rule to apply to an identifier when generating bindings.
/// Parsed from its configuration spelling via the `FromStr` impl below.
#[derive(Debug, Clone, Copy)]
pub enum RenameRule {
    /// Do not apply any renaming. The default.
    None,
    /// Converts the identifier to PascalCase and adds a context dependent prefix
    /// (see `IdentifierType::to_str`).
    GeckoCase,
    /// Converts the identifier to lower case.
    LowerCase,
    /// Converts the identifier to upper case.
    UpperCase,
    /// Converts the identifier to PascalCase.
    PascalCase,
    /// Converts the identifier to camelCase.
    CamelCase,
    /// Converts the identifier to snake_case.
    SnakeCase,
    /// Converts the identifier to SCREAMING_SNAKE_CASE.
    ScreamingSnakeCase,
    /// Converts the identifier to SCREAMING_SNAKE_CASE and prefixes enum variants
    /// with the enum name.
    QualifiedScreamingSnakeCase,
}
impl RenameRule {
/// Applies the rename rule to a string that is formatted in PascalCase.
pub fn apply_to_pascal_case(&self, text: &str, context: IdentifierType) -> String {
    if text.is_empty() {
        return String::new();
    }
    match *self {
        RenameRule::None => String::from(text),
        RenameRule::GeckoCase => context.to_str().to_owned() + text,
        RenameRule::LowerCase => text.to_lowercase(),
        RenameRule::UpperCase => text.to_uppercase(),
        RenameRule::PascalCase => text.to_owned(),
        RenameRule::CamelCase => text[..1].to_lowercase() + &text[1..],
        RenameRule::SnakeCase => {
            // Do not add additional `_` if the string already contains `_` e.g. `__Field`
            // Do not split consecutive capital letters
            let mut result = String::new();
            let mut add_separator = true;
            let mut prev_uppercase = false;
            for (i, c) in text.char_indices() {
                if c == '_' {
                    add_separator = false;
                    prev_uppercase = false;
                }
                if c.is_uppercase() {
                    if i != 0 && add_separator && !prev_uppercase {
                        result.push('_');
                    } else {
                        add_separator = true;
                    }
                    prev_uppercase = true;
                } else {
                    prev_uppercase = false;
                }
                for x in c.to_lowercase() {
                    result.push(x);
                }
            }
            result
        }
        RenameRule::ScreamingSnakeCase => {
            // Same as the SnakeCase arm above, but upper-cases the output.
            // Fixed: the previous version inserted `_` before *every*
            // capital, which mangled inputs with existing underscores
            // (`__Field` -> `___FIELD`) and split consecutive capitals.
            let mut result = String::new();
            let mut add_separator = true;
            let mut prev_uppercase = false;
            for (i, c) in text.char_indices() {
                if c == '_' {
                    add_separator = false;
                    prev_uppercase = false;
                }
                if c.is_uppercase() {
                    if i != 0 && add_separator && !prev_uppercase {
                        result.push('_');
                    } else {
                        add_separator = true;
                    }
                    prev_uppercase = true;
                } else {
                    prev_uppercase = false;
                }
                for x in c.to_uppercase() {
                    result.push(x);
                }
            }
            result
        }
        RenameRule::QualifiedScreamingSnakeCase => {
            let mut result = String::new();
            // Prefix enum variants with the enum's screaming-snake name.
            // (Removed the always-true inner check on `self`: this arm has
            // already matched `QualifiedScreamingSnakeCase`.)
            if let IdentifierType::EnumVariant(e) = context {
                result.push_str(
                    &RenameRule::ScreamingSnakeCase
                        .apply_to_pascal_case(e.path().name(), IdentifierType::Enum),
                );
                result.push('_');
            }
            result.push_str(&RenameRule::ScreamingSnakeCase.apply_to_pascal_case(text, context));
            result
        }
    }
}
/// Applies the rename rule to a string that is formatted in snake_case.
pub fn apply_to_snake_case(&self, mut text: &str, context: IdentifierType) -> String {
    if text.is_empty() {
        return String::new();
    }
    match *self {
        RenameRule::None => String::from(text),
        RenameRule::GeckoCase => {
            // Drop a single leading underscore before prefixing.
            // (`starts_with` avoids the byte-slice `&text[..1]`, which
            // panics if the first character is multi-byte.)
            if text.starts_with('_') {
                text = &text[1..];
            }
            context.to_str().to_owned()
                + &RenameRule::PascalCase.apply_to_snake_case(text, context)
        }
        RenameRule::LowerCase => text.to_lowercase(),
        RenameRule::UpperCase => text.to_uppercase(),
        RenameRule::PascalCase => {
            // Upper-case the first letter and each letter following a `_`,
            // dropping the underscores themselves.
            let mut result = String::new();
            let mut is_uppercase = true;
            for c in text.chars() {
                if c == '_' {
                    is_uppercase = true;
                    continue;
                }
                if is_uppercase {
                    for x in c.to_uppercase() {
                        result.push(x);
                    }
                    is_uppercase = false;
                } else {
                    result.push(c);
                }
            }
            result
        }
        RenameRule::CamelCase => {
            // Same as PascalCase code above, but is_uppercase = false to start
            let mut result = String::new();
            let mut is_uppercase = false;
            for c in text.chars() {
                if c == '_' {
                    is_uppercase = true;
                    continue;
                }
                if is_uppercase {
                    for x in c.to_uppercase() {
                        result.push(x);
                    }
                    is_uppercase = false;
                } else {
                    result.push(c);
                }
            }
            result
        }
        RenameRule::SnakeCase => text.to_owned(),
        // Fixed: dropped the intermediate `to_owned()` allocation.
        RenameRule::ScreamingSnakeCase => text.to_uppercase(),
        RenameRule::QualifiedScreamingSnakeCase => {
            let mut result = String::new();
            // Prefix enum variants with the enum's screaming-snake name.
            // (Removed the always-true inner check on `self`: this arm has
            // already matched `QualifiedScreamingSnakeCase`.)
            if let IdentifierType::EnumVariant(e) = context {
                result.push_str(
                    &RenameRule::ScreamingSnakeCase
                        .apply_to_snake_case(e.path().name(), IdentifierType::Enum),
                );
                result.push('_');
            }
            result.push_str(&RenameRule::ScreamingSnakeCase.apply_to_snake_case(text, context));
            result
        }
    }
}
}
impl Default for RenameRule {
    /// `None`: identifiers are left untouched unless configured otherwise.
    fn default() -> RenameRule {
        RenameRule::None
    }
}
impl FromStr for RenameRule {
    type Err = String;

    /// Parses a rename rule from its configuration spelling. Every rule
    /// accepts several aliases (its own casing plus PascalCase and
    /// snake_case spellings).
    fn from_str(s: &str) -> Result<RenameRule, Self::Err> {
        match s {
            "none" | "None" => Ok(RenameRule::None),
            "mGeckoCase" | "GeckoCase" | "gecko_case" => Ok(RenameRule::GeckoCase),
            "lowercase" | "LowerCase" | "lower_case" => Ok(RenameRule::LowerCase),
            "UPPERCASE" | "UpperCase" | "upper_case" => Ok(RenameRule::UpperCase),
            "PascalCase" | "pascal_case" => Ok(RenameRule::PascalCase),
            "camelCase" | "CamelCase" | "camel_case" => Ok(RenameRule::CamelCase),
            "snake_case" | "SnakeCase" => Ok(RenameRule::SnakeCase),
            "SCREAMING_SNAKE_CASE" | "ScreamingSnakeCase" | "screaming_snake_case" => {
                Ok(RenameRule::ScreamingSnakeCase)
            }
            "QUALIFIED_SCREAMING_SNAKE_CASE"
            | "QualifiedScreamingSnakeCase"
            | "qualified_screaming_snake_case" => Ok(RenameRule::QualifiedScreamingSnakeCase),
            _ => Err(format!("Unrecognized RenameRule: '{}'.", s)),
        }
    }
}
// Presumably generates a serde `Deserialize` impl that delegates to the
// `FromStr` impl above - confirm against the macro's definition.
deserialize_enum_str!(RenameRule);
Make the `ScreamingSnakeCase` formatter behave like the `SnakeCase` one (respect existing underscores and consecutive capitals).
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::str::FromStr;
use bindgen::ir::{Enum, Item};
/// The type of identifier to be renamed.
#[derive(Debug, Clone, Copy)]
pub enum IdentifierType<'a> {
    /// A field of a struct.
    StructMember,
    /// A variant of the carried enum; the enum is kept so qualified rules can
    /// prefix the variant with the enum's own name.
    EnumVariant(&'a Enum),
    /// A function argument.
    FunctionArg,
    /// An enum type name itself.
    Enum,
}
impl<'a> IdentifierType<'a> {
    /// Returns the Gecko-style prefix for this identifier kind:
    /// `"m"` for struct members, `"a"` for function arguments, empty otherwise.
    fn to_str(&'a self) -> &'static str {
        match *self {
            IdentifierType::StructMember => "m",
            IdentifierType::FunctionArg => "a",
            IdentifierType::EnumVariant(..) | IdentifierType::Enum => "",
        }
    }
}
/// A rule to apply to an identifier when generating bindings.
///
/// Rules can be parsed from several textual spellings via `FromStr`
/// (e.g. `"snake_case"` or `"SnakeCase"`).
#[derive(Debug, Clone, Copy)]
pub enum RenameRule {
    /// Do not apply any renaming. The default.
    None,
    /// Converts the identifier to PascalCase and adds a context dependent prefix
    GeckoCase,
    /// Converts the identifier to lower case.
    LowerCase,
    /// Converts the identifier to upper case.
    UpperCase,
    /// Converts the identifier to PascalCase.
    PascalCase,
    /// Converts the identifier to camelCase.
    CamelCase,
    /// Converts the identifier to snake_case.
    SnakeCase,
    /// Converts the identifier to SCREAMING_SNAKE_CASE.
    ScreamingSnakeCase,
    /// Converts the identifier to SCREAMING_SNAKE_CASE and prefixes enum variants
    /// with the enum name.
    QualifiedScreamingSnakeCase,
}
impl RenameRule {
    /// Applies the rename rule to a string that is formatted in PascalCase.
    ///
    /// Returns an empty string for empty input.
    pub fn apply_to_pascal_case(&self, text: &str, context: IdentifierType) -> String {
        if text.is_empty() {
            return String::new();
        }
        match *self {
            RenameRule::None => String::from(text),
            RenameRule::GeckoCase => context.to_str().to_owned() + text,
            RenameRule::LowerCase => text.to_lowercase(),
            RenameRule::UpperCase => text.to_uppercase(),
            RenameRule::PascalCase => text.to_owned(),
            // NOTE: byte-based slicing assumes the first character is ASCII
            // (matches the original behavior).
            RenameRule::CamelCase => text[..1].to_lowercase() + &text[1..],
            RenameRule::SnakeCase => pascal_to_separated(text, false),
            RenameRule::ScreamingSnakeCase => pascal_to_separated(text, true),
            RenameRule::QualifiedScreamingSnakeCase => {
                let mut result = String::new();
                // Enum variants are prefixed with the SCREAMING_SNAKE_CASE
                // rendering of the enum's own name.
                // (The previous inner `if let &RenameRule::QualifiedScreamingSnakeCase
                // = self` check was always true inside this match arm.)
                if let IdentifierType::EnumVariant(e) = context {
                    result.push_str(
                        &RenameRule::ScreamingSnakeCase
                            .apply_to_pascal_case(e.path().name(), IdentifierType::Enum),
                    );
                    result.push_str("_");
                }
                result
                    .push_str(&RenameRule::ScreamingSnakeCase.apply_to_pascal_case(text, context));
                result
            }
        }
    }

    /// Applies the rename rule to a string that is formatted in snake_case.
    ///
    /// Returns an empty string for empty input.
    pub fn apply_to_snake_case(&self, mut text: &str, context: IdentifierType) -> String {
        if text.is_empty() {
            return String::new();
        }
        match *self {
            RenameRule::None => String::from(text),
            RenameRule::GeckoCase => {
                // Drop a single leading underscore before prefixing so that
                // e.g. `_field` becomes `mField` rather than `m_Field`.
                if &text[..1] == "_" {
                    text = &text[1..];
                }
                context.to_str().to_owned()
                    + &RenameRule::PascalCase.apply_to_snake_case(text, context)
            }
            RenameRule::LowerCase => text.to_lowercase(),
            RenameRule::UpperCase => text.to_uppercase(),
            RenameRule::PascalCase => snake_to_camel(text, true),
            RenameRule::CamelCase => snake_to_camel(text, false),
            RenameRule::SnakeCase => text.to_owned(),
            // No intermediate `.to_owned()` needed; `to_uppercase` allocates.
            RenameRule::ScreamingSnakeCase => text.to_uppercase(),
            RenameRule::QualifiedScreamingSnakeCase => {
                let mut result = String::new();
                if let IdentifierType::EnumVariant(e) = context {
                    result.push_str(
                        &RenameRule::ScreamingSnakeCase
                            .apply_to_snake_case(e.path().name(), IdentifierType::Enum),
                    );
                    result.push_str("_");
                }
                result
                    .push_str(&RenameRule::ScreamingSnakeCase.apply_to_snake_case(text, context));
                result
            }
        }
    }
}

/// Converts PascalCase `text` into `_`-separated form, uppercasing each
/// character when `uppercase` is true and lowercasing otherwise.
///
/// No separator is added where the input already contains a `_` (e.g.
/// `__Field`), and consecutive capital letters are not split.
fn pascal_to_separated(text: &str, uppercase: bool) -> String {
    let mut result = String::new();
    let mut add_separator = true;
    let mut prev_uppercase = false;
    for (i, c) in text.char_indices() {
        if c == '_' {
            add_separator = false;
            prev_uppercase = false;
        }
        if c.is_uppercase() {
            if i != 0 && add_separator && !prev_uppercase {
                result.push_str("_");
            } else {
                add_separator = true;
            }
            prev_uppercase = true;
        } else {
            prev_uppercase = false;
        }
        if uppercase {
            for x in c.to_uppercase() {
                result.push(x);
            }
        } else {
            for x in c.to_lowercase() {
                result.push(x);
            }
        }
    }
    result
}

/// Converts snake_case `text` into PascalCase (`first_uppercase` true) or
/// camelCase (`first_uppercase` false): underscores are dropped and the
/// following character is uppercased.
fn snake_to_camel(text: &str, first_uppercase: bool) -> String {
    let mut result = String::new();
    let mut is_uppercase = first_uppercase;
    for c in text.chars() {
        if c == '_' {
            is_uppercase = true;
            continue;
        }
        if is_uppercase {
            for x in c.to_uppercase() {
                result.push(x);
            }
            is_uppercase = false;
        } else {
            result.push(c);
        }
    }
    result
}
/// The default rule performs no renaming at all.
impl Default for RenameRule {
    fn default() -> RenameRule {
        RenameRule::None
    }
}
/// Parses a `RenameRule` from its textual form.
///
/// Each rule is accepted in several spellings (e.g. `"snake_case"`,
/// `"SnakeCase"`); any other string yields a descriptive error.
impl FromStr for RenameRule {
    type Err = String;

    fn from_str(s: &str) -> Result<RenameRule, Self::Err> {
        match s {
            "none" | "None" => Ok(RenameRule::None),
            "mGeckoCase" | "GeckoCase" | "gecko_case" => Ok(RenameRule::GeckoCase),
            "lowercase" | "LowerCase" | "lower_case" => Ok(RenameRule::LowerCase),
            "UPPERCASE" | "UpperCase" | "upper_case" => Ok(RenameRule::UpperCase),
            "PascalCase" | "pascal_case" => Ok(RenameRule::PascalCase),
            "camelCase" | "CamelCase" | "camel_case" => Ok(RenameRule::CamelCase),
            "snake_case" | "SnakeCase" => Ok(RenameRule::SnakeCase),
            "SCREAMING_SNAKE_CASE" | "ScreamingSnakeCase" | "screaming_snake_case" => {
                Ok(RenameRule::ScreamingSnakeCase)
            }
            "QUALIFIED_SCREAMING_SNAKE_CASE"
            | "QualifiedScreamingSnakeCase"
            | "qualified_screaming_snake_case" => Ok(RenameRule::QualifiedScreamingSnakeCase),
            _ => Err(format!("Unrecognized RenameRule: '{}'.", s)),
        }
    }
}
deserialize_enum_str!(RenameRule);
|
use bins::error::*;
use bins::Bins;
use bins::configuration::BinsConfiguration;
use bins::engines;
use bins::FlexibleRange;
use bins::network;
use clap::{App, Arg, ArgGroup};
use hyper::Url;
use std::path::Path;
use std::process;
/// Fully resolved command-line options, seeded from configuration defaults
/// and overridden by the flags parsed in `get_arguments`.
pub struct Arguments {
    /// `--all`: fetch every file of the paste in input mode.
    pub all: bool,
    /// Whether to authenticate (api keys/tokens); `--auth` / `--anon`.
    pub auth: bool,
    /// Target bin name (`--bin`, legacy `--service`).
    pub bin: Option<String>,
    /// Copy command output to the clipboard (`--copy` / `--no-copy`).
    pub copy: bool,
    /// Positional file paths to paste.
    pub files: Vec<String>,
    /// `--force`: override warnings about file type or size when uploading.
    pub force: bool,
    /// `--input`: url of a paste whose contents should be displayed.
    pub input: Option<String>,
    /// `--json`: emit a json object instead of normal values.
    pub json: bool,
    /// `--message`: literal text to paste.
    pub message: Option<String>,
    /// `--name`: file name used for `--message` or stdin input.
    pub name: Option<String>,
    /// `--number-lines`: show line numbers for each file in input mode.
    pub number_lines: bool,
    /// `--output`: directory where files are saved in write mode.
    pub output: Option<String>,
    /// Paste privacy (`--private` / `--public`).
    pub private: bool,
    /// `--range`: which paste files to fetch in input mode.
    pub range: Option<FlexibleRange>,
    /// `--raw-urls`: print raw urls instead of content in input mode.
    pub raw_urls: bool,
    /// `--server`: custom server url (hastebin only).
    pub server: Option<Url>,
    /// `--urls`: print urls instead of content in input mode.
    pub urls: bool,
    /// `--write`: save fetched pastes to files in input mode.
    pub write: bool
}
include!(concat!(env!("OUT_DIR"), "/git_short_tag.rs"));
/// Returns the crate name baked in at compile time, or a placeholder when
/// `CARGO_PKG_NAME` was not set during the build.
fn get_name() -> String {
    match option_env!("CARGO_PKG_NAME") {
        Some(pkg_name) => pkg_name.to_owned(),
        None => "unknown_name".to_owned(),
    }
}
/// Builds the full version string: the compile-time crate version (or a
/// placeholder) followed by the short git tag generated at build time.
fn get_version() -> String {
    let version = match option_env!("CARGO_PKG_VERSION") {
        Some(v) => v.to_owned(),
        None => "unknown_version".to_owned(),
    };
    format!("{}{}", version, git_short_tag())
}
// The clipboard flags only exist when the crate is built with the
// `clipboard_support` feature; otherwise no extra arguments are added.
cfg_if! {
    if #[cfg(feature = "clipboard_support")] {
        /// Returns the `--copy`/`--no-copy` flags controlling clipboard output.
        fn get_clipboard_args<'a, 'b>() -> Vec<Arg<'a, 'b>> {
            vec![Arg::with_name("copy")
                     .short("C")
                     .long("copy")
                     .help("copies the output of the command to the clipboard without a newline")
                     .conflicts_with("no-copy"),
                 Arg::with_name("no-copy")
                     .short("c")
                     .long("no-copy")
                     .help("does not copy the output of the command to the clipboard")]
        }
    } else {
        /// Clipboard support is compiled out: contributes no arguments.
        fn get_clipboard_args<'a, 'b>() -> Vec<Arg<'a, 'b>> {
            Vec::new()
        }
    }
}
pub fn get_arguments(config: &BinsConfiguration) -> Result<Arguments> {
let mut arguments = Arguments {
all: false,
auth: config.get_defaults_auth(),
bin: config.get_defaults_bin().map(|s| s.to_owned()),
copy: config.get_defaults_copy(),
files: Vec::new(),
force: false,
input: None,
json: false,
message: None,
name: None,
number_lines: false,
output: None,
private: config.get_defaults_private(),
range: None,
raw_urls: false,
server: None,
urls: false,
write: false
};
let name = get_name();
let version = get_version();
let mut app = App::new(name.as_ref())
.version(version.as_ref())
.about("A tool for pasting from the terminal")
.arg(Arg::with_name("files")
.help("files to paste")
.takes_value(true)
.multiple(true))
.arg(Arg::with_name("message")
.short("m")
.long("message")
.help("message to paste")
.use_delimiter(false)
.takes_value(true)
.value_name("string"))
.arg(Arg::with_name("private")
.short("p")
.long("private")
.help("if the paste should be private")
.conflicts_with("public"))
.arg(Arg::with_name("public")
.short("P")
.long("public")
.help("if the paste should be public"))
.arg(Arg::with_name("auth")
.short("a")
.long("auth")
.help("if authentication (like api keys and tokens) should be used")
.conflicts_with("anon"))
.arg(Arg::with_name("anon")
.short("A")
.long("anon")
.help("if pastes should be posted without authentication"))
.arg(Arg::with_name("bin")
.short("b")
.long("bin")
.help("bin to use when uploading")
.takes_value(true)
.possible_values(&*engines::get_bin_names()))
.arg(Arg::with_name("service")
.short("s")
.long("service")
.help("legacy flag included for backwards compatibility. use --bin, as this will be removed in 2.0.0")
.takes_value(true)
.possible_values(&*engines::get_bin_names()))
.group(ArgGroup::with_name("bin_or_service")
.args(&["bin", "service"])
.required(arguments.bin.is_none()))
.arg(Arg::with_name("list-bins")
.short("l")
.long("list-bins")
.help("lists available bins and exits")
.conflicts_with_all(&["files", "message", "private", "public", "auth", "anon", "bin_or_service", "input"]))
.arg(Arg::with_name("list-services")
.long("list-services")
.help("legacy flag included for backwards compatibility. use --list-bins, as this will be removed in 2.0.0")
.conflicts_with_all(&["files", "message", "private", "public", "auth", "anon", "bin_or_service", "input"]))
.group(ArgGroup::with_name("list-bins_or_list-services").args(&["list-bins", "list-services"]))
.arg(Arg::with_name("input")
.short("i")
.long("input")
.help("displays raw contents of input paste")
.takes_value(true)
.value_name("url")
.conflicts_with_all(&["auth", "anon", "public", "private", "message", "bin_or_service"]))
.arg(Arg::with_name("range")
.short("n")
.long("range")
.help("chooses the files to get in input mode, starting from 0 (e.g. \"0\", \"0,1\", \"0-2\", \"2-0,3\")")
.takes_value(true)
.value_name("range")
.use_delimiter(false)
.requires("input")
.conflicts_with("files"))
.arg(Arg::with_name("all")
.short("L")
.long("all")
.help("gets all files in input mode")
.requires("input")
.conflicts_with_all(&["files", "range"]))
.arg(Arg::with_name("raw-urls")
.short("r")
.long("raw-urls")
.help("gets the raw urls instead of the content in input mode")
.requires("input"))
.arg(Arg::with_name("urls")
.short("u")
.long("urls")
.help("gets the urls instead of the content in input mode")
.requires("input")
.conflicts_with("raw-urls"))
.arg(Arg::with_name("server")
.short("S")
.long("server")
.help("specifies the server to use for the service (only support on hastebin)")
.takes_value(true)
.value_name("server_url"))
.arg(Arg::with_name("name")
.short("N")
.long("name")
.help("specifies a file name for --message or stdin")
.takes_value(true)
.value_name("name")
.conflicts_with("files"))
.arg(Arg::with_name("force")
.short("f")
.long("force")
.help("overrides warnings about file type or size when uploading")
.conflicts_with("input"))
.arg(Arg::with_name("number_lines")
.short("e")
.long("number-lines")
.help("display line numbers for each file in input mode")
.requires("input"))
.arg(Arg::with_name("write")
.short("w")
.long("write")
.help("writes pastes to files in input mode")
.requires("input"))
.arg(Arg::with_name("output")
.short("o")
.long("output")
.help("specifies where to save files in write mode")
.takes_value(true)
.value_name("dir")
.requires("write"))
.arg(Arg::with_name("json")
.short("j")
.long("json")
.help("output json a object instead of normal values")
.conflicts_with_all(&["write", "urls", "raw-urls"]));
for arg in get_clipboard_args() {
app = app.arg(arg);
}
let res = app.get_matches();
if res.is_present("list-bins_or_list-services") {
println!("{}", engines::get_bin_names().join("\n"));
process::exit(0);
}
if let Some(files) = res.values_of("files") {
arguments.files = files.map(|s| s.to_owned()).collect();
}
if let Some(message) = res.value_of("message") {
arguments.message = Some(message.to_owned());
}
if let Some(bin) = res.value_of("bin_or_service") {
arguments.bin = Some(bin.to_owned());
}
if let Some(input) = res.value_of("input") {
arguments.input = Some(input.to_owned());
}
if let Some(range) = res.value_of("range") {
arguments.range = Some(try!(FlexibleRange::parse(range)));
}
if let Some(server) = res.value_of("server") {
if let Some(ref bin) = arguments.bin {
if bin.to_lowercase() != "hastebin" {
return Err("--server may only be used if --service is hastebin".into());
}
}
arguments.server = Some(try!(network::parse_url(server).chain_err(|| "invalid --server")));
}
if let Some(name) = res.value_of("name") {
let name = try!(Bins::sanitize_path(Path::new(name)));
arguments.name = Some(name.to_owned());
}
if let Some(output) = res.value_of("output") {
arguments.output = Some(output.to_owned());
}
arguments.all = res.is_present("all");
arguments.force = res.is_present("force");
arguments.json = res.is_present("json");
arguments.number_lines = res.is_present("number_lines");
arguments.raw_urls = res.is_present("raw-urls");
arguments.urls = res.is_present("urls");
arguments.write = res.is_present("write");
if res.is_present("private") {
arguments.private = true;
} else if res.is_present("public") {
arguments.private = false;
}
if res.is_present("anon") {
arguments.auth = false;
} else if res.is_present("auth") {
arguments.auth = true;
}
if res.is_present("copy") {
arguments.copy = true;
} else if res.is_present("no-copy") {
arguments.copy = false;
}
Ok(arguments)
}
style(arguments): fix typo in --json
use bins::error::*;
use bins::Bins;
use bins::configuration::BinsConfiguration;
use bins::engines;
use bins::FlexibleRange;
use bins::network;
use clap::{App, Arg, ArgGroup};
use hyper::Url;
use std::path::Path;
use std::process;
/// Fully resolved command-line options, seeded from configuration defaults
/// and overridden by the flags parsed in `get_arguments`.
pub struct Arguments {
    /// `--all`: fetch every file of the paste in input mode.
    pub all: bool,
    /// Whether to authenticate (api keys/tokens); `--auth` / `--anon`.
    pub auth: bool,
    /// Target bin name (`--bin`, legacy `--service`).
    pub bin: Option<String>,
    /// Copy command output to the clipboard (`--copy` / `--no-copy`).
    pub copy: bool,
    /// Positional file paths to paste.
    pub files: Vec<String>,
    /// `--force`: override warnings about file type or size when uploading.
    pub force: bool,
    /// `--input`: url of a paste whose contents should be displayed.
    pub input: Option<String>,
    /// `--json`: emit a json object instead of normal values.
    pub json: bool,
    /// `--message`: literal text to paste.
    pub message: Option<String>,
    /// `--name`: file name used for `--message` or stdin input.
    pub name: Option<String>,
    /// `--number-lines`: show line numbers for each file in input mode.
    pub number_lines: bool,
    /// `--output`: directory where files are saved in write mode.
    pub output: Option<String>,
    /// Paste privacy (`--private` / `--public`).
    pub private: bool,
    /// `--range`: which paste files to fetch in input mode.
    pub range: Option<FlexibleRange>,
    /// `--raw-urls`: print raw urls instead of content in input mode.
    pub raw_urls: bool,
    /// `--server`: custom server url (hastebin only).
    pub server: Option<Url>,
    /// `--urls`: print urls instead of content in input mode.
    pub urls: bool,
    /// `--write`: save fetched pastes to files in input mode.
    pub write: bool
}
include!(concat!(env!("OUT_DIR"), "/git_short_tag.rs"));
/// Returns the crate name baked in at compile time, or a placeholder when
/// `CARGO_PKG_NAME` was not set during the build.
fn get_name() -> String {
    match option_env!("CARGO_PKG_NAME") {
        Some(pkg_name) => pkg_name.to_owned(),
        None => "unknown_name".to_owned(),
    }
}
/// Builds the full version string: the compile-time crate version (or a
/// placeholder) followed by the short git tag generated at build time.
fn get_version() -> String {
    let version = match option_env!("CARGO_PKG_VERSION") {
        Some(v) => v.to_owned(),
        None => "unknown_version".to_owned(),
    };
    format!("{}{}", version, git_short_tag())
}
// The clipboard flags only exist when the crate is built with the
// `clipboard_support` feature; otherwise no extra arguments are added.
cfg_if! {
    if #[cfg(feature = "clipboard_support")] {
        /// Returns the `--copy`/`--no-copy` flags controlling clipboard output.
        fn get_clipboard_args<'a, 'b>() -> Vec<Arg<'a, 'b>> {
            vec![Arg::with_name("copy")
                     .short("C")
                     .long("copy")
                     .help("copies the output of the command to the clipboard without a newline")
                     .conflicts_with("no-copy"),
                 Arg::with_name("no-copy")
                     .short("c")
                     .long("no-copy")
                     .help("does not copy the output of the command to the clipboard")]
        }
    } else {
        /// Clipboard support is compiled out: contributes no arguments.
        fn get_clipboard_args<'a, 'b>() -> Vec<Arg<'a, 'b>> {
            Vec::new()
        }
    }
}
/// Builds the clap `App`, parses the process arguments, and merges them with
/// the defaults supplied by `config` into an `Arguments` value.
///
/// Side effects: `--list-bins` (or the legacy `--list-services`) prints the
/// available bin names and exits the process.
pub fn get_arguments(config: &BinsConfiguration) -> Result<Arguments> {
    // Start from configuration defaults; flags below override these.
    let mut arguments = Arguments {
        all: false,
        auth: config.get_defaults_auth(),
        bin: config.get_defaults_bin().map(|s| s.to_owned()),
        copy: config.get_defaults_copy(),
        files: Vec::new(),
        force: false,
        input: None,
        json: false,
        message: None,
        name: None,
        number_lines: false,
        output: None,
        private: config.get_defaults_private(),
        range: None,
        raw_urls: false,
        server: None,
        urls: false,
        write: false
    };
    let name = get_name();
    let version = get_version();
    // Declarative description of the whole CLI surface.
    let mut app = App::new(name.as_ref())
        .version(version.as_ref())
        .about("A tool for pasting from the terminal")
        .arg(Arg::with_name("files")
            .help("files to paste")
            .takes_value(true)
            .multiple(true))
        .arg(Arg::with_name("message")
            .short("m")
            .long("message")
            .help("message to paste")
            .use_delimiter(false)
            .takes_value(true)
            .value_name("string"))
        .arg(Arg::with_name("private")
            .short("p")
            .long("private")
            .help("if the paste should be private")
            .conflicts_with("public"))
        .arg(Arg::with_name("public")
            .short("P")
            .long("public")
            .help("if the paste should be public"))
        .arg(Arg::with_name("auth")
            .short("a")
            .long("auth")
            .help("if authentication (like api keys and tokens) should be used")
            .conflicts_with("anon"))
        .arg(Arg::with_name("anon")
            .short("A")
            .long("anon")
            .help("if pastes should be posted without authentication"))
        .arg(Arg::with_name("bin")
            .short("b")
            .long("bin")
            .help("bin to use when uploading")
            .takes_value(true)
            .possible_values(&*engines::get_bin_names()))
        .arg(Arg::with_name("service")
            .short("s")
            .long("service")
            .help("legacy flag included for backwards compatibility. use --bin, as this will be removed in 2.0.0")
            .takes_value(true)
            .possible_values(&*engines::get_bin_names()))
        .group(ArgGroup::with_name("bin_or_service")
            .args(&["bin", "service"])
            .required(arguments.bin.is_none()))
        .arg(Arg::with_name("list-bins")
            .short("l")
            .long("list-bins")
            .help("lists available bins and exits")
            .conflicts_with_all(&["files", "message", "private", "public", "auth", "anon", "bin_or_service", "input"]))
        .arg(Arg::with_name("list-services")
            .long("list-services")
            .help("legacy flag included for backwards compatibility. use --list-bins, as this will be removed in 2.0.0")
            .conflicts_with_all(&["files", "message", "private", "public", "auth", "anon", "bin_or_service", "input"]))
        .group(ArgGroup::with_name("list-bins_or_list-services").args(&["list-bins", "list-services"]))
        .arg(Arg::with_name("input")
            .short("i")
            .long("input")
            .help("displays raw contents of input paste")
            .takes_value(true)
            .value_name("url")
            .conflicts_with_all(&["auth", "anon", "public", "private", "message", "bin_or_service"]))
        .arg(Arg::with_name("range")
            .short("n")
            .long("range")
            .help("chooses the files to get in input mode, starting from 0 (e.g. \"0\", \"0,1\", \"0-2\", \"2-0,3\")")
            .takes_value(true)
            .value_name("range")
            .use_delimiter(false)
            .requires("input")
            .conflicts_with("files"))
        .arg(Arg::with_name("all")
            .short("L")
            .long("all")
            .help("gets all files in input mode")
            .requires("input")
            .conflicts_with_all(&["files", "range"]))
        .arg(Arg::with_name("raw-urls")
            .short("r")
            .long("raw-urls")
            .help("gets the raw urls instead of the content in input mode")
            .requires("input"))
        .arg(Arg::with_name("urls")
            .short("u")
            .long("urls")
            .help("gets the urls instead of the content in input mode")
            .requires("input")
            .conflicts_with("raw-urls"))
        .arg(Arg::with_name("server")
            .short("S")
            .long("server")
            .help("specifies the server to use for the service (only support on hastebin)")
            .takes_value(true)
            .value_name("server_url"))
        .arg(Arg::with_name("name")
            .short("N")
            .long("name")
            .help("specifies a file name for --message or stdin")
            .takes_value(true)
            .value_name("name")
            .conflicts_with("files"))
        .arg(Arg::with_name("force")
            .short("f")
            .long("force")
            .help("overrides warnings about file type or size when uploading")
            .conflicts_with("input"))
        .arg(Arg::with_name("number_lines")
            .short("e")
            .long("number-lines")
            .help("display line numbers for each file in input mode")
            .requires("input"))
        .arg(Arg::with_name("write")
            .short("w")
            .long("write")
            .help("writes pastes to files in input mode")
            .requires("input"))
        .arg(Arg::with_name("output")
            .short("o")
            .long("output")
            .help("specifies where to save files in write mode")
            .takes_value(true)
            .value_name("dir")
            .requires("write"))
        .arg(Arg::with_name("json")
            .short("j")
            .long("json")
            .help("output a json object instead of normal values")
            .conflicts_with_all(&["write", "urls", "raw-urls"]));
    // Clipboard flags only exist with the `clipboard_support` feature.
    for arg in get_clipboard_args() {
        app = app.arg(arg);
    }
    let res = app.get_matches();
    // Listing bins short-circuits the entire run.
    if res.is_present("list-bins_or_list-services") {
        println!("{}", engines::get_bin_names().join("\n"));
        process::exit(0);
    }
    if let Some(files) = res.values_of("files") {
        arguments.files = files.map(|s| s.to_owned()).collect();
    }
    if let Some(message) = res.value_of("message") {
        arguments.message = Some(message.to_owned());
    }
    if let Some(bin) = res.value_of("bin_or_service") {
        arguments.bin = Some(bin.to_owned());
    }
    if let Some(input) = res.value_of("input") {
        arguments.input = Some(input.to_owned());
    }
    if let Some(range) = res.value_of("range") {
        arguments.range = Some(try!(FlexibleRange::parse(range)));
    }
    if let Some(server) = res.value_of("server") {
        // --server is only meaningful for the hastebin service.
        if let Some(ref bin) = arguments.bin {
            if bin.to_lowercase() != "hastebin" {
                return Err("--server may only be used if --service is hastebin".into());
            }
        }
        arguments.server = Some(try!(network::parse_url(server).chain_err(|| "invalid --server")));
    }
    if let Some(name) = res.value_of("name") {
        let name = try!(Bins::sanitize_path(Path::new(name)));
        arguments.name = Some(name.to_owned());
    }
    if let Some(output) = res.value_of("output") {
        arguments.output = Some(output.to_owned());
    }
    arguments.all = res.is_present("all");
    arguments.force = res.is_present("force");
    arguments.json = res.is_present("json");
    arguments.number_lines = res.is_present("number_lines");
    arguments.raw_urls = res.is_present("raw-urls");
    arguments.urls = res.is_present("urls");
    arguments.write = res.is_present("write");
    // Paired flags only override the config default when explicitly given.
    if res.is_present("private") {
        arguments.private = true;
    } else if res.is_present("public") {
        arguments.private = false;
    }
    if res.is_present("anon") {
        arguments.auth = false;
    } else if res.is_present("auth") {
        arguments.auth = true;
    }
    if res.is_present("copy") {
        arguments.copy = true;
    } else if res.is_present("no-copy") {
        arguments.copy = false;
    }
    Ok(arguments)
}
|
//! Implementation of the various distribution aspects of the compiler.
//!
//! This module is responsible for creating tarballs of the standard library,
//! compiler, and documentation. This ends up being what we distribute to
//! everyone as well.
//!
//! No tarball is actually created literally in this file, but rather we shell
//! out to `rust-installer` still. This may one day be replaced with bits and
//! pieces of `rustup.rs`!
use std::env;
use std::fs;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use build_helper::{output, t};
use crate::builder::{Builder, RunConfig, ShouldRun, Step};
use crate::cache::{Interned, INTERNER};
use crate::channel;
use crate::compile;
use crate::config::TargetSelection;
use crate::tool::{self, Tool};
use crate::util::{exe, is_dylib, timeit};
use crate::{Compiler, DependencyType, Mode, LLVM_TOOLS};
use time::{self, Timespec};
/// Returns the package name (`component-version`) for a dist component,
/// picking the version number that corresponds to that component.
pub fn pkgname(builder: &Builder<'_>, component: &str) -> String {
    match component {
        "cargo" => format!("{}-{}", component, builder.cargo_package_vers()),
        "rls" => format!("{}-{}", component, builder.rls_package_vers()),
        "rust-analyzer" => format!("{}-{}", component, builder.rust_analyzer_package_vers()),
        "clippy" => format!("{}-{}", component, builder.clippy_package_vers()),
        "miri" => format!("{}-{}", component, builder.miri_package_vers()),
        "rustfmt" => format!("{}-{}", component, builder.rustfmt_package_vers()),
        "llvm-tools" => format!("{}-{}", component, builder.llvm_tools_package_vers()),
        other => {
            // Everything else must be a `rust*` component using the main version.
            assert!(other.starts_with("rust"));
            format!("{}-{}", other, builder.rust_package_vers())
        }
    }
}
/// Directory (under the build output dir) where finished dist tarballs go.
fn distdir(builder: &Builder<'_>) -> PathBuf {
    builder.out.join("dist")
}

/// Scratch directory used while assembling dist images.
pub fn tmpdir(builder: &Builder<'_>) -> PathBuf {
    builder.out.join("tmp/dist")
}

/// Returns a command invoking the in-tree `rust-installer` tool.
fn rust_installer(builder: &Builder<'_>) -> Command {
    builder.tool_cmd(Tool::RustInstaller)
}
/// Reports a tool that failed to build: when `skip` is true the dist step is
/// skipped with a message, otherwise the build aborts with a panic.
fn missing_tool(tool_name: &str, skip: bool) {
    if !skip {
        panic!("Unable to build {}", tool_name);
    }
    println!("Unable to build {}, skipping dist", tool_name)
}
/// Dist step producing the `rust-docs` installer tarball for a host target.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Docs {
    /// Target the documentation is packaged for.
    pub host: TargetSelection,
}
impl Step for Docs {
    type Output = PathBuf;
    const DEFAULT: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/doc")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Docs { host: run.target });
    }

    /// Builds the `rust-docs` installer component.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let host = self.host;
        let name = pkgname(builder, "rust-docs");
        // Docs disabled: skip building and return the would-be tarball path.
        if !builder.config.docs {
            return distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple));
        }
        builder.default_doc(None);
        builder.info(&format!("Dist docs ({})", host));
        let _time = timeit(builder);
        // Stage the generated HTML into a fresh image directory.
        let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
        let _ = fs::remove_dir_all(&image);
        let dst = image.join("share/doc/rust/html");
        t!(fs::create_dir_all(&dst));
        let src = builder.doc_out(host);
        builder.cp_r(&src, &dst);
        builder.install(&builder.src.join("src/doc/robots.txt"), &dst, 0o644);
        // Package the staged image with rust-installer.
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust-Documentation")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Rust-documentation-is-installed.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}-{}", name, host.triple))
            .arg("--component-name=rust-docs")
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--bulk-dirs=share/doc/rust/html");
        builder.run(&mut cmd);
        builder.remove_dir(&image);
        distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple))
    }
}
/// Dist step producing the `rustc-docs` (compiler docs) tarball for a host.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustcDocs {
    /// Target the compiler documentation is packaged for.
    pub host: TargetSelection,
}
impl Step for RustcDocs {
    type Output = PathBuf;
    const DEFAULT: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/librustc")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(RustcDocs { host: run.target });
    }

    /// Builds the `rustc-docs` installer component.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let host = self.host;
        let name = pkgname(builder, "rustc-docs");
        // Compiler docs disabled: return the would-be tarball path untouched.
        if !builder.config.compiler_docs {
            return distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple));
        }
        builder.default_doc(None);
        // Stage the generated compiler docs into a fresh image directory.
        let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
        let _ = fs::remove_dir_all(&image);
        let dst = image.join("share/doc/rust/html");
        t!(fs::create_dir_all(&dst));
        let src = builder.compiler_doc_out(host);
        builder.cp_r(&src, &dst);
        // Package the staged image with rust-installer.
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rustc-Documentation")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Rustc-documentation-is-installed.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}-{}", name, host.triple))
            .arg("--component-name=rustc-docs")
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--bulk-dirs=share/doc/rust/html");
        builder.info(&format!("Dist compiler docs ({})", host));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        builder.remove_dir(&image);
        distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple))
    }
}
/// Resolves each name in `files` against the candidate directories in `path`,
/// returning the first existing match for every file (in input order).
///
/// Panics if any file cannot be found in any of the directories.
fn find_files(files: &[&str], path: &[PathBuf]) -> Vec<PathBuf> {
    files
        .iter()
        .map(|file| {
            path.iter()
                .map(|dir| dir.join(file))
                .find(|candidate| candidate.exists())
                .unwrap_or_else(|| panic!("Could not find '{}' in {:?}", file, path))
        })
        .collect()
}
/// Bundles MinGW toolchain binaries and import libraries into the dist images
/// so windows-gnu targets can link without an externally installed toolchain.
///
/// Runtime DLLs are placed next to rustc under `rust_root/bin`; linker tools
/// and static/import libraries go into the target's `self-contained`
/// directories under `plat_root`.
fn make_win_dist(
    rust_root: &Path,
    plat_root: &Path,
    target: TargetSelection,
    builder: &Builder<'_>,
) {
    // Ask gcc where it keeps its stuff
    let mut cmd = Command::new(builder.cc(target));
    cmd.arg("-print-search-dirs");
    let gcc_out = output(&mut cmd);
    let mut bin_path: Vec<_> = env::split_paths(&env::var_os("PATH").unwrap_or_default()).collect();
    let mut lib_path = Vec::new();
    // Parse gcc's "programs: =a;b" / "libraries: =a;b" search-dir output.
    for line in gcc_out.lines() {
        let idx = line.find(':').unwrap();
        let key = &line[..idx];
        let trim_chars: &[_] = &[' ', '='];
        let value = line[(idx + 1)..].trim_start_matches(trim_chars).split(';').map(PathBuf::from);
        if key == "programs" {
            bin_path.extend(value);
        } else if key == "libraries" {
            lib_path.extend(value);
        }
    }
    // Pick the gcc binary name matching the target triple.
    let compiler = if target == "i686-pc-windows-gnu" {
        "i686-w64-mingw32-gcc.exe"
    } else if target == "x86_64-pc-windows-gnu" {
        "x86_64-w64-mingw32-gcc.exe"
    } else {
        "gcc.exe"
    };
    let target_tools = [compiler, "ld.exe", "dlltool.exe", "libwinpthread-1.dll"];
    let mut rustc_dlls = vec!["libwinpthread-1.dll"];
    if target.starts_with("i686-") {
        rustc_dlls.push("libgcc_s_dw2-1.dll");
    } else {
        rustc_dlls.push("libgcc_s_seh-1.dll");
    }
    let target_libs = [
        // MinGW libs
        "libgcc.a",
        "libgcc_eh.a",
        "libgcc_s.a",
        "libm.a",
        "libmingw32.a",
        "libmingwex.a",
        "libstdc++.a",
        "libiconv.a",
        "libmoldname.a",
        "libpthread.a",
        // Windows import libs
        "libadvapi32.a",
        "libbcrypt.a",
        "libcomctl32.a",
        "libcomdlg32.a",
        "libcredui.a",
        "libcrypt32.a",
        "libdbghelp.a",
        "libgdi32.a",
        "libimagehlp.a",
        "libiphlpapi.a",
        "libkernel32.a",
        "libmsimg32.a",
        "libmsvcrt.a",
        "libodbc32.a",
        "libole32.a",
        "liboleaut32.a",
        "libopengl32.a",
        "libpsapi.a",
        "librpcrt4.a",
        "libsecur32.a",
        "libsetupapi.a",
        "libshell32.a",
        "libsynchronization.a",
        "libuser32.a",
        "libuserenv.a",
        "libuuid.a",
        "libwinhttp.a",
        "libwinmm.a",
        "libwinspool.a",
        "libws2_32.a",
        "libwsock32.a",
    ];
    // Find mingw artifacts we want to bundle
    let target_tools = find_files(&target_tools, &bin_path);
    let rustc_dlls = find_files(&rustc_dlls, &bin_path);
    let target_libs = find_files(&target_libs, &lib_path);
    // Copy runtime dlls next to rustc.exe
    let dist_bin_dir = rust_root.join("bin/");
    fs::create_dir_all(&dist_bin_dir).expect("creating dist_bin_dir failed");
    for src in rustc_dlls {
        builder.copy_to_folder(&src, &dist_bin_dir);
    }
    // Copy platform tools to platform-specific bin directory
    let target_bin_dir = plat_root
        .join("lib")
        .join("rustlib")
        .join(target.triple)
        .join("bin")
        .join("self-contained");
    fs::create_dir_all(&target_bin_dir).expect("creating target_bin_dir failed");
    for src in target_tools {
        builder.copy_to_folder(&src, &target_bin_dir);
    }
    // Warn windows-gnu users that the bundled GCC cannot compile C files
    builder.create(
        &target_bin_dir.join("GCC-WARNING.txt"),
        "gcc.exe contained in this folder cannot be used for compiling C files - it is only\
        used as a linker. In order to be able to compile projects containing C code use\
        the GCC provided by MinGW or Cygwin.",
    );
    // Copy platform libs to platform-specific lib directory
    let target_lib_dir = plat_root
        .join("lib")
        .join("rustlib")
        .join(target.triple)
        .join("lib")
        .join("self-contained");
    fs::create_dir_all(&target_lib_dir).expect("creating target_lib_dir failed");
    for src in target_libs {
        builder.copy_to_folder(&src, &target_lib_dir);
    }
}
/// Dist step producing the `rust-mingw` tarball (bundled MinGW toolchain)
/// for a windows-gnu host.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Mingw {
    /// Host target the MinGW bits are packaged for.
    pub host: TargetSelection,
}
impl Step for Mingw {
    type Output = Option<PathBuf>;
    const DEFAULT: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.never()
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Mingw { host: run.target });
    }

    /// Builds the `rust-mingw` installer component.
    ///
    /// This contains all the bits and pieces to run the MinGW Windows targets
    /// without any extra installed software (e.g., we bundle gcc, libraries, etc).
    fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
        let host = self.host;
        // Only windows-gnu hosts get a MinGW component.
        if !host.contains("pc-windows-gnu") {
            return None;
        }
        builder.info(&format!("Dist mingw ({})", host));
        let _time = timeit(builder);
        let name = pkgname(builder, "rust-mingw");
        let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
        let _ = fs::remove_dir_all(&image);
        t!(fs::create_dir_all(&image));
        // The first argument is a "temporary directory" which is just
        // thrown away (this contains the runtime DLLs included in the rustc package
        // above) and the second argument is where to place all the MinGW components
        // (which is what we want).
        make_win_dist(&tmpdir(builder), &image, host, &builder);
        // Package the staged image with rust-installer.
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust-MinGW")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Rust-MinGW-is-installed.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}-{}", name, host.triple))
            .arg("--component-name=rust-mingw")
            .arg("--legacy-manifest-dirs=rustlib,cargo");
        builder.run(&mut cmd);
        t!(fs::remove_dir_all(&image));
        Some(distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple)))
    }
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Rustc {
    // Compiler whose sysroot is packaged into the installer.
    pub compiler: Compiler,
}
impl Step for Rustc {
    // Path of the generated `rustc` tarball.
    type Output = PathBuf;
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/librustc")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder
            .ensure(Rustc { compiler: run.builder.compiler(run.builder.top_stage, run.target) });
    }
    /// Creates the `rustc` installer component.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let compiler = self.compiler;
        let host = self.compiler.host;
        let name = pkgname(builder, "rustc");
        let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
        let _ = fs::remove_dir_all(&image);
        let overlay = tmpdir(builder).join(format!("{}-{}-overlay", name, host.triple));
        let _ = fs::remove_dir_all(&overlay);
        // Prepare the rustc "image", what will actually end up getting installed
        prepare_image(builder, compiler, &image);
        // Prepare the overlay which is part of the tarball but won't actually be
        // installed
        let cp = |file: &str| {
            builder.install(&builder.src.join(file), &overlay, 0o644);
        };
        cp("COPYRIGHT");
        cp("LICENSE-APACHE");
        cp("LICENSE-MIT");
        cp("README.md");
        // tiny morsel of metadata is used by rust-packaging
        let version = builder.rust_version();
        builder.create(&overlay.join("version"), &version);
        if let Some(sha) = builder.rust_sha() {
            builder.create(&overlay.join("git-commit-hash"), &sha);
        }
        // On MinGW we've got a few runtime DLL dependencies that we need to
        // include. The first argument to this script is where to put these DLLs
        // (the image we're creating), and the second argument is a junk directory
        // to ignore all other MinGW stuff the script creates.
        //
        // On 32-bit MinGW we're always including a DLL which needs some extra
        // licenses to distribute. On 64-bit MinGW we don't actually distribute
        // anything requiring us to distribute a license, but it's likely the
        // install will *also* include the rust-mingw package, which also needs
        // licenses, so to be safe we just include it here in all MinGW packages.
        if host.contains("pc-windows-gnu") {
            make_win_dist(&image, &tmpdir(builder), host, builder);
            let dst = image.join("share/doc");
            t!(fs::create_dir_all(&dst));
            builder.cp_r(&builder.src.join("src/etc/third-party"), &dst);
        }
        // Finally, wrap everything up in a nice tarball!
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Rust-is-ready-to-roll.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, host.triple))
            .arg("--component-name=rustc")
            .arg("--legacy-manifest-dirs=rustlib,cargo");
        builder.info(&format!("Dist rustc stage{} ({})", compiler.stage, host.triple));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        builder.remove_dir(&image);
        builder.remove_dir(&overlay);
        return distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple));
        // Populates `image` with everything that belongs in the installed
        // rustc sysroot: binaries, runtime libraries, LLVM bits, LLD, man
        // pages, debugger scripts, and license files.
        fn prepare_image(builder: &Builder<'_>, compiler: Compiler, image: &Path) {
            let host = compiler.host;
            let src = builder.sysroot(compiler);
            // Copy rustc/rustdoc binaries
            t!(fs::create_dir_all(image.join("bin")));
            builder.cp_r(&src.join("bin"), &image.join("bin"));
            builder.install(&builder.rustdoc(compiler), &image.join("bin"), 0o755);
            let libdir_relative = builder.libdir_relative(compiler);
            // Copy runtime DLLs needed by the compiler
            if libdir_relative.to_str() != Some("bin") {
                let libdir = builder.rustc_libdir(compiler);
                for entry in builder.read_dir(&libdir) {
                    let name = entry.file_name();
                    if let Some(s) = name.to_str() {
                        if is_dylib(s) {
                            // Don't use custom libdir here because ^lib/ will be resolved again
                            // with installer
                            builder.install(&entry.path(), &image.join("lib"), 0o644);
                        }
                    }
                }
            }
            // Copy libLLVM.so to the lib dir as well, if needed. While not
            // technically needed by rustc itself it's needed by lots of other
            // components like the llvm tools and LLD. LLD is included below and
            // tools/LLDB come later, so let's just throw it in the rustc
            // component for now.
            maybe_install_llvm_runtime(builder, host, image);
            // Copy over lld if it's there
            if builder.config.lld_enabled {
                let exe = exe("rust-lld", compiler.host);
                let src =
                    builder.sysroot_libdir(compiler, host).parent().unwrap().join("bin").join(&exe);
                // for the rationale about this rename check `compile::copy_lld_to_sysroot`
                let dst = image.join("lib/rustlib").join(&*host.triple).join("bin").join(&exe);
                t!(fs::create_dir_all(&dst.parent().unwrap()));
                builder.copy(&src, &dst);
            }
            // Man pages
            t!(fs::create_dir_all(image.join("share/man/man1")));
            let man_src = builder.src.join("src/doc/man");
            let man_dst = image.join("share/man/man1");
            // Reproducible builds: If SOURCE_DATE_EPOCH is set, use that as the time.
            let time = env::var("SOURCE_DATE_EPOCH")
                .map(|timestamp| {
                    let epoch = timestamp
                        .parse()
                        .map_err(|err| format!("could not parse SOURCE_DATE_EPOCH: {}", err))
                        .unwrap();
                    time::at(Timespec::new(epoch, 0))
                })
                .unwrap_or_else(|_| time::now());
            let month_year = t!(time::strftime("%B %Y", &time));
            // don't use our `bootstrap::util::{copy, cp_r}`, because those try
            // to hardlink, and we don't want to edit the source templates
            for file_entry in builder.read_dir(&man_src) {
                let page_src = file_entry.path();
                let page_dst = man_dst.join(file_entry.file_name());
                t!(fs::copy(&page_src, &page_dst));
                // template in month/year and version number
                builder.replace_in_file(
                    &page_dst,
                    &[
                        ("<INSERT DATE HERE>", &month_year),
                        ("<INSERT VERSION HERE>", channel::CFG_RELEASE_NUM),
                    ],
                );
            }
            // Debugger scripts
            builder
                .ensure(DebuggerScripts { sysroot: INTERNER.intern_path(image.to_owned()), host });
            // Misc license info
            let cp = |file: &str| {
                builder.install(&builder.src.join(file), &image.join("share/doc/rust"), 0o644);
            };
            cp("COPYRIGHT");
            cp("LICENSE-APACHE");
            cp("LICENSE-MIT");
            cp("README.md");
        }
    }
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct DebuggerScripts {
    // Sysroot the scripts are installed into (interned so the step is hashable).
    pub sysroot: Interned<PathBuf>,
    // Host triple; selects the windbg or the gdb/lldb script set below.
    pub host: TargetSelection,
}
impl Step for DebuggerScripts {
    type Output = ();
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/lldb_batchmode.py")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(DebuggerScripts {
            sysroot: run.builder.sysroot(run.builder.compiler(run.builder.top_stage, run.host)),
            host: run.target,
        });
    }
    /// Copies debugger scripts for `target` into the `sysroot` specified.
    fn run(self, builder: &Builder<'_>) {
        let host = self.host;
        let sysroot = self.sysroot;
        let dst = sysroot.join("lib/rustlib/etc");
        t!(fs::create_dir_all(&dst));
        // Helper: install a script from src/etc/ into the sysroot's etc dir.
        let cp_debugger_script = |file: &str| {
            builder.install(&builder.src.join("src/etc/").join(file), &dst, 0o644);
        };
        if host.contains("windows-msvc") {
            // windbg debugger scripts
            builder.install(
                &builder.src.join("src/etc/rust-windbg.cmd"),
                &sysroot.join("bin"),
                0o755,
            );
            cp_debugger_script("natvis/intrinsic.natvis");
            cp_debugger_script("natvis/liballoc.natvis");
            cp_debugger_script("natvis/libcore.natvis");
            cp_debugger_script("natvis/libstd.natvis");
        } else {
            cp_debugger_script("rust_types.py");
            // gdb debugger scripts
            builder.install(&builder.src.join("src/etc/rust-gdb"), &sysroot.join("bin"), 0o755);
            builder.install(&builder.src.join("src/etc/rust-gdbgui"), &sysroot.join("bin"), 0o755);
            cp_debugger_script("gdb_load_rust_pretty_printers.py");
            cp_debugger_script("gdb_lookup.py");
            cp_debugger_script("gdb_providers.py");
            // lldb debugger scripts
            builder.install(&builder.src.join("src/etc/rust-lldb"), &sysroot.join("bin"), 0o755);
            cp_debugger_script("lldb_lookup.py");
            cp_debugger_script("lldb_providers.py");
        }
    }
}
/// Returns `true` (after logging a skip note) when `compiler`'s host is not
/// the build triple, in which case producing target library archives from it
/// would duplicate work already done on the build host.
fn skip_host_target_lib(builder: &Builder<'_>, compiler: Compiler) -> bool {
    // The only true set of target libraries came from the build triple, so
    // let's reduce redundant work by only producing archives from that host.
    if compiler.host == builder.config.build {
        return false;
    }
    builder.info("\tskipping, not a build host");
    true
}
/// Copy stamped files into an image's `target/lib` directory.
fn copy_target_libs(builder: &Builder<'_>, target: TargetSelection, image: &Path, stamp: &Path) {
    let dst = image.join("lib/rustlib").join(target.triple).join("lib");
    // Objects/libs used for linking without an external toolchain live in a
    // dedicated `self-contained` subdirectory.
    let self_contained_dst = dst.join("self-contained");
    t!(fs::create_dir_all(&dst));
    t!(fs::create_dir_all(&self_contained_dst));
    // The stamp file lists artifacts produced by compilation plus a
    // per-artifact dependency type that decides its destination.
    for (path, dependency_type) in builder.read_stamp_file(stamp) {
        if dependency_type == DependencyType::TargetSelfContained {
            builder.copy(&path, &self_contained_dst.join(path.file_name().unwrap()));
        } else if dependency_type == DependencyType::Target || builder.config.build == target {
            // Host-type deps are only copied when building for the build triple.
            builder.copy(&path, &dst.join(path.file_name().unwrap()));
        }
    }
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Std {
    // Compiler used to build the standard library being packaged.
    pub compiler: Compiler,
    // Target triple whose std artifacts go into the tarball.
    pub target: TargetSelection,
}
impl Step for Std {
    // Path of the generated `rust-std` tarball.
    type Output = PathBuf;
    const DEFAULT: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/libstd")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Std {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }
    /// Creates the `rust-std` installer component for `target`.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let compiler = self.compiler;
        let target = self.target;
        let name = pkgname(builder, "rust-std");
        let archive = distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple));
        // Only the build host produces target libraries; see skip_host_target_lib.
        if skip_host_target_lib(builder, compiler) {
            return archive;
        }
        builder.ensure(compile::Std { compiler, target });
        let image = tmpdir(builder).join(format!("{}-{}-image", name, target.triple));
        let _ = fs::remove_dir_all(&image);
        // Resolve the compiler that actually produced the artifacts (full
        // bootstrap may uplift from an earlier stage).
        let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target);
        let stamp = compile::libstd_stamp(builder, compiler_to_use, target);
        copy_target_libs(builder, target, &image, &stamp);
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=std-is-standing-at-the-ready.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg(format!("--component-name=rust-std-{}", target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo");
        builder
            .info(&format!("Dist std stage{} ({} -> {})", compiler.stage, &compiler.host, target));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        builder.remove_dir(&image);
        archive
    }
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustcDev {
    // Compiler used to build the rustc libraries being packaged.
    pub compiler: Compiler,
    // Target triple whose rustc-dev artifacts go into the tarball.
    pub target: TargetSelection,
}
impl Step for RustcDev {
    // Path of the generated `rustc-dev` tarball.
    type Output = PathBuf;
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("rustc-dev")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(RustcDev {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }
    /// Creates the `rustc-dev` installer component (compiler-internal libraries).
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let compiler = self.compiler;
        let target = self.target;
        let name = pkgname(builder, "rustc-dev");
        let archive = distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple));
        // Only the build host produces target libraries; see skip_host_target_lib.
        if skip_host_target_lib(builder, compiler) {
            return archive;
        }
        builder.ensure(compile::Rustc { compiler, target });
        let image = tmpdir(builder).join(format!("{}-{}-image", name, target.triple));
        let _ = fs::remove_dir_all(&image);
        // Resolve the compiler that actually produced the artifacts (full
        // bootstrap may uplift from an earlier stage).
        let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target);
        let stamp = compile::librustc_stamp(builder, compiler_to_use, target);
        copy_target_libs(builder, target, &image, &stamp);
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Rust-is-ready-to-develop.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg(format!("--component-name=rustc-dev-{}", target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo");
        builder.info(&format!(
            "Dist rustc-dev stage{} ({} -> {})",
            compiler.stage, &compiler.host, target
        ));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        builder.remove_dir(&image);
        archive
    }
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Analysis {
    // Compiler that produced the save-analysis data.
    pub compiler: Compiler,
    // Target triple whose analysis data goes into the tarball.
    pub target: TargetSelection,
}
impl Step for Analysis {
    // Path of the generated `rust-analysis` tarball.
    type Output = PathBuf;
    const DEFAULT: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        let builder = run.builder;
        // Only part of the default build when the extended distribution is on.
        run.path("analysis").default_condition(builder.config.extended)
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Analysis {
            // Find the actual compiler (handling the full bootstrap option) which
            // produced the save-analysis data because that data isn't copied
            // through the sysroot uplifting.
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }
    /// Creates a tarball of save-analysis metadata, if available.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let compiler = self.compiler;
        let target = self.target;
        assert!(builder.config.extended);
        let name = pkgname(builder, "rust-analysis");
        // Analysis data is only produced on the build host; otherwise just
        // report where the tarball would be.
        if compiler.host != builder.config.build {
            return distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple));
        }
        builder.ensure(compile::Std { compiler, target });
        let image = tmpdir(builder).join(format!("{}-{}-image", name, target.triple));
        let src = builder
            .stage_out(compiler, Mode::Std)
            .join(target.triple)
            .join(builder.cargo_dir())
            .join("deps");
        let image_src = src.join("save-analysis");
        let dst = image.join("lib/rustlib").join(target.triple).join("analysis");
        t!(fs::create_dir_all(&dst));
        builder.info(&format!("image_src: {:?}, dst: {:?}", image_src, dst));
        builder.cp_r(&image_src, &dst);
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=save-analysis-saved.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg(format!("--component-name=rust-analysis-{}", target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo");
        builder.info("Dist analysis");
        let _time = timeit(builder);
        builder.run(&mut cmd);
        builder.remove_dir(&image);
        distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
    }
}
/// Copies each directory in `src_dirs` (relative to the source root) into
/// `dst_dir`, filtering out editor backups, VCS metadata, explicitly excluded
/// paths, and the parts of the LLVM monorepo the source package doesn't need.
fn copy_src_dirs(builder: &Builder<'_>, src_dirs: &[&str], exclude_dirs: &[&str], dst_dir: &Path) {
    // Returns `true` when `path` (found while walking `dir`) should be copied.
    fn filter_fn(exclude_dirs: &[&str], dir: &str, path: &Path) -> bool {
        let spath = match path.to_str() {
            Some(path) => path,
            None => return false,
        };
        // Editor backup files and compiled Python bytecode.
        if spath.ends_with('~') || spath.ends_with(".pyc") {
            return false;
        }
        // Only these llvm-project subprojects are shipped; both separators are
        // listed because this can run on Windows.
        const LLVM_PROJECTS: &[&str] = &[
            "llvm-project/clang",
            "llvm-project\\clang",
            "llvm-project/libunwind",
            "llvm-project\\libunwind",
            "llvm-project/lld",
            "llvm-project\\lld",
            "llvm-project/lldb",
            "llvm-project\\lldb",
            "llvm-project/llvm",
            "llvm-project\\llvm",
            "llvm-project/compiler-rt",
            "llvm-project\\compiler-rt",
        ];
        if spath.contains("llvm-project")
            && !spath.ends_with("llvm-project")
            && !LLVM_PROJECTS.iter().any(|path| spath.contains(path))
        {
            return false;
        }
        // LLVM's own test inputs are bulky and unnecessary for building.
        const LLVM_TEST: &[&str] = &["llvm-project/llvm/test", "llvm-project\\llvm\\test"];
        if LLVM_TEST.iter().any(|path| spath.contains(path))
            && (spath.ends_with(".ll") || spath.ends_with(".td") || spath.ends_with(".s"))
        {
            return false;
        }
        let full_path = Path::new(dir).join(path);
        if exclude_dirs.iter().any(|excl| full_path == Path::new(excl)) {
            return false;
        }
        // Version-control metadata from assorted VCS systems.
        let excludes = [
            "CVS",
            "RCS",
            "SCCS",
            ".git",
            ".gitignore",
            ".gitmodules",
            ".gitattributes",
            ".cvsignore",
            ".svn",
            ".arch-ids",
            "{arch}",
            "=RELEASE-ID",
            "=meta-update",
            "=update",
            ".bzr",
            ".bzrignore",
            ".bzrtags",
            ".hg",
            ".hgignore",
            // Fixed typo: was ".hgrags"; Mercurial's tags file is `.hgtags`,
            // so the misspelled entry never excluded anything.
            ".hgtags",
            "_darcs",
        ];
        !path.iter().map(|s| s.to_str().unwrap()).any(|s| excludes.contains(&s))
    }
    // Copy the directories using our filter
    for item in src_dirs {
        let dst = &dst_dir.join(item);
        t!(fs::create_dir_all(dst));
        builder
            .cp_filtered(&builder.src.join(item), dst, &|path| filter_fn(exclude_dirs, item, path));
    }
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Src;
impl Step for Src {
    /// The output path of the src installer tarball
    type Output = PathBuf;
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Src);
    }
    /// Creates the `rust-src` installer component
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let name = pkgname(builder, "rust-src");
        let image = tmpdir(builder).join(format!("{}-image", name));
        let _ = fs::remove_dir_all(&image);
        // Sources are installed under lib/rustlib/src/rust in the sysroot.
        let dst = image.join("lib/rustlib/src");
        let dst_src = dst.join("rust");
        t!(fs::create_dir_all(&dst_src));
        let src_files = ["Cargo.lock"];
        // This is the reduced set of paths which will become the rust-src component
        // (essentially libstd and all of its path dependencies)
        let std_src_dirs = [
            "src/build_helper",
            "src/backtrace/src",
            "src/liballoc",
            "src/libcore",
            "src/libpanic_abort",
            "src/libpanic_unwind",
            "src/libstd",
            "src/libunwind",
            "src/libtest",
            "src/libterm",
            "src/libprofiler_builtins",
            "src/stdarch",
            "src/libproc_macro",
            "src/tools/rustc-std-workspace-core",
            "src/tools/rustc-std-workspace-alloc",
            "src/tools/rustc-std-workspace-std",
        ];
        copy_src_dirs(builder, &std_src_dirs[..], &[], &dst_src);
        for file in src_files.iter() {
            builder.copy(&builder.src.join(file), &dst_src.join(file));
        }
        // Create source tarball in rust-installer format
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Awesome-Source.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}", name))
            .arg("--component-name=rust-src")
            .arg("--legacy-manifest-dirs=rustlib,cargo");
        builder.info("Dist src");
        let _time = timeit(builder);
        builder.run(&mut cmd);
        builder.remove_dir(&image);
        distdir(builder).join(&format!("{}.tar.gz", name))
    }
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct PlainSourceTarball;
impl Step for PlainSourceTarball {
    /// Produces the location of the tarball generated
    type Output = PathBuf;
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        let builder = run.builder;
        // Only part of the default build when rust.dist-src is enabled.
        run.path("src").default_condition(builder.config.rust_dist_src)
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(PlainSourceTarball);
    }
    /// Creates the plain source tarball
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        // Make sure that the root folder of tarball has the correct name
        let plain_name = format!("{}-src", pkgname(builder, "rustc"));
        let plain_dst_src = tmpdir(builder).join(&plain_name);
        let _ = fs::remove_dir_all(&plain_dst_src);
        t!(fs::create_dir_all(&plain_dst_src));
        // This is the set of root paths which will become part of the source package
        let src_files = [
            "COPYRIGHT",
            "LICENSE-APACHE",
            "LICENSE-MIT",
            "CONTRIBUTING.md",
            "README.md",
            "RELEASES.md",
            "configure",
            "x.py",
            "config.toml.example",
            "Cargo.toml",
            "Cargo.lock",
        ];
        let src_dirs = ["src"];
        copy_src_dirs(builder, &src_dirs[..], &[], &plain_dst_src);
        // Copy the files normally
        for item in &src_files {
            builder.copy(&builder.src.join(item), &plain_dst_src.join(item));
        }
        // Create the version file
        builder.create(&plain_dst_src.join("version"), &builder.rust_version());
        if let Some(sha) = builder.rust_sha() {
            builder.create(&plain_dst_src.join("git-commit-hash"), &sha);
        }
        // If we're building from git sources, we need to vendor a complete distribution.
        if builder.rust_info.is_git() {
            // Vendor all Cargo dependencies
            let mut cmd = Command::new(&builder.initial_cargo);
            cmd.arg("vendor")
                .arg("--sync")
                .arg(builder.src.join("./src/tools/rust-analyzer/Cargo.toml"))
                .current_dir(&plain_dst_src);
            builder.run(&mut cmd);
        }
        // Create plain source tarball
        let plain_name = format!("rustc-{}-src", builder.rust_package_vers());
        let mut tarball = distdir(builder).join(&format!("{}.tar.gz", plain_name));
        tarball.set_extension(""); // strip .gz
        tarball.set_extension(""); // strip .tar
        if let Some(dir) = tarball.parent() {
            builder.create_dir(&dir);
        }
        builder.info("running installer");
        let mut cmd = rust_installer(builder);
        cmd.arg("tarball")
            .arg("--input")
            .arg(&plain_name)
            .arg("--output")
            .arg(&tarball)
            .arg("--work-dir=.")
            .current_dir(tmpdir(builder));
        builder.info("Create plain source tarball");
        let _time = timeit(builder);
        builder.run(&mut cmd);
        distdir(builder).join(&format!("{}.tar.gz", plain_name))
    }
}
// We have to run a few shell scripts, which choke quite a bit on both `\`
// characters and on `C:\` paths, so normalize both of them away.
pub fn sanitize_sh(path: &Path) -> String {
    // Rewrites a leading `X:/` drive prefix as `/X/`; `None` when the path
    // carries no drive prefix.
    fn change_drive(s: &str) -> Option<String> {
        let mut chars = s.chars();
        let drive = chars.next().unwrap_or('C');
        match (chars.next(), chars.next()) {
            (Some(':'), Some('/')) => {
                Some(format!("/{}/{}", drive, &s[drive.len_utf8() + 2..]))
            }
            _ => None,
        }
    }

    let forward_slashed = path.to_str().unwrap().replace("\\", "/");
    match change_drive(&forward_slashed) {
        Some(rewritten) => rewritten,
        None => forward_slashed,
    }
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Cargo {
    // Compiler used to build the cargo binary.
    pub compiler: Compiler,
    // Target triple the package is produced for.
    pub target: TargetSelection,
}
impl Step for Cargo {
    // Path of the generated cargo tarball.
    type Output = PathBuf;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("cargo")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Cargo {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }
    /// Creates the `cargo` installer component.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let compiler = self.compiler;
        let target = self.target;
        let src = builder.src.join("src/tools/cargo");
        let etc = src.join("src/etc");
        let release_num = builder.release_num("cargo");
        let name = pkgname(builder, "cargo");
        let version = builder.cargo_info.version(builder, &release_num);
        let tmp = tmpdir(builder);
        let image = tmp.join("cargo-image");
        // Start from a clean image directory (ignore "not found" errors).
        drop(fs::remove_dir_all(&image));
        builder.create_dir(&image);
        // Prepare the image directory
        builder.create_dir(&image.join("share/zsh/site-functions"));
        builder.create_dir(&image.join("etc/bash_completion.d"));
        let cargo = builder.ensure(tool::Cargo { compiler, target });
        builder.install(&cargo, &image.join("bin"), 0o755);
        // Man pages and shell completion scripts.
        for man in t!(etc.join("man").read_dir()) {
            let man = t!(man);
            builder.install(&man.path(), &image.join("share/man/man1"), 0o644);
        }
        builder.install(&etc.join("_cargo"), &image.join("share/zsh/site-functions"), 0o644);
        builder.copy(&etc.join("cargo.bashcomp.sh"), &image.join("etc/bash_completion.d/cargo"));
        let doc = image.join("share/doc/cargo");
        builder.install(&src.join("README.md"), &doc, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
        builder.install(&src.join("LICENSE-THIRD-PARTY"), &doc, 0o644);
        // Prepare the overlay
        let overlay = tmp.join("cargo-overlay");
        drop(fs::remove_dir_all(&overlay));
        builder.create_dir(&overlay);
        builder.install(&src.join("README.md"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-THIRD-PARTY"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &version);
        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Rust-is-ready-to-roll.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--component-name=cargo")
            .arg("--legacy-manifest-dirs=rustlib,cargo");
        builder.info(&format!("Dist cargo stage{} ({})", compiler.stage, target));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
    }
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Rls {
    // Compiler used to build the RLS binary.
    pub compiler: Compiler,
    // Target triple the package is produced for.
    pub target: TargetSelection,
}
impl Step for Rls {
    // `None` when the RLS fails to build and missing tools are tolerated.
    type Output = Option<PathBuf>;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("rls")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Rls {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }
    /// Creates the `rls-preview` installer component.
    fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
        let compiler = self.compiler;
        let target = self.target;
        assert!(builder.config.extended);
        let src = builder.src.join("src/tools/rls");
        let release_num = builder.release_num("rls");
        let name = pkgname(builder, "rls");
        let version = builder.rls_info.version(builder, &release_num);
        let tmp = tmpdir(builder);
        let image = tmp.join("rls-image");
        // Start from a clean image directory (ignore "not found" errors).
        drop(fs::remove_dir_all(&image));
        t!(fs::create_dir_all(&image));
        // Prepare the image directory
        // We expect RLS to build, because we've exited this step above if tool
        // state for RLS isn't testing.
        let rls = builder
            .ensure(tool::Rls { compiler, target, extra_features: Vec::new() })
            .or_else(|| {
                missing_tool("RLS", builder.build.config.missing_tools);
                None
            })?;
        builder.install(&rls, &image.join("bin"), 0o755);
        let doc = image.join("share/doc/rls");
        builder.install(&src.join("README.md"), &doc, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
        // Prepare the overlay
        let overlay = tmp.join("rls-overlay");
        drop(fs::remove_dir_all(&overlay));
        t!(fs::create_dir_all(&overlay));
        builder.install(&src.join("README.md"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &version);
        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=RLS-ready-to-serve.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=rls-preview");
        builder.info(&format!("Dist RLS stage{} ({})", compiler.stage, target.triple));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
    }
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustAnalyzer {
    // Compiler used to build the rust-analyzer binary.
    pub compiler: Compiler,
    // Target triple the package is produced for.
    pub target: TargetSelection,
}
impl Step for RustAnalyzer {
    // Path of the generated rust-analyzer tarball.
    type Output = PathBuf;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("rust-analyzer")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(RustAnalyzer {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }
    /// Creates the `rust-analyzer-preview` installer component.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let compiler = self.compiler;
        let target = self.target;
        assert!(builder.config.extended);
        let src = builder.src.join("src/tools/rust-analyzer");
        let release_num = builder.release_num("rust-analyzer/crates/rust-analyzer");
        let name = pkgname(builder, "rust-analyzer");
        let version = builder.rust_analyzer_info.version(builder, &release_num);
        let tmp = tmpdir(builder);
        let image = tmp.join("rust-analyzer-image");
        // Start from a clean image directory (ignore "not found" errors).
        drop(fs::remove_dir_all(&image));
        builder.create_dir(&image);
        // Prepare the image directory
        // We expect rust-analyzer to always build, as it doesn't depend on rustc internals
        // and doesn't have associated toolstate.
        let rust_analyzer = builder
            .ensure(tool::RustAnalyzer { compiler, target, extra_features: Vec::new() })
            .expect("rust-analyzer always builds");
        builder.install(&rust_analyzer, &image.join("bin"), 0o755);
        let doc = image.join("share/doc/rust-analyzer");
        builder.install(&src.join("README.md"), &doc, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
        // Prepare the overlay
        let overlay = tmp.join("rust-analyzer-overlay");
        drop(fs::remove_dir_all(&overlay));
        t!(fs::create_dir_all(&overlay));
        builder.install(&src.join("README.md"), &overlay, 0o644);
        // Fixed: the license files belong in the overlay here (they were being
        // installed into the image's doc dir a second time, leaving the
        // overlay without licenses). Now consistent with the Cargo/Rls steps.
        builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &version);
        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=rust-analyzer-ready-to-serve.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=rust-analyzer-preview");
        builder.info(&format!("Dist rust-analyzer stage{} ({})", compiler.stage, target));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
    }
}
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Clippy {
    // Compiler used to build the clippy binaries.
    pub compiler: Compiler,
    // Target triple the package is produced for.
    pub target: TargetSelection,
}
impl Step for Clippy {
    // Path of the generated clippy tarball.
    type Output = PathBuf;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("clippy")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Clippy {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }
    /// Creates the `clippy-preview` installer component.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let compiler = self.compiler;
        let target = self.target;
        assert!(builder.config.extended);
        let src = builder.src.join("src/tools/clippy");
        let release_num = builder.release_num("clippy");
        let name = pkgname(builder, "clippy");
        let version = builder.clippy_info.version(builder, &release_num);
        let tmp = tmpdir(builder);
        let image = tmp.join("clippy-image");
        // Start from a clean image directory (ignore "not found" errors).
        drop(fs::remove_dir_all(&image));
        builder.create_dir(&image);
        // Prepare the image directory
        // We expect clippy to build, because we've exited this step above if tool
        // state for clippy isn't testing.
        let clippy = builder
            .ensure(tool::Clippy { compiler, target, extra_features: Vec::new() })
            .expect("clippy expected to build - essential tool");
        let cargoclippy = builder
            .ensure(tool::CargoClippy { compiler, target, extra_features: Vec::new() })
            .expect("clippy expected to build - essential tool");
        builder.install(&clippy, &image.join("bin"), 0o755);
        builder.install(&cargoclippy, &image.join("bin"), 0o755);
        let doc = image.join("share/doc/clippy");
        builder.install(&src.join("README.md"), &doc, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
        // Prepare the overlay
        let overlay = tmp.join("clippy-overlay");
        drop(fs::remove_dir_all(&overlay));
        t!(fs::create_dir_all(&overlay));
        builder.install(&src.join("README.md"), &overlay, 0o644);
        // Fixed: the license files belong in the overlay here (they were being
        // installed into the image's doc dir a second time, leaving the
        // overlay without licenses). Now consistent with the Cargo/Rls steps.
        builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &version);
        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=clippy-ready-to-serve.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=clippy-preview");
        builder.info(&format!("Dist clippy stage{} ({})", compiler.stage, target));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
    }
}
/// Dist step that packages the `miri` tools into a `miri-preview`
/// installer tarball (optional: `None` when the tool fails to build and
/// missing tools are tolerated).
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Miri {
    // Compiler used to build the tool binaries.
    pub compiler: Compiler,
    // Target triple the package is produced for.
    pub target: TargetSelection,
}
impl Step for Miri {
    type Output = Option<PathBuf>;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("miri")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Miri {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }

    /// Builds `miri`/`cargo-miri` and generates the `miri-preview` tarball.
    /// Returns `None` if the tool failed to build and `missing-tools` allows
    /// skipping it (the `?` operators below propagate that).
    fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
        let compiler = self.compiler;
        let target = self.target;
        assert!(builder.config.extended);

        let src = builder.src.join("src/tools/miri");
        let release_num = builder.release_num("miri");
        let name = pkgname(builder, "miri");
        let version = builder.miri_info.version(builder, &release_num);

        let tmp = tmpdir(builder);
        let image = tmp.join("miri-image");
        drop(fs::remove_dir_all(&image));
        builder.create_dir(&image);

        // Prepare the image directory
        // We expect miri to build, because we've exited this step above if tool
        // state for miri isn't testing.
        let miri = builder
            .ensure(tool::Miri { compiler, target, extra_features: Vec::new() })
            .or_else(|| {
                missing_tool("miri", builder.build.config.missing_tools);
                None
            })?;
        let cargomiri = builder
            .ensure(tool::CargoMiri { compiler, target, extra_features: Vec::new() })
            .or_else(|| {
                missing_tool("cargo miri", builder.build.config.missing_tools);
                None
            })?;

        builder.install(&miri, &image.join("bin"), 0o755);
        builder.install(&cargomiri, &image.join("bin"), 0o755);
        let doc = image.join("share/doc/miri");
        builder.install(&src.join("README.md"), &doc, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);

        // Prepare the overlay
        let overlay = tmp.join("miri-overlay");
        drop(fs::remove_dir_all(&overlay));
        t!(fs::create_dir_all(&overlay));
        builder.install(&src.join("README.md"), &overlay, 0o644);
        // Fix: these two licenses previously went to the image's doc dir
        // (`&doc`) instead of the overlay, leaving the overlay without its
        // license files. Install into `&overlay`, matching the Rustfmt step.
        builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &version);

        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=miri-ready-to-serve.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=miri-preview");

        builder.info(&format!("Dist miri stage{} ({})", compiler.stage, target));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
    }
}
/// Dist step that packages `rustfmt`/`cargo-fmt` into a `rustfmt-preview`
/// installer tarball (optional: `None` when the tool fails to build and
/// missing tools are tolerated).
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Rustfmt {
    // Compiler used to build the tool binaries.
    pub compiler: Compiler,
    // Target triple the package is produced for.
    pub target: TargetSelection,
}
impl Step for Rustfmt {
    type Output = Option<PathBuf>;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("rustfmt")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Rustfmt {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }

    /// Builds `rustfmt`/`cargo-fmt`, assembles the installer image and
    /// overlay, and generates the `rustfmt-preview` tarball. Returns `None`
    /// when the tool failed to build and `missing-tools` allows skipping it.
    fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
        let compiler = self.compiler;
        let target = self.target;

        let src = builder.src.join("src/tools/rustfmt");
        let release_num = builder.release_num("rustfmt");
        let name = pkgname(builder, "rustfmt");
        let version = builder.rustfmt_info.version(builder, &release_num);

        let tmp = tmpdir(builder);
        let image = tmp.join("rustfmt-image");
        drop(fs::remove_dir_all(&image));
        builder.create_dir(&image);

        // Prepare the image directory
        // `?` propagates `None` when the tool could not be built and missing
        // tools are tolerated (see `missing_tool`).
        let rustfmt = builder
            .ensure(tool::Rustfmt { compiler, target, extra_features: Vec::new() })
            .or_else(|| {
                missing_tool("Rustfmt", builder.build.config.missing_tools);
                None
            })?;
        let cargofmt = builder
            .ensure(tool::Cargofmt { compiler, target, extra_features: Vec::new() })
            .or_else(|| {
                missing_tool("Cargofmt", builder.build.config.missing_tools);
                None
            })?;

        builder.install(&rustfmt, &image.join("bin"), 0o755);
        builder.install(&cargofmt, &image.join("bin"), 0o755);
        let doc = image.join("share/doc/rustfmt");
        builder.install(&src.join("README.md"), &doc, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);

        // Prepare the overlay (shipped alongside, not installed)
        let overlay = tmp.join("rustfmt-overlay");
        drop(fs::remove_dir_all(&overlay));
        builder.create_dir(&overlay);
        builder.install(&src.join("README.md"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &version);

        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=rustfmt-ready-to-fmt.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=rustfmt-preview");

        builder.info(&format!("Dist Rustfmt stage{} ({})", compiler.stage, target));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
    }
}
/// Dist step that builds every component installer and combines them into the
/// single "extended" installer, plus platform-native installers
/// (macOS `.pkg`, Windows `.msi`).
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Extended {
    // Stage whose artifacts are packaged.
    stage: u32,
    // Build host performing the packaging (from `config.build`).
    host: TargetSelection,
    // Target the installers are produced for.
    target: TargetSelection,
}
impl Step for Extended {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        let builder = run.builder;
        // Only runs by default when `extended = true` in config.toml.
        run.path("extended").default_condition(builder.config.extended)
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Extended {
            stage: run.builder.top_stage,
            host: run.builder.config.build,
            target: run.target,
        });
    }

    /// Creates a combined installer for the specified target in the provided stage.
    fn run(self, builder: &Builder<'_>) {
        let target = self.target;
        let stage = self.stage;
        let compiler = builder.compiler_for(self.stage, self.host, self.target);

        builder.info(&format!("Dist extended stage{} ({})", compiler.stage, target));

        // Build every component installer that may be bundled. Optional tools
        // (rls, miri, rustfmt, llvm-tools, mingw) yield `Option`s.
        let rustc_installer = builder.ensure(Rustc { compiler: builder.compiler(stage, target) });
        let cargo_installer = builder.ensure(Cargo { compiler, target });
        let rustfmt_installer = builder.ensure(Rustfmt { compiler, target });
        let rls_installer = builder.ensure(Rls { compiler, target });
        let rust_analyzer_installer = builder.ensure(RustAnalyzer { compiler, target });
        let llvm_tools_installer = builder.ensure(LlvmTools { target });
        let clippy_installer = builder.ensure(Clippy { compiler, target });
        let miri_installer = builder.ensure(Miri { compiler, target });
        let mingw_installer = builder.ensure(Mingw { host: target });
        let analysis_installer = builder.ensure(Analysis { compiler, target });
        let docs_installer = builder.ensure(Docs { host: target });
        let std_installer =
            builder.ensure(Std { compiler: builder.compiler(stage, target), target });

        let tmp = tmpdir(builder);
        let overlay = tmp.join("extended-overlay");
        let etc = builder.src.join("src/etc/installer");
        let work = tmp.join("work");

        // Assemble the non-installed overlay (licenses, version, README).
        let _ = fs::remove_dir_all(&overlay);
        builder.install(&builder.src.join("COPYRIGHT"), &overlay, 0o644);
        builder.install(&builder.src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.install(&builder.src.join("LICENSE-MIT"), &overlay, 0o644);
        let version = builder.rust_version();
        builder.create(&overlay.join("version"), &version);
        if let Some(sha) = builder.rust_sha() {
            builder.create(&overlay.join("git-commit-hash"), &sha);
        }
        builder.install(&etc.join("README.md"), &overlay, 0o644);

        // When rust-std package split from rustc, we needed to ensure that during
        // upgrades rustc was upgraded before rust-std. To avoid rustc clobbering
        // the std files during uninstall. To do this ensure that rustc comes
        // before rust-std in the list below.
        let mut tarballs = Vec::new();
        tarballs.push(rustc_installer);
        tarballs.push(cargo_installer);
        tarballs.extend(rls_installer.clone());
        tarballs.push(rust_analyzer_installer.clone());
        tarballs.push(clippy_installer);
        tarballs.extend(miri_installer.clone());
        tarballs.extend(rustfmt_installer.clone());
        tarballs.extend(llvm_tools_installer);
        tarballs.push(analysis_installer);
        tarballs.push(std_installer);
        if builder.config.docs {
            tarballs.push(docs_installer);
        }
        if target.contains("pc-windows-gnu") {
            tarballs.push(mingw_installer.unwrap());
        }

        // rust-installer takes the tarball list as one comma-separated argument.
        let mut input_tarballs = tarballs[0].as_os_str().to_owned();
        for tarball in &tarballs[1..] {
            input_tarballs.push(",");
            input_tarballs.push(tarball);
        }

        builder.info("building combined installer");
        let mut cmd = rust_installer(builder);
        cmd.arg("combine")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Rust-is-ready-to-roll.")
            .arg("--work-dir")
            .arg(&work)
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}-{}", pkgname(builder, "rust"), target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--input-tarballs")
            .arg(input_tarballs)
            .arg("--non-installed-overlay")
            .arg(&overlay);
        let time = timeit(&builder);
        builder.run(&mut cmd);
        drop(time);

        // Concatenated license text used by the native installers below.
        let mut license = String::new();
        license += &builder.read(&builder.src.join("COPYRIGHT"));
        license += &builder.read(&builder.src.join("LICENSE-APACHE"));
        license += &builder.read(&builder.src.join("LICENSE-MIT"));
        license.push_str("\n");
        license.push_str("\n");

        // RTF rendering of the license for the Windows MSI UI.
        let rtf = r"{\rtf1\ansi\deff0{\fonttbl{\f0\fnil\fcharset0 Arial;}}\nowwrap\fs18";
        let mut rtf = rtf.to_string();
        rtf.push_str("\n");
        for line in license.lines() {
            rtf.push_str(line);
            rtf.push_str("\\line ");
        }
        rtf.push_str("}");

        // Strips the section of `contents` delimited by `tool-<marker>-start` /
        // `tool-<marker>-end` lines; used to drop missing optional tools from
        // installer templates.
        fn filter(contents: &str, marker: &str) -> String {
            let start = format!("tool-{}-start", marker);
            let end = format!("tool-{}-end", marker);
            let mut lines = Vec::new();
            let mut omitted = false;
            for line in contents.lines() {
                if line.contains(&start) {
                    omitted = true;
                } else if line.contains(&end) {
                    omitted = false;
                } else if !omitted {
                    lines.push(line);
                }
            }
            lines.join("\n")
        }

        // Copies an installer template into `tmp`, filtering out sections for
        // tools that were not built, and returns the rewritten file's path.
        let xform = |p: &Path| {
            let mut contents = t!(fs::read_to_string(p));
            if rls_installer.is_none() {
                contents = filter(&contents, "rls");
            }
            contents = filter(&contents, "rust-analyzer");
            if miri_installer.is_none() {
                contents = filter(&contents, "miri");
            }
            if rustfmt_installer.is_none() {
                contents = filter(&contents, "rustfmt");
            }
            let ret = tmp.join(p.file_name().unwrap());
            t!(fs::write(&ret, &contents));
            ret
        };

        // macOS: build a native .pkg installer with pkgbuild/productbuild.
        if target.contains("apple-darwin") {
            builder.info("building pkg installer");
            let pkg = tmp.join("pkg");
            let _ = fs::remove_dir_all(&pkg);

            // Builds one component .pkg from its staged directory.
            let pkgbuild = |component: &str| {
                let mut cmd = Command::new("pkgbuild");
                cmd.arg("--identifier")
                    .arg(format!("org.rust-lang.{}", component))
                    .arg("--scripts")
                    .arg(pkg.join(component))
                    .arg("--nopayload")
                    .arg(pkg.join(component).with_extension("pkg"));
                builder.run(&mut cmd);
            };

            // Stages a component's files plus the postinstall script, then
            // builds its .pkg.
            let prepare = |name: &str| {
                builder.create_dir(&pkg.join(name));
                builder.cp_r(
                    &work.join(&format!("{}-{}", pkgname(builder, name), target.triple)),
                    &pkg.join(name),
                );
                builder.install(&etc.join("pkg/postinstall"), &pkg.join(name), 0o755);
                pkgbuild(name);
            };
            prepare("rustc");
            prepare("cargo");
            prepare("rust-docs");
            prepare("rust-std");
            prepare("rust-analysis");
            prepare("clippy");
            if rls_installer.is_some() {
                prepare("rls");
            }
            prepare("rust-analyzer");
            if miri_installer.is_some() {
                prepare("miri");
            }

            // create an 'uninstall' package
            builder.install(&etc.join("pkg/postinstall"), &pkg.join("uninstall"), 0o755);
            pkgbuild("uninstall");

            builder.create_dir(&pkg.join("res"));
            builder.create(&pkg.join("res/LICENSE.txt"), &license);
            builder.install(&etc.join("gfx/rust-logo.png"), &pkg.join("res"), 0o644);
            let mut cmd = Command::new("productbuild");
            cmd.arg("--distribution")
                .arg(xform(&etc.join("pkg/Distribution.xml")))
                .arg("--resources")
                .arg(pkg.join("res"))
                .arg(distdir(builder).join(format!(
                    "{}-{}.pkg",
                    pkgname(builder, "rust"),
                    target.triple
                )))
                .arg("--package-path")
                .arg(&pkg);
            let _time = timeit(builder);
            builder.run(&mut cmd);
        }

        // Windows: build a native .msi installer with the WiX toolset.
        if target.contains("windows") {
            let exe = tmp.join("exe");
            let _ = fs::remove_dir_all(&exe);

            // Stages one component's payload under `exe/<name>`, mapping the
            // component name to its on-disk directory inside the work dir.
            let prepare = |name: &str| {
                builder.create_dir(&exe.join(name));
                let dir = if name == "rust-std" || name == "rust-analysis" {
                    format!("{}-{}", name, target.triple)
                } else if name == "rls" {
                    "rls-preview".to_string()
                } else if name == "rust-analyzer" {
                    "rust-analyzer-preview".to_string()
                } else if name == "clippy" {
                    "clippy-preview".to_string()
                } else if name == "miri" {
                    "miri-preview".to_string()
                } else {
                    name.to_string()
                };
                builder.cp_r(
                    &work.join(&format!("{}-{}", pkgname(builder, name), target.triple)).join(dir),
                    &exe.join(name),
                );
                builder.remove(&exe.join(name).join("manifest.in"));
            };
            prepare("rustc");
            prepare("cargo");
            prepare("rust-analysis");
            prepare("rust-docs");
            prepare("rust-std");
            prepare("clippy");
            if rls_installer.is_some() {
                prepare("rls");
            }
            prepare("rust-analyzer");
            if miri_installer.is_some() {
                prepare("miri");
            }
            if target.contains("windows-gnu") {
                prepare("rust-mingw");
            }

            builder.install(&etc.join("gfx/rust-logo.ico"), &exe, 0o644);

            // Generate msi installer
            let wix = PathBuf::from(env::var_os("WIX").unwrap());
            let heat = wix.join("bin/heat.exe");
            let candle = wix.join("bin/candle.exe");
            let light = wix.join("bin/light.exe");

            // `heat` harvests each staged directory into a .wxs fragment.
            let heat_flags = ["-nologo", "-gg", "-sfrag", "-srd", "-sreg"];
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("rustc")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("RustcGroup")
                    .arg("-dr")
                    .arg("Rustc")
                    .arg("-var")
                    .arg("var.RustcDir")
                    .arg("-out")
                    .arg(exe.join("RustcGroup.wxs")),
            );
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("rust-docs")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("DocsGroup")
                    .arg("-dr")
                    .arg("Docs")
                    .arg("-var")
                    .arg("var.DocsDir")
                    .arg("-out")
                    .arg(exe.join("DocsGroup.wxs"))
                    .arg("-t")
                    .arg(etc.join("msi/squash-components.xsl")),
            );
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("cargo")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("CargoGroup")
                    .arg("-dr")
                    .arg("Cargo")
                    .arg("-var")
                    .arg("var.CargoDir")
                    .arg("-out")
                    .arg(exe.join("CargoGroup.wxs"))
                    .arg("-t")
                    .arg(etc.join("msi/remove-duplicates.xsl")),
            );
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("rust-std")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("StdGroup")
                    .arg("-dr")
                    .arg("Std")
                    .arg("-var")
                    .arg("var.StdDir")
                    .arg("-out")
                    .arg(exe.join("StdGroup.wxs")),
            );
            if rls_installer.is_some() {
                builder.run(
                    Command::new(&heat)
                        .current_dir(&exe)
                        .arg("dir")
                        .arg("rls")
                        .args(&heat_flags)
                        .arg("-cg")
                        .arg("RlsGroup")
                        .arg("-dr")
                        .arg("Rls")
                        .arg("-var")
                        .arg("var.RlsDir")
                        .arg("-out")
                        .arg(exe.join("RlsGroup.wxs"))
                        .arg("-t")
                        .arg(etc.join("msi/remove-duplicates.xsl")),
                );
            }
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("rust-analyzer")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("RustAnalyzerGroup")
                    .arg("-dr")
                    .arg("RustAnalyzer")
                    .arg("-var")
                    .arg("var.RustAnalyzerDir")
                    .arg("-out")
                    .arg(exe.join("RustAnalyzerGroup.wxs"))
                    .arg("-t")
                    .arg(etc.join("msi/remove-duplicates.xsl")),
            );
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("clippy")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("ClippyGroup")
                    .arg("-dr")
                    .arg("Clippy")
                    .arg("-var")
                    .arg("var.ClippyDir")
                    .arg("-out")
                    .arg(exe.join("ClippyGroup.wxs"))
                    .arg("-t")
                    .arg(etc.join("msi/remove-duplicates.xsl")),
            );
            if miri_installer.is_some() {
                builder.run(
                    Command::new(&heat)
                        .current_dir(&exe)
                        .arg("dir")
                        .arg("miri")
                        .args(&heat_flags)
                        .arg("-cg")
                        .arg("MiriGroup")
                        .arg("-dr")
                        .arg("Miri")
                        .arg("-var")
                        .arg("var.MiriDir")
                        .arg("-out")
                        .arg(exe.join("MiriGroup.wxs"))
                        .arg("-t")
                        .arg(etc.join("msi/remove-duplicates.xsl")),
                );
            }
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("rust-analysis")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("AnalysisGroup")
                    .arg("-dr")
                    .arg("Analysis")
                    .arg("-var")
                    .arg("var.AnalysisDir")
                    .arg("-out")
                    .arg(exe.join("AnalysisGroup.wxs"))
                    .arg("-t")
                    .arg(etc.join("msi/remove-duplicates.xsl")),
            );
            if target.contains("windows-gnu") {
                builder.run(
                    Command::new(&heat)
                        .current_dir(&exe)
                        .arg("dir")
                        .arg("rust-mingw")
                        .args(&heat_flags)
                        .arg("-cg")
                        .arg("GccGroup")
                        .arg("-dr")
                        .arg("Gcc")
                        .arg("-var")
                        .arg("var.GccDir")
                        .arg("-out")
                        .arg(exe.join("GccGroup.wxs")),
                );
            }

            // Compile each .wxs with candle. NOTE: this closure shadows the
            // `candle.exe` path binding above; `Command::new(&candle)` inside
            // still refers to that path (the new name is not in scope in its
            // own initializer).
            let candle = |input: &Path| {
                let output = exe.join(input.file_stem().unwrap()).with_extension("wixobj");
                let arch = if target.contains("x86_64") { "x64" } else { "x86" };
                let mut cmd = Command::new(&candle);
                cmd.current_dir(&exe)
                    .arg("-nologo")
                    .arg("-dRustcDir=rustc")
                    .arg("-dDocsDir=rust-docs")
                    .arg("-dCargoDir=cargo")
                    .arg("-dStdDir=rust-std")
                    .arg("-dAnalysisDir=rust-analysis")
                    .arg("-dClippyDir=clippy")
                    .arg("-arch")
                    .arg(&arch)
                    .arg("-out")
                    .arg(&output)
                    .arg(&input);
                add_env(builder, &mut cmd, target);

                if rls_installer.is_some() {
                    cmd.arg("-dRlsDir=rls");
                }
                cmd.arg("-dRustAnalyzerDir=rust-analyzer");
                if miri_installer.is_some() {
                    cmd.arg("-dMiriDir=miri");
                }
                if target.contains("windows-gnu") {
                    cmd.arg("-dGccDir=rust-mingw");
                }
                builder.run(&mut cmd);
            };
            candle(&xform(&etc.join("msi/rust.wxs")));
            candle(&etc.join("msi/ui.wxs"));
            candle(&etc.join("msi/rustwelcomedlg.wxs"));
            candle("RustcGroup.wxs".as_ref());
            candle("DocsGroup.wxs".as_ref());
            candle("CargoGroup.wxs".as_ref());
            candle("StdGroup.wxs".as_ref());
            candle("ClippyGroup.wxs".as_ref());
            if rls_installer.is_some() {
                candle("RlsGroup.wxs".as_ref());
            }
            candle("RustAnalyzerGroup.wxs".as_ref());
            if miri_installer.is_some() {
                candle("MiriGroup.wxs".as_ref());
            }
            candle("AnalysisGroup.wxs".as_ref());
            if target.contains("windows-gnu") {
                candle("GccGroup.wxs".as_ref());
            }

            builder.create(&exe.join("LICENSE.rtf"), &rtf);
            builder.install(&etc.join("gfx/banner.bmp"), &exe, 0o644);
            builder.install(&etc.join("gfx/dialogbg.bmp"), &exe, 0o644);

            // Link everything into the final .msi with light.
            builder.info(&format!("building `msi` installer with {:?}", light));
            let filename = format!("{}-{}.msi", pkgname(builder, "rust"), target.triple);
            let mut cmd = Command::new(&light);
            cmd.arg("-nologo")
                .arg("-ext")
                .arg("WixUIExtension")
                .arg("-ext")
                .arg("WixUtilExtension")
                .arg("-out")
                .arg(exe.join(&filename))
                .arg("rust.wixobj")
                .arg("ui.wixobj")
                .arg("rustwelcomedlg.wixobj")
                .arg("RustcGroup.wixobj")
                .arg("DocsGroup.wixobj")
                .arg("CargoGroup.wixobj")
                .arg("StdGroup.wixobj")
                .arg("AnalysisGroup.wixobj")
                .arg("ClippyGroup.wixobj")
                .current_dir(&exe);
            if rls_installer.is_some() {
                cmd.arg("RlsGroup.wixobj");
            }
            cmd.arg("RustAnalyzerGroup.wixobj");
            if miri_installer.is_some() {
                cmd.arg("MiriGroup.wixobj");
            }
            if target.contains("windows-gnu") {
                cmd.arg("GccGroup.wixobj");
            }
            // ICE57 wrongly complains about the shortcuts
            cmd.arg("-sice:ICE57");

            let _time = timeit(builder);
            builder.run(&mut cmd);

            if !builder.config.dry_run {
                t!(fs::rename(exe.join(&filename), distdir(builder).join(&filename)));
            }
        }
    }
}
/// Populates `cmd`'s environment with the `CFG_*` variables the Windows
/// installer templates (candle) expect for the given `target`.
fn add_env(builder: &Builder<'_>, cmd: &mut Command, target: TargetSelection) {
    // MAJOR.MINOR.PATCH split out of the statically-configured release number.
    let mut release_parts = channel::CFG_RELEASE_NUM.split('.');

    cmd.env("CFG_RELEASE_INFO", builder.rust_version());
    cmd.env("CFG_RELEASE_NUM", channel::CFG_RELEASE_NUM);
    cmd.env("CFG_RELEASE", builder.rust_release());
    cmd.env("CFG_VER_MAJOR", release_parts.next().unwrap());
    cmd.env("CFG_VER_MINOR", release_parts.next().unwrap());
    cmd.env("CFG_VER_PATCH", release_parts.next().unwrap());
    cmd.env("CFG_VER_BUILD", "0"); // just needed to build
    cmd.env("CFG_PACKAGE_VERS", builder.rust_package_vers());
    cmd.env("CFG_PACKAGE_NAME", pkgname(builder, "rust"));
    cmd.env("CFG_BUILD", target.triple);
    cmd.env("CFG_CHANNEL", &builder.config.channel);

    let is_mingw = target.contains("windows-gnu");
    cmd.env("CFG_MINGW", if is_mingw { "1" } else { "0" });
    cmd.env("CFG_ABI", if is_mingw { "GNU" } else { "MSVC" });

    let platform = if target.contains("x86_64") { "x64" } else { "x86" };
    cmd.env("CFG_PLATFORM", platform);
}
/// Dist step that hashes and GPG-signs the produced artifacts by running the
/// `build-manifest` tool (invoked by `promote-release`).
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct HashSign;
impl Step for HashSign {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("hash-and-sign")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(HashSign);
    }

    /// Runs `build-manifest` over the dist directory, feeding it the GPG
    /// password on stdin. Panics if any required `dist.*` config is missing
    /// or the tool exits unsuccessfully.
    fn run(self, builder: &Builder<'_>) {
        // This gets called by `promote-release`
        // (https://github.com/rust-lang/rust-central-station/tree/master/promote-release).
        let mut cmd = builder.tool_cmd(Tool::BuildManifest);
        if builder.config.dry_run {
            return;
        }
        // All of these config values are mandatory for a real signing run.
        let sign = builder.config.dist_sign_folder.as_ref().unwrap_or_else(|| {
            panic!("\n\nfailed to specify `dist.sign-folder` in `config.toml`\n\n")
        });
        let addr = builder.config.dist_upload_addr.as_ref().unwrap_or_else(|| {
            panic!("\n\nfailed to specify `dist.upload-addr` in `config.toml`\n\n")
        });
        // Signing can be disabled via the environment; then no password file
        // is needed and an empty password is sent.
        let pass = if env::var("BUILD_MANIFEST_DISABLE_SIGNING").is_err() {
            let file = builder.config.dist_gpg_password_file.as_ref().unwrap_or_else(|| {
                panic!("\n\nfailed to specify `dist.gpg-password-file` in `config.toml`\n\n")
            });
            t!(fs::read_to_string(&file))
        } else {
            String::new()
        };

        let today = output(Command::new("date").arg("+%Y-%m-%d"));

        // NOTE(review): the positional-argument order below appears to be the
        // CLI contract of `build-manifest` — confirm against that tool before
        // reordering.
        cmd.arg(sign);
        cmd.arg(distdir(builder));
        cmd.arg(today.trim());
        cmd.arg(builder.rust_package_vers());
        cmd.arg(addr);
        cmd.arg(builder.package_vers(&builder.release_num("cargo")));
        cmd.arg(builder.package_vers(&builder.release_num("rls")));
        cmd.arg(builder.package_vers(&builder.release_num("rust-analyzer/crates/rust-analyzer")));
        cmd.arg(builder.package_vers(&builder.release_num("clippy")));
        cmd.arg(builder.package_vers(&builder.release_num("miri")));
        cmd.arg(builder.package_vers(&builder.release_num("rustfmt")));
        cmd.arg(builder.llvm_tools_package_vers());

        builder.create_dir(&distdir(builder));

        // The password goes to the child's stdin so it never shows up in the
        // process argument list.
        let mut child = t!(cmd.stdin(Stdio::piped()).spawn());
        t!(child.stdin.take().unwrap().write_all(pass.as_bytes()));
        let status = t!(child.wait());
        assert!(status.success());
    }
}
/// Maybe add libLLVM.so to the given destination lib-dir. It will only have
/// been built if LLVM tools are linked dynamically.
///
/// Note: This function does not yet support Windows, but we also don't support
/// linking LLVM tools dynamically on Windows yet.
fn maybe_install_llvm(builder: &Builder<'_>, target: TargetSelection, dst_libdir: &Path) {
    let src_libdir = builder.llvm_out(target).join("lib");

    if target.contains("apple-darwin") {
        // macOS ships a .dylib; install it directly if present.
        let llvm_dylib_path = src_libdir.join("libLLVM.dylib");
        if llvm_dylib_path.exists() {
            builder.install(&llvm_dylib_path, dst_libdir, 0o644);
        }
    } else {
        // Usually libLLVM.so is a symlink to something like libLLVM-6.0.so.
        // Since tools link to the latter rather than the former, we have to
        // follow the symlink to find out what to distribute.
        let llvm_dylib_path = src_libdir.join("libLLVM.so");
        if llvm_dylib_path.exists() {
            let resolved = llvm_dylib_path.canonicalize().unwrap_or_else(|e| {
                panic!(
                    "dist: Error calling canonicalize path `{}`: {}",
                    llvm_dylib_path.display(),
                    e
                );
            });
            builder.install(&resolved, dst_libdir, 0o644);
        }
    }
}
/// Maybe add libLLVM.so to the target lib-dir for linking.
pub fn maybe_install_llvm_target(builder: &Builder<'_>, target: TargetSelection, sysroot: &Path) {
    // Target libdir lives at `<sysroot>/lib/rustlib/<triple>/lib`.
    maybe_install_llvm(
        builder,
        target,
        &sysroot.join("lib/rustlib").join(&*target.triple).join("lib"),
    );
}
/// Maybe add libLLVM.so to the runtime lib-dir for rustc itself.
pub fn maybe_install_llvm_runtime(builder: &Builder<'_>, target: TargetSelection, sysroot: &Path) {
    // rustc's runtime libdir is the stage-1 compiler's libdir, resolved
    // relative to the given sysroot.
    let runtime_libdir =
        sysroot.join(builder.sysroot_libdir_relative(Compiler { stage: 1, host: target }));
    maybe_install_llvm(builder, target, &runtime_libdir);
}
/// Dist step that packages the in-tree LLVM tool binaries into an
/// `llvm-tools-preview` installer tarball.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct LlvmTools {
    // Target triple the tools are packaged for.
    pub target: TargetSelection,
}
impl Step for LlvmTools {
    type Output = Option<PathBuf>;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("llvm-tools")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(LlvmTools { target: run.target });
    }

    /// Packages the LLVM tools and returns the tarball path; returns `None`
    /// when an external `llvm-config` is configured for this target (nothing
    /// was built in-tree to distribute).
    fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
        let target = self.target;
        assert!(builder.config.extended);

        // Run only if llvm-config isn't used — with an external LLVM there
        // are no locally built tool binaries to package.
        if let Some(config) = builder.config.target_config.get(&target) {
            if let Some(ref _s) = config.llvm_config {
                builder.info(&format!("Skipping LlvmTools ({}): external LLVM", target));
                return None;
            }
        }

        builder.info(&format!("Dist LlvmTools ({})", target));
        let _time = timeit(builder);
        let src = builder.src.join("src/llvm-project/llvm");
        let name = pkgname(builder, "llvm-tools");

        let tmp = tmpdir(builder);
        let image = tmp.join("llvm-tools-image");
        drop(fs::remove_dir_all(&image));

        // Prepare the image directory: copy each tool binary into the
        // target-specific bin dir inside the image.
        let src_bindir = builder.llvm_out(target).join("bin");
        let dst_bindir = image.join("lib/rustlib").join(&*target.triple).join("bin");
        t!(fs::create_dir_all(&dst_bindir));
        for tool in LLVM_TOOLS {
            let exe = src_bindir.join(exe(tool, target));
            builder.install(&exe, &dst_bindir, 0o755);
        }

        // Copy libLLVM.so to the target lib dir as well, so the RPATH like
        // `$ORIGIN/../lib` can find it. It may also be used as a dependency
        // of `rustc-dev` to support the inherited `-lLLVM` when using the
        // compiler libraries.
        maybe_install_llvm_target(builder, target, &image);

        // Prepare the overlay (shipped alongside, not installed)
        let overlay = tmp.join("llvm-tools-overlay");
        drop(fs::remove_dir_all(&overlay));
        builder.create_dir(&overlay);
        builder.install(&src.join("README.txt"), &overlay, 0o644);
        builder.install(&src.join("LICENSE.TXT"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &builder.llvm_tools_vers());

        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=llvm-tools-installed.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=llvm-tools-preview");

        builder.run(&mut cmd);
        Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
    }
}
Revert "include backtrace folder in rust-src component"
This reverts commit d7a36d8964c927863faef5d3b42da08f37e5896c.
//! Implementation of the various distribution aspects of the compiler.
//!
//! This module is responsible for creating tarballs of the standard library,
//! compiler, and documentation. This ends up being what we distribute to
//! everyone as well.
//!
//! No tarball is actually created literally in this file, but rather we shell
//! out to `rust-installer` still. This may one day be replaced with bits and
//! pieces of `rustup.rs`!
use std::env;
use std::fs;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use build_helper::{output, t};
use crate::builder::{Builder, RunConfig, ShouldRun, Step};
use crate::cache::{Interned, INTERNER};
use crate::channel;
use crate::compile;
use crate::config::TargetSelection;
use crate::tool::{self, Tool};
use crate::util::{exe, is_dylib, timeit};
use crate::{Compiler, DependencyType, Mode, LLVM_TOOLS};
use time::{self, Timespec};
/// Returns the versioned package name (`<component>-<version>`) used for a
/// dist component's tarball.
pub fn pkgname(builder: &Builder<'_>, component: &str) -> String {
    // Externally-versioned tools carry their own package version; everything
    // else must be a `rust*` component using the rustc package version.
    let vers = match component {
        "cargo" => builder.cargo_package_vers(),
        "rls" => builder.rls_package_vers(),
        "rust-analyzer" => builder.rust_analyzer_package_vers(),
        "clippy" => builder.clippy_package_vers(),
        "miri" => builder.miri_package_vers(),
        "rustfmt" => builder.rustfmt_package_vers(),
        "llvm-tools" => builder.llvm_tools_package_vers(),
        _ => {
            assert!(component.starts_with("rust"));
            builder.rust_package_vers()
        }
    };
    format!("{}-{}", component, vers)
}
/// Directory where finished dist tarballs are written (`<out>/dist`).
fn distdir(builder: &Builder<'_>) -> PathBuf {
    builder.out.join("dist")
}
/// Scratch directory used while assembling installer images
/// (`<out>/tmp/dist`).
pub fn tmpdir(builder: &Builder<'_>) -> PathBuf {
    builder.out.join("tmp/dist")
}
/// Command invoking the `rust-installer` tool used to generate and combine
/// the installer tarballs.
fn rust_installer(builder: &Builder<'_>) -> Command {
    builder.tool_cmd(Tool::RustInstaller)
}
/// Reports a tool that failed to build: panics when missing tools are not
/// tolerated (`skip == false`), otherwise just logs that dist is skipped.
fn missing_tool(tool_name: &str, skip: bool) {
    if !skip {
        panic!("Unable to build {}", tool_name);
    }
    println!("Unable to build {}, skipping dist", tool_name)
}
/// Dist step producing the `rust-docs` installer component for one host.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Docs {
    // Host triple whose documentation is packaged.
    pub host: TargetSelection,
}
impl Step for Docs {
    type Output = PathBuf;
    const DEFAULT: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/doc")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Docs { host: run.target });
    }

    /// Builds the `rust-docs` installer component.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let host = self.host;
        let name = pkgname(builder, "rust-docs");

        // With docs disabled, still return the canonical tarball path so
        // callers get a stable value (the file may not exist).
        if !builder.config.docs {
            return distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple));
        }

        builder.default_doc(None);

        builder.info(&format!("Dist docs ({})", host));
        let _time = timeit(builder);

        // Stage the generated HTML docs into the installer image.
        let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
        let _ = fs::remove_dir_all(&image);

        let dst = image.join("share/doc/rust/html");
        t!(fs::create_dir_all(&dst));
        let src = builder.doc_out(host);
        builder.cp_r(&src, &dst);
        builder.install(&builder.src.join("src/doc/robots.txt"), &dst, 0o644);

        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust-Documentation")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Rust-documentation-is-installed.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}-{}", name, host.triple))
            .arg("--component-name=rust-docs")
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--bulk-dirs=share/doc/rust/html");
        builder.run(&mut cmd);
        builder.remove_dir(&image);

        distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple))
    }
}
/// Dist step producing the `rustc-docs` (compiler API docs) installer
/// component for one host.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustcDocs {
    // Host triple whose compiler documentation is packaged.
    pub host: TargetSelection,
}
impl Step for RustcDocs {
    type Output = PathBuf;
    const DEFAULT: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/librustc")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(RustcDocs { host: run.target });
    }

    /// Builds the `rustc-docs` installer component.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let host = self.host;
        let name = pkgname(builder, "rustc-docs");

        // With compiler docs disabled, still return the canonical tarball
        // path so callers get a stable value (the file may not exist).
        if !builder.config.compiler_docs {
            return distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple));
        }

        builder.default_doc(None);

        // Stage the generated compiler docs into the installer image.
        let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
        let _ = fs::remove_dir_all(&image);

        let dst = image.join("share/doc/rust/html");
        t!(fs::create_dir_all(&dst));
        let src = builder.compiler_doc_out(host);
        builder.cp_r(&src, &dst);

        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rustc-Documentation")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Rustc-documentation-is-installed.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}-{}", name, host.triple))
            .arg("--component-name=rustc-docs")
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--bulk-dirs=share/doc/rust/html");

        builder.info(&format!("Dist compiler docs ({})", host));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        builder.remove_dir(&image);

        distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple))
    }
}
/// Resolves each name in `files` against the directories in `path`,
/// returning the first existing match for each name.
///
/// Panics if any requested file cannot be found in any of the given
/// directories.
fn find_files(files: &[&str], path: &[PathBuf]) -> Vec<PathBuf> {
    files
        .iter()
        .map(|file| {
            path.iter()
                .map(|dir| dir.join(file))
                .find(|candidate| candidate.exists())
                .unwrap_or_else(|| panic!("Could not find '{}' in {:?}", file, path))
        })
        .collect()
}
/// Assembles the MinGW pieces of a Windows-gnu distribution.
///
/// Copies runtime DLLs needed by rustc into `rust_root/bin`, and copies the
/// bundled gcc/binutils tools plus MinGW/Windows import libraries into the
/// target's `self-contained` directories under `plat_root`. The tool and
/// library locations are discovered by asking the configured C compiler via
/// `-print-search-dirs` (plus the process `PATH` for tools).
fn make_win_dist(
rust_root: &Path,
plat_root: &Path,
target: TargetSelection,
builder: &Builder<'_>,
) {
//Ask gcc where it keeps its stuff
let mut cmd = Command::new(builder.cc(target));
cmd.arg("-print-search-dirs");
let gcc_out = output(&mut cmd);
let mut bin_path: Vec<_> = env::split_paths(&env::var_os("PATH").unwrap_or_default()).collect();
let mut lib_path = Vec::new();
// Parse `-print-search-dirs` output: lines look like
// `programs: =<dir>;<dir>;...` and `libraries: =<dir>;<dir>;...`.
for line in gcc_out.lines() {
let idx = line.find(':').unwrap();
let key = &line[..idx];
let trim_chars: &[_] = &[' ', '='];
let value = line[(idx + 1)..].trim_start_matches(trim_chars).split(';').map(PathBuf::from);
if key == "programs" {
bin_path.extend(value);
} else if key == "libraries" {
lib_path.extend(value);
}
}
// Pick the cross-compiler name matching the target; plain `gcc.exe`
// otherwise.
let compiler = if target == "i686-pc-windows-gnu" {
"i686-w64-mingw32-gcc.exe"
} else if target == "x86_64-pc-windows-gnu" {
"x86_64-w64-mingw32-gcc.exe"
} else {
"gcc.exe"
};
let target_tools = [compiler, "ld.exe", "dlltool.exe", "libwinpthread-1.dll"];
let mut rustc_dlls = vec!["libwinpthread-1.dll"];
// 32-bit MinGW uses DWARF-2 unwinding (dw2); 64-bit uses SEH.
if target.starts_with("i686-") {
rustc_dlls.push("libgcc_s_dw2-1.dll");
} else {
rustc_dlls.push("libgcc_s_seh-1.dll");
}
let target_libs = [
//MinGW libs
"libgcc.a",
"libgcc_eh.a",
"libgcc_s.a",
"libm.a",
"libmingw32.a",
"libmingwex.a",
"libstdc++.a",
"libiconv.a",
"libmoldname.a",
"libpthread.a",
//Windows import libs
"libadvapi32.a",
"libbcrypt.a",
"libcomctl32.a",
"libcomdlg32.a",
"libcredui.a",
"libcrypt32.a",
"libdbghelp.a",
"libgdi32.a",
"libimagehlp.a",
"libiphlpapi.a",
"libkernel32.a",
"libmsimg32.a",
"libmsvcrt.a",
"libodbc32.a",
"libole32.a",
"liboleaut32.a",
"libopengl32.a",
"libpsapi.a",
"librpcrt4.a",
"libsecur32.a",
"libsetupapi.a",
"libshell32.a",
"libsynchronization.a",
"libuser32.a",
"libuserenv.a",
"libuuid.a",
"libwinhttp.a",
"libwinmm.a",
"libwinspool.a",
"libws2_32.a",
"libwsock32.a",
];
//Find mingw artifacts we want to bundle
let target_tools = find_files(&target_tools, &bin_path);
let rustc_dlls = find_files(&rustc_dlls, &bin_path);
let target_libs = find_files(&target_libs, &lib_path);
// Copy runtime dlls next to rustc.exe
let dist_bin_dir = rust_root.join("bin/");
fs::create_dir_all(&dist_bin_dir).expect("creating dist_bin_dir failed");
for src in rustc_dlls {
builder.copy_to_folder(&src, &dist_bin_dir);
}
//Copy platform tools to platform-specific bin directory
let target_bin_dir = plat_root
.join("lib")
.join("rustlib")
.join(target.triple)
.join("bin")
.join("self-contained");
fs::create_dir_all(&target_bin_dir).expect("creating target_bin_dir failed");
for src in target_tools {
builder.copy_to_folder(&src, &target_bin_dir);
}
// Warn windows-gnu users that the bundled GCC cannot compile C files
builder.create(
&target_bin_dir.join("GCC-WARNING.txt"),
"gcc.exe contained in this folder cannot be used for compiling C files - it is only\
used as a linker. In order to be able to compile projects containing C code use\
the GCC provided by MinGW or Cygwin.",
);
//Copy platform libs to platform-specific lib directory
let target_lib_dir = plat_root
.join("lib")
.join("rustlib")
.join(target.triple)
.join("lib")
.join("self-contained");
fs::create_dir_all(&target_lib_dir).expect("creating target_lib_dir failed");
for src in target_libs {
builder.copy_to_folder(&src, &target_lib_dir);
}
}
/// Dist step producing the `rust-mingw` tarball of bundled MinGW tools and
/// libraries (Windows-gnu hosts only).
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Mingw {
/// Host triple the MinGW bundle is packaged for.
pub host: TargetSelection,
}
impl Step for Mingw {
type Output = Option<PathBuf>;
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.never()
}
/// Schedules the `Mingw` dist step for the target of this invocation.
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Mingw { host: run.target });
}
/// Builds the `rust-mingw` installer component.
///
/// This contains all the bits and pieces to run the MinGW Windows targets
/// without any extra installed software (e.g., we bundle gcc, libraries, etc).
fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
let host = self.host;
// Only meaningful for windows-gnu hosts; everything else produces nothing.
if !host.contains("pc-windows-gnu") {
return None;
}
builder.info(&format!("Dist mingw ({})", host));
let _time = timeit(builder);
let name = pkgname(builder, "rust-mingw");
let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
let _ = fs::remove_dir_all(&image);
t!(fs::create_dir_all(&image));
// The first argument is a "temporary directory" which is just
// thrown away (this contains the runtime DLLs included in the rustc package
// above) and the second argument is where to place all the MinGW components
// (which is what we want).
make_win_dist(&tmpdir(builder), &image, host, &builder);
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust-MinGW")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rust-MinGW-is-installed.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", name, host.triple))
.arg("--component-name=rust-mingw")
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.run(&mut cmd);
t!(fs::remove_dir_all(&image));
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple)))
}
}
/// Dist step producing the main `rustc` installer component.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Rustc {
/// Compiler whose sysroot is packaged into the tarball.
pub compiler: Compiler,
}
impl Step for Rustc {
type Output = PathBuf;
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src/librustc")
}
/// Schedules the `Rustc` dist step for the top-stage compiler on the
/// invoked target.
fn make_run(run: RunConfig<'_>) {
run.builder
.ensure(Rustc { compiler: run.builder.compiler(run.builder.top_stage, run.target) });
}
/// Creates the `rustc` installer component.
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let host = self.compiler.host;
let name = pkgname(builder, "rustc");
// Scratch directories: `image` is what gets installed, `overlay` is
// metadata shipped in the tarball but not installed.
let image = tmpdir(builder).join(format!("{}-{}-image", name, host.triple));
let _ = fs::remove_dir_all(&image);
let overlay = tmpdir(builder).join(format!("{}-{}-overlay", name, host.triple));
let _ = fs::remove_dir_all(&overlay);
// Prepare the rustc "image", what will actually end up getting installed
prepare_image(builder, compiler, &image);
// Prepare the overlay which is part of the tarball but won't actually be
// installed
let cp = |file: &str| {
builder.install(&builder.src.join(file), &overlay, 0o644);
};
cp("COPYRIGHT");
cp("LICENSE-APACHE");
cp("LICENSE-MIT");
cp("README.md");
// tiny morsel of metadata is used by rust-packaging
let version = builder.rust_version();
builder.create(&overlay.join("version"), &version);
if let Some(sha) = builder.rust_sha() {
builder.create(&overlay.join("git-commit-hash"), &sha);
}
// On MinGW we've got a few runtime DLL dependencies that we need to
// include. The first argument to this script is where to put these DLLs
// (the image we're creating), and the second argument is a junk directory
// to ignore all other MinGW stuff the script creates.
//
// On 32-bit MinGW we're always including a DLL which needs some extra
// licenses to distribute. On 64-bit MinGW we don't actually distribute
// anything requiring us to distribute a license, but it's likely the
// install will *also* include the rust-mingw package, which also needs
// licenses, so to be safe we just include it here in all MinGW packages.
if host.contains("pc-windows-gnu") {
make_win_dist(&image, &tmpdir(builder), host, builder);
let dst = image.join("share/doc");
t!(fs::create_dir_all(&dst));
builder.cp_r(&builder.src.join("src/etc/third-party"), &dst);
}
// Finally, wrap everything up in a nice tarball!
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rust-is-ready-to-roll.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, host.triple))
.arg("--component-name=rustc")
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.info(&format!("Dist rustc stage{} ({})", compiler.stage, host.triple));
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
builder.remove_dir(&overlay);
return distdir(builder).join(format!("{}-{}.tar.gz", name, host.triple));
// Populates `image` with everything the rustc component installs:
// binaries, runtime libraries, LLVM/LLD artifacts, man pages,
// debugger scripts, and license files.
fn prepare_image(builder: &Builder<'_>, compiler: Compiler, image: &Path) {
let host = compiler.host;
let src = builder.sysroot(compiler);
// Copy rustc/rustdoc binaries
t!(fs::create_dir_all(image.join("bin")));
builder.cp_r(&src.join("bin"), &image.join("bin"));
builder.install(&builder.rustdoc(compiler), &image.join("bin"), 0o755);
let libdir_relative = builder.libdir_relative(compiler);
// Copy runtime DLLs needed by the compiler
if libdir_relative.to_str() != Some("bin") {
let libdir = builder.rustc_libdir(compiler);
for entry in builder.read_dir(&libdir) {
let name = entry.file_name();
if let Some(s) = name.to_str() {
if is_dylib(s) {
// Don't use custom libdir here because ^lib/ will be resolved again
// with installer
builder.install(&entry.path(), &image.join("lib"), 0o644);
}
}
}
}
// Copy libLLVM.so to the lib dir as well, if needed. While not
// technically needed by rustc itself it's needed by lots of other
// components like the llvm tools and LLD. LLD is included below and
// tools/LLDB come later, so let's just throw it in the rustc
// component for now.
maybe_install_llvm_runtime(builder, host, image);
// Copy over lld if it's there
if builder.config.lld_enabled {
let exe = exe("rust-lld", compiler.host);
let src =
builder.sysroot_libdir(compiler, host).parent().unwrap().join("bin").join(&exe);
// for the rationale about this rename check `compile::copy_lld_to_sysroot`
let dst = image.join("lib/rustlib").join(&*host.triple).join("bin").join(&exe);
t!(fs::create_dir_all(&dst.parent().unwrap()));
builder.copy(&src, &dst);
}
// Man pages
t!(fs::create_dir_all(image.join("share/man/man1")));
let man_src = builder.src.join("src/doc/man");
let man_dst = image.join("share/man/man1");
// Reproducible builds: If SOURCE_DATE_EPOCH is set, use that as the time.
let time = env::var("SOURCE_DATE_EPOCH")
.map(|timestamp| {
let epoch = timestamp
.parse()
.map_err(|err| format!("could not parse SOURCE_DATE_EPOCH: {}", err))
.unwrap();
time::at(Timespec::new(epoch, 0))
})
.unwrap_or_else(|_| time::now());
let month_year = t!(time::strftime("%B %Y", &time));
// don't use our `bootstrap::util::{copy, cp_r}`, because those try
// to hardlink, and we don't want to edit the source templates
for file_entry in builder.read_dir(&man_src) {
let page_src = file_entry.path();
let page_dst = man_dst.join(file_entry.file_name());
t!(fs::copy(&page_src, &page_dst));
// template in month/year and version number
builder.replace_in_file(
&page_dst,
&[
("<INSERT DATE HERE>", &month_year),
("<INSERT VERSION HERE>", channel::CFG_RELEASE_NUM),
],
);
}
// Debugger scripts
builder
.ensure(DebuggerScripts { sysroot: INTERNER.intern_path(image.to_owned()), host });
// Misc license info
let cp = |file: &str| {
builder.install(&builder.src.join(file), &image.join("share/doc/rust"), 0o644);
};
cp("COPYRIGHT");
cp("LICENSE-APACHE");
cp("LICENSE-MIT");
cp("README.md");
}
}
}
/// Step installing debugger helper scripts (gdb/lldb/windbg) into a sysroot.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct DebuggerScripts {
/// Sysroot (or image directory) the scripts are installed into.
pub sysroot: Interned<PathBuf>,
/// Host triple; decides windbg (msvc) vs. gdb/lldb (everything else).
pub host: TargetSelection,
}
impl Step for DebuggerScripts {
type Output = ();
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src/lldb_batchmode.py")
}
/// Schedules `DebuggerScripts` for the top-stage host compiler's sysroot.
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(DebuggerScripts {
sysroot: run.builder.sysroot(run.builder.compiler(run.builder.top_stage, run.host)),
host: run.target,
});
}
/// Copies debugger scripts for `target` into the `sysroot` specified.
fn run(self, builder: &Builder<'_>) {
let host = self.host;
let sysroot = self.sysroot;
// Scripts live under lib/rustlib/etc; launcher wrappers go into bin.
let dst = sysroot.join("lib/rustlib/etc");
t!(fs::create_dir_all(&dst));
let cp_debugger_script = |file: &str| {
builder.install(&builder.src.join("src/etc/").join(file), &dst, 0o644);
};
if host.contains("windows-msvc") {
// windbg debugger scripts
builder.install(
&builder.src.join("src/etc/rust-windbg.cmd"),
&sysroot.join("bin"),
0o755,
);
cp_debugger_script("natvis/intrinsic.natvis");
cp_debugger_script("natvis/liballoc.natvis");
cp_debugger_script("natvis/libcore.natvis");
cp_debugger_script("natvis/libstd.natvis");
} else {
cp_debugger_script("rust_types.py");
// gdb debugger scripts
builder.install(&builder.src.join("src/etc/rust-gdb"), &sysroot.join("bin"), 0o755);
builder.install(&builder.src.join("src/etc/rust-gdbgui"), &sysroot.join("bin"), 0o755);
cp_debugger_script("gdb_load_rust_pretty_printers.py");
cp_debugger_script("gdb_lookup.py");
cp_debugger_script("gdb_providers.py");
// lldb debugger scripts
builder.install(&builder.src.join("src/etc/rust-lldb"), &sysroot.join("bin"), 0o755);
cp_debugger_script("lldb_lookup.py");
cp_debugger_script("lldb_providers.py");
}
}
}
/// Returns `true` (and logs a note) when `compiler` was not produced on the
/// build triple, in which case target-library dist steps should be skipped.
///
/// The only true set of target libraries comes from the build triple, so we
/// avoid redundant work by only producing archives from that host.
fn skip_host_target_lib(builder: &Builder<'_>, compiler: Compiler) -> bool {
    let skip = compiler.host != builder.config.build;
    if skip {
        builder.info("\tskipping, not a build host");
    }
    skip
}
/// Copy stamped files into an image's `target/lib` directory.
///
/// Files flagged as self-contained go into a `self-contained` subdirectory;
/// target artifacts (and, when packaging for the build triple itself, host
/// artifacts too) go straight into the target lib directory. Anything else
/// is skipped.
fn copy_target_libs(builder: &Builder<'_>, target: TargetSelection, image: &Path, stamp: &Path) {
    let dst = image.join("lib/rustlib").join(target.triple).join("lib");
    let self_contained_dst = dst.join("self-contained");
    t!(fs::create_dir_all(&dst));
    t!(fs::create_dir_all(&self_contained_dst));
    for (path, dependency_type) in builder.read_stamp_file(stamp) {
        // Decide where (if anywhere) this stamped file belongs.
        let dest_dir = if dependency_type == DependencyType::TargetSelfContained {
            Some(&self_contained_dst)
        } else if dependency_type == DependencyType::Target || builder.config.build == target {
            Some(&dst)
        } else {
            None
        };
        if let Some(dest_dir) = dest_dir {
            builder.copy(&path, &dest_dir.join(path.file_name().unwrap()));
        }
    }
}
/// Dist step producing the `rust-std` (standard library) tarball.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Std {
/// Compiler used to build the standard library.
pub compiler: Compiler,
/// Target triple the standard library is packaged for.
pub target: TargetSelection,
}
impl Step for Std {
type Output = PathBuf;
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src/libstd")
}
/// Schedules the `Std` dist step for the invoked target, using the
/// compiler selected for the build triple.
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Std {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
/// Builds the `rust-std` installer component and returns the tarball path.
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let target = self.target;
let name = pkgname(builder, "rust-std");
let archive = distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple));
// Only the build host produces the real target libraries; other hosts
// return the archive path without doing any work.
if skip_host_target_lib(builder, compiler) {
return archive;
}
builder.ensure(compile::Std { compiler, target });
let image = tmpdir(builder).join(format!("{}-{}-image", name, target.triple));
let _ = fs::remove_dir_all(&image);
// Copy the stamped std artifacts into the image before packaging.
let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target);
let stamp = compile::libstd_stamp(builder, compiler_to_use, target);
copy_target_libs(builder, target, &image, &stamp);
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=std-is-standing-at-the-ready.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", name, target.triple))
.arg(format!("--component-name=rust-std-{}", target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder
.info(&format!("Dist std stage{} ({} -> {})", compiler.stage, &compiler.host, target));
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
archive
}
}
/// Dist step producing the `rustc-dev` tarball (compiler-internal libraries
/// for tools that link against rustc).
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustcDev {
/// Compiler used to build the rustc libraries.
pub compiler: Compiler,
/// Target triple the libraries are packaged for.
pub target: TargetSelection,
}
impl Step for RustcDev {
type Output = PathBuf;
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("rustc-dev")
}
/// Schedules the `RustcDev` dist step for the invoked target, using the
/// compiler selected for the build triple.
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(RustcDev {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
/// Builds the `rustc-dev` installer component and returns the tarball path.
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let target = self.target;
let name = pkgname(builder, "rustc-dev");
let archive = distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple));
// Only the build host produces the real target libraries; other hosts
// return the archive path without doing any work.
if skip_host_target_lib(builder, compiler) {
return archive;
}
builder.ensure(compile::Rustc { compiler, target });
let image = tmpdir(builder).join(format!("{}-{}-image", name, target.triple));
let _ = fs::remove_dir_all(&image);
// Copy the stamped rustc artifacts into the image before packaging.
let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target);
let stamp = compile::librustc_stamp(builder, compiler_to_use, target);
copy_target_libs(builder, target, &image, &stamp);
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rust-is-ready-to-develop.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", name, target.triple))
.arg(format!("--component-name=rustc-dev-{}", target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.info(&format!(
"Dist rustc-dev stage{} ({} -> {})",
compiler.stage, &compiler.host, target
));
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
archive
}
}
/// Dist step producing the `rust-analysis` (save-analysis metadata) tarball.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Analysis {
/// Compiler that produced the save-analysis data.
pub compiler: Compiler,
/// Target triple the analysis data is packaged for.
pub target: TargetSelection,
}
impl Step for Analysis {
type Output = PathBuf;
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
let builder = run.builder;
run.path("analysis").default_condition(builder.config.extended)
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Analysis {
// Find the actual compiler (handling the full bootstrap option) which
// produced the save-analysis data because that data isn't copied
// through the sysroot uplifting.
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
/// Creates a tarball of save-analysis metadata, if available.
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let target = self.target;
// Only meaningful for extended builds.
assert!(builder.config.extended);
let name = pkgname(builder, "rust-analysis");
// Non-build hosts just return the would-be tarball path.
if compiler.host != builder.config.build {
return distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple));
}
builder.ensure(compile::Std { compiler, target });
let image = tmpdir(builder).join(format!("{}-{}-image", name, target.triple));
// save-analysis data is emitted next to the std build artifacts.
let src = builder
.stage_out(compiler, Mode::Std)
.join(target.triple)
.join(builder.cargo_dir())
.join("deps");
let image_src = src.join("save-analysis");
let dst = image.join("lib/rustlib").join(target.triple).join("analysis");
t!(fs::create_dir_all(&dst));
builder.info(&format!("image_src: {:?}, dst: {:?}", image_src, dst));
builder.cp_r(&image_src, &dst);
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=save-analysis-saved.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}-{}", name, target.triple))
.arg(format!("--component-name=rust-analysis-{}", target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.info("Dist analysis");
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
}
}
/// Copies each of `src_dirs` (relative to the source root) into `dst_dir`,
/// filtering out VCS metadata, editor backups, unwanted LLVM subprojects,
/// bulky LLVM test inputs, and any path listed in `exclude_dirs`.
fn copy_src_dirs(builder: &Builder<'_>, src_dirs: &[&str], exclude_dirs: &[&str], dst_dir: &Path) {
// Returns `true` when `path` (relative to the source dir `dir`) should be
// included in the copy.
fn filter_fn(exclude_dirs: &[&str], dir: &str, path: &Path) -> bool {
let spath = match path.to_str() {
Some(path) => path,
None => return false,
};
// Editor backups and Python bytecode never belong in a source tarball.
if spath.ends_with('~') || spath.ends_with(".pyc") {
return false;
}
// Inside llvm-project, keep only the subprojects we actually build.
const LLVM_PROJECTS: &[&str] = &[
"llvm-project/clang",
"llvm-project\\clang",
"llvm-project/libunwind",
"llvm-project\\libunwind",
"llvm-project/lld",
"llvm-project\\lld",
"llvm-project/lldb",
"llvm-project\\lldb",
"llvm-project/llvm",
"llvm-project\\llvm",
"llvm-project/compiler-rt",
"llvm-project\\compiler-rt",
];
if spath.contains("llvm-project")
&& !spath.ends_with("llvm-project")
&& !LLVM_PROJECTS.iter().any(|path| spath.contains(path))
{
return false;
}
// Drop bulky LLVM test inputs to keep the tarball small.
const LLVM_TEST: &[&str] = &["llvm-project/llvm/test", "llvm-project\\llvm\\test"];
if LLVM_TEST.iter().any(|path| spath.contains(path))
&& (spath.ends_with(".ll") || spath.ends_with(".td") || spath.ends_with(".s"))
{
return false;
}
let full_path = Path::new(dir).join(path);
if exclude_dirs.iter().any(|excl| full_path == Path::new(excl)) {
return false;
}
// Version-control and build-system metadata excluded from the copy.
let excludes = [
"CVS",
"RCS",
"SCCS",
".git",
".gitignore",
".gitmodules",
".gitattributes",
".cvsignore",
".svn",
".arch-ids",
"{arch}",
"=RELEASE-ID",
"=meta-update",
"=update",
".bzr",
".bzrignore",
".bzrtags",
".hg",
".hgignore",
// Fixed from ".hgrags": Mercurial's tags file is named ".hgtags",
// so the old typo let .hgtags files leak into source tarballs.
".hgtags",
"_darcs",
];
!path.iter().map(|s| s.to_str().unwrap()).any(|s| excludes.contains(&s))
}
// Copy the directories using our filter
for item in src_dirs {
let dst = &dst_dir.join(item);
t!(fs::create_dir_all(dst));
builder
.cp_filtered(&builder.src.join(item), dst, &|path| filter_fn(exclude_dirs, item, path));
}
}
/// Dist step producing the `rust-src` component (the reduced std-library
/// source tree installed under `lib/rustlib/src`).
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Src;
impl Step for Src {
/// The output path of the src installer tarball
type Output = PathBuf;
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src")
}
/// Schedules the `Src` dist step.
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Src);
}
/// Creates the `rust-src` installer component
fn run(self, builder: &Builder<'_>) -> PathBuf {
let name = pkgname(builder, "rust-src");
let image = tmpdir(builder).join(format!("{}-image", name));
let _ = fs::remove_dir_all(&image);
// Sources are installed under lib/rustlib/src/rust.
let dst = image.join("lib/rustlib/src");
let dst_src = dst.join("rust");
t!(fs::create_dir_all(&dst_src));
let src_files = ["Cargo.lock"];
// This is the reduced set of paths which will become the rust-src component
// (essentially libstd and all of its path dependencies)
let std_src_dirs = [
"src/build_helper",
"src/liballoc",
"src/libcore",
"src/libpanic_abort",
"src/libpanic_unwind",
"src/libstd",
"src/libunwind",
"src/libtest",
"src/libterm",
"src/libprofiler_builtins",
"src/stdarch",
"src/libproc_macro",
"src/tools/rustc-std-workspace-core",
"src/tools/rustc-std-workspace-alloc",
"src/tools/rustc-std-workspace-std",
];
copy_src_dirs(builder, &std_src_dirs[..], &[], &dst_src);
for file in src_files.iter() {
builder.copy(&builder.src.join(file), &dst_src.join(file));
}
// Create source tarball in rust-installer format
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Awesome-Source.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg(format!("--package-name={}", name))
.arg("--component-name=rust-src")
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.info("Dist src");
let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
distdir(builder).join(&format!("{}.tar.gz", name))
}
}
/// Dist step producing the plain (non-installer) source tarball,
/// `rustc-<version>-src.tar.gz`.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct PlainSourceTarball;
impl Step for PlainSourceTarball {
/// Produces the location of the tarball generated
type Output = PathBuf;
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
let builder = run.builder;
run.path("src").default_condition(builder.config.rust_dist_src)
}
/// Schedules the `PlainSourceTarball` dist step.
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(PlainSourceTarball);
}
/// Creates the plain source tarball
fn run(self, builder: &Builder<'_>) -> PathBuf {
// Make sure that the root folder of tarball has the correct name
let plain_name = format!("{}-src", pkgname(builder, "rustc"));
let plain_dst_src = tmpdir(builder).join(&plain_name);
let _ = fs::remove_dir_all(&plain_dst_src);
t!(fs::create_dir_all(&plain_dst_src));
// This is the set of root paths which will become part of the source package
let src_files = [
"COPYRIGHT",
"LICENSE-APACHE",
"LICENSE-MIT",
"CONTRIBUTING.md",
"README.md",
"RELEASES.md",
"configure",
"x.py",
"config.toml.example",
"Cargo.toml",
"Cargo.lock",
];
let src_dirs = ["src"];
copy_src_dirs(builder, &src_dirs[..], &[], &plain_dst_src);
// Copy the files normally
for item in &src_files {
builder.copy(&builder.src.join(item), &plain_dst_src.join(item));
}
// Create the version file
builder.create(&plain_dst_src.join("version"), &builder.rust_version());
if let Some(sha) = builder.rust_sha() {
builder.create(&plain_dst_src.join("git-commit-hash"), &sha);
}
// If we're building from git sources, we need to vendor a complete distribution.
if builder.rust_info.is_git() {
// Vendor all Cargo dependencies
let mut cmd = Command::new(&builder.initial_cargo);
cmd.arg("vendor")
.arg("--sync")
.arg(builder.src.join("./src/tools/rust-analyzer/Cargo.toml"))
.current_dir(&plain_dst_src);
builder.run(&mut cmd);
}
// Create plain source tarball
let plain_name = format!("rustc-{}-src", builder.rust_package_vers());
let mut tarball = distdir(builder).join(&format!("{}.tar.gz", plain_name));
// rust-installer's `tarball` subcommand appends the extensions itself,
// so strip them from the output path we hand it.
tarball.set_extension(""); // strip .gz
tarball.set_extension(""); // strip .tar
if let Some(dir) = tarball.parent() {
builder.create_dir(&dir);
}
builder.info("running installer");
let mut cmd = rust_installer(builder);
cmd.arg("tarball")
.arg("--input")
.arg(&plain_name)
.arg("--output")
.arg(&tarball)
.arg("--work-dir=.")
.current_dir(tmpdir(builder));
builder.info("Create plain source tarball");
let _time = timeit(builder);
builder.run(&mut cmd);
distdir(builder).join(&format!("{}.tar.gz", plain_name))
}
}
/// Normalizes a path for consumption by shell scripts.
///
/// The shell scripts we run choke on both `\` characters and `C:\`-style
/// paths, so backslashes become forward slashes and a `<drive>:/` prefix is
/// rewritten to `/<drive>/` (e.g. `C:\foo` -> `/C/foo`).
pub fn sanitize_sh(path: &Path) -> String {
    // Rewrites a leading `<drive>:/` as `/<drive>/`; `None` when the string
    // doesn't start with a drive spec.
    fn change_drive(s: &str) -> Option<String> {
        let mut chars = s.chars();
        let drive = chars.next().unwrap_or('C');
        match (chars.next(), chars.next()) {
            (Some(':'), Some('/')) => Some(format!("/{}/{}", drive, &s[drive.len_utf8() + 2..])),
            _ => None,
        }
    }
    let unix_style = path.to_str().unwrap().replace("\\", "/");
    change_drive(&unix_style).unwrap_or(unix_style)
}
/// Dist step producing the `cargo` installer component.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Cargo {
/// Compiler used to build cargo.
pub compiler: Compiler,
/// Target triple cargo is packaged for.
pub target: TargetSelection,
}
impl Step for Cargo {
type Output = PathBuf;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("cargo")
}
/// Schedules the `Cargo` dist step for the invoked target, using the
/// compiler selected for the build triple.
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Cargo {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
/// Builds the cargo installer component and returns the tarball path.
fn run(self, builder: &Builder<'_>) -> PathBuf {
let compiler = self.compiler;
let target = self.target;
let src = builder.src.join("src/tools/cargo");
let etc = src.join("src/etc");
let release_num = builder.release_num("cargo");
let name = pkgname(builder, "cargo");
let version = builder.cargo_info.version(builder, &release_num);
let tmp = tmpdir(builder);
let image = tmp.join("cargo-image");
drop(fs::remove_dir_all(&image));
builder.create_dir(&image);
// Prepare the image directory
builder.create_dir(&image.join("share/zsh/site-functions"));
builder.create_dir(&image.join("etc/bash_completion.d"));
let cargo = builder.ensure(tool::Cargo { compiler, target });
builder.install(&cargo, &image.join("bin"), 0o755);
// Man pages, shell completions, and license/readme docs.
for man in t!(etc.join("man").read_dir()) {
let man = t!(man);
builder.install(&man.path(), &image.join("share/man/man1"), 0o644);
}
builder.install(&etc.join("_cargo"), &image.join("share/zsh/site-functions"), 0o644);
builder.copy(&etc.join("cargo.bashcomp.sh"), &image.join("etc/bash_completion.d/cargo"));
let doc = image.join("share/doc/cargo");
builder.install(&src.join("README.md"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
builder.install(&src.join("LICENSE-THIRD-PARTY"), &doc, 0o644);
// Prepare the overlay
let overlay = tmp.join("cargo-overlay");
drop(fs::remove_dir_all(&overlay));
builder.create_dir(&overlay);
builder.install(&src.join("README.md"), &overlay, 0o644);
builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
builder.install(&src.join("LICENSE-THIRD-PARTY"), &overlay, 0o644);
builder.create(&overlay.join("version"), &version);
// Generate the installer tarball
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=Rust-is-ready-to-roll.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, target.triple))
.arg("--component-name=cargo")
.arg("--legacy-manifest-dirs=rustlib,cargo");
builder.info(&format!("Dist cargo stage{} ({})", compiler.stage, target));
let _time = timeit(builder);
builder.run(&mut cmd);
distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
}
}
/// Dist step producing the `rls-preview` installer component.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Rls {
/// Compiler used to build the RLS.
pub compiler: Compiler,
/// Target triple the RLS is packaged for.
pub target: TargetSelection,
}
impl Step for Rls {
type Output = Option<PathBuf>;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("rls")
}
/// Schedules the `Rls` dist step for the invoked target, using the
/// compiler selected for the build triple.
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Rls {
compiler: run.builder.compiler_for(
run.builder.top_stage,
run.builder.config.build,
run.target,
),
target: run.target,
});
}
/// Builds the `rls-preview` installer component.
///
/// Returns `None` (after reporting the missing tool) if the RLS fails
/// to build and missing tools are permitted.
fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
let compiler = self.compiler;
let target = self.target;
assert!(builder.config.extended);
let src = builder.src.join("src/tools/rls");
let release_num = builder.release_num("rls");
let name = pkgname(builder, "rls");
let version = builder.rls_info.version(builder, &release_num);
let tmp = tmpdir(builder);
let image = tmp.join("rls-image");
drop(fs::remove_dir_all(&image));
t!(fs::create_dir_all(&image));
// Prepare the image directory
// We expect RLS to build, because we've exited this step above if tool
// state for RLS isn't testing.
let rls = builder
.ensure(tool::Rls { compiler, target, extra_features: Vec::new() })
.or_else(|| {
missing_tool("RLS", builder.build.config.missing_tools);
None
})?;
builder.install(&rls, &image.join("bin"), 0o755);
let doc = image.join("share/doc/rls");
builder.install(&src.join("README.md"), &doc, 0o644);
builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
// Prepare the overlay
let overlay = tmp.join("rls-overlay");
drop(fs::remove_dir_all(&overlay));
t!(fs::create_dir_all(&overlay));
builder.install(&src.join("README.md"), &overlay, 0o644);
builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
builder.create(&overlay.join("version"), &version);
// Generate the installer tarball
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--product-name=Rust")
.arg("--rel-manifest-dir=rustlib")
.arg("--success-message=RLS-ready-to-serve.")
.arg("--image-dir")
.arg(&image)
.arg("--work-dir")
.arg(&tmpdir(builder))
.arg("--output-dir")
.arg(&distdir(builder))
.arg("--non-installed-overlay")
.arg(&overlay)
.arg(format!("--package-name={}-{}", name, target.triple))
.arg("--legacy-manifest-dirs=rustlib,cargo")
.arg("--component-name=rls-preview");
builder.info(&format!("Dist RLS stage{} ({})", compiler.stage, target.triple));
let _time = timeit(builder);
builder.run(&mut cmd);
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
}
}
/// Dist step producing the rust-analyzer installer tarball.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustAnalyzer {
    /// Compiler used to build the tool being packaged.
    pub compiler: Compiler,
    /// Target triple the tarball is produced for.
    pub target: TargetSelection,
}
impl Step for RustAnalyzer {
    type Output = PathBuf;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("rust-analyzer")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(RustAnalyzer {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }

    /// Builds rust-analyzer, lays out an installer image plus overlay, and
    /// invokes `rust-installer generate` to produce the
    /// `rust-analyzer-preview` tarball.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let compiler = self.compiler;
        let target = self.target;
        assert!(builder.config.extended);

        let src = builder.src.join("src/tools/rust-analyzer");
        let release_num = builder.release_num("rust-analyzer/crates/rust-analyzer");
        let name = pkgname(builder, "rust-analyzer");
        let version = builder.rust_analyzer_info.version(builder, &release_num);

        // Start from a clean image directory; the remove may fail if it
        // doesn't exist yet, which is fine.
        let tmp = tmpdir(builder);
        let image = tmp.join("rust-analyzer-image");
        drop(fs::remove_dir_all(&image));
        builder.create_dir(&image);

        // Prepare the image directory
        // We expect rust-analyzer to always build, as it doesn't depend on rustc internals
        // and doesn't have associated toolstate.
        let rust_analyzer = builder
            .ensure(tool::RustAnalyzer { compiler, target, extra_features: Vec::new() })
            .expect("rust-analyzer always builds");

        builder.install(&rust_analyzer, &image.join("bin"), 0o755);
        let doc = image.join("share/doc/rust-analyzer");
        builder.install(&src.join("README.md"), &doc, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);

        // Prepare the overlay (files shipped next to, not inside, the image).
        let overlay = tmp.join("rust-analyzer-overlay");
        drop(fs::remove_dir_all(&overlay));
        t!(fs::create_dir_all(&overlay));
        builder.install(&src.join("README.md"), &overlay, 0o644);
        // Fix: the licenses belong in the overlay here; they were previously
        // (re-)installed into the image's doc dir, leaving the overlay empty
        // of license files.
        builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &version);

        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=rust-analyzer-ready-to-serve.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=rust-analyzer-preview");

        builder.info(&format!("Dist rust-analyzer stage{} ({})", compiler.stage, target));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
    }
}
/// Dist step producing the clippy installer tarball.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Clippy {
    /// Compiler used to build the tool being packaged.
    pub compiler: Compiler,
    /// Target triple the tarball is produced for.
    pub target: TargetSelection,
}
impl Step for Clippy {
    type Output = PathBuf;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("clippy")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Clippy {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }

    /// Builds `clippy-driver` and `cargo-clippy`, lays out an installer image
    /// plus overlay, and invokes `rust-installer generate` to produce the
    /// `clippy-preview` tarball.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let compiler = self.compiler;
        let target = self.target;
        assert!(builder.config.extended);

        let src = builder.src.join("src/tools/clippy");
        let release_num = builder.release_num("clippy");
        let name = pkgname(builder, "clippy");
        let version = builder.clippy_info.version(builder, &release_num);

        // Start from a clean image directory; the remove may fail if it
        // doesn't exist yet, which is fine.
        let tmp = tmpdir(builder);
        let image = tmp.join("clippy-image");
        drop(fs::remove_dir_all(&image));
        builder.create_dir(&image);

        // Prepare the image directory
        // We expect clippy to build, because we've exited this step above if tool
        // state for clippy isn't testing.
        let clippy = builder
            .ensure(tool::Clippy { compiler, target, extra_features: Vec::new() })
            .expect("clippy expected to build - essential tool");
        let cargoclippy = builder
            .ensure(tool::CargoClippy { compiler, target, extra_features: Vec::new() })
            .expect("clippy expected to build - essential tool");

        builder.install(&clippy, &image.join("bin"), 0o755);
        builder.install(&cargoclippy, &image.join("bin"), 0o755);
        let doc = image.join("share/doc/clippy");
        builder.install(&src.join("README.md"), &doc, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);

        // Prepare the overlay (files shipped next to, not inside, the image).
        let overlay = tmp.join("clippy-overlay");
        drop(fs::remove_dir_all(&overlay));
        t!(fs::create_dir_all(&overlay));
        builder.install(&src.join("README.md"), &overlay, 0o644);
        // Fix: the licenses belong in the overlay here; they were previously
        // (re-)installed into the image's doc dir, leaving the overlay empty
        // of license files.
        builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &version);

        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=clippy-ready-to-serve.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=clippy-preview");

        builder.info(&format!("Dist clippy stage{} ({})", compiler.stage, target));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple))
    }
}
/// Dist step producing the miri installer tarball.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Miri {
    /// Compiler used to build the tool being packaged.
    pub compiler: Compiler,
    /// Target triple the tarball is produced for.
    pub target: TargetSelection,
}
impl Step for Miri {
    // Path of the generated tarball, or `None` when the miri tools failed to
    // build and are allowed to be missing.
    type Output = Option<PathBuf>;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("miri")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Miri {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }

    /// Builds `miri` and `cargo-miri`, lays out an installer image plus
    /// overlay, and invokes `rust-installer generate` to produce the
    /// `miri-preview` tarball.
    fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
        let compiler = self.compiler;
        let target = self.target;
        assert!(builder.config.extended);

        let src = builder.src.join("src/tools/miri");
        let release_num = builder.release_num("miri");
        let name = pkgname(builder, "miri");
        let version = builder.miri_info.version(builder, &release_num);

        // Start from a clean image directory; the remove may fail if it
        // doesn't exist yet, which is fine.
        let tmp = tmpdir(builder);
        let image = tmp.join("miri-image");
        drop(fs::remove_dir_all(&image));
        builder.create_dir(&image);

        // Prepare the image directory
        // We expect miri to build, because we've exited this step above if tool
        // state for miri isn't testing.
        let miri = builder
            .ensure(tool::Miri { compiler, target, extra_features: Vec::new() })
            .or_else(|| {
                missing_tool("miri", builder.build.config.missing_tools);
                None
            })?;
        let cargomiri = builder
            .ensure(tool::CargoMiri { compiler, target, extra_features: Vec::new() })
            .or_else(|| {
                missing_tool("cargo miri", builder.build.config.missing_tools);
                None
            })?;

        builder.install(&miri, &image.join("bin"), 0o755);
        builder.install(&cargomiri, &image.join("bin"), 0o755);
        let doc = image.join("share/doc/miri");
        builder.install(&src.join("README.md"), &doc, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);

        // Prepare the overlay (files shipped next to, not inside, the image).
        let overlay = tmp.join("miri-overlay");
        drop(fs::remove_dir_all(&overlay));
        t!(fs::create_dir_all(&overlay));
        builder.install(&src.join("README.md"), &overlay, 0o644);
        // Fix: the licenses belong in the overlay here; they were previously
        // (re-)installed into the image's doc dir, leaving the overlay empty
        // of license files.
        builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &version);

        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=miri-ready-to-serve.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=miri-preview");

        builder.info(&format!("Dist miri stage{} ({})", compiler.stage, target));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
    }
}
/// Dist step producing the rustfmt installer tarball.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Rustfmt {
    /// Compiler used to build the tool being packaged.
    pub compiler: Compiler,
    /// Target triple the tarball is produced for.
    pub target: TargetSelection,
}
impl Step for Rustfmt {
    // Path of the generated tarball, or `None` when the rustfmt tools failed
    // to build and are allowed to be missing.
    type Output = Option<PathBuf>;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("rustfmt")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Rustfmt {
            compiler: run.builder.compiler_for(
                run.builder.top_stage,
                run.builder.config.build,
                run.target,
            ),
            target: run.target,
        });
    }

    /// Builds `rustfmt` and `cargo-fmt`, lays out an installer image plus
    /// overlay, and invokes `rust-installer generate` to produce the
    /// `rustfmt-preview` tarball.
    fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
        let compiler = self.compiler;
        let target = self.target;

        let src = builder.src.join("src/tools/rustfmt");
        let release_num = builder.release_num("rustfmt");
        let name = pkgname(builder, "rustfmt");
        let version = builder.rustfmt_info.version(builder, &release_num);

        // Start from a clean image directory; the remove may fail if it
        // doesn't exist yet, which is fine.
        let tmp = tmpdir(builder);
        let image = tmp.join("rustfmt-image");
        drop(fs::remove_dir_all(&image));
        builder.create_dir(&image);

        // Prepare the image directory
        let rustfmt = builder
            .ensure(tool::Rustfmt { compiler, target, extra_features: Vec::new() })
            .or_else(|| {
                missing_tool("Rustfmt", builder.build.config.missing_tools);
                None
            })?;
        let cargofmt = builder
            .ensure(tool::Cargofmt { compiler, target, extra_features: Vec::new() })
            .or_else(|| {
                missing_tool("Cargofmt", builder.build.config.missing_tools);
                None
            })?;

        builder.install(&rustfmt, &image.join("bin"), 0o755);
        builder.install(&cargofmt, &image.join("bin"), 0o755);
        let doc = image.join("share/doc/rustfmt");
        builder.install(&src.join("README.md"), &doc, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &doc, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644);

        // Prepare the overlay (files shipped next to, not inside, the image).
        let overlay = tmp.join("rustfmt-overlay");
        drop(fs::remove_dir_all(&overlay));
        builder.create_dir(&overlay);
        builder.install(&src.join("README.md"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644);
        builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &version);

        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=rustfmt-ready-to-fmt.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=rustfmt-preview");

        builder.info(&format!("Dist Rustfmt stage{} ({})", compiler.stage, target));
        let _time = timeit(builder);
        builder.run(&mut cmd);
        Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
    }
}
/// Dist step that combines all component tarballs into a single installer
/// (and platform-native installers on macOS/Windows).
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Extended {
    // Bootstrap stage the combined installer is built for.
    stage: u32,
    // Host triple running the build.
    host: TargetSelection,
    // Target triple the installer is produced for.
    target: TargetSelection,
}
impl Step for Extended {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        let builder = run.builder;
        // Only runs by default when `extended = true` in config.toml.
        run.path("extended").default_condition(builder.config.extended)
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Extended {
            stage: run.builder.top_stage,
            host: run.builder.config.build,
            target: run.target,
        });
    }

    /// Creates a combined installer for the specified target in the provided stage.
    fn run(self, builder: &Builder<'_>) {
        let target = self.target;
        let stage = self.stage;
        let compiler = builder.compiler_for(self.stage, self.host, self.target);

        builder.info(&format!("Dist extended stage{} ({})", compiler.stage, target));

        // Build every component tarball that goes into the combined installer.
        // Tools that may legitimately fail to build return `Option`s.
        let rustc_installer = builder.ensure(Rustc { compiler: builder.compiler(stage, target) });
        let cargo_installer = builder.ensure(Cargo { compiler, target });
        let rustfmt_installer = builder.ensure(Rustfmt { compiler, target });
        let rls_installer = builder.ensure(Rls { compiler, target });
        let rust_analyzer_installer = builder.ensure(RustAnalyzer { compiler, target });
        let llvm_tools_installer = builder.ensure(LlvmTools { target });
        let clippy_installer = builder.ensure(Clippy { compiler, target });
        let miri_installer = builder.ensure(Miri { compiler, target });
        let mingw_installer = builder.ensure(Mingw { host: target });
        let analysis_installer = builder.ensure(Analysis { compiler, target });
        let docs_installer = builder.ensure(Docs { host: target });
        let std_installer =
            builder.ensure(Std { compiler: builder.compiler(stage, target), target });

        let tmp = tmpdir(builder);
        let overlay = tmp.join("extended-overlay");
        let etc = builder.src.join("src/etc/installer");
        let work = tmp.join("work");

        // Overlay with license/version metadata shipped next to the installer.
        let _ = fs::remove_dir_all(&overlay);
        builder.install(&builder.src.join("COPYRIGHT"), &overlay, 0o644);
        builder.install(&builder.src.join("LICENSE-APACHE"), &overlay, 0o644);
        builder.install(&builder.src.join("LICENSE-MIT"), &overlay, 0o644);
        let version = builder.rust_version();
        builder.create(&overlay.join("version"), &version);
        if let Some(sha) = builder.rust_sha() {
            builder.create(&overlay.join("git-commit-hash"), &sha);
        }
        builder.install(&etc.join("README.md"), &overlay, 0o644);

        // When rust-std package split from rustc, we needed to ensure that during
        // upgrades rustc was upgraded before rust-std. To avoid rustc clobbering
        // the std files during uninstall. To do this ensure that rustc comes
        // before rust-std in the list below.
        let mut tarballs = Vec::new();
        tarballs.push(rustc_installer);
        tarballs.push(cargo_installer);
        tarballs.extend(rls_installer.clone());
        tarballs.push(rust_analyzer_installer.clone());
        tarballs.push(clippy_installer);
        tarballs.extend(miri_installer.clone());
        tarballs.extend(rustfmt_installer.clone());
        tarballs.extend(llvm_tools_installer);
        tarballs.push(analysis_installer);
        tarballs.push(std_installer);
        if builder.config.docs {
            tarballs.push(docs_installer);
        }
        if target.contains("pc-windows-gnu") {
            tarballs.push(mingw_installer.unwrap());
        }

        // Comma-separated list handed to `rust-installer combine`.
        let mut input_tarballs = tarballs[0].as_os_str().to_owned();
        for tarball in &tarballs[1..] {
            input_tarballs.push(",");
            input_tarballs.push(tarball);
        }

        builder.info("building combined installer");
        let mut cmd = rust_installer(builder);
        cmd.arg("combine")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=Rust-is-ready-to-roll.")
            .arg("--work-dir")
            .arg(&work)
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg(format!("--package-name={}-{}", pkgname(builder, "rust"), target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--input-tarballs")
            .arg(input_tarballs)
            .arg("--non-installed-overlay")
            .arg(&overlay);
        let time = timeit(&builder);
        builder.run(&mut cmd);
        drop(time);

        // Concatenate the licenses; also rendered to RTF for the MSI installer.
        let mut license = String::new();
        license += &builder.read(&builder.src.join("COPYRIGHT"));
        license += &builder.read(&builder.src.join("LICENSE-APACHE"));
        license += &builder.read(&builder.src.join("LICENSE-MIT"));
        license.push_str("\n");
        license.push_str("\n");

        let rtf = r"{\rtf1\ansi\deff0{\fonttbl{\f0\fnil\fcharset0 Arial;}}\nowwrap\fs18";
        let mut rtf = rtf.to_string();
        rtf.push_str("\n");
        for line in license.lines() {
            rtf.push_str(line);
            // RTF line break after each license line.
            rtf.push_str("\\line ");
        }
        rtf.push_str("}");

        // Strips `tool-<marker>-start` .. `tool-<marker>-end` sections from
        // installer templates for tools that aren't being shipped.
        fn filter(contents: &str, marker: &str) -> String {
            let start = format!("tool-{}-start", marker);
            let end = format!("tool-{}-end", marker);
            let mut lines = Vec::new();
            let mut omitted = false;
            for line in contents.lines() {
                if line.contains(&start) {
                    omitted = true;
                } else if line.contains(&end) {
                    omitted = false;
                } else if !omitted {
                    lines.push(line);
                }
            }
            lines.join("\n")
        }

        // Copies an installer template into tmp with missing-tool sections
        // filtered out, returning the new path.
        let xform = |p: &Path| {
            let mut contents = t!(fs::read_to_string(p));
            if rls_installer.is_none() {
                contents = filter(&contents, "rls");
            }
            contents = filter(&contents, "rust-analyzer");
            if miri_installer.is_none() {
                contents = filter(&contents, "miri");
            }
            if rustfmt_installer.is_none() {
                contents = filter(&contents, "rustfmt");
            }
            let ret = tmp.join(p.file_name().unwrap());
            t!(fs::write(&ret, &contents));
            ret
        };

        if target.contains("apple-darwin") {
            builder.info("building pkg installer");
            let pkg = tmp.join("pkg");
            let _ = fs::remove_dir_all(&pkg);

            // Builds a component .pkg with pkgbuild.
            let pkgbuild = |component: &str| {
                let mut cmd = Command::new("pkgbuild");
                cmd.arg("--identifier")
                    .arg(format!("org.rust-lang.{}", component))
                    .arg("--scripts")
                    .arg(pkg.join(component))
                    .arg("--nopayload")
                    .arg(pkg.join(component).with_extension("pkg"));
                builder.run(&mut cmd);
            };

            // Copies a component's files into place and builds its .pkg.
            let prepare = |name: &str| {
                builder.create_dir(&pkg.join(name));
                builder.cp_r(
                    &work.join(&format!("{}-{}", pkgname(builder, name), target.triple)),
                    &pkg.join(name),
                );
                builder.install(&etc.join("pkg/postinstall"), &pkg.join(name), 0o755);
                pkgbuild(name);
            };
            prepare("rustc");
            prepare("cargo");
            prepare("rust-docs");
            prepare("rust-std");
            prepare("rust-analysis");
            prepare("clippy");
            if rls_installer.is_some() {
                prepare("rls");
            }
            prepare("rust-analyzer");
            if miri_installer.is_some() {
                prepare("miri");
            }
            // create an 'uninstall' package
            builder.install(&etc.join("pkg/postinstall"), &pkg.join("uninstall"), 0o755);
            pkgbuild("uninstall");

            builder.create_dir(&pkg.join("res"));
            builder.create(&pkg.join("res/LICENSE.txt"), &license);
            builder.install(&etc.join("gfx/rust-logo.png"), &pkg.join("res"), 0o644);
            let mut cmd = Command::new("productbuild");
            cmd.arg("--distribution")
                .arg(xform(&etc.join("pkg/Distribution.xml")))
                .arg("--resources")
                .arg(pkg.join("res"))
                .arg(distdir(builder).join(format!(
                    "{}-{}.pkg",
                    pkgname(builder, "rust"),
                    target.triple
                )))
                .arg("--package-path")
                .arg(&pkg);
            let _time = timeit(builder);
            builder.run(&mut cmd);
        }

        if target.contains("windows") {
            let exe = tmp.join("exe");
            let _ = fs::remove_dir_all(&exe);

            // Copies a component's files (from its rust-installer layout) into
            // the exe staging dir under the component's plain name.
            let prepare = |name: &str| {
                builder.create_dir(&exe.join(name));
                // Components packaged under a "-preview" (or triple-suffixed)
                // directory name inside the tarball.
                let dir = if name == "rust-std" || name == "rust-analysis" {
                    format!("{}-{}", name, target.triple)
                } else if name == "rls" {
                    "rls-preview".to_string()
                } else if name == "rust-analyzer" {
                    "rust-analyzer-preview".to_string()
                } else if name == "clippy" {
                    "clippy-preview".to_string()
                } else if name == "miri" {
                    "miri-preview".to_string()
                } else {
                    name.to_string()
                };
                builder.cp_r(
                    &work.join(&format!("{}-{}", pkgname(builder, name), target.triple)).join(dir),
                    &exe.join(name),
                );
                // The MSI doesn't use rust-installer's manifest.
                builder.remove(&exe.join(name).join("manifest.in"));
            };
            prepare("rustc");
            prepare("cargo");
            prepare("rust-analysis");
            prepare("rust-docs");
            prepare("rust-std");
            prepare("clippy");
            if rls_installer.is_some() {
                prepare("rls");
            }
            prepare("rust-analyzer");
            if miri_installer.is_some() {
                prepare("miri");
            }
            if target.contains("windows-gnu") {
                prepare("rust-mingw");
            }

            builder.install(&etc.join("gfx/rust-logo.ico"), &exe, 0o644);

            // Generate msi installer
            let wix = PathBuf::from(env::var_os("WIX").unwrap());
            let heat = wix.join("bin/heat.exe");
            let candle = wix.join("bin/candle.exe");
            let light = wix.join("bin/light.exe");

            // heat.exe harvests each component dir into a .wxs fragment.
            let heat_flags = ["-nologo", "-gg", "-sfrag", "-srd", "-sreg"];
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("rustc")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("RustcGroup")
                    .arg("-dr")
                    .arg("Rustc")
                    .arg("-var")
                    .arg("var.RustcDir")
                    .arg("-out")
                    .arg(exe.join("RustcGroup.wxs")),
            );
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("rust-docs")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("DocsGroup")
                    .arg("-dr")
                    .arg("Docs")
                    .arg("-var")
                    .arg("var.DocsDir")
                    .arg("-out")
                    .arg(exe.join("DocsGroup.wxs"))
                    .arg("-t")
                    .arg(etc.join("msi/squash-components.xsl")),
            );
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("cargo")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("CargoGroup")
                    .arg("-dr")
                    .arg("Cargo")
                    .arg("-var")
                    .arg("var.CargoDir")
                    .arg("-out")
                    .arg(exe.join("CargoGroup.wxs"))
                    .arg("-t")
                    .arg(etc.join("msi/remove-duplicates.xsl")),
            );
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("rust-std")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("StdGroup")
                    .arg("-dr")
                    .arg("Std")
                    .arg("-var")
                    .arg("var.StdDir")
                    .arg("-out")
                    .arg(exe.join("StdGroup.wxs")),
            );
            if rls_installer.is_some() {
                builder.run(
                    Command::new(&heat)
                        .current_dir(&exe)
                        .arg("dir")
                        .arg("rls")
                        .args(&heat_flags)
                        .arg("-cg")
                        .arg("RlsGroup")
                        .arg("-dr")
                        .arg("Rls")
                        .arg("-var")
                        .arg("var.RlsDir")
                        .arg("-out")
                        .arg(exe.join("RlsGroup.wxs"))
                        .arg("-t")
                        .arg(etc.join("msi/remove-duplicates.xsl")),
                );
            }
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("rust-analyzer")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("RustAnalyzerGroup")
                    .arg("-dr")
                    .arg("RustAnalyzer")
                    .arg("-var")
                    .arg("var.RustAnalyzerDir")
                    .arg("-out")
                    .arg(exe.join("RustAnalyzerGroup.wxs"))
                    .arg("-t")
                    .arg(etc.join("msi/remove-duplicates.xsl")),
            );
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("clippy")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("ClippyGroup")
                    .arg("-dr")
                    .arg("Clippy")
                    .arg("-var")
                    .arg("var.ClippyDir")
                    .arg("-out")
                    .arg(exe.join("ClippyGroup.wxs"))
                    .arg("-t")
                    .arg(etc.join("msi/remove-duplicates.xsl")),
            );
            if miri_installer.is_some() {
                builder.run(
                    Command::new(&heat)
                        .current_dir(&exe)
                        .arg("dir")
                        .arg("miri")
                        .args(&heat_flags)
                        .arg("-cg")
                        .arg("MiriGroup")
                        .arg("-dr")
                        .arg("Miri")
                        .arg("-var")
                        .arg("var.MiriDir")
                        .arg("-out")
                        .arg(exe.join("MiriGroup.wxs"))
                        .arg("-t")
                        .arg(etc.join("msi/remove-duplicates.xsl")),
                );
            }
            builder.run(
                Command::new(&heat)
                    .current_dir(&exe)
                    .arg("dir")
                    .arg("rust-analysis")
                    .args(&heat_flags)
                    .arg("-cg")
                    .arg("AnalysisGroup")
                    .arg("-dr")
                    .arg("Analysis")
                    .arg("-var")
                    .arg("var.AnalysisDir")
                    .arg("-out")
                    .arg(exe.join("AnalysisGroup.wxs"))
                    .arg("-t")
                    .arg(etc.join("msi/remove-duplicates.xsl")),
            );
            if target.contains("windows-gnu") {
                builder.run(
                    Command::new(&heat)
                        .current_dir(&exe)
                        .arg("dir")
                        .arg("rust-mingw")
                        .args(&heat_flags)
                        .arg("-cg")
                        .arg("GccGroup")
                        .arg("-dr")
                        .arg("Gcc")
                        .arg("-var")
                        .arg("var.GccDir")
                        .arg("-out")
                        .arg(exe.join("GccGroup.wxs")),
                );
            }

            // candle.exe compiles each .wxs into a .wixobj; this closure
            // shadows the `candle` path binding above.
            let candle = |input: &Path| {
                let output = exe.join(input.file_stem().unwrap()).with_extension("wixobj");
                let arch = if target.contains("x86_64") { "x64" } else { "x86" };
                let mut cmd = Command::new(&candle);
                cmd.current_dir(&exe)
                    .arg("-nologo")
                    .arg("-dRustcDir=rustc")
                    .arg("-dDocsDir=rust-docs")
                    .arg("-dCargoDir=cargo")
                    .arg("-dStdDir=rust-std")
                    .arg("-dAnalysisDir=rust-analysis")
                    .arg("-dClippyDir=clippy")
                    .arg("-arch")
                    .arg(&arch)
                    .arg("-out")
                    .arg(&output)
                    .arg(&input);
                add_env(builder, &mut cmd, target);

                if rls_installer.is_some() {
                    cmd.arg("-dRlsDir=rls");
                }
                cmd.arg("-dRustAnalyzerDir=rust-analyzer");
                if miri_installer.is_some() {
                    cmd.arg("-dMiriDir=miri");
                }
                if target.contains("windows-gnu") {
                    cmd.arg("-dGccDir=rust-mingw");
                }
                builder.run(&mut cmd);
            };
            candle(&xform(&etc.join("msi/rust.wxs")));
            candle(&etc.join("msi/ui.wxs"));
            candle(&etc.join("msi/rustwelcomedlg.wxs"));
            candle("RustcGroup.wxs".as_ref());
            candle("DocsGroup.wxs".as_ref());
            candle("CargoGroup.wxs".as_ref());
            candle("StdGroup.wxs".as_ref());
            candle("ClippyGroup.wxs".as_ref());
            if rls_installer.is_some() {
                candle("RlsGroup.wxs".as_ref());
            }
            candle("RustAnalyzerGroup.wxs".as_ref());
            if miri_installer.is_some() {
                candle("MiriGroup.wxs".as_ref());
            }
            candle("AnalysisGroup.wxs".as_ref());
            if target.contains("windows-gnu") {
                candle("GccGroup.wxs".as_ref());
            }

            builder.create(&exe.join("LICENSE.rtf"), &rtf);
            builder.install(&etc.join("gfx/banner.bmp"), &exe, 0o644);
            builder.install(&etc.join("gfx/dialogbg.bmp"), &exe, 0o644);

            // light.exe links all .wixobj files into the final .msi.
            builder.info(&format!("building `msi` installer with {:?}", light));
            let filename = format!("{}-{}.msi", pkgname(builder, "rust"), target.triple);
            let mut cmd = Command::new(&light);
            cmd.arg("-nologo")
                .arg("-ext")
                .arg("WixUIExtension")
                .arg("-ext")
                .arg("WixUtilExtension")
                .arg("-out")
                .arg(exe.join(&filename))
                .arg("rust.wixobj")
                .arg("ui.wixobj")
                .arg("rustwelcomedlg.wixobj")
                .arg("RustcGroup.wixobj")
                .arg("DocsGroup.wixobj")
                .arg("CargoGroup.wixobj")
                .arg("StdGroup.wixobj")
                .arg("AnalysisGroup.wixobj")
                .arg("ClippyGroup.wixobj")
                .current_dir(&exe);

            if rls_installer.is_some() {
                cmd.arg("RlsGroup.wixobj");
            }
            cmd.arg("RustAnalyzerGroup.wixobj");
            if miri_installer.is_some() {
                cmd.arg("MiriGroup.wixobj");
            }
            if target.contains("windows-gnu") {
                cmd.arg("GccGroup.wixobj");
            }
            // ICE57 wrongly complains about the shortcuts
            cmd.arg("-sice:ICE57");

            let _time = timeit(builder);
            builder.run(&mut cmd);

            if !builder.config.dry_run {
                t!(fs::rename(exe.join(&filename), distdir(builder).join(&filename)));
            }
        }
    }
}
/// Populates `cmd`'s environment with the `CFG_*` variables the WiX
/// templates expect (release/version info, package names, platform flags).
fn add_env(builder: &Builder<'_>, cmd: &mut Command, target: TargetSelection) {
    // CFG_RELEASE_NUM has the shape "major.minor.patch".
    let mut version_parts = channel::CFG_RELEASE_NUM.split('.');
    cmd.env("CFG_RELEASE_INFO", builder.rust_version())
        .env("CFG_RELEASE_NUM", channel::CFG_RELEASE_NUM)
        .env("CFG_RELEASE", builder.rust_release())
        .env("CFG_VER_MAJOR", version_parts.next().unwrap())
        .env("CFG_VER_MINOR", version_parts.next().unwrap())
        .env("CFG_VER_PATCH", version_parts.next().unwrap())
        .env("CFG_VER_BUILD", "0") // just needed to build
        .env("CFG_PACKAGE_VERS", builder.rust_package_vers())
        .env("CFG_PACKAGE_NAME", pkgname(builder, "rust"))
        .env("CFG_BUILD", target.triple)
        .env("CFG_CHANNEL", &builder.config.channel);

    // ABI flavor: GNU (mingw) vs MSVC.
    let (mingw, abi) = if target.contains("windows-gnu") { ("1", "GNU") } else { ("0", "MSVC") };
    cmd.env("CFG_MINGW", mingw).env("CFG_ABI", abi);

    // Installer platform: 64-bit vs 32-bit.
    let platform = if target.contains("x86_64") { "x64" } else { "x86" };
    cmd.env("CFG_PLATFORM", platform);
}
/// Dist step that hashes and GPG-signs the produced artifacts via the
/// `build-manifest` tool.
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct HashSign;
impl Step for HashSign {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("hash-and-sign")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(HashSign);
    }

    /// Runs the `build-manifest` tool, feeding it the GPG passphrase on
    /// stdin, to hash and sign everything in the dist directory.
    fn run(self, builder: &Builder<'_>) {
        // This gets called by `promote-release`
        // (https://github.com/rust-lang/rust-central-station/tree/master/promote-release).
        let mut cmd = builder.tool_cmd(Tool::BuildManifest);
        if builder.config.dry_run {
            return;
        }
        // These config values are required; panic with a helpful message
        // rather than an opaque unwrap failure.
        let sign = builder.config.dist_sign_folder.as_ref().unwrap_or_else(|| {
            panic!("\n\nfailed to specify `dist.sign-folder` in `config.toml`\n\n")
        });
        let addr = builder.config.dist_upload_addr.as_ref().unwrap_or_else(|| {
            panic!("\n\nfailed to specify `dist.upload-addr` in `config.toml`\n\n")
        });
        // The GPG passphrase is only needed when signing isn't disabled.
        let pass = if env::var("BUILD_MANIFEST_DISABLE_SIGNING").is_err() {
            let file = builder.config.dist_gpg_password_file.as_ref().unwrap_or_else(|| {
                panic!("\n\nfailed to specify `dist.gpg-password-file` in `config.toml`\n\n")
            });
            t!(fs::read_to_string(&file))
        } else {
            String::new()
        };

        let today = output(Command::new("date").arg("+%Y-%m-%d"));

        // Positional arguments expected by build-manifest, in order.
        cmd.arg(sign);
        cmd.arg(distdir(builder));
        cmd.arg(today.trim());
        cmd.arg(builder.rust_package_vers());
        cmd.arg(addr);
        cmd.arg(builder.package_vers(&builder.release_num("cargo")));
        cmd.arg(builder.package_vers(&builder.release_num("rls")));
        cmd.arg(builder.package_vers(&builder.release_num("rust-analyzer/crates/rust-analyzer")));
        cmd.arg(builder.package_vers(&builder.release_num("clippy")));
        cmd.arg(builder.package_vers(&builder.release_num("miri")));
        cmd.arg(builder.package_vers(&builder.release_num("rustfmt")));
        cmd.arg(builder.llvm_tools_package_vers());

        builder.create_dir(&distdir(builder));

        // Pipe the passphrase through stdin and wait for completion.
        let mut child = t!(cmd.stdin(Stdio::piped()).spawn());
        t!(child.stdin.take().unwrap().write_all(pass.as_bytes()));
        let status = t!(child.wait());
        assert!(status.success());
    }
}
/// Maybe add libLLVM.so to the given destination lib-dir. It will only have
/// been built if LLVM tools are linked dynamically.
///
/// Note: This function does not yet support Windows, but we also don't support
/// linking LLVM tools dynamically on Windows yet.
fn maybe_install_llvm(builder: &Builder<'_>, target: TargetSelection, dst_libdir: &Path) {
    let src_libdir = builder.llvm_out(target).join("lib");

    // macOS ships a .dylib that is not a symlink, so it can be installed
    // directly when present.
    if target.contains("apple-darwin") {
        let dylib = src_libdir.join("libLLVM.dylib");
        if dylib.exists() {
            builder.install(&dylib, dst_libdir, 0o644);
        }
        return;
    }

    // Usually libLLVM.so is a symlink to something like libLLVM-6.0.so.
    // Since tools link to the latter rather than the former, we have to
    // follow the symlink to find out what to distribute.
    let dylib = src_libdir.join("libLLVM.so");
    if !dylib.exists() {
        return;
    }
    let resolved = dylib.canonicalize().unwrap_or_else(|e| {
        panic!("dist: Error calling canonicalize path `{}`: {}", dylib.display(), e);
    });
    builder.install(&resolved, dst_libdir, 0o644);
}
/// Maybe add libLLVM.so to the target lib-dir for linking.
pub fn maybe_install_llvm_target(builder: &Builder<'_>, target: TargetSelection, sysroot: &Path) {
    // Target libraries live under lib/rustlib/<triple>/lib in the sysroot.
    let target_libdir = sysroot.join("lib/rustlib").join(&*target.triple).join("lib");
    maybe_install_llvm(builder, target, &target_libdir);
}
/// Maybe add libLLVM.so to the runtime lib-dir for rustc itself.
pub fn maybe_install_llvm_runtime(builder: &Builder<'_>, target: TargetSelection, sysroot: &Path) {
    // rustc's own runtime libdir, relative to the sysroot.
    let runtime_libdir =
        sysroot.join(builder.sysroot_libdir_relative(Compiler { stage: 1, host: target }));
    maybe_install_llvm(builder, target, &runtime_libdir);
}
/// Dist step producing the `llvm-tools-preview` installer tarball.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct LlvmTools {
    /// Target triple the tarball is produced for.
    pub target: TargetSelection,
}
impl Step for LlvmTools {
    // Path of the generated tarball, or `None` when an external LLVM is used
    // and the tools weren't built by us.
    type Output = Option<PathBuf>;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("llvm-tools")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(LlvmTools { target: run.target });
    }

    /// Packages the LLVM binary tools built alongside rustc into the
    /// `llvm-tools-preview` tarball.
    fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
        let target = self.target;
        assert!(builder.config.extended);

        /* run only if llvm-config isn't used */
        if let Some(config) = builder.config.target_config.get(&target) {
            if let Some(ref _s) = config.llvm_config {
                builder.info(&format!("Skipping LlvmTools ({}): external LLVM", target));
                return None;
            }
        }

        builder.info(&format!("Dist LlvmTools ({})", target));
        let _time = timeit(builder);
        let src = builder.src.join("src/llvm-project/llvm");
        let name = pkgname(builder, "llvm-tools");

        // Start from a clean image directory; the remove may fail if it
        // doesn't exist yet, which is fine.
        let tmp = tmpdir(builder);
        let image = tmp.join("llvm-tools-image");
        drop(fs::remove_dir_all(&image));

        // Prepare the image directory: copy each tool binary into
        // lib/rustlib/<triple>/bin.
        let src_bindir = builder.llvm_out(target).join("bin");
        let dst_bindir = image.join("lib/rustlib").join(&*target.triple).join("bin");
        t!(fs::create_dir_all(&dst_bindir));
        for tool in LLVM_TOOLS {
            let exe = src_bindir.join(exe(tool, target));
            builder.install(&exe, &dst_bindir, 0o755);
        }

        // Copy libLLVM.so to the target lib dir as well, so the RPATH like
        // `$ORIGIN/../lib` can find it. It may also be used as a dependency
        // of `rustc-dev` to support the inherited `-lLLVM` when using the
        // compiler libraries.
        maybe_install_llvm_target(builder, target, &image);

        // Prepare the overlay (files shipped next to, not inside, the image).
        let overlay = tmp.join("llvm-tools-overlay");
        drop(fs::remove_dir_all(&overlay));
        builder.create_dir(&overlay);
        builder.install(&src.join("README.txt"), &overlay, 0o644);
        builder.install(&src.join("LICENSE.TXT"), &overlay, 0o644);
        builder.create(&overlay.join("version"), &builder.llvm_tools_vers());

        // Generate the installer tarball
        let mut cmd = rust_installer(builder);
        cmd.arg("generate")
            .arg("--product-name=Rust")
            .arg("--rel-manifest-dir=rustlib")
            .arg("--success-message=llvm-tools-installed.")
            .arg("--image-dir")
            .arg(&image)
            .arg("--work-dir")
            .arg(&tmpdir(builder))
            .arg("--output-dir")
            .arg(&distdir(builder))
            .arg("--non-installed-overlay")
            .arg(&overlay)
            .arg(format!("--package-name={}-{}", name, target.triple))
            .arg("--legacy-manifest-dirs=rustlib,cargo")
            .arg("--component-name=llvm-tools-preview");

        builder.run(&mut cmd);
        Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target.triple)))
    }
}
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the test-related targets of the build system.
//!
//! This file implements the various regression test suites that we execute on
//! our CI.
use std::env;
use std::ffi::OsString;
use std::fmt;
use std::fs::{self, File};
use std::io::Read;
use std::iter;
use std::path::{Path, PathBuf};
use std::process::Command;
use build_helper::{self, output};
use builder::{Builder, Compiler, Kind, RunConfig, ShouldRun, Step};
use cache::{Interned, INTERNER};
use compile;
use dist;
use flags::Subcommand;
use native;
use tool::{self, Tool, SourceType};
use toolstate::ToolState;
use util::{self, dylib_path, dylib_path_var};
use Crate as CargoCrate;
use {DocTests, Mode};
/// Staging directory on a remote Android device; handed to compiletest via
/// its `--adb-test-dir` flag.
const ADB_TEST_DIR: &str = "/data/tmp/work";

/// The two modes of the test runner; tests or benchmarks.
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, PartialOrd, Ord)]
pub enum TestKind {
    /// Run `cargo test`
    Test,
    /// Run `cargo bench`
    Bench,
}
impl From<Kind> for TestKind {
    /// Map a build `Kind` onto the corresponding `TestKind`.
    ///
    /// Only `Kind::Test` and `Kind::Bench` are meaningful here; any other
    /// kind is a caller bug and panics.
    fn from(kind: Kind) -> Self {
        match kind {
            Kind::Test => TestKind::Test,
            Kind::Bench => TestKind::Bench,
            other => panic!("unexpected kind in crate: {:?}", other),
        }
    }
}
impl TestKind {
    /// The cargo subcommand ("test" or "bench") that runs this kind.
    fn subcommand(self) -> &'static str {
        if self == TestKind::Test {
            "test"
        } else {
            "bench"
        }
    }
}
impl fmt::Display for TestKind {
    /// Human-readable progress verb for this kind ("Testing"/"Benchmarking").
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let verb = match *self {
            TestKind::Test => "Testing",
            TestKind::Bench => "Benchmarking",
        };
        f.write_str(verb)
    }
}
/// Runs `cmd`, returning whether it succeeded.
///
/// In fail-fast mode a failure aborts the build immediately (via
/// `builder.run`); otherwise the failed command is recorded in
/// `delayed_failures` and `false` is returned so testing can continue.
fn try_run(builder: &Builder, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run(cmd);
        return true;
    }
    if builder.try_run(cmd) {
        true
    } else {
        builder
            .delayed_failures
            .borrow_mut()
            .push(format!("{:?}", cmd));
        false
    }
}
/// Like `try_run`, but uses the quiet variants of the runner so command
/// output is suppressed.
fn try_run_quiet(builder: &Builder, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run_quiet(cmd);
        return true;
    }
    if builder.try_run_quiet(cmd) {
        true
    } else {
        builder
            .delayed_failures
            .borrow_mut()
            .push(format!("{:?}", cmd));
        false
    }
}
/// Test step that runs the `linkchecker` tool over the built documentation.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Linkcheck {
    host: Interned<String>,
}

impl Step for Linkcheck {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;

    /// Runs the `linkchecker` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will verify the validity of all our links in the
    /// documentation to ensure we don't have a bunch of dead ones.
    fn run(self, builder: &Builder) {
        let host = self.host;
        builder.info(&format!("Linkcheck ({})", host));

        // The link checker needs the documentation built first.
        builder.default_doc(None);

        let _time = util::timeit(&builder);
        try_run(
            builder,
            builder
                .tool_cmd(Tool::Linkchecker)
                .arg(builder.out.join(host).join("doc")),
        );
    }

    fn should_run(run: ShouldRun) -> ShouldRun {
        let builder = run.builder;
        // Only run by default when documentation building is enabled.
        run.path("src/tools/linkchecker")
            .default_condition(builder.config.docs)
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Linkcheck { host: run.target });
    }
}
/// Test step that builds a few external Rust projects with the in-tree
/// compiler (via the `cargotest` tool).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargotest {
    stage: u32,
    host: Interned<String>,
}

impl Step for Cargotest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/cargotest")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Cargotest {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs the `cargotest` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will check out a few Rust projects and run `cargo
    /// test` to ensure that we don't regress the test suites there.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);
        // The checked-out crates need a full rustc available.
        builder.ensure(compile::Rustc {
            compiler,
            target: compiler.host,
        });

        // Note that this is a short, cryptic, and not scoped directory name. This
        // is currently to minimize the length of path on Windows where we otherwise
        // quickly run into path name limit constraints.
        let out_dir = builder.out.join("ct");
        t!(fs::create_dir_all(&out_dir));

        let _time = util::timeit(&builder);
        let mut cmd = builder.tool_cmd(Tool::CargoTest);
        try_run(
            builder,
            cmd.arg(&builder.initial_cargo)
                .arg(&out_dir)
                .env("RUSTC", builder.rustc(compiler))
                .env("RUSTDOC", builder.rustdoc(compiler.host)),
        );
    }
}
/// Test step that runs Cargo's own test suite with the in-tree compiler.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargo {
    stage: u32,
    host: Interned<String>,
}

impl Step for Cargo {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/cargo")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Cargo {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for `cargo` packaged with Rust.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);

        // Build cargo itself first.
        builder.ensure(tool::Cargo {
            compiler,
            target: self.host,
        });
        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 self.host,
                                                 "test",
                                                 "src/tools/cargo",
                                                 SourceType::Submodule);

        if !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        // Don't run cross-compile tests, we may not have cross-compiled libstd libs
        // available.
        cargo.env("CFG_DISABLE_CROSS_TESTS", "1");

        // PATH must expose the staged rustc as `rustc` (see `path_for_cargo`).
        try_run(
            builder,
            cargo.env("PATH", &path_for_cargo(builder, compiler)),
        );
    }
}
/// Test step for the RLS (Rust Language Server).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rls {
    stage: u32,
    host: Interned<String>,
}

impl Step for Rls {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rls")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Rls {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for the rls.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        let build_result = builder.ensure(tool::Rls {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        // Building the tool is best-effort: bail out (without failing the
        // whole build) when no binary was produced.
        if build_result.is_none() {
            eprintln!("failed to test rls: could not build");
            return;
        }

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 host,
                                                 "test",
                                                 "src/tools/rls",
                                                 SourceType::Submodule);

        // Copy `src/tools/rls/test_data` to a writable drive.
        let test_workspace_path = builder.out.join("rls-test-data");
        let test_data_path = test_workspace_path.join("test_data");
        builder.create_dir(&test_data_path);
        builder.cp_r(&builder.src.join("src/tools/rls/test_data"), &test_data_path);
        cargo.env("RLS_TEST_WORKSPACE_DIR", test_workspace_path);

        builder.add_rustc_lib_path(compiler, &mut cargo);
        // Forward any user-provided test args to the test binaries.
        cargo.arg("--")
            .args(builder.config.cmd.test_args());

        // Record the outcome for toolstate tracking.
        if try_run(builder, &mut cargo) {
            builder.save_toolstate("rls", ToolState::TestPass);
        }
    }
}
/// Test step for rustfmt.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rustfmt {
    stage: u32,
    host: Interned<String>,
}

impl Step for Rustfmt {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rustfmt")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Rustfmt {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for rustfmt.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        let build_result = builder.ensure(tool::Rustfmt {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        // Best-effort build: skip testing (without failing) when rustfmt
        // could not be built.
        if build_result.is_none() {
            eprintln!("failed to test rustfmt: could not build");
            return;
        }

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 host,
                                                 "test",
                                                 "src/tools/rustfmt",
                                                 SourceType::Submodule);

        // rustfmt's tests write into RUSTFMT_TEST_DIR; make sure it exists.
        let dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&dir));
        cargo.env("RUSTFMT_TEST_DIR", dir);

        builder.add_rustc_lib_path(compiler, &mut cargo);

        // Record the outcome for toolstate tracking.
        if try_run(builder, &mut cargo) {
            builder.save_toolstate("rustfmt", ToolState::TestPass);
        }
    }
}
/// Test step for miri; runs by default only when `test_miri` is enabled in
/// the build configuration.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Miri {
    stage: u32,
    host: Interned<String>,
}

impl Step for Miri {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        let test_miri = run.builder.config.test_miri;
        run.path("src/tools/miri").default_condition(test_miri)
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Miri {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for miri.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // Best-effort build: only test when a miri binary was produced.
        let miri = builder.ensure(tool::Miri {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let Some(miri) = miri {
            let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 host,
                                                 "test",
                                                 "src/tools/miri",
                                                 SourceType::Submodule);

            // miri tests need to know about the stage sysroot
            cargo.env("MIRI_SYSROOT", builder.sysroot(compiler));
            cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            cargo.env("MIRI_PATH", miri);

            builder.add_rustc_lib_path(compiler, &mut cargo);

            // Record the outcome for toolstate tracking.
            if try_run(builder, &mut cargo) {
                builder.save_toolstate("miri", ToolState::TestPass);
            }
        } else {
            eprintln!("failed to test miri: could not build");
        }
    }
}
/// Test step for clippy; never selected by default (`DEFAULT = false`).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Clippy {
    stage: u32,
    host: Interned<String>,
}

impl Step for Clippy {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = false;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/clippy")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Clippy {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for clippy.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // Best-effort build: only test when a clippy binary was produced.
        let clippy = builder.ensure(tool::Clippy {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let Some(clippy) = clippy {
            let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 host,
                                                 "test",
                                                 "src/tools/clippy",
                                                 SourceType::Submodule);

            // clippy tests need to know about the stage sysroot
            cargo.env("SYSROOT", builder.sysroot(compiler));
            cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            let host_libs = builder
                .stage_out(compiler, Mode::ToolRustc)
                .join(builder.cargo_dir());
            cargo.env("HOST_LIBS", host_libs);
            // clippy tests need to find the driver
            cargo.env("CLIPPY_DRIVER_PATH", clippy);

            builder.add_rustc_lib_path(compiler, &mut cargo);

            // Record the outcome for toolstate tracking.
            if try_run(builder, &mut cargo) {
                builder.save_toolstate("clippy-driver", ToolState::TestPass);
            }
        } else {
            eprintln!("failed to test clippy: could not build");
        }
    }
}
/// Builds a PATH value with the staged compiler's `bin` directory prepended.
///
/// Cargo's test suite requires the compiler to be reachable literally as
/// `rustc` on PATH (some tests fail otherwise), so we prepend the sysroot
/// bin dir rather than setting the RUSTC env var.
fn path_for_cargo(builder: &Builder, compiler: Compiler) -> OsString {
    let sysroot_bin = builder.sysroot(compiler).join("bin");
    let old_path = env::var_os("PATH").unwrap_or_default();
    let entries = iter::once(sysroot_bin).chain(env::split_paths(&old_path));
    env::join_paths(entries).expect("")
}
/// Test step that runs the `rustdoc-themes` tool against the locally built
/// rustdoc and the in-tree theme files.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocTheme {
    pub compiler: Compiler,
}

impl Step for RustdocTheme {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rustdoc-themes")
    }

    fn make_run(run: RunConfig) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);
        // Use field-init shorthand, consistent with the other steps in this
        // file (was the redundant `compiler: compiler`).
        run.builder.ensure(RustdocTheme { compiler });
    }

    fn run(self, builder: &Builder) {
        let rustdoc = builder.out.join("bootstrap/debug/rustdoc");
        let mut cmd = builder.tool_cmd(Tool::RustdocTheme);
        // The tool gets the rustdoc binary, the themes directory, and the
        // environment the real rustdoc wrapper expects.
        cmd.arg(rustdoc.to_str().unwrap())
            .arg(
                builder
                    .src
                    .join("src/librustdoc/html/static/themes")
                    .to_str()
                    .unwrap(),
            )
            .env("RUSTC_STAGE", self.compiler.stage.to_string())
            .env("RUSTC_SYSROOT", builder.sysroot(self.compiler))
            .env(
                "RUSTDOC_LIBDIR",
                builder.sysroot_libdir(self.compiler, self.compiler.host),
            )
            .env("CFG_RELEASE_CHANNEL", &builder.config.channel)
            .env("RUSTDOC_REAL", builder.rustdoc(self.compiler.host))
            .env("RUSTDOC_CRATE_VERSION", builder.rust_version())
            .env("RUSTC_BOOTSTRAP", "1");
        if let Some(linker) = builder.linker(self.compiler.host) {
            cmd.env("RUSTC_TARGET_LINKER", linker);
        }
        try_run(builder, &mut cmd);
    }
}
/// Test step that runs the rustdoc-js suite through its nodejs driver,
/// when nodejs is configured.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocJS {
    pub host: Interned<String>,
    pub target: Interned<String>,
}

impl Step for RustdocJS {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/test/rustdoc-js")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(RustdocJS {
            host: run.host,
            target: run.target,
        });
    }

    fn run(self, builder: &Builder) {
        if let Some(ref nodejs) = builder.config.nodejs {
            let mut command = Command::new(nodejs);
            command.args(&["src/tools/rustdoc-js/tester.js", &*self.host]);
            // The tester runs against the generated std documentation.
            builder.ensure(::doc::Std {
                target: self.target,
                stage: builder.top_stage,
            });
            builder.run(&mut command);
        } else {
            // No formatting placeholders here, so pass the &str directly
            // instead of allocating with `format!` (clippy: useless_format).
            builder.info("No nodejs found, skipping \"src/test/rustdoc-js\" tests");
        }
    }
}
/// Test step for the `rustdoc-ui` suite, run in compiletest's "ui" mode.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocUi {
    pub host: Interned<String>,
    pub target: Interned<String>,
    pub compiler: Compiler,
}

impl Step for RustdocUi {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/test/rustdoc-ui")
    }

    fn make_run(run: RunConfig) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);
        run.builder.ensure(RustdocUi {
            host: run.host,
            target: run.target,
            compiler,
        });
    }

    // Thin wrapper: forwards to the shared `Compiletest` step with the
    // rustdoc-ui suite parameters.
    fn run(self, builder: &Builder) {
        builder.ensure(Compiletest {
            compiler: self.compiler,
            target: self.target,
            mode: "ui",
            suite: "rustdoc-ui",
            path: None,
            compare_mode: None,
        })
    }
}
/// Lint step that runs the in-tree `tidy` tool over the source tree.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Tidy;

impl Step for Tidy {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    /// Runs the `tidy` tool.
    ///
    /// This tool in `src/tools` checks up on various bits and pieces of style and
    /// otherwise just implements a few lint-like checks that are specific to the
    /// compiler itself.
    fn run(self, builder: &Builder) {
        let mut cmd = builder.tool_cmd(Tool::Tidy);
        cmd.arg(builder.src.join("src"));
        cmd.arg(&builder.initial_cargo);
        if !builder.config.vendor {
            cmd.arg("--no-vendor");
        }
        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }

        let _folder = builder.fold_output(|| "tidy");
        // No formatting placeholders, so pass the &str directly instead of
        // allocating with `format!` (clippy: useless_format).
        builder.info("tidy check");
        try_run(builder, &mut cmd);
    }

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/tidy")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Tidy);
    }
}
/// Per-host test output directory: `<build-out>/<host>/test`.
fn testdir(builder: &Builder, host: Interned<String>) -> PathBuf {
    let mut dir = builder.out.join(host);
    dir.push("test");
    dir
}
// Declares a compiletest suite step that runs by default, on target
// platforms as well as hosts.
macro_rules! default_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: false });
    }
}

// Same as `default_test!`, but also records a compare mode so the suite is
// run a second time with that mode (see `Compiletest::run`).
macro_rules! default_test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr,
                   compare_mode: $compare_mode:expr }) => {
        test_with_compare_mode!($name { path: $path, mode: $mode, suite: $suite, default: true,
                                        host: false, compare_mode: $compare_mode });
    }
}

// Declares a default suite step restricted to host platforms.
macro_rules! host_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: true });
    }
}

// Fully-parameterized form, without a compare mode.
macro_rules! test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr }) => {
        test_definitions!($name { path: $path, mode: $mode, suite: $suite, default: $default,
                                  host: $host, compare_mode: None });
    }
}

// Fully-parameterized form, with a compare mode.
macro_rules! test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr, compare_mode: $compare_mode:expr }) => {
        test_definitions!($name { path: $path, mode: $mode, suite: $suite, default: $default,
                                  host: $host, compare_mode: Some($compare_mode) });
    }
}

// Expands to a `$name` step struct plus a `Step` impl that forwards to the
// shared `Compiletest` step with the given suite parameters.
macro_rules! test_definitions {
    ($name:ident {
        path: $path:expr,
        mode: $mode:expr,
        suite: $suite:expr,
        default: $default:expr,
        host: $host:expr,
        compare_mode: $compare_mode:expr
    }) => {
        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
        pub struct $name {
            pub compiler: Compiler,
            pub target: Interned<String>,
        }

        impl Step for $name {
            type Output = ();
            const DEFAULT: bool = $default;
            const ONLY_HOSTS: bool = $host;

            fn should_run(run: ShouldRun) -> ShouldRun {
                run.suite_path($path)
            }

            fn make_run(run: RunConfig) {
                let compiler = run.builder.compiler(run.builder.top_stage, run.host);
                run.builder.ensure($name {
                    compiler,
                    target: run.target,
                });
            }

            fn run(self, builder: &Builder) {
                builder.ensure(Compiletest {
                    compiler: self.compiler,
                    target: self.target,
                    mode: $mode,
                    suite: $suite,
                    path: Some($path),
                    compare_mode: $compare_mode,
                })
            }
        }
    }
}
// Default suites, run for target platforms. The ui suite additionally
// re-runs under the "nll" compare mode.
default_test_with_compare_mode!(Ui {
    path: "src/test/ui",
    mode: "ui",
    suite: "ui",
    compare_mode: "nll"
});

default_test!(RunPass {
    path: "src/test/run-pass",
    mode: "run-pass",
    suite: "run-pass"
});

default_test!(CompileFail {
    path: "src/test/compile-fail",
    mode: "compile-fail",
    suite: "compile-fail"
});

default_test!(ParseFail {
    path: "src/test/parse-fail",
    mode: "parse-fail",
    suite: "parse-fail"
});

default_test!(RunFail {
    path: "src/test/run-fail",
    mode: "run-fail",
    suite: "run-fail"
});

default_test!(RunPassValgrind {
    path: "src/test/run-pass-valgrind",
    mode: "run-pass-valgrind",
    suite: "run-pass-valgrind"
});

default_test!(MirOpt {
    path: "src/test/mir-opt",
    mode: "mir-opt",
    suite: "mir-opt"
});

default_test!(Codegen {
    path: "src/test/codegen",
    mode: "codegen",
    suite: "codegen"
});

default_test!(CodegenUnits {
    path: "src/test/codegen-units",
    mode: "codegen-units",
    suite: "codegen-units"
});

default_test!(Incremental {
    path: "src/test/incremental",
    mode: "incremental",
    suite: "incremental"
});

default_test!(Debuginfo {
    path: "src/test/debuginfo",
    // What this runs varies depending on the native platform being apple;
    // the placeholder mode is resolved in `Compiletest::run`.
    mode: "debuginfo-XXX",
    suite: "debuginfo"
});

// Host-only suites (they need the full compiler libraries available).
host_test!(UiFullDeps {
    path: "src/test/ui-fulldeps",
    mode: "ui",
    suite: "ui-fulldeps"
});

host_test!(RunPassFullDeps {
    path: "src/test/run-pass-fulldeps",
    mode: "run-pass",
    suite: "run-pass-fulldeps"
});

host_test!(RunFailFullDeps {
    path: "src/test/run-fail-fulldeps",
    mode: "run-fail",
    suite: "run-fail-fulldeps"
});

host_test!(CompileFailFullDeps {
    path: "src/test/compile-fail-fulldeps",
    mode: "compile-fail",
    suite: "compile-fail-fulldeps"
});

host_test!(IncrementalFullDeps {
    path: "src/test/incremental-fulldeps",
    mode: "incremental",
    suite: "incremental-fulldeps"
});

host_test!(Rustdoc {
    path: "src/test/rustdoc",
    mode: "rustdoc",
    suite: "rustdoc"
});

// Pretty-printing suites; host-only and not run by default.
test!(Pretty {
    path: "src/test/pretty",
    mode: "pretty",
    suite: "pretty",
    default: false,
    host: true
});

test!(RunPassPretty {
    path: "src/test/run-pass/pretty",
    mode: "pretty",
    suite: "run-pass",
    default: false,
    host: true
});

test!(RunFailPretty {
    path: "src/test/run-fail/pretty",
    mode: "pretty",
    suite: "run-fail",
    default: false,
    host: true
});

test!(RunPassValgrindPretty {
    path: "src/test/run-pass-valgrind/pretty",
    mode: "pretty",
    suite: "run-pass-valgrind",
    default: false,
    host: true
});

test!(RunPassFullDepsPretty {
    path: "src/test/run-pass-fulldeps/pretty",
    mode: "pretty",
    suite: "run-pass-fulldeps",
    default: false,
    host: true
});

test!(RunFailFullDepsPretty {
    path: "src/test/run-fail-fulldeps/pretty",
    mode: "pretty",
    suite: "run-fail-fulldeps",
    default: false,
    host: true
});

default_test!(RunMake {
    path: "src/test/run-make",
    mode: "run-make",
    suite: "run-make"
});

host_test!(RunMakeFullDeps {
    path: "src/test/run-make-fulldeps",
    mode: "run-make",
    suite: "run-make-fulldeps"
});
/// Parameters for one invocation of the `compiletest` tool. This is the
/// shared backend that every per-suite wrapper step forwards to.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct Compiletest {
    compiler: Compiler,
    target: Interned<String>,
    /// Compiletest mode, e.g. "ui" or "run-pass".
    mode: &'static str,
    /// Test suite directory name under `src/test`.
    suite: &'static str,
    /// Suite path used to filter individual tests from CLI paths
    /// (`None` disables the filtering).
    path: Option<&'static str>,
    /// Default compare mode for a second run, e.g. "nll"; can be
    /// overridden from the command line.
    compare_mode: Option<&'static str>,
}

impl Step for Compiletest {
    type Output = ();

    // Never selected by path: only reached via `builder.ensure` from the
    // wrapper steps generated by `test_definitions!` and friends.
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    /// Executes the `compiletest` tool to run a suite of tests.
    ///
    /// Compiles all tests with `compiler` for `target` with the specified
    /// compiletest `mode` and `suite` arguments. For example `mode` can be
    /// "run-pass" or `suite` can be something like `debuginfo`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let suite = self.suite;

        // Path for test suite
        let suite_path = self.path.unwrap_or("");

        // Skip codegen tests if they aren't enabled in configuration.
        if !builder.config.codegen_tests && suite == "codegen" {
            return;
        }

        if suite == "debuginfo" {
            // Skip debuginfo tests on MSVC
            if builder.config.build.contains("msvc") {
                return;
            }

            // Resolve the "debuginfo-XXX" placeholder mode into lldb (apple
            // hosts) or gdb (everything else) by re-dispatching to this same
            // step with the concrete mode.
            if mode == "debuginfo-XXX" {
                return if builder.config.build.contains("apple") {
                    builder.ensure(Compiletest {
                        mode: "debuginfo-lldb",
                        ..self
                    });
                } else {
                    builder.ensure(Compiletest {
                        mode: "debuginfo-gdb",
                        ..self
                    });
                };
            }

            builder.ensure(dist::DebuggerScripts {
                sysroot: builder.sysroot(compiler),
                host: target,
            });
        }

        if suite.ends_with("fulldeps") ||
            // FIXME: Does pretty need librustc compiled? Note that there are
            // fulldeps test suites with mode = pretty as well.
            mode == "pretty"
        {
            builder.ensure(compile::Rustc { compiler, target });
        }

        if builder.no_std(target) == Some(true) {
            // the `test` crate doesn't compile for no-std targets
            builder.ensure(compile::Std { compiler, target });
        } else {
            builder.ensure(compile::Test { compiler, target });
        }

        if builder.no_std(target) == Some(true) {
            // for no_std run-make (e.g. thumb*),
            // we need a host compiler which is called by cargo.
            builder.ensure(compile::Std { compiler, target: compiler.host });
        }

        builder.ensure(native::TestHelpers { target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        let mut cmd = builder.tool_cmd(Tool::Compiletest);

        // compiletest currently has... a lot of arguments, so let's just pass all
        // of them!
        cmd.arg("--compile-lib-path")
            .arg(builder.rustc_libdir(compiler));
        cmd.arg("--run-lib-path")
            .arg(builder.sysroot_libdir(compiler, target));
        cmd.arg("--rustc-path").arg(builder.rustc(compiler));

        let is_rustdoc_ui = suite.ends_with("rustdoc-ui");

        // Avoid depending on rustdoc when we don't need it.
        if mode == "rustdoc"
            || (mode == "run-make" && suite.ends_with("fulldeps"))
            || (mode == "ui" && is_rustdoc_ui)
        {
            cmd.arg("--rustdoc-path")
                .arg(builder.rustdoc(compiler.host));
        }

        cmd.arg("--src-base")
            .arg(builder.src.join("src/test").join(suite));
        cmd.arg("--build-base")
            .arg(testdir(builder, compiler.host).join(suite));
        cmd.arg("--stage-id")
            .arg(format!("stage{}-{}", compiler.stage, target));
        cmd.arg("--mode").arg(mode);
        cmd.arg("--target").arg(target);
        cmd.arg("--host").arg(&*compiler.host);
        cmd.arg("--llvm-filecheck")
            .arg(builder.llvm_filecheck(builder.config.build));

        if builder.config.cmd.bless() {
            cmd.arg("--bless");
        }

        // An explicit --compare-mode on the command line overrides the
        // suite's default compare mode.
        let compare_mode = builder.config.cmd.compare_mode().or(self.compare_mode);

        if let Some(ref nodejs) = builder.config.nodejs {
            cmd.arg("--nodejs").arg(nodejs);
        }

        // rustdoc-ui tests get no extra rustc flags; everything else gets
        // -Crpath plus optional -O / -g from the configuration.
        let mut flags = if is_rustdoc_ui {
            Vec::new()
        } else {
            vec!["-Crpath".to_string()]
        };
        if !is_rustdoc_ui {
            if builder.config.rust_optimize_tests {
                flags.push("-O".to_string());
            }
            if builder.config.rust_debuginfo_tests {
                flags.push("-g".to_string());
            }
        }
        flags.push("-Zunstable-options".to_string());
        flags.push(builder.config.cmd.rustc_args().join(" "));

        if let Some(linker) = builder.linker(target) {
            cmd.arg("--linker").arg(linker);
        }

        let hostflags = flags.clone();
        cmd.arg("--host-rustcflags").arg(hostflags.join(" "));

        // Target flags additionally link against the native test helpers.
        let mut targetflags = flags.clone();
        targetflags.push(format!(
            "-Lnative={}",
            builder.test_helpers_out(target).display()
        ));
        cmd.arg("--target-rustcflags").arg(targetflags.join(" "));

        cmd.arg("--docck-python").arg(builder.python());

        if builder.config.build.ends_with("apple-darwin") {
            // Force /usr/bin/python on macOS for LLDB tests because we're loading the
            // LLDB plugin's compiled module which only works with the system python
            // (namely not Homebrew-installed python)
            cmd.arg("--lldb-python").arg("/usr/bin/python");
        } else {
            cmd.arg("--lldb-python").arg(builder.python());
        }

        if let Some(ref gdb) = builder.config.gdb {
            cmd.arg("--gdb").arg(gdb);
        }
        if let Some(ref vers) = builder.lldb_version {
            cmd.arg("--lldb-version").arg(vers);
        }
        if let Some(ref dir) = builder.lldb_python_dir {
            cmd.arg("--lldb-python-dir").arg(dir);
        }

        // Get paths from cmd args
        let paths = match &builder.config.cmd {
            Subcommand::Test { ref paths, .. } => &paths[..],
            _ => &[],
        };

        // Get test-args by stripping the suite path prefix off the
        // user-supplied file paths.
        let mut test_args: Vec<&str> = paths
            .iter()
            .map(|p| {
                match p.strip_prefix(".") {
                    Ok(path) => path,
                    Err(_) => p,
                }
            })
            .filter(|p| p.starts_with(suite_path) && p.is_file())
            .map(|p| p.strip_prefix(suite_path).unwrap().to_str().unwrap())
            .collect();

        test_args.append(&mut builder.config.cmd.test_args());

        cmd.args(&test_args);

        if builder.is_verbose() {
            cmd.arg("--verbose");
        }

        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }

        if builder.config.llvm_enabled {
            let llvm_config = builder.ensure(native::Llvm {
                target: builder.config.build,
                emscripten: false,
            });
            if !builder.config.dry_run {
                let llvm_version = output(Command::new(&llvm_config).arg("--version"));
                cmd.arg("--llvm-version").arg(llvm_version);
            }
            if !builder.is_rust_llvm(target) {
                cmd.arg("--system-llvm");
            }

            // Only pass correct values for these flags for the `run-make` suite as it
            // requires that a C++ compiler was configured which isn't always the case.
            if !builder.config.dry_run && suite == "run-make-fulldeps" {
                let llvm_components = output(Command::new(&llvm_config).arg("--components"));
                let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags"));
                cmd.arg("--cc")
                    .arg(builder.cc(target))
                    .arg("--cxx")
                    .arg(builder.cxx(target).unwrap())
                    .arg("--cflags")
                    .arg(builder.cflags(target).join(" "))
                    .arg("--llvm-components")
                    .arg(llvm_components.trim())
                    .arg("--llvm-cxxflags")
                    .arg(llvm_cxxflags.trim());
                if let Some(ar) = builder.ar(target) {
                    cmd.arg("--ar").arg(ar);
                }
            }
        }
        if suite == "run-make-fulldeps" && !builder.config.llvm_enabled {
            builder.info(&format!(
                "Ignoring run-make test suite as they generally don't work without LLVM"
            ));
            return;
        }

        if suite != "run-make-fulldeps" {
            // Pass empty values so compiletest's argument parsing still sees
            // these flags even when no C/C++ toolchain info applies.
            cmd.arg("--cc")
                .arg("")
                .arg("--cxx")
                .arg("")
                .arg("--cflags")
                .arg("")
                .arg("--llvm-components")
                .arg("")
                .arg("--llvm-cxxflags")
                .arg("");
        }

        if builder.remote_tested(target) {
            cmd.arg("--remote-test-client")
                .arg(builder.tool_exe(Tool::RemoteTestClient));
        }

        // Running a C compiler on MSVC requires a few env vars to be set, to be
        // sure to set them here.
        //
        // Note that if we encounter `PATH` we make sure to append to our own `PATH`
        // rather than stomp over it.
        if target.contains("msvc") {
            for &(ref k, ref v) in builder.cc[&target].env() {
                if k != "PATH" {
                    cmd.env(k, v);
                }
            }
        }
        cmd.env("RUSTC_BOOTSTRAP", "1");
        builder.add_rust_test_threads(&mut cmd);

        if builder.config.sanitizers {
            cmd.env("SANITIZER_SUPPORT", "1");
        }

        if builder.config.profiler {
            cmd.env("PROFILER_SUPPORT", "1");
        }

        cmd.env("RUST_TEST_TMPDIR", builder.out.join("tmp"));

        cmd.arg("--adb-path").arg("adb");
        cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR);
        if target.contains("android") {
            // Assume that cc for this target comes from the android sysroot
            cmd.arg("--android-cross-path")
                .arg(builder.cc(target).parent().unwrap().parent().unwrap());
        } else {
            cmd.arg("--android-cross-path").arg("");
        }

        builder.ci_env.force_coloring_in_ci(&mut cmd);

        let _folder = builder.fold_output(|| format!("test_{}", suite));
        builder.info(&format!(
            "Check compiletest suite={} mode={} ({} -> {})",
            suite, mode, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cmd);

        // Optionally run the suite a second time under the compare mode,
        // appending the extra flag to the already-built command.
        if let Some(compare_mode) = compare_mode {
            cmd.arg("--compare-mode").arg(compare_mode);
            let _folder = builder.fold_output(|| format!("test_{}_{}", suite, compare_mode));
            builder.info(&format!(
                "Check compiletest suite={} mode={} compare_mode={} ({} -> {})",
                suite, mode, compare_mode, &compiler.host, target
            ));
            let _time = util::timeit(&builder);
            try_run(builder, &mut cmd);
        }
    }
}
/// Shared step that runs `rustdoc --test` over every markdown file in a
/// documentation tree (used by the `test_book!` wrapper steps).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct DocTest {
    compiler: Compiler,
    /// Root directory (relative to the source tree) scanned for `*.md` files.
    path: &'static str,
    /// Name used for output folding and toolstate reporting.
    name: &'static str,
    /// When true, the pass/fail result is recorded via `save_toolstate`.
    is_ext_doc: bool,
}

impl Step for DocTest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    // Never selected by path; only reached via `builder.ensure`.
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    /// Run `rustdoc --test` for all documentation in `src/doc`.
    ///
    /// This will run all tests in our markdown documentation (e.g. the book)
    /// located in `src/doc`. The `rustdoc` that's run is the one that sits next to
    /// `compiler`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        builder.ensure(compile::Test {
            compiler,
            target: compiler.host,
        });

        // Do a breadth-first traversal of the `src/doc` directory and just run
        // tests for all files that end in `*.md`
        let mut stack = vec![builder.src.join(self.path)];
        let _time = util::timeit(&builder);
        let _folder = builder.fold_output(|| format!("test_{}", self.name));

        let mut files = Vec::new();
        while let Some(p) = stack.pop() {
            if p.is_dir() {
                stack.extend(t!(p.read_dir()).map(|p| t!(p).path()));
                continue;
            }

            if p.extension().and_then(|s| s.to_str()) != Some("md") {
                continue;
            }

            // The nostarch directory in the book is for no starch, and so isn't
            // guaranteed to build. We don't care if it doesn't build, so skip it.
            if p.to_str().map_or(false, |p| p.contains("nostarch")) {
                continue;
            }

            files.push(p);
        }

        // Sort for a deterministic test order.
        files.sort();

        let mut toolstate = ToolState::TestPass;
        for file in files {
            if !markdown_test(builder, compiler, &file) {
                toolstate = ToolState::TestFail;
            }
        }
        if self.is_ext_doc {
            builder.save_toolstate(self.name, toolstate);
        }
    }
}
// Declares one doc-test step per book under `src/doc`. Books declared with
// `default=false` are forwarded with `is_ext_doc: true`, so their result is
// recorded via toolstate (see `DocTest::run`) instead of failing the build.
macro_rules! test_book {
    ($($name:ident, $path:expr, $book_name:expr, default=$default:expr;)+) => {
        $(
            #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
            pub struct $name {
                compiler: Compiler,
            }

            impl Step for $name {
                type Output = ();
                const DEFAULT: bool = $default;
                const ONLY_HOSTS: bool = true;

                fn should_run(run: ShouldRun) -> ShouldRun {
                    run.path($path)
                }

                fn make_run(run: RunConfig) {
                    run.builder.ensure($name {
                        compiler: run.builder.compiler(run.builder.top_stage, run.host),
                    });
                }

                fn run(self, builder: &Builder) {
                    builder.ensure(DocTest {
                        compiler: self.compiler,
                        path: $path,
                        name: $book_name,
                        is_ext_doc: !$default,
                    });
                }
            }
        )+
    }
}

test_book!(
    Nomicon, "src/doc/nomicon", "nomicon", default=false;
    Reference, "src/doc/reference", "reference", default=false;
    RustdocBook, "src/doc/rustdoc", "rustdoc", default=true;
    RustcBook, "src/doc/rustc", "rustc", default=true;
    RustByExample, "src/doc/rust-by-example", "rust-by-example", default=false;
    TheBook, "src/doc/book", "book", default=false;
    UnstableBook, "src/doc/unstable-book", "unstable-book", default=true;
);
/// Test step for the error index: generates the markdown from the error
/// metadata and runs its examples through `rustdoc --test`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ErrorIndex {
    compiler: Compiler,
}

impl Step for ErrorIndex {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/error_index_generator")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(ErrorIndex {
            compiler: run.builder.compiler(run.builder.top_stage, run.host),
        });
    }

    /// Run the error index generator tool to execute the tests located in the error
    /// index.
    ///
    /// The `error_index_generator` tool lives in `src/tools` and is used to
    /// generate a markdown file from the error indexes of the code base which is
    /// then passed to `rustdoc --test`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;

        builder.ensure(compile::Std {
            compiler,
            target: compiler.host,
        });

        let dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&dir));
        let output = dir.join("error-index.md");

        let mut tool = builder.tool_cmd(Tool::ErrorIndex);
        tool.arg("markdown")
            .arg(&output)
            .env("CFG_BUILD", &builder.config.build)
            .env("RUSTC_ERROR_METADATA_DST", builder.extended_error_dir());

        let _folder = builder.fold_output(|| "test_error_index");
        builder.info(&format!("Testing error-index stage{}", compiler.stage));
        let _time = util::timeit(&builder);
        builder.run(&mut tool);
        // Then doc-test the generated markdown.
        markdown_test(builder, compiler, &output);
    }
}
/// Runs `rustdoc --test` over a single markdown file, returning whether the
/// doctests passed. Files without any code fences are trivially successful.
fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) -> bool {
    // Quick pre-scan: a file with no ``` fences has no doctests to run.
    // An unreadable file falls through to rustdoc, which reports the error.
    if let Ok(mut file) = File::open(markdown) {
        let mut contents = String::new();
        t!(file.read_to_string(&mut contents));
        if !contents.contains("```") {
            return true;
        }
    }

    builder.info(&format!("doc tests for: {}", markdown.display()));
    let mut cmd = builder.rustdoc_cmd(compiler.host);
    builder.add_rust_test_threads(&mut cmd);
    cmd.arg("--test")
        .arg(markdown)
        .env("RUSTC_BOOTSTRAP", "1");
    cmd.arg("--test-args")
        .arg(builder.config.cmd.test_args().join(" "));

    if builder.config.verbose_tests {
        try_run(builder, &mut cmd)
    } else {
        try_run_quiet(builder, &mut cmd)
    }
}
/// Runs `cargo test`/`cargo bench` for a compiler (librustc) crate.
///
/// Thin wrapper that forwards to the generic `Crate` step with `Mode::Rustc`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateLibrustc {
    compiler: Compiler,
    target: Interned<String>,
    test_kind: TestKind,
    krate: Interned<String>,
}

impl Step for CrateLibrustc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.krate("rustc-main")
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        // Schedule one step per in-tree rustc crate whose path matches what
        // the user asked for on the command line.
        for krate in builder.in_tree_crates("rustc-main") {
            if run.path.ends_with(&krate.path) {
                let test_kind = builder.kind.into();

                builder.ensure(CrateLibrustc {
                    compiler,
                    target: run.target,
                    test_kind,
                    krate: krate.name,
                });
            }
        }
    }

    fn run(self, builder: &Builder) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Rustc,
            test_kind: self.test_kind,
            krate: self.krate,
        });
    }
}
/// Runs tests for the allocator/sanitizer runtime crates, which are excluded
/// from the default `Crate` step and only run when explicitly requested.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateNotDefault {
    compiler: Compiler,
    target: Interned<String>,
    test_kind: TestKind,
    krate: &'static str,
}

impl Step for CrateNotDefault {
    type Output = ();

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/liballoc_jemalloc")
            .path("src/librustc_asan")
            .path("src/librustc_lsan")
            .path("src/librustc_msan")
            .path("src/librustc_tsan")
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        let test_kind = builder.kind.into();

        builder.ensure(CrateNotDefault {
            compiler,
            target: run.target,
            test_kind,
            // Map the requested source path back to its crate name; these are
            // the only paths `should_run` accepts, so anything else is a bug.
            krate: match run.path {
                _ if run.path.ends_with("src/liballoc_jemalloc") => "alloc_jemalloc",
                _ if run.path.ends_with("src/librustc_asan") => "rustc_asan",
                _ if run.path.ends_with("src/librustc_lsan") => "rustc_lsan",
                _ if run.path.ends_with("src/librustc_msan") => "rustc_msan",
                _ if run.path.ends_with("src/librustc_tsan") => "rustc_tsan",
                _ => panic!("unexpected path {:?}", run.path),
            },
        });
    }

    fn run(self, builder: &Builder) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Std,
            test_kind: self.test_kind,
            krate: INTERNER.intern_str(self.krate),
        });
    }
}
/// Runs `cargo test` (or `cargo bench`) for a single in-tree crate of the
/// standard library, libtest, or the compiler.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Crate {
    pub compiler: Compiler,
    pub target: Interned<String>,
    pub mode: Mode,
    pub test_kind: TestKind,
    pub krate: Interned<String>,
}

impl Step for Crate {
    type Output = ();
    const DEFAULT: bool = true;

    fn should_run(mut run: ShouldRun) -> ShouldRun {
        let builder = run.builder;
        run = run.krate("test");
        // Register every local std-facade crate, except the allocator and
        // sanitizer runtime crates which are handled by `CrateNotDefault`.
        for krate in run.builder.in_tree_crates("std") {
            if krate.is_local(&run.builder)
                && !krate.name.contains("jemalloc")
                && !(krate.name.starts_with("rustc_") && krate.name.ends_with("san"))
                && krate.name != "dlmalloc"
            {
                run = run.path(krate.local_path(&builder).to_str().unwrap());
            }
        }
        run
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        let make = |mode: Mode, krate: &CargoCrate| {
            let test_kind = builder.kind.into();

            builder.ensure(Crate {
                compiler,
                target: run.target,
                mode,
                test_kind,
                krate: krate.name,
            });
        };

        // A path can belong to the std or the test DAG; dispatch accordingly.
        for krate in builder.in_tree_crates("std") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Std, krate);
            }
        }
        for krate in builder.in_tree_crates("test") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Test, krate);
            }
        }
    }

    /// Run all unit tests plus documentation tests for a given crate defined
    /// by a `Cargo.toml` (single manifest)
    ///
    /// This is what runs tests for crates like the standard library, compiler, etc.
    /// It essentially is the driver for running `cargo test`.
    ///
    /// Currently this runs all tests for a DAG by passing a bunch of `-p foo`
    /// arguments, and those arguments are discovered from `cargo metadata`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let test_kind = self.test_kind;
        let krate = self.krate;

        builder.ensure(compile::Test { compiler, target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        // If we're not doing a full bootstrap but we're testing a stage2 version of
        // libstd, then what we're actually testing is the libstd produced in
        // stage1. Reflect that here by updating the compiler that we're working
        // with automatically.
        let compiler = if builder.force_use_stage1(compiler, target) {
            builder.compiler(1, compiler.host)
        } else {
            // `Compiler` is `Copy`; the previous `.clone()` here was redundant.
            compiler
        };

        let mut cargo = builder.cargo(compiler, mode, target, test_kind.subcommand());
        match mode {
            Mode::Std => {
                compile::std_cargo(builder, &compiler, target, &mut cargo);
            }
            Mode::Test => {
                compile::test_cargo(builder, &compiler, target, &mut cargo);
            }
            Mode::Rustc => {
                builder.ensure(compile::Rustc { compiler, target });
                compile::rustc_cargo(builder, &mut cargo);
            }
            _ => panic!("can only test libraries"),
        };

        // Build up the base `cargo test` command.
        //
        // Pass in some standard flags then iterate over the graph we've discovered
        // in `cargo metadata` with the maps above and figure out what `-p`
        // arguments need to get passed.
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }
        match builder.doc_tests {
            DocTests::Only => {
                cargo.arg("--doc");
            }
            DocTests::No => {
                cargo.args(&["--lib", "--bins", "--examples", "--tests", "--benches"]);
            }
            DocTests::Yes => {}
        }

        cargo.arg("-p").arg(krate);

        // The tests are going to run with the *target* libraries, so we need to
        // ensure that those libraries show up in the LD_LIBRARY_PATH equivalent.
        //
        // Note that to run the compiler we need to run with the *host* libraries,
        // but our wrapper scripts arrange for that to be the case anyway.
        let mut dylib_path = dylib_path();
        dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target)));
        cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        if target.contains("emscripten") {
            // Emscripten binaries need node to execute.
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                builder
                    .config
                    .nodejs
                    .as_ref()
                    .expect("nodejs not configured"),
            );
        } else if target.starts_with("wasm32") {
            // Warn about running tests without the `wasm_syscall` feature enabled.
            // The javascript shim implements the syscall interface so that test
            // output can be correctly reported.
            if !builder.config.wasm_syscall {
                builder.info(
                    "Libstd was built without `wasm_syscall` feature enabled: \
                     test output may not be visible."
                );
            }

            // On the wasm32-unknown-unknown target we're using LTO which is
            // incompatible with `-C prefer-dynamic`, so disable that here
            cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");

            let node = builder
                .config
                .nodejs
                .as_ref()
                .expect("nodejs not configured");
            let runner = format!(
                "{} {}/src/etc/wasm32-shim.js",
                node.display(),
                builder.src.display()
            );
            cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), &runner);
        } else if builder.remote_tested(target) {
            // Remote/emulated targets run test binaries through the remote
            // test client, which ships them to the device started by
            // `RemoteCopyLibs`.
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                format!("{} run", builder.tool_exe(Tool::RemoteTestClient).display()),
            );
        }

        let _folder = builder.fold_output(|| {
            format!(
                "{}_stage{}-{}",
                test_kind.subcommand(),
                compiler.stage,
                krate
            )
        });
        builder.info(&format!(
            "{} {} stage{} ({} -> {})",
            test_kind, krate, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(builder);
        try_run(builder, &mut cargo);
    }
}
/// Runs `cargo test`/`cargo bench` for the rustdoc crate itself.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateRustdoc {
    host: Interned<String>,
    test_kind: TestKind,
}

impl Step for CrateRustdoc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.paths(&["src/librustdoc", "src/tools/rustdoc"])
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;

        let test_kind = builder.kind.into();

        builder.ensure(CrateRustdoc {
            host: run.host,
            test_kind,
        });
    }

    fn run(self, builder: &Builder) {
        let test_kind = self.test_kind;

        let compiler = builder.compiler(builder.top_stage, self.host);
        let target = compiler.host;
        // rustdoc links against librustc, so build the compiler first.
        builder.ensure(compile::Rustc { compiler, target });

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 target,
                                                 test_kind.subcommand(),
                                                 "src/tools/rustdoc",
                                                 SourceType::InTree);
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        // Pin to the exact package to avoid testing the whole workspace.
        cargo.arg("-p").arg("rustdoc:0.0.0");

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        let _folder = builder
            .fold_output(|| format!("{}_stage{}-rustdoc", test_kind.subcommand(), compiler.stage));
        builder.info(&format!(
            "{} rustdoc stage{} ({} -> {})",
            test_kind, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);

        try_run(builder, &mut cargo);
    }
}
/// Converts a string (typically a target triple) into the form used in Cargo
/// environment variable names: `-` becomes `_` and all letters are uppercased.
fn envify(s: &str) -> String {
    s.chars()
        .flat_map(|c| {
            let c = if c == '-' { '_' } else { c };
            c.to_uppercase()
        })
        .collect()
}
/// Some test suites are run inside emulators or on remote devices, and most
/// of our test binaries are linked dynamically which means we need to ship
/// the standard library and such to the emulator ahead of time. This step
/// represents this and is a dependency of all test suites.
///
/// Most of the time this is a noop. For some steps such as shipping data to
/// QEMU we have to build our own tools so we've got conditional dependencies
/// on those programs as well. Note that the remote test client is built for
/// the build target (us) and the server is built for the target.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct RemoteCopyLibs {
    compiler: Compiler,
    target: Interned<String>,
}

impl Step for RemoteCopyLibs {
    type Output = ();

    fn should_run(run: ShouldRun) -> ShouldRun {
        // Never selected from the command line; only run as a dependency.
        run.never()
    }

    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        if !builder.remote_tested(target) {
            return;
        }

        builder.ensure(compile::Test { compiler, target });

        builder.info(&format!("REMOTE copy libs to emulator ({})", target));
        t!(fs::create_dir_all(builder.out.join("tmp")));

        // The server runs on the device, so it's built for the target with a
        // stage0 compiler.
        let server = builder.ensure(tool::RemoteTestServer {
            compiler: compiler.with_stage(0),
            target,
        });

        // Spawn the emulator and wait for it to come online
        let tool = builder.tool_exe(Tool::RemoteTestClient);
        let mut cmd = Command::new(&tool);
        cmd.arg("spawn-emulator")
            .arg(target)
            .arg(&server)
            .arg(builder.out.join("tmp"));
        if let Some(rootfs) = builder.qemu_rootfs(target) {
            cmd.arg(rootfs);
        }
        builder.run(&mut cmd);

        // Push all our dylibs to the emulator
        for f in t!(builder.sysroot_libdir(compiler, target).read_dir()) {
            let f = t!(f);
            let name = f.file_name().into_string().unwrap();
            if util::is_dylib(&name) {
                builder.run(Command::new(&tool).arg("push").arg(f.path()));
            }
        }
    }
}
/// Checks that the source tarballs we distribute actually build and vendor
/// all of libstd's dependencies.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Distcheck;

impl Step for Distcheck {
    type Output = ();

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("distcheck")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Distcheck);
    }

    /// Run "distcheck", a 'make check' from a tarball
    fn run(self, builder: &Builder) {
        // `info` takes a `&str`; `format!` with no arguments was redundant.
        builder.info("Distcheck");
        let dir = builder.out.join("tmp").join("distcheck");
        let _ = fs::remove_dir_all(&dir);
        t!(fs::create_dir_all(&dir));

        // Guarantee that these are built before we begin running.
        builder.ensure(dist::PlainSourceTarball);
        builder.ensure(dist::Src);

        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::PlainSourceTarball))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);
        builder.run(
            Command::new("./configure")
                .args(&builder.config.configure_args)
                .arg("--enable-vendor")
                .current_dir(&dir),
        );
        builder.run(
            Command::new(build_helper::make(&builder.config.build))
                .arg("check")
                .current_dir(&dir),
        );

        // Now make sure that rust-src has all of libstd's dependencies
        builder.info("Distcheck rust-src");
        let dir = builder.out.join("tmp").join("distcheck-src");
        let _ = fs::remove_dir_all(&dir);
        t!(fs::create_dir_all(&dir));

        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::Src))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);

        // Generating a lockfile exercises dependency resolution for the
        // shipped libstd sources.
        let toml = dir.join("rust-src/lib/rustlib/src/rust/src/libstd/Cargo.toml");
        builder.run(
            Command::new(&builder.initial_cargo)
                .arg("generate-lockfile")
                .arg("--manifest-path")
                .arg(&toml)
                .current_dir(&dir),
        );
    }
}
/// Runs the test suite of the bootstrap (rustbuild) system itself.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Bootstrap;

impl Step for Bootstrap {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    /// Test the build system itself
    fn run(self, builder: &Builder) {
        let mut cmd = Command::new(&builder.initial_cargo);
        cmd.arg("test")
            .current_dir(builder.src.join("src/bootstrap"))
            .env("RUSTFLAGS", "-Cdebuginfo=2")
            .env("CARGO_TARGET_DIR", builder.out.join("bootstrap"))
            .env("RUSTC_BOOTSTRAP", "1")
            .env("RUSTC", &builder.initial_rustc);
        if let Some(flags) = option_env!("RUSTFLAGS") {
            // Use the same rustc flags for testing as for "normal" compilation,
            // so that Cargo doesn’t recompile the entire dependency graph every time:
            // https://github.com/rust-lang/rust/issues/49215
            // NOTE(review): this replaces the `-Cdebuginfo=2` value set above
            // rather than appending to it — confirm that's intended.
            cmd.env("RUSTFLAGS", flags);
        }
        if !builder.fail_fast {
            cmd.arg("--no-fail-fast");
        }
        cmd.arg("--").args(&builder.config.cmd.test_args());
        // rustbuild tests are racy on directory creation so just run them one at a time.
        // Since there's not many this shouldn't be a problem.
        cmd.arg("--test-threads=1");
        try_run(builder, &mut cmd);
    }

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/bootstrap")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Bootstrap);
    }
}
[CI] run-make/thumb: remove a trailing space.
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the test-related targets of the build system.
//!
//! This file implements the various regression test suites that we execute on
//! our CI.
use std::env;
use std::ffi::OsString;
use std::fmt;
use std::fs::{self, File};
use std::io::Read;
use std::iter;
use std::path::{Path, PathBuf};
use std::process::Command;
use build_helper::{self, output};
use builder::{Builder, Compiler, Kind, RunConfig, ShouldRun, Step};
use cache::{Interned, INTERNER};
use compile;
use dist;
use flags::Subcommand;
use native;
use tool::{self, Tool, SourceType};
use toolstate::ToolState;
use util::{self, dylib_path, dylib_path_var};
use Crate as CargoCrate;
use {DocTests, Mode};
// Scratch directory used on Android devices for remote test runs — presumably
// consumed by the adb-based test steps elsewhere in this file (not visible here).
const ADB_TEST_DIR: &str = "/data/tmp/work";
/// The two modes of the test runner; tests or benchmarks.
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, PartialOrd, Ord)]
pub enum TestKind {
    /// Run `cargo test`
    Test,
    /// Run `cargo bench`
    Bench,
}

impl From<Kind> for TestKind {
    fn from(kind: Kind) -> Self {
        // Only the `test` and `bench` build kinds map to a test kind; any
        // other kind reaching here is a programming error.
        match kind {
            Kind::Test => TestKind::Test,
            Kind::Bench => TestKind::Bench,
            other => panic!("unexpected kind in crate: {:?}", other),
        }
    }
}

impl TestKind {
    /// Return the cargo subcommand for this test kind.
    fn subcommand(self) -> &'static str {
        if self == TestKind::Test {
            "test"
        } else {
            "bench"
        }
    }
}

impl fmt::Display for TestKind {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let verb = match *self {
            TestKind::Test => "Testing",
            TestKind::Bench => "Benchmarking",
        };
        f.write_str(verb)
    }
}
/// Runs `cmd`, returning whether it succeeded. In fail-fast mode a failure
/// aborts immediately (inside `builder.run`); otherwise the failure is
/// recorded for reporting at the end of the build.
fn try_run(builder: &Builder, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run(cmd);
        return true;
    }
    let succeeded = builder.try_run(cmd);
    if !succeeded {
        builder
            .delayed_failures
            .borrow_mut()
            .push(format!("{:?}", cmd));
    }
    succeeded
}
/// Like `try_run`, but suppresses the command's output unless it fails.
fn try_run_quiet(builder: &Builder, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run_quiet(cmd);
        return true;
    }
    let succeeded = builder.try_run_quiet(cmd);
    if !succeeded {
        builder
            .delayed_failures
            .borrow_mut()
            .push(format!("{:?}", cmd));
    }
    succeeded
}
/// Verifies that links in the generated documentation are not dead.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Linkcheck {
    host: Interned<String>,
}

impl Step for Linkcheck {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;

    /// Runs the `linkchecker` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will verify the validity of all our links in the
    /// documentation to ensure we don't have a bunch of dead ones.
    fn run(self, builder: &Builder) {
        let host = self.host;

        builder.info(&format!("Linkcheck ({})", host));

        // The documentation must exist before it can be checked.
        builder.default_doc(None);

        let _time = util::timeit(&builder);
        try_run(
            builder,
            builder
                .tool_cmd(Tool::Linkchecker)
                .arg(builder.out.join(host).join("doc")),
        );
    }

    fn should_run(run: ShouldRun) -> ShouldRun {
        let builder = run.builder;
        // Only run by default when documentation building is enabled.
        run.path("src/tools/linkchecker")
            .default_condition(builder.config.docs)
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Linkcheck { host: run.target });
    }
}
/// Smoke-tests the compiler against a set of real-world Rust projects.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargotest {
    stage: u32,
    host: Interned<String>,
}

impl Step for Cargotest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/cargotest")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Cargotest {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs the `cargotest` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will check out a few Rust projects and run `cargo
    /// test` to ensure that we don't regress the test suites there.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);
        builder.ensure(compile::Rustc {
            compiler,
            target: compiler.host,
        });

        // Note that this is a short, cryptic, and not scoped directory name. This
        // is currently to minimize the length of path on Windows where we otherwise
        // quickly run into path name limit constraints.
        let out_dir = builder.out.join("ct");
        t!(fs::create_dir_all(&out_dir));

        let _time = util::timeit(&builder);
        let mut cmd = builder.tool_cmd(Tool::CargoTest);
        try_run(
            builder,
            cmd.arg(&builder.initial_cargo)
                .arg(&out_dir)
                .env("RUSTC", builder.rustc(compiler))
                .env("RUSTDOC", builder.rustdoc(compiler.host)),
        );
    }
}
/// Runs Cargo's own test suite against the in-tree Cargo.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargo {
    stage: u32,
    host: Interned<String>,
}

impl Step for Cargo {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/cargo")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Cargo {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for `cargo` packaged with Rust.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);

        builder.ensure(tool::Cargo {
            compiler,
            target: self.host,
        });
        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 self.host,
                                                 "test",
                                                 "src/tools/cargo",
                                                 SourceType::Submodule);

        if !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        // Don't run cross-compile tests, we may not have cross-compiled libstd libs
        // available.
        cargo.env("CFG_DISABLE_CROSS_TESTS", "1");

        try_run(
            builder,
            // PATH (not RUSTC) is how Cargo's test suite locates rustc; see
            // `path_for_cargo` below for why.
            cargo.env("PATH", &path_for_cargo(builder, compiler)),
        );
    }
}
/// Runs the RLS test suite and reports the result to the toolstate tracker.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rls {
    stage: u32,
    host: Interned<String>,
}

impl Step for Rls {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rls")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Rls {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for the rls.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // The RLS is allowed to be broken (it's an external tool); if it
        // doesn't build we just skip testing instead of failing the build.
        let build_result = builder.ensure(tool::Rls {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if build_result.is_none() {
            eprintln!("failed to test rls: could not build");
            return;
        }

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 host,
                                                 "test",
                                                 "src/tools/rls",
                                                 SourceType::Submodule);

        // Copy `src/tools/rls/test_data` to a writable drive.
        let test_workspace_path = builder.out.join("rls-test-data");
        let test_data_path = test_workspace_path.join("test_data");
        builder.create_dir(&test_data_path);
        builder.cp_r(&builder.src.join("src/tools/rls/test_data"), &test_data_path);
        cargo.env("RLS_TEST_WORKSPACE_DIR", test_workspace_path);

        builder.add_rustc_lib_path(compiler, &mut cargo);
        cargo.arg("--")
            .args(builder.config.cmd.test_args());

        // A passing run updates the toolstate so the tool is known-good.
        if try_run(builder, &mut cargo) {
            builder.save_toolstate("rls", ToolState::TestPass);
        }
    }
}
/// Runs the rustfmt test suite and reports the result to the toolstate tracker.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rustfmt {
    stage: u32,
    host: Interned<String>,
}

impl Step for Rustfmt {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rustfmt")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Rustfmt {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for rustfmt.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // rustfmt is allowed to be broken; skip testing if it doesn't build.
        let build_result = builder.ensure(tool::Rustfmt {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if build_result.is_none() {
            eprintln!("failed to test rustfmt: could not build");
            return;
        }

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 host,
                                                 "test",
                                                 "src/tools/rustfmt",
                                                 SourceType::Submodule);

        let dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&dir));
        cargo.env("RUSTFMT_TEST_DIR", dir);

        builder.add_rustc_lib_path(compiler, &mut cargo);

        if try_run(builder, &mut cargo) {
            builder.save_toolstate("rustfmt", ToolState::TestPass);
        }
    }
}
/// Runs the miri test suite and reports the result to the toolstate tracker.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Miri {
    stage: u32,
    host: Interned<String>,
}

impl Step for Miri {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        // Only run by default when enabled via config (`test_miri`).
        let test_miri = run.builder.config.test_miri;
        run.path("src/tools/miri").default_condition(test_miri)
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Miri {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for miri.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // miri is allowed to be broken; skip testing if it doesn't build.
        let miri = builder.ensure(tool::Miri {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let Some(miri) = miri {
            let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 host,
                                                 "test",
                                                 "src/tools/miri",
                                                 SourceType::Submodule);

            // miri tests need to know about the stage sysroot
            cargo.env("MIRI_SYSROOT", builder.sysroot(compiler));
            cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            cargo.env("MIRI_PATH", miri);

            builder.add_rustc_lib_path(compiler, &mut cargo);

            if try_run(builder, &mut cargo) {
                builder.save_toolstate("miri", ToolState::TestPass);
            }
        } else {
            eprintln!("failed to test miri: could not build");
        }
    }
}
/// Runs the clippy test suite and reports the result to the toolstate tracker.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Clippy {
    stage: u32,
    host: Interned<String>,
}

impl Step for Clippy {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    // Not run by default; must be requested explicitly.
    const DEFAULT: bool = false;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/clippy")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Clippy {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for clippy.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // clippy is allowed to be broken; skip testing if it doesn't build.
        let clippy = builder.ensure(tool::Clippy {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let Some(clippy) = clippy {
            let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 host,
                                                 "test",
                                                 "src/tools/clippy",
                                                 SourceType::Submodule);

            // clippy tests need to know about the stage sysroot
            cargo.env("SYSROOT", builder.sysroot(compiler));
            cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            let host_libs = builder
                .stage_out(compiler, Mode::ToolRustc)
                .join(builder.cargo_dir());
            cargo.env("HOST_LIBS", host_libs);
            // clippy tests need to find the driver
            cargo.env("CLIPPY_DRIVER_PATH", clippy);

            builder.add_rustc_lib_path(compiler, &mut cargo);

            if try_run(builder, &mut cargo) {
                builder.save_toolstate("clippy-driver", ToolState::TestPass);
            }
        } else {
            eprintln!("failed to test clippy: could not build");
        }
    }
}
fn path_for_cargo(builder: &Builder, compiler: Compiler) -> OsString {
// Configure PATH to find the right rustc. NB. we have to use PATH
// and not RUSTC because the Cargo test suite has tests that will
// fail if rustc is not spelled `rustc`.
let path = builder.sysroot(compiler).join("bin");
let old_path = env::var_os("PATH").unwrap_or_default();
env::join_paths(iter::once(path).chain(env::split_paths(&old_path))).expect("")
}
/// Checks that rustdoc's bundled CSS themes parse correctly.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocTheme {
    pub compiler: Compiler,
}

impl Step for RustdocTheme {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rustdoc-themes")
    }

    fn make_run(run: RunConfig) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);

        // Use field init shorthand (was `compiler: compiler`).
        run.builder.ensure(RustdocTheme { compiler });
    }

    fn run(self, builder: &Builder) {
        let rustdoc = builder.out.join("bootstrap/debug/rustdoc");
        let mut cmd = builder.tool_cmd(Tool::RustdocTheme);
        cmd.arg(rustdoc.to_str().unwrap())
            .arg(
                builder
                    .src
                    .join("src/librustdoc/html/static/themes")
                    .to_str()
                    .unwrap(),
            )
            .env("RUSTC_STAGE", self.compiler.stage.to_string())
            .env("RUSTC_SYSROOT", builder.sysroot(self.compiler))
            .env(
                "RUSTDOC_LIBDIR",
                builder.sysroot_libdir(self.compiler, self.compiler.host),
            )
            .env("CFG_RELEASE_CHANNEL", &builder.config.channel)
            .env("RUSTDOC_REAL", builder.rustdoc(self.compiler.host))
            .env("RUSTDOC_CRATE_VERSION", builder.rust_version())
            .env("RUSTC_BOOTSTRAP", "1");
        if let Some(linker) = builder.linker(self.compiler.host) {
            cmd.env("RUSTC_TARGET_LINKER", linker);
        }
        try_run(builder, &mut cmd);
    }
}
/// Runs rustdoc's JavaScript search-index tests (requires nodejs).
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocJS {
    pub host: Interned<String>,
    pub target: Interned<String>,
}

impl Step for RustdocJS {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/test/rustdoc-js")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(RustdocJS {
            host: run.host,
            target: run.target,
        });
    }

    fn run(self, builder: &Builder) {
        if let Some(ref nodejs) = builder.config.nodejs {
            let mut command = Command::new(nodejs);
            command.args(&["src/tools/rustdoc-js/tester.js", &*self.host]);
            // The tester exercises the search index of the std docs, so
            // those must be generated first.
            builder.ensure(::doc::Std {
                target: self.target,
                stage: builder.top_stage,
            });
            builder.run(&mut command);
        } else {
            // `info` takes a `&str`; `format!` with no arguments was redundant.
            builder.info("No nodejs found, skipping \"src/test/rustdoc-js\" tests");
        }
    }
}
/// Runs rustdoc's UI tests (compare rustdoc output/diagnostics against
/// expected files) by delegating to the `Compiletest` step.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocUi {
    pub host: Interned<String>,
    pub target: Interned<String>,
    pub compiler: Compiler,
}

impl Step for RustdocUi {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/test/rustdoc-ui")
    }

    fn make_run(run: RunConfig) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);
        run.builder.ensure(RustdocUi {
            host: run.host,
            target: run.target,
            compiler,
        });
    }

    fn run(self, builder: &Builder) {
        builder.ensure(Compiletest {
            compiler: self.compiler,
            target: self.target,
            mode: "ui",
            suite: "rustdoc-ui",
            path: None,
            compare_mode: None,
        })
    }
}
/// Runs the in-tree source style and convention checker.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Tidy;

impl Step for Tidy {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    /// Runs the `tidy` tool.
    ///
    /// This tool in `src/tools` checks up on various bits and pieces of style and
    /// otherwise just implements a few lint-like checks that are specific to the
    /// compiler itself.
    fn run(self, builder: &Builder) {
        let mut cmd = builder.tool_cmd(Tool::Tidy);
        cmd.arg(builder.src.join("src"));
        cmd.arg(&builder.initial_cargo);
        if !builder.config.vendor {
            cmd.arg("--no-vendor");
        }
        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }

        let _folder = builder.fold_output(|| "tidy");
        // `info` takes a `&str`; `format!` with no arguments was redundant.
        builder.info("tidy check");
        try_run(builder, &mut cmd);
    }

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/tidy")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Tidy);
    }
}
/// Returns the per-host directory where test artifacts are placed.
fn testdir(builder: &Builder, host: Interned<String>) -> PathBuf {
    builder.out.join(host).join("test")
}
// The following macros declare compiletest-based test suites. They all bottom
// out in `test_definitions!`, which generates a `Step` that forwards to the
// `Compiletest` step with the given mode/suite. The shorthand variants fix
// common combinations of `default`/`host`/`compare_mode`.

// A suite that runs by default on all targets (not just hosts).
macro_rules! default_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: false });
    }
}

// Like `default_test!`, but also re-runs the suite under a compare mode
// (e.g. NLL for the UI suite).
macro_rules! default_test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr,
                   compare_mode: $compare_mode:expr }) => {
        test_with_compare_mode!($name { path: $path, mode: $mode, suite: $suite, default: true,
                                        host: false, compare_mode: $compare_mode });
    }
}

// A suite that runs by default but only on host platforms.
macro_rules! host_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: true });
    }
}

macro_rules! test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr }) => {
        test_definitions!($name { path: $path, mode: $mode, suite: $suite, default: $default,
                                  host: $host, compare_mode: None });
    }
}

macro_rules! test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr, compare_mode: $compare_mode:expr }) => {
        test_definitions!($name { path: $path, mode: $mode, suite: $suite, default: $default,
                                  host: $host, compare_mode: Some($compare_mode) });
    }
}

// The workhorse: generates the `Step` struct and impl for one test suite.
macro_rules! test_definitions {
    ($name:ident {
        path: $path:expr,
        mode: $mode:expr,
        suite: $suite:expr,
        default: $default:expr,
        host: $host:expr,
        compare_mode: $compare_mode:expr
    }) => {
        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
        pub struct $name {
            pub compiler: Compiler,
            pub target: Interned<String>,
        }

        impl Step for $name {
            type Output = ();
            const DEFAULT: bool = $default;
            const ONLY_HOSTS: bool = $host;

            fn should_run(run: ShouldRun) -> ShouldRun {
                run.suite_path($path)
            }

            fn make_run(run: RunConfig) {
                let compiler = run.builder.compiler(run.builder.top_stage, run.host);

                run.builder.ensure($name {
                    compiler,
                    target: run.target,
                });
            }

            fn run(self, builder: &Builder) {
                builder.ensure(Compiletest {
                    compiler: self.compiler,
                    target: self.target,
                    mode: $mode,
                    suite: $suite,
                    path: Some($path),
                    compare_mode: $compare_mode,
                })
            }
        }
    }
}
// ---- Compiletest suite definitions -------------------------------------
// Suites that run by default on every tested target.

// The UI suite is additionally re-run under `--compare-mode nll`.
default_test_with_compare_mode!(Ui {
    path: "src/test/ui",
    mode: "ui",
    suite: "ui",
    compare_mode: "nll"
});

default_test!(RunPass {
    path: "src/test/run-pass",
    mode: "run-pass",
    suite: "run-pass"
});

default_test!(CompileFail {
    path: "src/test/compile-fail",
    mode: "compile-fail",
    suite: "compile-fail"
});

default_test!(ParseFail {
    path: "src/test/parse-fail",
    mode: "parse-fail",
    suite: "parse-fail"
});

default_test!(RunFail {
    path: "src/test/run-fail",
    mode: "run-fail",
    suite: "run-fail"
});

default_test!(RunPassValgrind {
    path: "src/test/run-pass-valgrind",
    mode: "run-pass-valgrind",
    suite: "run-pass-valgrind"
});

default_test!(MirOpt {
    path: "src/test/mir-opt",
    mode: "mir-opt",
    suite: "mir-opt"
});

default_test!(Codegen {
    path: "src/test/codegen",
    mode: "codegen",
    suite: "codegen"
});

default_test!(CodegenUnits {
    path: "src/test/codegen-units",
    mode: "codegen-units",
    suite: "codegen-units"
});

default_test!(Incremental {
    path: "src/test/incremental",
    mode: "incremental",
    suite: "incremental"
});

default_test!(Debuginfo {
    path: "src/test/debuginfo",
    // What this runs varies depending on the native platform being apple:
    // the "debuginfo-XXX" placeholder is resolved to debuginfo-lldb (apple)
    // or debuginfo-gdb (elsewhere) inside `Compiletest::run`.
    mode: "debuginfo-XXX",
    suite: "debuginfo"
});

// Suites that run by default but only on host configurations (they need
// librustc or host-only tooling).
host_test!(UiFullDeps {
    path: "src/test/ui-fulldeps",
    mode: "ui",
    suite: "ui-fulldeps"
});

host_test!(RunPassFullDeps {
    path: "src/test/run-pass-fulldeps",
    mode: "run-pass",
    suite: "run-pass-fulldeps"
});

host_test!(RunFailFullDeps {
    path: "src/test/run-fail-fulldeps",
    mode: "run-fail",
    suite: "run-fail-fulldeps"
});

host_test!(CompileFailFullDeps {
    path: "src/test/compile-fail-fulldeps",
    mode: "compile-fail",
    suite: "compile-fail-fulldeps"
});

host_test!(IncrementalFullDeps {
    path: "src/test/incremental-fulldeps",
    mode: "incremental",
    suite: "incremental-fulldeps"
});

host_test!(Rustdoc {
    path: "src/test/rustdoc",
    mode: "rustdoc",
    suite: "rustdoc"
});

// Pretty-printing suites: host-only and opt-in (`default: false`).
test!(Pretty {
    path: "src/test/pretty",
    mode: "pretty",
    suite: "pretty",
    default: false,
    host: true
});
test!(RunPassPretty {
    path: "src/test/run-pass/pretty",
    mode: "pretty",
    suite: "run-pass",
    default: false,
    host: true
});
test!(RunFailPretty {
    path: "src/test/run-fail/pretty",
    mode: "pretty",
    suite: "run-fail",
    default: false,
    host: true
});
test!(RunPassValgrindPretty {
    path: "src/test/run-pass-valgrind/pretty",
    mode: "pretty",
    suite: "run-pass-valgrind",
    default: false,
    host: true
});
test!(RunPassFullDepsPretty {
    path: "src/test/run-pass-fulldeps/pretty",
    mode: "pretty",
    suite: "run-pass-fulldeps",
    default: false,
    host: true
});
test!(RunFailFullDepsPretty {
    path: "src/test/run-fail-fulldeps/pretty",
    mode: "pretty",
    suite: "run-fail-fulldeps",
    default: false,
    host: true
});

default_test!(RunMake {
    path: "src/test/run-make",
    mode: "run-make",
    suite: "run-make"
});

host_test!(RunMakeFullDeps {
    path: "src/test/run-make-fulldeps",
    mode: "run-make",
    suite: "run-make-fulldeps"
});
/// Internal step that actually invokes the `compiletest` tool for one suite.
/// Only reachable via `builder.ensure` from the generated per-suite steps
/// above; never selected directly from a command-line path.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct Compiletest {
    compiler: Compiler,
    // Target triple the tests are compiled for (may differ from the host).
    target: Interned<String>,
    // Compiletest `--mode` argument, e.g. "ui" or "run-pass".
    mode: &'static str,
    // Suite directory name under `src/test`.
    suite: &'static str,
    // Suite path used to filter which individual tests run, if any.
    path: Option<&'static str>,
    // Default `--compare-mode` for a second pass over the suite (e.g. "nll").
    compare_mode: Option<&'static str>,
}
impl Step for Compiletest {
    type Output = ();

    // Never selected from the command line; always reached through the
    // generated per-suite steps.
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    /// Executes the `compiletest` tool to run a suite of tests.
    ///
    /// Compiles all tests with `compiler` for `target` with the specified
    /// compiletest `mode` and `suite` arguments. For example `mode` can be
    /// "run-pass" or `suite` can be something like `debuginfo`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let suite = self.suite;

        // Path for test suite
        let suite_path = self.path.unwrap_or("");

        // Skip codegen tests if they aren't enabled in configuration.
        if !builder.config.codegen_tests && suite == "codegen" {
            return;
        }

        if suite == "debuginfo" {
            // Skip debuginfo tests on MSVC
            if builder.config.build.contains("msvc") {
                return;
            }

            // Resolve the "debuginfo-XXX" placeholder mode into a concrete
            // debugger mode by re-entering this step with lldb on apple
            // hosts and gdb everywhere else.
            if mode == "debuginfo-XXX" {
                return if builder.config.build.contains("apple") {
                    builder.ensure(Compiletest {
                        mode: "debuginfo-lldb",
                        ..self
                    });
                } else {
                    builder.ensure(Compiletest {
                        mode: "debuginfo-gdb",
                        ..self
                    });
                };
            }

            builder.ensure(dist::DebuggerScripts {
                sysroot: builder.sysroot(compiler),
                host: target,
            });
        }

        if suite.ends_with("fulldeps") ||
            // FIXME: Does pretty need librustc compiled? Note that there are
            // fulldeps test suites with mode = pretty as well.
            mode == "pretty"
        {
            builder.ensure(compile::Rustc { compiler, target });
        }

        if builder.no_std(target) == Some(true) {
            // the `test` crate doesn't compile for no-std targets
            builder.ensure(compile::Std { compiler, target });
        } else {
            builder.ensure(compile::Test { compiler, target });
        }

        if builder.no_std(target) == Some(true) {
            // for no_std run-make (e.g. thumb*),
            // we need a host compiler which is called by cargo.
            builder.ensure(compile::Std { compiler, target: compiler.host });
        }

        builder.ensure(native::TestHelpers { target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        let mut cmd = builder.tool_cmd(Tool::Compiletest);

        // compiletest currently has... a lot of arguments, so let's just pass all
        // of them!
        cmd.arg("--compile-lib-path")
            .arg(builder.rustc_libdir(compiler));
        cmd.arg("--run-lib-path")
            .arg(builder.sysroot_libdir(compiler, target));
        cmd.arg("--rustc-path").arg(builder.rustc(compiler));

        let is_rustdoc_ui = suite.ends_with("rustdoc-ui");

        // Avoid depending on rustdoc when we don't need it.
        if mode == "rustdoc"
            || (mode == "run-make" && suite.ends_with("fulldeps"))
            || (mode == "ui" && is_rustdoc_ui)
        {
            cmd.arg("--rustdoc-path")
                .arg(builder.rustdoc(compiler.host));
        }

        cmd.arg("--src-base")
            .arg(builder.src.join("src/test").join(suite));
        cmd.arg("--build-base")
            .arg(testdir(builder, compiler.host).join(suite));
        cmd.arg("--stage-id")
            .arg(format!("stage{}-{}", compiler.stage, target));
        cmd.arg("--mode").arg(mode);
        cmd.arg("--target").arg(target);
        cmd.arg("--host").arg(&*compiler.host);
        cmd.arg("--llvm-filecheck")
            .arg(builder.llvm_filecheck(builder.config.build));

        if builder.config.cmd.bless() {
            cmd.arg("--bless");
        }

        // A `--compare-mode` given on the command line overrides the suite's
        // built-in default.
        let compare_mode = builder.config.cmd.compare_mode().or(self.compare_mode);

        if let Some(ref nodejs) = builder.config.nodejs {
            cmd.arg("--nodejs").arg(nodejs);
        }

        // rustdoc-ui tests get no implicit rustc flags; everything else gets
        // `-Crpath` plus the configured optimization/debuginfo flags.
        let mut flags = if is_rustdoc_ui {
            Vec::new()
        } else {
            vec!["-Crpath".to_string()]
        };
        if !is_rustdoc_ui {
            if builder.config.rust_optimize_tests {
                flags.push("-O".to_string());
            }
            if builder.config.rust_debuginfo_tests {
                flags.push("-g".to_string());
            }
        }
        flags.push("-Zunstable-options".to_string());
        flags.push(builder.config.cmd.rustc_args().join(" "));

        if let Some(linker) = builder.linker(target) {
            cmd.arg("--linker").arg(linker);
        }

        let hostflags = flags.clone();
        cmd.arg("--host-rustcflags").arg(hostflags.join(" "));

        // Target builds also need to find the native test helpers library.
        let mut targetflags = flags.clone();
        targetflags.push(format!(
            "-Lnative={}",
            builder.test_helpers_out(target).display()
        ));
        cmd.arg("--target-rustcflags").arg(targetflags.join(" "));

        cmd.arg("--docck-python").arg(builder.python());

        if builder.config.build.ends_with("apple-darwin") {
            // Force /usr/bin/python on macOS for LLDB tests because we're loading the
            // LLDB plugin's compiled module which only works with the system python
            // (namely not Homebrew-installed python)
            cmd.arg("--lldb-python").arg("/usr/bin/python");
        } else {
            cmd.arg("--lldb-python").arg(builder.python());
        }

        if let Some(ref gdb) = builder.config.gdb {
            cmd.arg("--gdb").arg(gdb);
        }
        if let Some(ref vers) = builder.lldb_version {
            cmd.arg("--lldb-version").arg(vers);
        }
        if let Some(ref dir) = builder.lldb_python_dir {
            cmd.arg("--lldb-python-dir").arg(dir);
        }

        // Get paths from cmd args
        let paths = match &builder.config.cmd {
            Subcommand::Test { ref paths, .. } => &paths[..],
            _ => &[],
        };

        // Get test-args by stripping the suite path prefix from each
        // requested file path.
        let mut test_args: Vec<&str> = paths
            .iter()
            .map(|p| {
                match p.strip_prefix(".") {
                    Ok(path) => path,
                    Err(_) => p,
                }
            })
            .filter(|p| p.starts_with(suite_path) && p.is_file())
            .map(|p| p.strip_prefix(suite_path).unwrap().to_str().unwrap())
            .collect();

        test_args.append(&mut builder.config.cmd.test_args());

        cmd.args(&test_args);

        if builder.is_verbose() {
            cmd.arg("--verbose");
        }
        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }

        if builder.config.llvm_enabled {
            let llvm_config = builder.ensure(native::Llvm {
                target: builder.config.build,
                emscripten: false,
            });
            // `llvm-config` can't be executed during a dry run.
            if !builder.config.dry_run {
                let llvm_version = output(Command::new(&llvm_config).arg("--version"));
                cmd.arg("--llvm-version").arg(llvm_version);
            }
            if !builder.is_rust_llvm(target) {
                cmd.arg("--system-llvm");
            }

            // Only pass correct values for these flags for the `run-make` suite as it
            // requires that a C++ compiler was configured which isn't always the case.
            if !builder.config.dry_run && suite == "run-make-fulldeps" {
                let llvm_components = output(Command::new(&llvm_config).arg("--components"));
                let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags"));
                cmd.arg("--cc")
                    .arg(builder.cc(target))
                    .arg("--cxx")
                    .arg(builder.cxx(target).unwrap())
                    .arg("--cflags")
                    .arg(builder.cflags(target).join(" "))
                    .arg("--llvm-components")
                    .arg(llvm_components.trim())
                    .arg("--llvm-cxxflags")
                    .arg(llvm_cxxflags.trim());
                if let Some(ar) = builder.ar(target) {
                    cmd.arg("--ar").arg(ar);
                }
            }
        }
        if suite == "run-make-fulldeps" && !builder.config.llvm_enabled {
            builder.info(&format!(
                "Ignoring run-make test suite as they generally don't work without LLVM"
            ));
            return;
        }

        if suite != "run-make-fulldeps" {
            // Pass empty values so compiletest's argument parsing stays happy
            // even though these flags are unused outside run-make-fulldeps.
            cmd.arg("--cc")
                .arg("")
                .arg("--cxx")
                .arg("")
                .arg("--cflags")
                .arg("")
                .arg("--llvm-components")
                .arg("")
                .arg("--llvm-cxxflags")
                .arg("");
        }

        if builder.remote_tested(target) {
            cmd.arg("--remote-test-client")
                .arg(builder.tool_exe(Tool::RemoteTestClient));
        }

        // Running a C compiler on MSVC requires a few env vars to be set, to be
        // sure to set them here.
        //
        // Note that if we encounter `PATH` we make sure to append to our own `PATH`
        // rather than stomp over it.
        if target.contains("msvc") {
            for &(ref k, ref v) in builder.cc[&target].env() {
                if k != "PATH" {
                    cmd.env(k, v);
                }
            }
        }
        cmd.env("RUSTC_BOOTSTRAP", "1");
        builder.add_rust_test_threads(&mut cmd);

        if builder.config.sanitizers {
            cmd.env("SANITIZER_SUPPORT", "1");
        }

        if builder.config.profiler {
            cmd.env("PROFILER_SUPPORT", "1");
        }

        cmd.env("RUST_TEST_TMPDIR", builder.out.join("tmp"));

        cmd.arg("--adb-path").arg("adb");
        cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR);
        if target.contains("android") {
            // Assume that cc for this target comes from the android sysroot
            cmd.arg("--android-cross-path")
                .arg(builder.cc(target).parent().unwrap().parent().unwrap());
        } else {
            cmd.arg("--android-cross-path").arg("");
        }

        builder.ci_env.force_coloring_in_ci(&mut cmd);

        let _folder = builder.fold_output(|| format!("test_{}", suite));
        builder.info(&format!(
            "Check compiletest suite={} mode={} ({} -> {})",
            suite, mode, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cmd);

        // If a compare mode applies, run the whole suite a second time with
        // `--compare-mode` appended.
        if let Some(compare_mode) = compare_mode {
            cmd.arg("--compare-mode").arg(compare_mode);
            let _folder = builder.fold_output(|| format!("test_{}_{}", suite, compare_mode));
            builder.info(&format!(
                "Check compiletest suite={} mode={} compare_mode={} ({} -> {})",
                suite, mode, compare_mode, &compiler.host, target
            ));
            let _time = util::timeit(&builder);
            try_run(builder, &mut cmd);
        }
    }
}
/// Internal step: runs `rustdoc --test` over every markdown file beneath a
/// documentation directory. Only reachable via the `test_book!` steps below.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct DocTest {
    compiler: Compiler,
    // Directory (relative to the source root) scanned for `*.md` files.
    path: &'static str,
    // Display name; also used as the toolstate key for external books.
    name: &'static str,
    // Whether this book is externally maintained and tracked via toolstate.
    is_ext_doc: bool,
}
impl Step for DocTest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    // Only reachable via `builder.ensure` from the `test_book!` steps.
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    /// Run `rustdoc --test` for all documentation in `src/doc`.
    ///
    /// This will run all tests in our markdown documentation (e.g. the book)
    /// located in `src/doc`. The `rustdoc` that's run is the one that sits next to
    /// `compiler`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;

        builder.ensure(compile::Test {
            compiler,
            target: compiler.host,
        });

        // Do a breadth-first traversal of the `src/doc` directory and just run
        // tests for all files that end in `*.md`
        let mut stack = vec![builder.src.join(self.path)];
        let _time = util::timeit(&builder);
        let _folder = builder.fold_output(|| format!("test_{}", self.name));

        let mut files = Vec::new();
        while let Some(p) = stack.pop() {
            if p.is_dir() {
                stack.extend(t!(p.read_dir()).map(|p| t!(p).path()));
                continue;
            }

            if p.extension().and_then(|s| s.to_str()) != Some("md") {
                continue;
            }

            // The nostarch directory in the book is for no starch, and so isn't
            // guaranteed to build. We don't care if it doesn't build, so skip it.
            if p.to_str().map_or(false, |p| p.contains("nostarch")) {
                continue;
            }

            files.push(p);
        }

        // Sort for a deterministic test order regardless of directory layout.
        files.sort();

        // A single failing file marks the whole book as failing, but we keep
        // testing the remaining files rather than bailing out early.
        let mut toolstate = ToolState::TestPass;
        for file in files {
            if !markdown_test(builder, compiler, &file) {
                toolstate = ToolState::TestFail;
            }
        }
        // Only externally-maintained books report their result to toolstate.
        if self.is_ext_doc {
            builder.save_toolstate(self.name, toolstate);
        }
    }
}
/// Declares one `DocTest`-backed step per documentation book. Books with
/// `default=false` are externally maintained; they report pass/fail through
/// toolstate (`is_ext_doc: !$default`) instead of failing the build outright.
macro_rules! test_book {
    ($($name:ident, $path:expr, $book_name:expr, default=$default:expr;)+) => {
        $(
            #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
            pub struct $name {
                compiler: Compiler,
            }

            impl Step for $name {
                type Output = ();
                const DEFAULT: bool = $default;
                const ONLY_HOSTS: bool = true;

                fn should_run(run: ShouldRun) -> ShouldRun {
                    run.path($path)
                }

                fn make_run(run: RunConfig) {
                    run.builder.ensure($name {
                        compiler: run.builder.compiler(run.builder.top_stage, run.host),
                    });
                }

                fn run(self, builder: &Builder) {
                    builder.ensure(DocTest {
                        compiler: self.compiler,
                        path: $path,
                        name: $book_name,
                        is_ext_doc: !$default,
                    });
                }
            }
        )+
    }
}
// The documentation books under `src/doc`; `default=false` entries are
// external submodules tracked via toolstate rather than hard build errors.
test_book!(
    Nomicon, "src/doc/nomicon", "nomicon", default=false;
    Reference, "src/doc/reference", "reference", default=false;
    RustdocBook, "src/doc/rustdoc", "rustdoc", default=true;
    RustcBook, "src/doc/rustc", "rustc", default=true;
    RustByExample, "src/doc/rust-by-example", "rust-by-example", default=false;
    TheBook, "src/doc/book", "book", default=false;
    UnstableBook, "src/doc/unstable-book", "unstable-book", default=true;
);
/// Generates the error-index markdown with `error_index_generator` and then
/// doc-tests it.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ErrorIndex {
    compiler: Compiler,
}
impl Step for ErrorIndex {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/error_index_generator")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(ErrorIndex {
            compiler: run.builder.compiler(run.builder.top_stage, run.host),
        });
    }

    /// Run the error index generator tool to execute the tests located in the error
    /// index.
    ///
    /// The `error_index_generator` tool lives in `src/tools` and is used to
    /// generate a markdown file from the error indexes of the code base which is
    /// then passed to `rustdoc --test`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;

        builder.ensure(compile::Std {
            compiler,
            target: compiler.host,
        });

        let dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&dir));
        let output = dir.join("error-index.md");

        let mut tool = builder.tool_cmd(Tool::ErrorIndex);
        tool.arg("markdown")
            .arg(&output)
            .env("CFG_BUILD", &builder.config.build)
            .env("RUSTC_ERROR_METADATA_DST", builder.extended_error_dir());

        let _folder = builder.fold_output(|| "test_error_index");
        builder.info(&format!("Testing error-index stage{}", compiler.stage));
        let _time = util::timeit(&builder);
        builder.run(&mut tool);
        // The generated markdown is itself put through `rustdoc --test`.
        markdown_test(builder, compiler, &output);
    }
}
fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) -> bool {
match File::open(markdown) {
Ok(mut file) => {
let mut contents = String::new();
t!(file.read_to_string(&mut contents));
if !contents.contains("```") {
return true;
}
}
Err(_) => {}
}
builder.info(&format!("doc tests for: {}", markdown.display()));
let mut cmd = builder.rustdoc_cmd(compiler.host);
builder.add_rust_test_threads(&mut cmd);
cmd.arg("--test");
cmd.arg(markdown);
cmd.env("RUSTC_BOOTSTRAP", "1");
let test_args = builder.config.cmd.test_args().join(" ");
cmd.arg("--test-args").arg(test_args);
if builder.config.verbose_tests {
try_run(builder, &mut cmd)
} else {
try_run_quiet(builder, &mut cmd)
}
}
/// Tests one in-tree crate of the compiler ("rustc-main" crate graph);
/// a wrapper around `Crate` that fixes the mode to `Mode::Rustc`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateLibrustc {
    compiler: Compiler,
    target: Interned<String>,
    // Whether to `cargo test` or `cargo bench`.
    test_kind: TestKind,
    // Name of the crate to test, passed on via `Crate`.
    krate: Interned<String>,
}
impl Step for CrateLibrustc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.krate("rustc-main")
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        // Schedule one step per in-tree rustc crate whose path matched the
        // command-line selection.
        for krate in builder.in_tree_crates("rustc-main") {
            if run.path.ends_with(&krate.path) {
                let test_kind = builder.kind.into();

                builder.ensure(CrateLibrustc {
                    compiler,
                    target: run.target,
                    test_kind,
                    krate: krate.name,
                });
            }
        }
    }

    // Delegate to the generic `Crate` step with `Mode::Rustc`.
    fn run(self, builder: &Builder) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Rustc,
            test_kind: self.test_kind,
            krate: self.krate,
        });
    }
}
/// Tests crates that are excluded from the default std crate list (jemalloc
/// and the sanitizer runtimes); a wrapper around `Crate` with `Mode::Std`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateNotDefault {
    compiler: Compiler,
    target: Interned<String>,
    // Whether to `cargo test` or `cargo bench`.
    test_kind: TestKind,
    // Crate name, interned later when delegating to `Crate`.
    krate: &'static str,
}
impl Step for CrateNotDefault {
    type Output = ();

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/liballoc_jemalloc")
            .path("src/librustc_asan")
            .path("src/librustc_lsan")
            .path("src/librustc_msan")
            .path("src/librustc_tsan")
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        let test_kind = builder.kind.into();

        builder.ensure(CrateNotDefault {
            compiler,
            target: run.target,
            test_kind,
            // Map the requested source path back to its crate name.
            krate: match run.path {
                _ if run.path.ends_with("src/liballoc_jemalloc") => "alloc_jemalloc",
                _ if run.path.ends_with("src/librustc_asan") => "rustc_asan",
                _ if run.path.ends_with("src/librustc_lsan") => "rustc_lsan",
                _ if run.path.ends_with("src/librustc_msan") => "rustc_msan",
                _ if run.path.ends_with("src/librustc_tsan") => "rustc_tsan",
                _ => panic!("unexpected path {:?}", run.path),
            },
        });
    }

    // Delegate to the generic `Crate` step with `Mode::Std`.
    fn run(self, builder: &Builder) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Std,
            test_kind: self.test_kind,
            krate: INTERNER.intern_str(self.krate),
        });
    }
}
/// Generic step that runs `cargo test`/`cargo bench` for one in-tree crate
/// (libstd, libtest, or a rustc crate depending on `mode`).
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Crate {
    pub compiler: Compiler,
    pub target: Interned<String>,
    pub mode: Mode,
    // Whether to `cargo test` or `cargo bench`.
    pub test_kind: TestKind,
    // Crate name passed to cargo via `-p`.
    pub krate: Interned<String>,
}
impl Step for Crate {
    type Output = ();
    const DEFAULT: bool = true;

    fn should_run(mut run: ShouldRun) -> ShouldRun {
        let builder = run.builder;
        run = run.krate("test");
        // Register every local in-tree std dependency as a selectable path,
        // excluding crates that only build for special configurations.
        for krate in run.builder.in_tree_crates("std") {
            if krate.is_local(&run.builder)
                && !krate.name.contains("jemalloc")
                && !(krate.name.starts_with("rustc_") && krate.name.ends_with("san"))
                && krate.name != "dlmalloc"
            {
                run = run.path(krate.local_path(&builder).to_str().unwrap());
            }
        }
        run
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        // Helper: schedule one `Crate` step for a matched crate.
        let make = |mode: Mode, krate: &CargoCrate| {
            let test_kind = builder.kind.into();

            builder.ensure(Crate {
                compiler,
                target: run.target,
                mode,
                test_kind,
                krate: krate.name,
            });
        };

        for krate in builder.in_tree_crates("std") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Std, krate);
            }
        }
        for krate in builder.in_tree_crates("test") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Test, krate);
            }
        }
    }

    /// Run all unit tests plus documentation tests for a given crate defined
    /// by a `Cargo.toml` (single manifest)
    ///
    /// This is what runs tests for crates like the standard library, compiler, etc.
    /// It essentially is the driver for running `cargo test`.
    ///
    /// Currently this runs all tests for a DAG by passing a bunch of `-p foo`
    /// arguments, and those arguments are discovered from `cargo metadata`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let test_kind = self.test_kind;
        let krate = self.krate;

        builder.ensure(compile::Test { compiler, target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        // If we're not doing a full bootstrap but we're testing a stage2 version of
        // libstd, then what we're actually testing is the libstd produced in
        // stage1. Reflect that here by updating the compiler that we're working
        // with automatically.
        let compiler = if builder.force_use_stage1(compiler, target) {
            builder.compiler(1, compiler.host)
        } else {
            compiler.clone()
        };

        let mut cargo = builder.cargo(compiler, mode, target, test_kind.subcommand());
        match mode {
            Mode::Std => {
                compile::std_cargo(builder, &compiler, target, &mut cargo);
            }
            Mode::Test => {
                compile::test_cargo(builder, &compiler, target, &mut cargo);
            }
            Mode::Rustc => {
                builder.ensure(compile::Rustc { compiler, target });
                compile::rustc_cargo(builder, &mut cargo);
            }
            _ => panic!("can only test libraries"),
        };

        // Build up the base `cargo test` command.
        //
        // Pass in some standard flags then iterate over the graph we've discovered
        // in `cargo metadata` with the maps above and figure out what `-p`
        // arguments need to get passed.
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }
        match builder.doc_tests {
            DocTests::Only => {
                cargo.arg("--doc");
            }
            DocTests::No => {
                cargo.args(&["--lib", "--bins", "--examples", "--tests", "--benches"]);
            }
            DocTests::Yes => {}
        }

        cargo.arg("-p").arg(krate);

        // The tests are going to run with the *target* libraries, so we need to
        // ensure that those libraries show up in the LD_LIBRARY_PATH equivalent.
        //
        // Note that to run the compiler we need to run with the *host* libraries,
        // but our wrapper scripts arrange for that to be the case anyway.
        let mut dylib_path = dylib_path();
        dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target)));
        cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        if target.contains("emscripten") {
            // Emscripten binaries are executed through node.
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                builder
                    .config
                    .nodejs
                    .as_ref()
                    .expect("nodejs not configured"),
            );
        } else if target.starts_with("wasm32") {
            // Warn about running tests without the `wasm_syscall` feature enabled.
            // The javascript shim implements the syscall interface so that test
            // output can be correctly reported.
            if !builder.config.wasm_syscall {
                builder.info(&format!(
                    "Libstd was built without `wasm_syscall` feature enabled: \
                     test output may not be visible."
                ));
            }

            // On the wasm32-unknown-unknown target we're using LTO which is
            // incompatible with `-C prefer-dynamic`, so disable that here
            cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");

            let node = builder
                .config
                .nodejs
                .as_ref()
                .expect("nodejs not configured");
            let runner = format!(
                "{} {}/src/etc/wasm32-shim.js",
                node.display(),
                builder.src.display()
            );
            cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), &runner);
        } else if builder.remote_tested(target) {
            // Remote/emulated targets run binaries through the remote-test-client.
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                format!("{} run", builder.tool_exe(Tool::RemoteTestClient).display()),
            );
        }

        let _folder = builder.fold_output(|| {
            format!(
                "{}_stage{}-{}",
                test_kind.subcommand(),
                compiler.stage,
                krate
            )
        });
        builder.info(&format!(
            "{} {} stage{} ({} -> {})",
            test_kind, krate, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cargo);
    }
}
/// Runs the test suite of the `rustdoc` tool crate itself.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateRustdoc {
    host: Interned<String>,
    // Whether to `cargo test` or `cargo bench`.
    test_kind: TestKind,
}
impl Step for CrateRustdoc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.paths(&["src/librustdoc", "src/tools/rustdoc"])
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;

        let test_kind = builder.kind.into();

        builder.ensure(CrateRustdoc {
            host: run.host,
            test_kind,
        });
    }

    fn run(self, builder: &Builder) {
        let test_kind = self.test_kind;

        let compiler = builder.compiler(builder.top_stage, self.host);
        let target = compiler.host;
        // rustdoc links against librustc, so it must be built first.
        builder.ensure(compile::Rustc { compiler, target });

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 target,
                                                 test_kind.subcommand(),
                                                 "src/tools/rustdoc",
                                                 SourceType::InTree);
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        // Pin to the in-tree rustdoc crate by name and version.
        cargo.arg("-p").arg("rustdoc:0.0.0");

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        let _folder = builder
            .fold_output(|| format!("{}_stage{}-rustdoc", test_kind.subcommand(), compiler.stage));
        builder.info(&format!(
            "{} rustdoc stage{} ({} -> {})",
            test_kind, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cargo);
    }
}
/// Converts a target triple into the form used inside cargo environment
/// variable names: `-` becomes `_` and every character is uppercased
/// (e.g. "wasm32-unknown-unknown" -> "WASM32_UNKNOWN_UNKNOWN").
fn envify(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for c in s.chars() {
        let c = if c == '-' { '_' } else { c };
        // `to_uppercase` yields an iterator since some characters uppercase
        // to multiple code points.
        out.extend(c.to_uppercase());
    }
    out
}
/// Some test suites are run inside emulators or on remote devices, and most
/// of our test binaries are linked dynamically which means we need to ship
/// the standard library and such to the emulator ahead of time. This step
/// represents this and is a dependency of all test suites.
///
/// Most of the time this is a noop. For some steps such as shipping data to
/// QEMU we have to build our own tools so we've got conditional dependencies
/// on those programs as well. Note that the remote test client is built for
/// the build target (us) and the server is built for the target.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct RemoteCopyLibs {
    compiler: Compiler,
    // Target triple whose sysroot dylibs get pushed to the remote device.
    target: Interned<String>,
}
impl Step for RemoteCopyLibs {
    type Output = ();

    // Only ever requested internally by test steps, never from a path.
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        // No-op unless this target is actually tested remotely.
        if !builder.remote_tested(target) {
            return;
        }

        builder.ensure(compile::Test { compiler, target });

        builder.info(&format!("REMOTE copy libs to emulator ({})", target));
        t!(fs::create_dir_all(builder.out.join("tmp")));

        // The test server runs on the target; build it with a stage0 compiler.
        let server = builder.ensure(tool::RemoteTestServer {
            compiler: compiler.with_stage(0),
            target,
        });

        // Spawn the emulator and wait for it to come online
        let tool = builder.tool_exe(Tool::RemoteTestClient);
        let mut cmd = Command::new(&tool);
        cmd.arg("spawn-emulator")
            .arg(target)
            .arg(&server)
            .arg(builder.out.join("tmp"));
        if let Some(rootfs) = builder.qemu_rootfs(target) {
            cmd.arg(rootfs);
        }
        builder.run(&mut cmd);

        // Push all our dylibs to the emulator
        for f in t!(builder.sysroot_libdir(compiler, target).read_dir()) {
            let f = t!(f);
            let name = f.file_name().into_string().unwrap();
            if util::is_dylib(&name) {
                builder.run(Command::new(&tool).arg("push").arg(f.path()));
            }
        }
    }
}
/// Runs "distcheck": unpacks the source tarballs and does a `make check`
/// plus a libstd lockfile sanity check from the unpacked trees.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Distcheck;
impl Step for Distcheck {
    type Output = ();

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("distcheck")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Distcheck);
    }

    /// Run "distcheck", a 'make check' from a tarball
    fn run(self, builder: &Builder) {
        // Note: `format!` of a plain literal was pointless; pass &str directly.
        builder.info("Distcheck");
        let dir = builder.out.join("tmp").join("distcheck");
        // Best-effort cleanup of a previous run's scratch dir.
        let _ = fs::remove_dir_all(&dir);
        t!(fs::create_dir_all(&dir));

        // Guarantee that these are built before we begin running.
        builder.ensure(dist::PlainSourceTarball);
        builder.ensure(dist::Src);

        // Unpack the plain source tarball into the scratch directory...
        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::PlainSourceTarball))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);

        // ...then run a vendored `./configure` + `make check` inside it.
        builder.run(
            Command::new("./configure")
                .args(&builder.config.configure_args)
                .arg("--enable-vendor")
                .current_dir(&dir),
        );
        builder.run(
            Command::new(build_helper::make(&builder.config.build))
                .arg("check")
                .current_dir(&dir),
        );

        // Now make sure that rust-src has all of libstd's dependencies
        builder.info("Distcheck rust-src");
        let dir = builder.out.join("tmp").join("distcheck-src");
        let _ = fs::remove_dir_all(&dir);
        t!(fs::create_dir_all(&dir));

        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::Src))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);

        // `generate-lockfile` fails if any libstd dependency is missing from
        // the rust-src component's vendored sources.
        let toml = dir.join("rust-src/lib/rustlib/src/rust/src/libstd/Cargo.toml");
        builder.run(
            Command::new(&builder.initial_cargo)
                .arg("generate-lockfile")
                .arg("--manifest-path")
                .arg(&toml)
                .current_dir(&dir),
        );
    }
}
/// Runs `cargo test` on the bootstrap build system itself.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Bootstrap;
impl Step for Bootstrap {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/bootstrap")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Bootstrap);
    }

    /// Test the build system itself
    fn run(self, builder: &Builder) {
        let mut cargo = Command::new(&builder.initial_cargo);
        cargo
            .arg("test")
            .current_dir(builder.src.join("src/bootstrap"))
            .env("RUSTFLAGS", "-Cdebuginfo=2")
            .env("CARGO_TARGET_DIR", builder.out.join("bootstrap"))
            .env("RUSTC_BOOTSTRAP", "1")
            .env("RUSTC", &builder.initial_rustc);
        // Use the same rustc flags for testing as for "normal" compilation,
        // so that Cargo doesn't recompile the entire dependency graph every
        // time: https://github.com/rust-lang/rust/issues/49215
        if let Some(flags) = option_env!("RUSTFLAGS") {
            cargo.env("RUSTFLAGS", flags);
        }
        if !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }
        cargo.arg("--").args(&builder.config.cmd.test_args());
        // rustbuild tests are racy on directory creation so just run them one
        // at a time. Since there's not many this shouldn't be a problem.
        cargo.arg("--test-threads=1");
        try_run(builder, &mut cargo);
    }
}
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the test-related targets of the build system.
//!
//! This file implements the various regression test suites that we execute on
//! our CI.
use std::env;
use std::ffi::OsString;
use std::fmt;
use std::fs::{self, File};
use std::io::Read;
use std::iter;
use std::path::{Path, PathBuf};
use std::process::Command;
use build_helper::{self, output};
use builder::{Builder, Compiler, Kind, RunConfig, ShouldRun, Step};
use cache::{Interned, INTERNER};
use compile;
use dist;
use flags::Subcommand;
use native;
use tool::{self, Tool, SourceType};
use toolstate::ToolState;
use util::{self, dylib_path, dylib_path_var};
use Crate as CargoCrate;
use {DocTests, Mode};
const ADB_TEST_DIR: &str = "/data/tmp/work";
/// The two modes of the test runner; tests or benchmarks.
///
/// Produced from `builder::Kind` via the `From` impl below; the cargo
/// subcommand to invoke comes from `TestKind::subcommand`.
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, PartialOrd, Ord)]
pub enum TestKind {
    /// Run `cargo test`
    Test,
    /// Run `cargo bench`
    Bench,
}
impl From<Kind> for TestKind {
    /// Maps the builder's subcommand kind onto a test-runner mode; any kind
    /// other than `Test`/`Bench` is a caller bug.
    fn from(kind: Kind) -> Self {
        match kind {
            Kind::Test => Self::Test,
            Kind::Bench => Self::Bench,
            other => panic!("unexpected kind in crate: {:?}", other),
        }
    }
}
impl TestKind {
    /// The cargo subcommand ("test" or "bench") corresponding to this kind.
    fn subcommand(self) -> &'static str {
        if let TestKind::Bench = self {
            "bench"
        } else {
            "test"
        }
    }
}
impl fmt::Display for TestKind {
    /// Human-readable progress verb used in build-step log lines.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let verb = match *self {
            TestKind::Test => "Testing",
            TestKind::Bench => "Benchmarking",
        };
        f.write_str(verb)
    }
}
/// Runs `cmd`, honoring the builder's fail-fast setting.
///
/// With fail-fast enabled (the default) this delegates to `builder.run`,
/// which aborts the build on error. Otherwise a failure is recorded in
/// `delayed_failures` (reported at the end of the build) and `false` is
/// returned so callers can skip follow-up work such as toolstate updates.
fn try_run(builder: &Builder, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run(cmd);
    } else if !builder.try_run(cmd) {
        builder
            .delayed_failures
            .borrow_mut()
            .push(format!("{:?}", cmd));
        return false;
    }
    true
}
/// Like `try_run`, but suppresses the command's output unless it fails.
///
/// Failures are either fatal (fail-fast) or recorded in `delayed_failures`
/// with `false` returned, mirroring `try_run`.
fn try_run_quiet(builder: &Builder, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run_quiet(cmd);
    } else if !builder.try_run_quiet(cmd) {
        builder
            .delayed_failures
            .borrow_mut()
            .push(format!("{:?}", cmd));
        return false;
    }
    true
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Linkcheck {
    host: Interned<String>,
}
impl Step for Linkcheck {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        // Only runs by default when documentation is being built at all.
        let docs_enabled = run.builder.config.docs;
        run.path("src/tools/linkchecker")
            .default_condition(docs_enabled)
    }
    fn make_run(run: RunConfig) {
        run.builder.ensure(Linkcheck { host: run.target });
    }
    /// Runs the `linkchecker` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will verify the validity of all our links in the
    /// documentation to ensure we don't have a bunch of dead ones.
    fn run(self, builder: &Builder) {
        let host = self.host;
        builder.info(&format!("Linkcheck ({})", host));
        // The documentation must exist before its links can be checked.
        builder.default_doc(None);
        let _time = util::timeit(&builder);
        let doc_dir = builder.out.join(host).join("doc");
        try_run(builder, builder.tool_cmd(Tool::Linkchecker).arg(doc_dir));
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargotest {
    stage: u32,
    host: Interned<String>,
}
impl Step for Cargotest {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/cargotest")
    }
    fn make_run(run: RunConfig) {
        let stage = run.builder.top_stage;
        run.builder.ensure(Cargotest { stage, host: run.target });
    }
    /// Runs the `cargotest` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will check out a few Rust projects and run `cargo
    /// test` to ensure that we don't regress the test suites there.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);
        // The checked-out projects compile against the freshly built rustc.
        builder.ensure(compile::Rustc { compiler, target: compiler.host });
        // Note that this is a short, cryptic, and not scoped directory name. This
        // is currently to minimize the length of path on Windows where we otherwise
        // quickly run into path name limit constraints.
        let out_dir = builder.out.join("ct");
        t!(fs::create_dir_all(&out_dir));
        let _time = util::timeit(&builder);
        let mut cmd = builder.tool_cmd(Tool::CargoTest);
        cmd.arg(&builder.initial_cargo)
            .arg(&out_dir)
            .env("RUSTC", builder.rustc(compiler))
            .env("RUSTDOC", builder.rustdoc(compiler.host));
        try_run(builder, &mut cmd);
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargo {
    stage: u32,
    host: Interned<String>,
}
impl Step for Cargo {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/cargo")
    }
    fn make_run(run: RunConfig) {
        let stage = run.builder.top_stage;
        run.builder.ensure(Cargo { stage, host: run.target });
    }
    /// Runs `cargo test` for `cargo` packaged with Rust.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);
        // Cargo itself must be built before its suite can run.
        builder.ensure(tool::Cargo { compiler, target: self.host });
        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            self.host,
            "test",
            "src/tools/cargo",
            SourceType::Submodule,
        );
        if !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }
        // Don't run cross-compile tests, we may not have cross-compiled libstd libs
        // available.
        cargo.env("CFG_DISABLE_CROSS_TESTS", "1");
        // The suite must find the stage rustc via PATH (see `path_for_cargo`).
        cargo.env("PATH", &path_for_cargo(builder, compiler));
        try_run(builder, &mut cargo);
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rls {
    stage: u32,
    host: Interned<String>,
}
impl Step for Rls {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rls")
    }
    fn make_run(run: RunConfig) {
        run.builder.ensure(Rls {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }
    /// Runs `cargo test` for the rls.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);
        // The RLS is an external tool: a failed build is reported without
        // aborting the whole test run.
        let built = builder.ensure(tool::Rls {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if built.is_none() {
            eprintln!("failed to test rls: could not build");
            return;
        }
        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            self.host,
            "test",
            "src/tools/rls",
            SourceType::Submodule,
        );
        builder.add_rustc_lib_path(compiler, &mut cargo);
        // A passing suite is recorded in the toolstate tracker.
        if try_run(builder, &mut cargo) {
            builder.save_toolstate("rls", ToolState::TestPass);
        }
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rustfmt {
    stage: u32,
    host: Interned<String>,
}
impl Step for Rustfmt {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rustfmt")
    }
    fn make_run(run: RunConfig) {
        run.builder.ensure(Rustfmt {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }
    /// Runs `cargo test` for rustfmt.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);
        // Rustfmt is an external tool: a failed build is reported without
        // aborting the whole test run.
        let built = builder.ensure(tool::Rustfmt {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if built.is_none() {
            eprintln!("failed to test rustfmt: could not build");
            return;
        }
        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            self.host,
            "test",
            "src/tools/rustfmt",
            SourceType::Submodule,
        );
        // Point rustfmt's test suite at a scratch directory via env var.
        let dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&dir));
        cargo.env("RUSTFMT_TEST_DIR", dir);
        builder.add_rustc_lib_path(compiler, &mut cargo);
        // A passing suite is recorded in the toolstate tracker.
        if try_run(builder, &mut cargo) {
            builder.save_toolstate("rustfmt", ToolState::TestPass);
        }
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Miri {
    stage: u32,
    host: Interned<String>,
}
impl Step for Miri {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        // Only enabled by default when the configuration asks for miri tests.
        let test_miri = run.builder.config.test_miri;
        run.path("src/tools/miri").default_condition(test_miri)
    }
    fn make_run(run: RunConfig) {
        run.builder.ensure(Miri {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }
    /// Runs `cargo test` for miri.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);
        // Miri is an external tool: a failed build is reported without
        // aborting the whole test run.
        let miri = match builder.ensure(tool::Miri {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        }) {
            Some(path) => path,
            None => {
                eprintln!("failed to test miri: could not build");
                return;
            }
        };
        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            self.host,
            "test",
            "src/tools/miri",
            SourceType::Submodule,
        );
        // miri tests need to know about the stage sysroot
        cargo.env("MIRI_SYSROOT", builder.sysroot(compiler));
        cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
        cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
        cargo.env("MIRI_PATH", miri);
        builder.add_rustc_lib_path(compiler, &mut cargo);
        // A passing suite is recorded in the toolstate tracker.
        if try_run(builder, &mut cargo) {
            builder.save_toolstate("miri", ToolState::TestPass);
        }
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Clippy {
    stage: u32,
    host: Interned<String>,
}
impl Step for Clippy {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    // Opt-in: clippy's suite only runs when its path is requested explicitly.
    const DEFAULT: bool = false;
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/clippy")
    }
    fn make_run(run: RunConfig) {
        run.builder.ensure(Clippy {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }
    /// Runs `cargo test` for clippy.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);
        // Clippy is an external tool; a failed build is reported below rather
        // than aborting the whole test run.
        let clippy = builder.ensure(tool::Clippy {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let Some(clippy) = clippy {
            let mut cargo = tool::prepare_tool_cargo(builder,
                compiler,
                Mode::ToolRustc,
                host,
                "test",
                "src/tools/clippy",
                SourceType::Submodule);
            // clippy tests need to know about the stage sysroot
            cargo.env("SYSROOT", builder.sysroot(compiler));
            cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            let host_libs = builder
                .stage_out(compiler, Mode::ToolRustc)
                .join(builder.cargo_dir());
            cargo.env("HOST_LIBS", host_libs);
            // clippy tests need to find the driver
            cargo.env("CLIPPY_DRIVER_PATH", clippy);
            builder.add_rustc_lib_path(compiler, &mut cargo);
            // A passing suite is recorded in the toolstate tracker.
            if try_run(builder, &mut cargo) {
                builder.save_toolstate("clippy-driver", ToolState::TestPass);
            }
        } else {
            eprintln!("failed to test clippy: could not build");
        }
    }
}
/// Builds a `PATH` value with the stage compiler's `bin` directory first, so
/// that cargo's test suite resolves the right `rustc`.
///
/// NB. we have to use PATH and not RUSTC because the Cargo test suite has
/// tests that will fail if rustc is not spelled `rustc`.
fn path_for_cargo(builder: &Builder, compiler: Compiler) -> OsString {
    let rustc_bin = builder.sysroot(compiler).join("bin");
    let old_path = env::var_os("PATH").unwrap_or_default();
    let entries = iter::once(rustc_bin).chain(env::split_paths(&old_path));
    env::join_paths(entries).expect("")
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocTheme {
    pub compiler: Compiler,
}
impl Step for RustdocTheme {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rustdoc-themes")
    }
    fn make_run(run: RunConfig) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);
        // Field-init shorthand, consistent with the rest of this file
        // (was the redundant `compiler: compiler`).
        run.builder.ensure(RustdocTheme { compiler });
    }
    /// Runs the `rustdoc-themes` checker over rustdoc's bundled theme files.
    fn run(self, builder: &Builder) {
        let rustdoc = builder.out.join("bootstrap/debug/rustdoc");
        let mut cmd = builder.tool_cmd(Tool::RustdocTheme);
        cmd.arg(rustdoc.to_str().unwrap())
            .arg(
                builder
                    .src
                    .join("src/librustdoc/html/static/themes")
                    .to_str()
                    .unwrap(),
            )
            // Environment consumed by the rustdoc shim so it can locate the
            // real rustdoc and the stage sysroot.
            .env("RUSTC_STAGE", self.compiler.stage.to_string())
            .env("RUSTC_SYSROOT", builder.sysroot(self.compiler))
            .env(
                "RUSTDOC_LIBDIR",
                builder.sysroot_libdir(self.compiler, self.compiler.host),
            )
            .env("CFG_RELEASE_CHANNEL", &builder.config.channel)
            .env("RUSTDOC_REAL", builder.rustdoc(self.compiler.host))
            .env("RUSTDOC_CRATE_VERSION", builder.rust_version())
            .env("RUSTC_BOOTSTRAP", "1");
        if let Some(linker) = builder.linker(self.compiler.host) {
            cmd.env("RUSTC_TARGET_LINKER", linker);
        }
        try_run(builder, &mut cmd);
    }
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocJS {
    pub host: Interned<String>,
    pub target: Interned<String>,
}
impl Step for RustdocJS {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/test/rustdoc-js")
    }
    fn make_run(run: RunConfig) {
        run.builder.ensure(RustdocJS {
            host: run.host,
            target: run.target,
        });
    }
    /// Runs the rustdoc-js suite through `tester.js` under node.js, skipping
    /// with a note when no node.js was configured.
    fn run(self, builder: &Builder) {
        if let Some(ref nodejs) = builder.config.nodejs {
            let mut command = Command::new(nodejs);
            command.args(&["src/tools/rustdoc-js/tester.js", &*self.host]);
            // The tester runs against std's generated docs, so build them first.
            builder.ensure(::doc::Std {
                target: self.target,
                stage: builder.top_stage,
            });
            builder.run(&mut command);
        } else {
            // No-arg `format!` was redundant; pass the literal directly.
            builder.info("No nodejs found, skipping \"src/test/rustdoc-js\" tests");
        }
    }
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocUi {
    pub host: Interned<String>,
    pub target: Interned<String>,
    pub compiler: Compiler,
}
impl Step for RustdocUi {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/test/rustdoc-ui")
    }
    fn make_run(run: RunConfig) {
        let builder = run.builder;
        run.builder.ensure(RustdocUi {
            host: run.host,
            target: run.target,
            compiler: builder.compiler(builder.top_stage, run.host),
        });
    }
    /// Forwards to the generic compiletest runner, selecting the `rustdoc-ui`
    /// suite in `ui` mode.
    fn run(self, builder: &Builder) {
        builder.ensure(Compiletest {
            compiler: self.compiler,
            target: self.target,
            mode: "ui",
            suite: "rustdoc-ui",
            path: None,
            compare_mode: None,
        })
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Tidy;
impl Step for Tidy {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    /// Runs the `tidy` tool.
    ///
    /// This tool in `src/tools` checks up on various bits and pieces of style and
    /// otherwise just implements a few lint-like checks that are specific to the
    /// compiler itself.
    fn run(self, builder: &Builder) {
        let mut cmd = builder.tool_cmd(Tool::Tidy);
        cmd.arg(builder.src.join("src"));
        cmd.arg(&builder.initial_cargo);
        if !builder.config.vendor {
            cmd.arg("--no-vendor");
        }
        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }
        let _folder = builder.fold_output(|| "tidy");
        // No-arg `format!` was redundant; pass the literal directly.
        builder.info("tidy check");
        try_run(builder, &mut cmd);
    }
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/tidy")
    }
    fn make_run(run: RunConfig) {
        run.builder.ensure(Tidy);
    }
}
/// Directory under the build output where `host`'s test artifacts live.
fn testdir(builder: &Builder, host: Interned<String>) -> PathBuf {
    let mut dir = builder.out.join(host);
    dir.push("test");
    dir
}
// Declares a compiletest suite that runs by default and needs only target
// artifacts (host: false).
macro_rules! default_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: false });
    }
}
// Like `default_test!`, but the suite is additionally re-run under the given
// compare mode.
macro_rules! default_test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr,
                   compare_mode: $compare_mode:expr }) => {
        test_with_compare_mode!($name { path: $path, mode: $mode, suite: $suite, default: true,
                                        host: false, compare_mode: $compare_mode });
    }
}
// Declares a compiletest suite that runs by default and requires host
// (compiler) artifacts (host: true).
macro_rules! host_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: true });
    }
}
// Base case: declares a suite via `test_definitions!` with no compare mode.
macro_rules! test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr }) => {
        test_definitions!($name { path: $path, mode: $mode, suite: $suite, default: $default,
                                  host: $host, compare_mode: None });
    }
}
// Declares a suite via `test_definitions!` that is also re-run under the
// given compare mode.
macro_rules! test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr, compare_mode: $compare_mode:expr }) => {
        test_definitions!($name { path: $path, mode: $mode, suite: $suite, default: $default,
                                  host: $host, compare_mode: Some($compare_mode) });
    }
}
// Expands to a compiletest-backed `Step`: a struct carrying the compiler and
// target whose `run` delegates to the `Compiletest` step with the given mode
// and suite, and which is selected by its suite path.
macro_rules! test_definitions {
    ($name:ident {
        path: $path:expr,
        mode: $mode:expr,
        suite: $suite:expr,
        default: $default:expr,
        host: $host:expr,
        compare_mode: $compare_mode:expr
    }) => {
        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
        pub struct $name {
            pub compiler: Compiler,
            pub target: Interned<String>,
        }
        impl Step for $name {
            type Output = ();
            const DEFAULT: bool = $default;
            const ONLY_HOSTS: bool = $host;
            fn should_run(run: ShouldRun) -> ShouldRun {
                run.suite_path($path)
            }
            fn make_run(run: RunConfig) {
                let compiler = run.builder.compiler(run.builder.top_stage, run.host);
                run.builder.ensure($name {
                    compiler,
                    target: run.target,
                });
            }
            fn run(self, builder: &Builder) {
                builder.ensure(Compiletest {
                    compiler: self.compiler,
                    target: self.target,
                    mode: $mode,
                    suite: $suite,
                    path: Some($path),
                    compare_mode: $compare_mode,
                })
            }
        }
    }
}
// === Compiletest suite declarations =======================================
// Each invocation below expands (via `test_definitions!`) to a `Step` that
// forwards to the `Compiletest` step with the given mode and suite.
default_test_with_compare_mode!(Ui {
    path: "src/test/ui",
    mode: "ui",
    suite: "ui",
    compare_mode: "nll"
});
default_test!(RunPass {
    path: "src/test/run-pass",
    mode: "run-pass",
    suite: "run-pass"
});
default_test!(CompileFail {
    path: "src/test/compile-fail",
    mode: "compile-fail",
    suite: "compile-fail"
});
default_test!(ParseFail {
    path: "src/test/parse-fail",
    mode: "parse-fail",
    suite: "parse-fail"
});
default_test!(RunFail {
    path: "src/test/run-fail",
    mode: "run-fail",
    suite: "run-fail"
});
default_test!(RunPassValgrind {
    path: "src/test/run-pass-valgrind",
    mode: "run-pass-valgrind",
    suite: "run-pass-valgrind"
});
default_test!(MirOpt {
    path: "src/test/mir-opt",
    mode: "mir-opt",
    suite: "mir-opt"
});
default_test!(Codegen {
    path: "src/test/codegen",
    mode: "codegen",
    suite: "codegen"
});
default_test!(CodegenUnits {
    path: "src/test/codegen-units",
    mode: "codegen-units",
    suite: "codegen-units"
});
default_test!(Incremental {
    path: "src/test/incremental",
    mode: "incremental",
    suite: "incremental"
});
default_test!(Debuginfo {
    path: "src/test/debuginfo",
    // What this runs varies depending on the native platform being apple;
    // `Compiletest::run` rewrites this placeholder to -gdb or -lldb.
    mode: "debuginfo-XXX",
    suite: "debuginfo"
});
// Suites that additionally need host compiler artifacts.
host_test!(UiFullDeps {
    path: "src/test/ui-fulldeps",
    mode: "ui",
    suite: "ui-fulldeps"
});
host_test!(RunPassFullDeps {
    path: "src/test/run-pass-fulldeps",
    mode: "run-pass",
    suite: "run-pass-fulldeps"
});
host_test!(RunFailFullDeps {
    path: "src/test/run-fail-fulldeps",
    mode: "run-fail",
    suite: "run-fail-fulldeps"
});
host_test!(CompileFailFullDeps {
    path: "src/test/compile-fail-fulldeps",
    mode: "compile-fail",
    suite: "compile-fail-fulldeps"
});
host_test!(IncrementalFullDeps {
    path: "src/test/incremental-fulldeps",
    mode: "incremental",
    suite: "incremental-fulldeps"
});
host_test!(Rustdoc {
    path: "src/test/rustdoc",
    mode: "rustdoc",
    suite: "rustdoc"
});
// Pretty-printing suites are opt-in (default: false).
test!(Pretty {
    path: "src/test/pretty",
    mode: "pretty",
    suite: "pretty",
    default: false,
    host: true
});
test!(RunPassPretty {
    path: "src/test/run-pass/pretty",
    mode: "pretty",
    suite: "run-pass",
    default: false,
    host: true
});
test!(RunFailPretty {
    path: "src/test/run-fail/pretty",
    mode: "pretty",
    suite: "run-fail",
    default: false,
    host: true
});
test!(RunPassValgrindPretty {
    path: "src/test/run-pass-valgrind/pretty",
    mode: "pretty",
    suite: "run-pass-valgrind",
    default: false,
    host: true
});
test!(RunPassFullDepsPretty {
    path: "src/test/run-pass-fulldeps/pretty",
    mode: "pretty",
    suite: "run-pass-fulldeps",
    default: false,
    host: true
});
test!(RunFailFullDepsPretty {
    path: "src/test/run-fail-fulldeps/pretty",
    mode: "pretty",
    suite: "run-fail-fulldeps",
    default: false,
    host: true
});
default_test!(RunMake {
    path: "src/test/run-make",
    mode: "run-make",
    suite: "run-make"
});
host_test!(RunMakeFullDeps {
    path: "src/test/run-make-fulldeps",
    mode: "run-make",
    suite: "run-make-fulldeps"
});
// Shared implementation behind every compiletest-driven suite above. Never
// selected by path directly (see `should_run`); always reached via the
// macro-generated wrapper steps.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct Compiletest {
    compiler: Compiler,
    target: Interned<String>,
    // Compiletest `--mode` value, e.g. "ui" or "run-pass".
    mode: &'static str,
    // Suite directory name under `src/test`.
    suite: &'static str,
    // Source path of the suite, used to strip prefixes off user-supplied
    // test paths; `None` means no path filtering.
    path: Option<&'static str>,
    // Optional `--compare-mode` under which the suite is re-run.
    compare_mode: Option<&'static str>,
}
impl Step for Compiletest {
    type Output = ();
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }
    /// Executes the `compiletest` tool to run a suite of tests.
    ///
    /// Compiles all tests with `compiler` for `target` with the specified
    /// compiletest `mode` and `suite` arguments. For example `mode` can be
    /// "run-pass" or `suite` can be something like `debuginfo`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let suite = self.suite;
        // Path for test suite
        let suite_path = self.path.unwrap_or("");
        // Skip codegen tests if they aren't enabled in configuration.
        if !builder.config.codegen_tests && suite == "codegen" {
            return;
        }
        if suite == "debuginfo" {
            // Skip debuginfo tests on MSVC
            if builder.config.build.contains("msvc") {
                return;
            }
            // Resolve the "debuginfo-XXX" placeholder mode to lldb on Apple
            // platforms and gdb elsewhere, then re-enter this step.
            if mode == "debuginfo-XXX" {
                return if builder.config.build.contains("apple") {
                    builder.ensure(Compiletest {
                        mode: "debuginfo-lldb",
                        ..self
                    });
                } else {
                    builder.ensure(Compiletest {
                        mode: "debuginfo-gdb",
                        ..self
                    });
                };
            }
            builder.ensure(dist::DebuggerScripts {
                sysroot: builder.sysroot(compiler),
                host: target,
            });
        }
        if suite.ends_with("fulldeps") ||
            // FIXME: Does pretty need librustc compiled? Note that there are
            // fulldeps test suites with mode = pretty as well.
            mode == "pretty"
        {
            builder.ensure(compile::Rustc { compiler, target });
        }
        if builder.no_std(target) != Some(true) {
            builder.ensure(compile::Test { compiler, target });
        }
        builder.ensure(native::TestHelpers { target });
        builder.ensure(RemoteCopyLibs { compiler, target });
        let mut cmd = builder.tool_cmd(Tool::Compiletest);
        // compiletest currently has... a lot of arguments, so let's just pass all
        // of them!
        cmd.arg("--compile-lib-path")
            .arg(builder.rustc_libdir(compiler));
        cmd.arg("--run-lib-path")
            .arg(builder.sysroot_libdir(compiler, target));
        cmd.arg("--rustc-path").arg(builder.rustc(compiler));
        let is_rustdoc_ui = suite.ends_with("rustdoc-ui");
        // Avoid depending on rustdoc when we don't need it.
        if mode == "rustdoc"
            || (mode == "run-make" && suite.ends_with("fulldeps"))
            || (mode == "ui" && is_rustdoc_ui)
        {
            cmd.arg("--rustdoc-path")
                .arg(builder.rustdoc(compiler.host));
        }
        cmd.arg("--src-base")
            .arg(builder.src.join("src/test").join(suite));
        cmd.arg("--build-base")
            .arg(testdir(builder, compiler.host).join(suite));
        cmd.arg("--stage-id")
            .arg(format!("stage{}-{}", compiler.stage, target));
        cmd.arg("--mode").arg(mode);
        cmd.arg("--target").arg(target);
        cmd.arg("--host").arg(&*compiler.host);
        cmd.arg("--llvm-filecheck")
            .arg(builder.llvm_filecheck(builder.config.build));
        if builder.config.cmd.bless() {
            cmd.arg("--bless");
        }
        // The CLI-supplied compare mode takes precedence over this suite's
        // built-in one.
        let compare_mode = builder.config.cmd.compare_mode().or(self.compare_mode);
        if let Some(ref nodejs) = builder.config.nodejs {
            cmd.arg("--nodejs").arg(nodejs);
        }
        // rustdoc-ui tests don't link, so they take no rustc codegen flags.
        let mut flags = if is_rustdoc_ui {
            Vec::new()
        } else {
            vec!["-Crpath".to_string()]
        };
        if !is_rustdoc_ui {
            if builder.config.rust_optimize_tests {
                flags.push("-O".to_string());
            }
            if builder.config.rust_debuginfo_tests {
                flags.push("-g".to_string());
            }
        }
        flags.push("-Zunstable-options".to_string());
        flags.push(builder.config.cmd.rustc_args().join(" "));
        if let Some(linker) = builder.linker(target) {
            cmd.arg("--linker").arg(linker);
        }
        let hostflags = flags.clone();
        cmd.arg("--host-rustcflags").arg(hostflags.join(" "));
        // Target builds additionally need to find the native test helpers.
        let mut targetflags = flags.clone();
        targetflags.push(format!(
            "-Lnative={}",
            builder.test_helpers_out(target).display()
        ));
        cmd.arg("--target-rustcflags").arg(targetflags.join(" "));
        cmd.arg("--docck-python").arg(builder.python());
        if builder.config.build.ends_with("apple-darwin") {
            // Force /usr/bin/python on macOS for LLDB tests because we're loading the
            // LLDB plugin's compiled module which only works with the system python
            // (namely not Homebrew-installed python)
            cmd.arg("--lldb-python").arg("/usr/bin/python");
        } else {
            cmd.arg("--lldb-python").arg(builder.python());
        }
        if let Some(ref gdb) = builder.config.gdb {
            cmd.arg("--gdb").arg(gdb);
        }
        if let Some(ref vers) = builder.lldb_version {
            cmd.arg("--lldb-version").arg(vers);
        }
        if let Some(ref dir) = builder.lldb_python_dir {
            cmd.arg("--lldb-python-dir").arg(dir);
        }
        // Get paths from cmd args
        let paths = match &builder.config.cmd {
            Subcommand::Test { ref paths, .. } => &paths[..],
            _ => &[],
        };
        // Get test-args by stripping suite path
        let mut test_args: Vec<&str> = paths
            .iter()
            .map(|p| {
                match p.strip_prefix(".") {
                    Ok(path) => path,
                    Err(_) => p,
                }
            })
            .filter(|p| p.starts_with(suite_path) && p.is_file())
            .map(|p| p.strip_prefix(suite_path).unwrap().to_str().unwrap())
            .collect();
        test_args.append(&mut builder.config.cmd.test_args());
        cmd.args(&test_args);
        if builder.is_verbose() {
            cmd.arg("--verbose");
        }
        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }
        if builder.config.llvm_enabled {
            let llvm_config = builder.ensure(native::Llvm {
                target: builder.config.build,
                emscripten: false,
            });
            if !builder.config.dry_run {
                let llvm_version = output(Command::new(&llvm_config).arg("--version"));
                cmd.arg("--llvm-version").arg(llvm_version);
            }
            if !builder.is_rust_llvm(target) {
                cmd.arg("--system-llvm");
            }
            // Only pass correct values for these flags for the `run-make` suite as it
            // requires that a C++ compiler was configured which isn't always the case.
            if !builder.config.dry_run && suite == "run-make-fulldeps" {
                let llvm_components = output(Command::new(&llvm_config).arg("--components"));
                let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags"));
                cmd.arg("--cc")
                    .arg(builder.cc(target))
                    .arg("--cxx")
                    .arg(builder.cxx(target).unwrap())
                    .arg("--cflags")
                    .arg(builder.cflags(target).join(" "))
                    .arg("--llvm-components")
                    .arg(llvm_components.trim())
                    .arg("--llvm-cxxflags")
                    .arg(llvm_cxxflags.trim());
                if let Some(ar) = builder.ar(target) {
                    cmd.arg("--ar").arg(ar);
                }
            }
        }
        if suite == "run-make-fulldeps" && !builder.config.llvm_enabled {
            builder.info(&format!(
                "Ignoring run-make test suite as they generally don't work without LLVM"
            ));
            return;
        }
        // Other suites still receive (empty) values for these flags since
        // compiletest expects them unconditionally.
        if suite != "run-make-fulldeps" {
            cmd.arg("--cc")
                .arg("")
                .arg("--cxx")
                .arg("")
                .arg("--cflags")
                .arg("")
                .arg("--llvm-components")
                .arg("")
                .arg("--llvm-cxxflags")
                .arg("");
        }
        if builder.remote_tested(target) {
            cmd.arg("--remote-test-client")
                .arg(builder.tool_exe(Tool::RemoteTestClient));
        }
        // Running a C compiler on MSVC requires a few env vars to be set, so be
        // sure to set them here.
        //
        // Note that if we encounter `PATH` we make sure to append to our own `PATH`
        // rather than stomp over it.
        if target.contains("msvc") {
            for &(ref k, ref v) in builder.cc[&target].env() {
                if k != "PATH" {
                    cmd.env(k, v);
                }
            }
        }
        cmd.env("RUSTC_BOOTSTRAP", "1");
        builder.add_rust_test_threads(&mut cmd);
        if builder.config.sanitizers {
            cmd.env("SANITIZER_SUPPORT", "1");
        }
        if builder.config.profiler {
            cmd.env("PROFILER_SUPPORT", "1");
        }
        cmd.env("RUST_TEST_TMPDIR", builder.out.join("tmp"));
        cmd.arg("--adb-path").arg("adb");
        cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR);
        if target.contains("android") {
            // Assume that cc for this target comes from the android sysroot
            cmd.arg("--android-cross-path")
                .arg(builder.cc(target).parent().unwrap().parent().unwrap());
        } else {
            cmd.arg("--android-cross-path").arg("");
        }
        builder.ci_env.force_coloring_in_ci(&mut cmd);
        let _folder = builder.fold_output(|| format!("test_{}", suite));
        builder.info(&format!(
            "Check compiletest suite={} mode={} ({} -> {})",
            suite, mode, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cmd);
        // Optionally re-run the whole suite under the comparison mode
        // (e.g. "nll") by appending the flag to the same command.
        if let Some(compare_mode) = compare_mode {
            cmd.arg("--compare-mode").arg(compare_mode);
            let _folder = builder.fold_output(|| format!("test_{}_{}", suite, compare_mode));
            builder.info(&format!(
                "Check compiletest suite={} mode={} compare_mode={} ({} -> {})",
                suite, mode, compare_mode, &compiler.host, target
            ));
            let _time = util::timeit(&builder);
            try_run(builder, &mut cmd);
        }
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct DocTest {
    compiler: Compiler,
    // Root directory (relative to the source root) scanned for `.md` files.
    path: &'static str,
    // Name used for log folding and toolstate reporting.
    name: &'static str,
    // Whether this is an external book whose pass/fail feeds the toolstate
    // tracker instead of failing the build outright.
    is_ext_doc: bool,
}
impl Step for DocTest {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        // Never selected by path; only invoked via the `test_book!` wrappers.
        run.never()
    }
    /// Run `rustdoc --test` for all documentation in `src/doc`.
    ///
    /// This will run all tests in our markdown documentation (e.g. the book)
    /// located in `src/doc`. The `rustdoc` that's run is the one that sits next to
    /// `compiler`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        builder.ensure(compile::Test {
            compiler,
            target: compiler.host,
        });
        // Do a breadth-first traversal of the `src/doc` directory and just run
        // tests for all files that end in `*.md`
        let mut stack = vec![builder.src.join(self.path)];
        let _time = util::timeit(&builder);
        let _folder = builder.fold_output(|| format!("test_{}", self.name));
        let mut files = Vec::new();
        while let Some(p) = stack.pop() {
            if p.is_dir() {
                stack.extend(t!(p.read_dir()).map(|p| t!(p).path()));
                continue;
            }
            if p.extension().and_then(|s| s.to_str()) != Some("md") {
                continue;
            }
            // The nostarch directory in the book is for no starch, and so isn't
            // guaranteed to build. We don't care if it doesn't build, so skip it.
            if p.to_str().map_or(false, |p| p.contains("nostarch")) {
                continue;
            }
            files.push(p);
        }
        // Sort for a deterministic test order regardless of directory order.
        files.sort();
        // One failing file marks the whole book as failed, but all files are
        // still attempted.
        let mut toolstate = ToolState::TestPass;
        for file in files {
            if !markdown_test(builder, compiler, &file) {
                toolstate = ToolState::TestFail;
            }
        }
        if self.is_ext_doc {
            builder.save_toolstate(self.name, toolstate);
        }
    }
}
// Declares one `Step` per documentation book; each doctests its markdown via
// the shared `DocTest` step. Non-default books are treated as external
// (`is_ext_doc`) and report to the toolstate tracker.
macro_rules! test_book {
    ($($name:ident, $path:expr, $book_name:expr, default=$default:expr;)+) => {
        $(
            #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
            pub struct $name {
                compiler: Compiler,
            }
            impl Step for $name {
                type Output = ();
                const DEFAULT: bool = $default;
                const ONLY_HOSTS: bool = true;
                fn should_run(run: ShouldRun) -> ShouldRun {
                    run.path($path)
                }
                fn make_run(run: RunConfig) {
                    run.builder.ensure($name {
                        compiler: run.builder.compiler(run.builder.top_stage, run.host),
                    });
                }
                fn run(self, builder: &Builder) {
                    builder.ensure(DocTest {
                        compiler: self.compiler,
                        path: $path,
                        name: $book_name,
                        is_ext_doc: !$default,
                    });
                }
            }
        )+
    }
}
// Books under `src/doc` whose code samples are doctested. The `default=false`
// entries are external books reported via toolstate rather than run by default.
test_book!(
    Nomicon, "src/doc/nomicon", "nomicon", default=false;
    Reference, "src/doc/reference", "reference", default=false;
    RustdocBook, "src/doc/rustdoc", "rustdoc", default=true;
    RustcBook, "src/doc/rustc", "rustc", default=true;
    RustByExample, "src/doc/rust-by-example", "rust-by-example", default=false;
    TheBook, "src/doc/book", "book", default=false;
    UnstableBook, "src/doc/unstable-book", "unstable-book", default=true;
);
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ErrorIndex {
    compiler: Compiler,
}
impl Step for ErrorIndex {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/error_index_generator")
    }
    fn make_run(run: RunConfig) {
        run.builder.ensure(ErrorIndex {
            compiler: run.builder.compiler(run.builder.top_stage, run.host),
        });
    }
    /// Run the error index generator tool to execute the tests located in the error
    /// index.
    ///
    /// The `error_index_generator` tool lives in `src/tools` and is used to
    /// generate a markdown file from the error indexes of the code base which is
    /// then passed to `rustdoc --test`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        builder.ensure(compile::Std {
            compiler,
            target: compiler.host,
        });
        let dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&dir));
        let output = dir.join("error-index.md");
        let mut tool = builder.tool_cmd(Tool::ErrorIndex);
        tool.arg("markdown")
            .arg(&output)
            .env("CFG_BUILD", &builder.config.build)
            .env("RUSTC_ERROR_METADATA_DST", builder.extended_error_dir());
        let _folder = builder.fold_output(|| "test_error_index");
        builder.info(&format!("Testing error-index stage{}", compiler.stage));
        let _time = util::timeit(&builder);
        // Generate the markdown, then doctest it like any other markdown file.
        builder.run(&mut tool);
        markdown_test(builder, compiler, &output);
    }
}
/// Runs `rustdoc --test` on a single markdown file, returning whether it passed.
///
/// Files containing no ``` fences are skipped (trivially pass) so we don't
/// spawn rustdoc needlessly. Failure to open/read the file here is
/// deliberately ignored: rustdoc itself will surface the error when invoked.
fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) -> bool {
    // `if let` replaces the original `match` whose `Err` arm was empty.
    if let Ok(mut file) = File::open(markdown) {
        let mut contents = String::new();
        t!(file.read_to_string(&mut contents));
        if !contents.contains("```") {
            return true;
        }
    }
    builder.info(&format!("doc tests for: {}", markdown.display()));
    let mut cmd = builder.rustdoc_cmd(compiler.host);
    builder.add_rust_test_threads(&mut cmd);
    cmd.arg("--test");
    cmd.arg(markdown);
    cmd.env("RUSTC_BOOTSTRAP", "1");
    let test_args = builder.config.cmd.test_args().join(" ");
    cmd.arg("--test-args").arg(test_args);
    // Quiet mode hides rustdoc's output unless the run fails.
    if builder.config.verbose_tests {
        try_run(builder, &mut cmd)
    } else {
        try_run_quiet(builder, &mut cmd)
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateLibrustc {
    compiler: Compiler,
    target: Interned<String>,
    // Whether to `cargo test` or `cargo bench` the crate.
    test_kind: TestKind,
    krate: Interned<String>,
}
impl Step for CrateLibrustc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.krate("rustc-main")
    }
    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);
        // Find which in-tree rustc crate (if any) the requested path names and
        // schedule a run for it.
        for krate in builder.in_tree_crates("rustc-main") {
            if run.path.ends_with(&krate.path) {
                let test_kind = builder.kind.into();
                builder.ensure(CrateLibrustc {
                    compiler,
                    target: run.target,
                    test_kind,
                    krate: krate.name,
                });
            }
        }
    }
    // Delegates to the generic `Crate` step with `Mode::Rustc`.
    fn run(self, builder: &Builder) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Rustc,
            test_kind: self.test_kind,
            krate: self.krate,
        });
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateNotDefault {
    compiler: Compiler,
    target: Interned<String>,
    // Whether to `cargo test` or `cargo bench` the crate.
    test_kind: TestKind,
    krate: &'static str,
}
impl Step for CrateNotDefault {
    type Output = ();
    fn should_run(run: ShouldRun) -> ShouldRun {
        // These crates are excluded from the default `Crate` step (see its
        // `should_run`) and only tested when named explicitly.
        run.path("src/liballoc_jemalloc")
            .path("src/librustc_asan")
            .path("src/librustc_lsan")
            .path("src/librustc_msan")
            .path("src/librustc_tsan")
    }
    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);
        let test_kind = builder.kind.into();
        builder.ensure(CrateNotDefault {
            compiler,
            target: run.target,
            test_kind,
            // Map the requested source path back to its crate name.
            krate: match run.path {
                _ if run.path.ends_with("src/liballoc_jemalloc") => "alloc_jemalloc",
                _ if run.path.ends_with("src/librustc_asan") => "rustc_asan",
                _ if run.path.ends_with("src/librustc_lsan") => "rustc_lsan",
                _ if run.path.ends_with("src/librustc_msan") => "rustc_msan",
                _ if run.path.ends_with("src/librustc_tsan") => "rustc_tsan",
                _ => panic!("unexpected path {:?}", run.path),
            },
        });
    }
    // Delegates to the generic `Crate` step with `Mode::Std`.
    fn run(self, builder: &Builder) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Std,
            test_kind: self.test_kind,
            krate: INTERNER.intern_str(self.krate),
        });
    }
}
/// Generic step running `cargo test`/`cargo bench` for a single in-tree crate.
/// The more specific steps (`CrateLibrustc`, `CrateNotDefault`, ...) all
/// delegate here.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Crate {
    pub compiler: Compiler,
    pub target: Interned<String>,
    pub mode: Mode, // which tree the crate belongs to (std / test / rustc)
    pub test_kind: TestKind,
    pub krate: Interned<String>,
}
impl Step for Crate {
    type Output = ();
    const DEFAULT: bool = true;

    /// Registers the `test` crate plus every locally-buildable std crate,
    /// excluding the allocator/sanitizer crates that have dedicated steps.
    fn should_run(mut run: ShouldRun) -> ShouldRun {
        let builder = run.builder;
        run = run.krate("test");
        for krate in run.builder.in_tree_crates("std") {
            if krate.is_local(&run.builder)
                && !krate.name.contains("jemalloc")
                && !(krate.name.starts_with("rustc_") && krate.name.ends_with("san"))
                && krate.name != "dlmalloc"
            {
                run = run.path(krate.local_path(&builder).to_str().unwrap());
            }
        }
        run
    }

    /// Schedules a `Crate` step (with the right `Mode`) for every std/test
    /// crate whose local path matches the requested path.
    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        let make = |mode: Mode, krate: &CargoCrate| {
            let test_kind = builder.kind.into();
            builder.ensure(Crate {
                compiler,
                target: run.target,
                mode,
                test_kind,
                krate: krate.name,
            });
        };

        for krate in builder.in_tree_crates("std") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Std, krate);
            }
        }
        for krate in builder.in_tree_crates("test") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Test, krate);
            }
        }
    }

    /// Run all unit tests plus documentation tests for a given crate defined
    /// by a `Cargo.toml` (single manifest)
    ///
    /// This is what runs tests for crates like the standard library, compiler, etc.
    /// It essentially is the driver for running `cargo test`.
    ///
    /// Currently this runs all tests for a DAG by passing a bunch of `-p foo`
    /// arguments, and those arguments are discovered from `cargo metadata`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let test_kind = self.test_kind;
        let krate = self.krate;

        builder.ensure(compile::Test { compiler, target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        // If we're not doing a full bootstrap but we're testing a stage2 version of
        // libstd, then what we're actually testing is the libstd produced in
        // stage1. Reflect that here by updating the compiler that we're working
        // with automatically.
        let compiler = if builder.force_use_stage1(compiler, target) {
            builder.compiler(1, compiler.host)
        } else {
            // `Compiler` is `Copy`, so no `.clone()` is needed here.
            compiler
        };

        let mut cargo = builder.cargo(compiler, mode, target, test_kind.subcommand());
        match mode {
            Mode::Std => {
                compile::std_cargo(builder, &compiler, target, &mut cargo);
            }
            Mode::Test => {
                compile::test_cargo(builder, &compiler, target, &mut cargo);
            }
            Mode::Rustc => {
                builder.ensure(compile::Rustc { compiler, target });
                compile::rustc_cargo(builder, &mut cargo);
            }
            _ => panic!("can only test libraries"),
        };

        // Build up the base `cargo test` command.
        //
        // Pass in some standard flags then iterate over the graph we've discovered
        // in `cargo metadata` with the maps above and figure out what `-p`
        // arguments need to get passed.
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }
        match builder.doc_tests {
            DocTests::Only => {
                cargo.arg("--doc");
            }
            DocTests::No => {
                cargo.args(&["--lib", "--bins", "--examples", "--tests", "--benches"]);
            }
            DocTests::Yes => {}
        }

        cargo.arg("-p").arg(krate);

        // The tests are going to run with the *target* libraries, so we need to
        // ensure that those libraries show up in the LD_LIBRARY_PATH equivalent.
        //
        // Note that to run the compiler we need to run with the *host* libraries,
        // but our wrapper scripts arrange for that to be the case anyway.
        let mut dylib_path = dylib_path();
        dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target)));
        cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        if target.contains("emscripten") {
            // Emscripten binaries need a JS runtime; run them through node.
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                builder
                    .config
                    .nodejs
                    .as_ref()
                    .expect("nodejs not configured"),
            );
        } else if target.starts_with("wasm32") {
            // Warn about running tests without the `wasm_syscall` feature enabled.
            // The javascript shim implements the syscall interface so that test
            // output can be correctly reported.
            if !builder.config.wasm_syscall {
                builder.info(
                    "Libstd was built without `wasm_syscall` feature enabled: \
                     test output may not be visible.",
                );
            }

            // On the wasm32-unknown-unknown target we're using LTO which is
            // incompatible with `-C prefer-dynamic`, so disable that here
            cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");

            let node = builder
                .config
                .nodejs
                .as_ref()
                .expect("nodejs not configured");
            let runner = format!(
                "{} {}/src/etc/wasm32-shim.js",
                node.display(),
                builder.src.display()
            );
            cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), &runner);
        } else if builder.remote_tested(target) {
            // Remote/emulated targets run through the remote-test-client tool.
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                format!("{} run", builder.tool_exe(Tool::RemoteTestClient).display()),
            );
        }

        let _folder = builder.fold_output(|| {
            format!(
                "{}_stage{}-{}",
                test_kind.subcommand(),
                compiler.stage,
                krate
            )
        });
        builder.info(&format!(
            "{} {} stage{} ({} -> {})",
            test_kind, krate, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cargo);
    }
}
/// Runs `cargo test` (or `bench`) for the in-tree `rustdoc` crate.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateRustdoc {
    host: Interned<String>, // host triple to build/test rustdoc on
    test_kind: TestKind,    // `cargo test` vs `cargo bench`
}
impl Step for CrateRustdoc {
    type Output = ();
    const DEFAULT: bool = true;
    // rustdoc is a host tool; never cross-tested.
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.paths(&["src/librustdoc", "src/tools/rustdoc"])
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let test_kind = builder.kind.into();
        builder.ensure(CrateRustdoc {
            host: run.host,
            test_kind,
        });
    }

    /// Builds rustc first (rustdoc links against it), then drives
    /// `cargo test -p rustdoc:0.0.0` for the rustdoc tool package.
    fn run(self, builder: &Builder) {
        let test_kind = self.test_kind;

        let compiler = builder.compiler(builder.top_stage, self.host);
        let target = compiler.host;
        builder.ensure(compile::Rustc { compiler, target });

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 target,
                                                 test_kind.subcommand(),
                                                 "src/tools/rustdoc",
                                                 SourceType::InTree);
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        cargo.arg("-p").arg("rustdoc:0.0.0");

        // Everything after `--` goes to the test binaries themselves.
        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        let _folder = builder
            .fold_output(|| format!("{}_stage{}-rustdoc", test_kind.subcommand(), compiler.stage));
        builder.info(&format!(
            "{} rustdoc stage{} ({} -> {})",
            test_kind, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);

        try_run(builder, &mut cargo);
    }
}
/// Converts a target triple into the form Cargo uses in environment
/// variable names: `-` becomes `_` and every character is uppercased
/// (e.g. `x86_64-apple-darwin` -> `X86_64_APPLE_DARWIN`).
fn envify(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for c in s.chars() {
        let c = if c == '-' { '_' } else { c };
        // `to_uppercase` may yield multiple chars for some Unicode input.
        out.extend(c.to_uppercase());
    }
    out
}
/// Some test suites are run inside emulators or on remote devices, and most
/// of our test binaries are linked dynamically which means we need to ship
/// the standard library and such to the emulator ahead of time. This step
/// represents this and is a dependency of all test suites.
///
/// Most of the time this is a noop. For some steps such as shipping data to
/// QEMU we have to build our own tools so we've got conditional dependencies
/// on those programs as well. Note that the remote test client is built for
/// the build target (us) and the server is built for the target.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct RemoteCopyLibs {
    compiler: Compiler,       // compiler whose sysroot libs are shipped
    target: Interned<String>, // remote/emulated target triple
}
impl Step for RemoteCopyLibs {
    type Output = ();

    // Never selected by path; only ever run as a dependency of test suites.
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    /// Spawns the emulator (if this target is remote-tested) and pushes all
    /// dylibs from the target sysroot to it.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        // No-op unless this target actually runs tests remotely.
        if !builder.remote_tested(target) {
            return;
        }

        builder.ensure(compile::Test { compiler, target });

        builder.info(&format!("REMOTE copy libs to emulator ({})", target));
        t!(fs::create_dir_all(builder.out.join("tmp")));

        // The server runs on the target, so build it with a stage0 compiler.
        let server = builder.ensure(tool::RemoteTestServer {
            compiler: compiler.with_stage(0),
            target,
        });

        // Spawn the emulator and wait for it to come online
        let tool = builder.tool_exe(Tool::RemoteTestClient);
        let mut cmd = Command::new(&tool);
        cmd.arg("spawn-emulator")
            .arg(target)
            .arg(&server)
            .arg(builder.out.join("tmp"));
        if let Some(rootfs) = builder.qemu_rootfs(target) {
            cmd.arg(rootfs);
        }
        builder.run(&mut cmd);

        // Push all our dylibs to the emulator
        for f in t!(builder.sysroot_libdir(compiler, target).read_dir()) {
            let f = t!(f);
            let name = f.file_name().into_string().unwrap();
            if util::is_dylib(&name) {
                builder.run(Command::new(&tool).arg("push").arg(f.path()));
            }
        }
    }
}
/// Runs "distcheck": unpack the dist tarballs and do a full `make check`
/// from them, verifying the shipped sources are self-contained.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Distcheck;
impl Step for Distcheck {
    type Output = ();

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("distcheck")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Distcheck);
    }

    /// Run "distcheck", a 'make check' from a tarball
    fn run(self, builder: &Builder) {
        // NOTE: plain literals are passed straight to `info`; wrapping a
        // literal in `format!` is a needless allocation (clippy::useless_format).
        builder.info("Distcheck");
        let dir = builder.out.join("tmp").join("distcheck");
        let _ = fs::remove_dir_all(&dir);
        t!(fs::create_dir_all(&dir));

        // Guarantee that these are built before we begin running.
        builder.ensure(dist::PlainSourceTarball);
        builder.ensure(dist::Src);

        // Unpack the plain source tarball and run `./configure && make check`
        // in it, with vendored dependencies enabled.
        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::PlainSourceTarball))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);
        builder.run(
            Command::new("./configure")
                .args(&builder.config.configure_args)
                .arg("--enable-vendor")
                .current_dir(&dir),
        );
        builder.run(
            Command::new(build_helper::make(&builder.config.build))
                .arg("check")
                .current_dir(&dir),
        );

        // Now make sure that rust-src has all of libstd's dependencies
        builder.info("Distcheck rust-src");
        let dir = builder.out.join("tmp").join("distcheck-src");
        let _ = fs::remove_dir_all(&dir);
        t!(fs::create_dir_all(&dir));

        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::Src))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);

        // `generate-lockfile` fails if any libstd dependency is missing
        // from the rust-src component.
        let toml = dir.join("rust-src/lib/rustlib/src/rust/src/libstd/Cargo.toml");
        builder.run(
            Command::new(&builder.initial_cargo)
                .arg("generate-lockfile")
                .arg("--manifest-path")
                .arg(&toml)
                .current_dir(&dir),
        );
    }
}
/// Runs the test suite of rustbuild (the build system) itself.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Bootstrap;
impl Step for Bootstrap {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    /// Test the build system itself
    fn run(self, builder: &Builder) {
        let mut cmd = Command::new(&builder.initial_cargo);
        cmd.arg("test")
            .current_dir(builder.src.join("src/bootstrap"))
            .env("RUSTFLAGS", "-Cdebuginfo=2")
            .env("CARGO_TARGET_DIR", builder.out.join("bootstrap"))
            .env("RUSTC_BOOTSTRAP", "1")
            .env("RUSTC", &builder.initial_rustc);
        // `option_env!` captures RUSTFLAGS at bootstrap's *own* compile time;
        // when set, it replaces the `-Cdebuginfo=2` default from above.
        if let Some(flags) = option_env!("RUSTFLAGS") {
            // Use the same rustc flags for testing as for "normal" compilation,
            // so that Cargo doesn’t recompile the entire dependency graph every time:
            // https://github.com/rust-lang/rust/issues/49215
            cmd.env("RUSTFLAGS", flags);
        }
        if !builder.fail_fast {
            cmd.arg("--no-fail-fast");
        }
        cmd.arg("--").args(&builder.config.cmd.test_args());
        // rustbuild tests are racy on directory creation so just run them one at a time.
        // Since there's not many this shouldn't be a problem.
        cmd.arg("--test-threads=1");
        try_run(builder, &mut cmd);
    }

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/bootstrap")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Bootstrap);
    }
}
Copy the test_data/ RLS tests into a writable directory.
See rust-lang-nursery/rls#966 for details.
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the test-related targets of the build system.
//!
//! This file implements the various regression test suites that we execute on
//! our CI.
use std::env;
use std::ffi::OsString;
use std::fmt;
use std::fs::{self, File};
use std::io::Read;
use std::iter;
use std::path::{Path, PathBuf};
use std::process::Command;
use build_helper::{self, output};
use builder::{Builder, Compiler, Kind, RunConfig, ShouldRun, Step};
use cache::{Interned, INTERNER};
use compile;
use dist;
use flags::Subcommand;
use native;
use tool::{self, Tool, SourceType};
use toolstate::ToolState;
use util::{self, dylib_path, dylib_path_var};
use Crate as CargoCrate;
use {DocTests, Mode};
const ADB_TEST_DIR: &str = "/data/tmp/work";
/// The two modes of the test runner; tests or benchmarks.
///
/// Determines both the cargo subcommand (see [`TestKind::subcommand`]) and
/// the progress verb printed to the user (see the `Display` impl).
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, PartialOrd, Ord)]
pub enum TestKind {
    /// Run `cargo test`
    Test,
    /// Run `cargo bench`
    Bench,
}
impl From<Kind> for TestKind {
    /// Converts the top-level build kind into a test kind.
    ///
    /// Panics for any kind other than `Test`/`Bench`; callers only reach
    /// this from test/bench invocations.
    fn from(kind: Kind) -> Self {
        match kind {
            Kind::Test => TestKind::Test,
            Kind::Bench => TestKind::Bench,
            _ => panic!("unexpected kind in crate: {:?}", kind),
        }
    }
}
impl TestKind {
    /// The cargo subcommand ("test" or "bench") corresponding to this kind.
    fn subcommand(self) -> &'static str {
        if self == TestKind::Bench {
            "bench"
        } else {
            "test"
        }
    }
}
impl fmt::Display for TestKind {
    /// Renders the progress verb shown to the user ("Testing"/"Benchmarking").
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let verb = match *self {
            TestKind::Test => "Testing",
            TestKind::Bench => "Benchmarking",
        };
        f.write_str(verb)
    }
}
/// Runs `cmd`, returning whether it succeeded.
///
/// With `--no-fail-fast`, a failure is recorded in the builder's delayed
/// failures (reported at the end of the build) instead of aborting.
/// With fail-fast (the default), `builder.run` aborts on failure itself.
fn try_run(builder: &Builder, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run(cmd);
        return true;
    }
    let ok = builder.try_run(cmd);
    if !ok {
        builder
            .delayed_failures
            .borrow_mut()
            .push(format!("{:?}", cmd));
    }
    ok
}
/// Like [`try_run`], but suppresses the command's output unless it fails.
fn try_run_quiet(builder: &Builder, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run_quiet(cmd);
        return true;
    }
    let ok = builder.try_run_quiet(cmd);
    if !ok {
        builder
            .delayed_failures
            .borrow_mut()
            .push(format!("{:?}", cmd));
    }
    ok
}
/// Runs the `linkchecker` tool over the generated documentation.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Linkcheck {
    host: Interned<String>, // host triple whose docs are checked
}
impl Step for Linkcheck {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;

    /// Runs the `linkchecker` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will verify the validity of all our links in the
    /// documentation to ensure we don't have a bunch of dead ones.
    fn run(self, builder: &Builder) {
        let host = self.host;

        builder.info(&format!("Linkcheck ({})", host));

        // Build all the default documentation first; linkchecker then scans it.
        builder.default_doc(None);

        let _time = util::timeit(&builder);
        try_run(
            builder,
            builder
                .tool_cmd(Tool::Linkchecker)
                .arg(builder.out.join(host).join("doc")),
        );
    }

    // Only run by default when docs are enabled in the configuration.
    fn should_run(run: ShouldRun) -> ShouldRun {
        let builder = run.builder;
        run.path("src/tools/linkchecker")
            .default_condition(builder.config.docs)
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Linkcheck { host: run.target });
    }
}
/// Runs the `cargotest` tool: checks out a few real-world Rust projects and
/// runs their test suites against the just-built compiler.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargotest {
    stage: u32,             // compiler stage to test with
    host: Interned<String>, // host triple
}
impl Step for Cargotest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/cargotest")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Cargotest {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs the `cargotest` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will check out a few Rust projects and run `cargo
    /// test` to ensure that we don't regress the test suites there.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);
        builder.ensure(compile::Rustc {
            compiler,
            target: compiler.host,
        });

        // Note that this is a short, cryptic, and not scoped directory name. This
        // is currently to minimize the length of path on Windows where we otherwise
        // quickly run into path name limit constraints.
        let out_dir = builder.out.join("ct");
        t!(fs::create_dir_all(&out_dir));

        let _time = util::timeit(&builder);
        let mut cmd = builder.tool_cmd(Tool::CargoTest);
        try_run(
            builder,
            cmd.arg(&builder.initial_cargo)
                .arg(&out_dir)
                .env("RUSTC", builder.rustc(compiler))
                .env("RUSTDOC", builder.rustdoc(compiler.host)),
        );
    }
}
/// Runs `cargo test` for the Cargo submodule packaged with Rust.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargo {
    stage: u32,             // compiler stage to build/test cargo with
    host: Interned<String>, // host triple
}
impl Step for Cargo {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/cargo")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Cargo {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for `cargo` packaged with Rust.
    fn run(self, builder: &Builder) {
        let compiler = builder.compiler(self.stage, self.host);

        // Cargo must be built before its tests can run.
        builder.ensure(tool::Cargo {
            compiler,
            target: self.host,
        });
        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 self.host,
                                                 "test",
                                                 "src/tools/cargo",
                                                 SourceType::Submodule);

        if !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        // Don't run cross-compile tests, we may not have cross-compiled libstd libs
        // available.
        cargo.env("CFG_DISABLE_CROSS_TESTS", "1");

        // Cargo's suite spawns `rustc` by name, so put our sysroot bin first
        // on PATH instead of setting RUSTC (see `path_for_cargo`).
        try_run(
            builder,
            cargo.env("PATH", &path_for_cargo(builder, compiler)),
        );
    }
}
/// Runs `cargo test` for the RLS (Rust Language Server) submodule.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rls {
    stage: u32,             // compiler stage to build/test with
    host: Interned<String>, // host triple
}
impl Step for Rls {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rls")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Rls {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for the rls.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // Tool builds may be allowed to fail (toolstate); `None` means the
        // build failed, in which case testing is skipped rather than aborted.
        let build_result = builder.ensure(tool::Rls {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if build_result.is_none() {
            eprintln!("failed to test rls: could not build");
            return;
        }

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 host,
                                                 "test",
                                                 "src/tools/rls",
                                                 SourceType::Submodule);

        // Copy `src/tools/rls/test_data` to a writable drive.
        // See rust-lang-nursery/rls#966 for why the tests need it writable.
        let test_workspace_path = builder.out.join("rls-test-data");
        let test_data_path = test_workspace_path.join("test_data");
        builder.create_dir(&test_data_path);
        builder.cp_r(&builder.src.join("src/tools/rls/test_data"), &test_data_path);
        cargo.env("RLS_TEST_WORKSPACE_DIR", test_workspace_path);

        builder.add_rustc_lib_path(compiler, &mut cargo);

        // Only record a passing toolstate when the tests actually pass.
        if try_run(builder, &mut cargo) {
            builder.save_toolstate("rls", ToolState::TestPass);
        }
    }
}
/// Runs `cargo test` for the rustfmt submodule.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rustfmt {
    stage: u32,             // compiler stage to build/test with
    host: Interned<String>, // host triple
}
impl Step for Rustfmt {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rustfmt")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Rustfmt {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for rustfmt.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // Tool builds may be allowed to fail (toolstate); skip tests if the
        // build itself did not succeed.
        let build_result = builder.ensure(tool::Rustfmt {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if build_result.is_none() {
            eprintln!("failed to test rustfmt: could not build");
            return;
        }

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 host,
                                                 "test",
                                                 "src/tools/rustfmt",
                                                 SourceType::Submodule);

        // rustfmt's tests write output into this directory.
        let dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&dir));
        cargo.env("RUSTFMT_TEST_DIR", dir);

        builder.add_rustc_lib_path(compiler, &mut cargo);

        // Only record a passing toolstate when the tests actually pass.
        if try_run(builder, &mut cargo) {
            builder.save_toolstate("rustfmt", ToolState::TestPass);
        }
    }
}
/// Runs `cargo test` for the miri submodule.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Miri {
    stage: u32,             // compiler stage to build/test with
    host: Interned<String>, // host triple
}
impl Step for Miri {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;

    // Only run by default when `test_miri` is enabled in the configuration.
    fn should_run(run: ShouldRun) -> ShouldRun {
        let test_miri = run.builder.config.test_miri;
        run.path("src/tools/miri").default_condition(test_miri)
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Miri {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for miri.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // Tool builds may be allowed to fail (toolstate); `None` means the
        // build failed and testing is skipped.
        let miri = builder.ensure(tool::Miri {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let Some(miri) = miri {
            let mut cargo = tool::prepare_tool_cargo(builder,
                                                     compiler,
                                                     Mode::ToolRustc,
                                                     host,
                                                     "test",
                                                     "src/tools/miri",
                                                     SourceType::Submodule);

            // miri tests need to know about the stage sysroot
            cargo.env("MIRI_SYSROOT", builder.sysroot(compiler));
            cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            cargo.env("MIRI_PATH", miri);

            builder.add_rustc_lib_path(compiler, &mut cargo);

            // Only record a passing toolstate when the tests actually pass.
            if try_run(builder, &mut cargo) {
                builder.save_toolstate("miri", ToolState::TestPass);
            }
        } else {
            eprintln!("failed to test miri: could not build");
        }
    }
}
/// Runs `cargo test` for the clippy submodule (opt-in; not run by default).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Clippy {
    stage: u32,             // compiler stage to build/test with
    host: Interned<String>, // host triple
}
impl Step for Clippy {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = false;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/clippy")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Clippy {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for clippy.
    fn run(self, builder: &Builder) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // Tool builds may be allowed to fail (toolstate); `None` means the
        // build failed and testing is skipped.
        let clippy = builder.ensure(tool::Clippy {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let Some(clippy) = clippy {
            let mut cargo = tool::prepare_tool_cargo(builder,
                                                     compiler,
                                                     Mode::ToolRustc,
                                                     host,
                                                     "test",
                                                     "src/tools/clippy",
                                                     SourceType::Submodule);

            // clippy tests need to know about the stage sysroot
            cargo.env("SYSROOT", builder.sysroot(compiler));
            cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            let host_libs = builder
                .stage_out(compiler, Mode::ToolRustc)
                .join(builder.cargo_dir());
            cargo.env("HOST_LIBS", host_libs);
            // clippy tests need to find the driver
            cargo.env("CLIPPY_DRIVER_PATH", clippy);

            builder.add_rustc_lib_path(compiler, &mut cargo);

            // Only record a passing toolstate when the tests actually pass.
            if try_run(builder, &mut cargo) {
                builder.save_toolstate("clippy-driver", ToolState::TestPass);
            }
        } else {
            eprintln!("failed to test clippy: could not build");
        }
    }
}
/// Builds a PATH value whose first entry is the sysroot `bin` directory of
/// `compiler`, followed by the current PATH.
///
/// Cargo's test suite has tests that require the compiler binary to be
/// spelled `rustc`, so we prepend to PATH rather than setting RUSTC.
fn path_for_cargo(builder: &Builder, compiler: Compiler) -> OsString {
    let rustc_bin = builder.sysroot(compiler).join("bin");
    let old_path = env::var_os("PATH").unwrap_or_default();
    let entries = iter::once(rustc_bin).chain(env::split_paths(&old_path));
    env::join_paths(entries).expect("")
}
/// Runs the `rustdoc-themes` tool over rustdoc's built-in CSS themes.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocTheme {
    pub compiler: Compiler, // compiler whose rustdoc is exercised
}
impl Step for RustdocTheme {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rustdoc-themes")
    }

    fn make_run(run: RunConfig) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);
        // Field-init shorthand (was the redundant `compiler: compiler`).
        run.builder.ensure(RustdocTheme { compiler });
    }

    /// Runs the theme checker against rustdoc's bundled themes, with the
    /// environment rustdoc's wrapper script expects.
    fn run(self, builder: &Builder) {
        let rustdoc = builder.out.join("bootstrap/debug/rustdoc");
        let mut cmd = builder.tool_cmd(Tool::RustdocTheme);
        cmd.arg(rustdoc.to_str().unwrap())
            .arg(
                builder
                    .src
                    .join("src/librustdoc/html/static/themes")
                    .to_str()
                    .unwrap(),
            )
            .env("RUSTC_STAGE", self.compiler.stage.to_string())
            .env("RUSTC_SYSROOT", builder.sysroot(self.compiler))
            .env(
                "RUSTDOC_LIBDIR",
                builder.sysroot_libdir(self.compiler, self.compiler.host),
            )
            .env("CFG_RELEASE_CHANNEL", &builder.config.channel)
            .env("RUSTDOC_REAL", builder.rustdoc(self.compiler.host))
            .env("RUSTDOC_CRATE_VERSION", builder.rust_version())
            .env("RUSTC_BOOTSTRAP", "1");
        if let Some(linker) = builder.linker(self.compiler.host) {
            cmd.env("RUSTC_TARGET_LINKER", linker);
        }
        try_run(builder, &mut cmd);
    }
}
/// Runs rustdoc's JavaScript search tests (`src/test/rustdoc-js`) via node.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocJS {
    pub host: Interned<String>,   // host triple running the node tester
    pub target: Interned<String>, // target whose std docs are built first
}
impl Step for RustdocJS {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/test/rustdoc-js")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(RustdocJS {
            host: run.host,
            target: run.target,
        });
    }

    /// Builds std docs for `target` and runs the node-based search tester
    /// against them; skipped (with a note) when nodejs is not configured.
    fn run(self, builder: &Builder) {
        if let Some(ref nodejs) = builder.config.nodejs {
            let mut command = Command::new(nodejs);
            command.args(&["src/tools/rustdoc-js/tester.js", &*self.host]);
            // The tester reads the generated std docs, so build them first.
            builder.ensure(::doc::Std {
                target: self.target,
                stage: builder.top_stage,
            });
            builder.run(&mut command);
        } else {
            // A plain literal; no `format!` needed (clippy::useless_format).
            builder.info("No nodejs found, skipping \"src/test/rustdoc-js\" tests");
        }
    }
}
/// Runs the `rustdoc-ui` compiletest suite.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocUi {
    pub host: Interned<String>,   // host triple
    pub target: Interned<String>, // target the suite runs for
    pub compiler: Compiler,       // compiler used by compiletest
}
impl Step for RustdocUi {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/test/rustdoc-ui")
    }

    fn make_run(run: RunConfig) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);
        run.builder.ensure(RustdocUi {
            host: run.host,
            target: run.target,
            compiler,
        });
    }

    /// Delegates to the shared `Compiletest` step with mode/suite set for
    /// the rustdoc UI tests.
    fn run(self, builder: &Builder) {
        builder.ensure(Compiletest {
            compiler: self.compiler,
            target: self.target,
            mode: "ui",
            suite: "rustdoc-ui",
            path: None,
            compare_mode: None,
        })
    }
}
/// Runs the `tidy` tool (style and lint-like checks over the source tree).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Tidy;
impl Step for Tidy {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    /// Runs the `tidy` tool.
    ///
    /// This tool in `src/tools` checks up on various bits and pieces of style and
    /// otherwise just implements a few lint-like checks that are specific to the
    /// compiler itself.
    fn run(self, builder: &Builder) {
        let mut cmd = builder.tool_cmd(Tool::Tidy);
        cmd.arg(builder.src.join("src"));
        cmd.arg(&builder.initial_cargo);
        if !builder.config.vendor {
            cmd.arg("--no-vendor");
        }
        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }

        let _folder = builder.fold_output(|| "tidy");
        // A plain literal; no `format!` needed (clippy::useless_format).
        builder.info("tidy check");
        try_run(builder, &mut cmd);
    }

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/tidy")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Tidy);
    }
}
/// Scratch directory for `host`'s test output: `<out>/<host>/test`.
fn testdir(builder: &Builder, host: Interned<String>) -> PathBuf {
    let mut dir = builder.out.join(host);
    dir.push("test");
    dir
}
/// Declares a compiletest suite that runs by default and is not host-only.
macro_rules! default_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: false });
    }
}
/// Like `default_test!`, but additionally runs the suite a second time
/// under the given compiletest compare mode (e.g. "nll").
macro_rules! default_test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr,
                   compare_mode: $compare_mode:expr }) => {
        test_with_compare_mode!($name { path: $path, mode: $mode, suite: $suite, default: true,
                                        host: false, compare_mode: $compare_mode });
    }
}
/// Declares a compiletest suite that runs by default on hosts only.
macro_rules! host_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: true });
    }
}
/// Declares a compiletest suite with explicit `default`/`host` flags and no
/// compare mode; forwards to `test_definitions!`.
macro_rules! test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr }) => {
        test_definitions!($name { path: $path, mode: $mode, suite: $suite, default: $default,
                                  host: $host, compare_mode: None });
    }
}
/// Like `test!`, but with an explicit compiletest compare mode.
macro_rules! test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr, compare_mode: $compare_mode:expr }) => {
        test_definitions!($name { path: $path, mode: $mode, suite: $suite, default: $default,
                                  host: $host, compare_mode: Some($compare_mode) });
    }
}
/// Expands to a unit struct plus a `Step` impl that delegates to the shared
/// `Compiletest` step with the given path/mode/suite/compare-mode.
macro_rules! test_definitions {
    ($name:ident {
        path: $path:expr,
        mode: $mode:expr,
        suite: $suite:expr,
        default: $default:expr,
        host: $host:expr,
        compare_mode: $compare_mode:expr
    }) => {
        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
        pub struct $name {
            pub compiler: Compiler,
            pub target: Interned<String>,
        }

        impl Step for $name {
            type Output = ();
            const DEFAULT: bool = $default;
            const ONLY_HOSTS: bool = $host;

            // `suite_path` lets users select individual tests within the suite.
            fn should_run(run: ShouldRun) -> ShouldRun {
                run.suite_path($path)
            }

            fn make_run(run: RunConfig) {
                let compiler = run.builder.compiler(run.builder.top_stage, run.host);

                run.builder.ensure($name {
                    compiler,
                    target: run.target,
                });
            }

            fn run(self, builder: &Builder) {
                builder.ensure(Compiletest {
                    compiler: self.compiler,
                    target: self.target,
                    mode: $mode,
                    suite: $suite,
                    path: Some($path),
                    compare_mode: $compare_mode,
                })
            }
        }
    }
}
// ---- Default, target-runnable compiletest suites ----

default_test_with_compare_mode!(Ui {
    path: "src/test/ui",
    mode: "ui",
    suite: "ui",
    compare_mode: "nll"
});

default_test!(RunPass {
    path: "src/test/run-pass",
    mode: "run-pass",
    suite: "run-pass"
});

default_test!(CompileFail {
    path: "src/test/compile-fail",
    mode: "compile-fail",
    suite: "compile-fail"
});

default_test!(ParseFail {
    path: "src/test/parse-fail",
    mode: "parse-fail",
    suite: "parse-fail"
});

default_test!(RunFail {
    path: "src/test/run-fail",
    mode: "run-fail",
    suite: "run-fail"
});

default_test!(RunPassValgrind {
    path: "src/test/run-pass-valgrind",
    mode: "run-pass-valgrind",
    suite: "run-pass-valgrind"
});

default_test!(MirOpt {
    path: "src/test/mir-opt",
    mode: "mir-opt",
    suite: "mir-opt"
});

default_test!(Codegen {
    path: "src/test/codegen",
    mode: "codegen",
    suite: "codegen"
});

default_test!(CodegenUnits {
    path: "src/test/codegen-units",
    mode: "codegen-units",
    suite: "codegen-units"
});

default_test!(Incremental {
    path: "src/test/incremental",
    mode: "incremental",
    suite: "incremental"
});

default_test!(Debuginfo {
    path: "src/test/debuginfo",
    // What this runs varies depending on the native platform being apple
    // (the placeholder mode is resolved inside `Compiletest::run`).
    mode: "debuginfo-XXX",
    suite: "debuginfo"
});

// ---- Host-only ("fulldeps") suites that need librustc built ----

host_test!(UiFullDeps {
    path: "src/test/ui-fulldeps",
    mode: "ui",
    suite: "ui-fulldeps"
});

host_test!(RunPassFullDeps {
    path: "src/test/run-pass-fulldeps",
    mode: "run-pass",
    suite: "run-pass-fulldeps"
});

host_test!(RunFailFullDeps {
    path: "src/test/run-fail-fulldeps",
    mode: "run-fail",
    suite: "run-fail-fulldeps"
});

host_test!(CompileFailFullDeps {
    path: "src/test/compile-fail-fulldeps",
    mode: "compile-fail",
    suite: "compile-fail-fulldeps"
});

host_test!(IncrementalFullDeps {
    path: "src/test/incremental-fulldeps",
    mode: "incremental",
    suite: "incremental-fulldeps"
});

host_test!(Rustdoc {
    path: "src/test/rustdoc",
    mode: "rustdoc",
    suite: "rustdoc"
});

// ---- Pretty-printing suites (opt-in; not run by default) ----

test!(Pretty {
    path: "src/test/pretty",
    mode: "pretty",
    suite: "pretty",
    default: false,
    host: true
});
test!(RunPassPretty {
    path: "src/test/run-pass/pretty",
    mode: "pretty",
    suite: "run-pass",
    default: false,
    host: true
});
test!(RunFailPretty {
    path: "src/test/run-fail/pretty",
    mode: "pretty",
    suite: "run-fail",
    default: false,
    host: true
});
test!(RunPassValgrindPretty {
    path: "src/test/run-pass-valgrind/pretty",
    mode: "pretty",
    suite: "run-pass-valgrind",
    default: false,
    host: true
});
test!(RunPassFullDepsPretty {
    path: "src/test/run-pass-fulldeps/pretty",
    mode: "pretty",
    suite: "run-pass-fulldeps",
    default: false,
    host: true
});
test!(RunFailFullDepsPretty {
    path: "src/test/run-fail-fulldeps/pretty",
    mode: "pretty",
    suite: "run-fail-fulldeps",
    default: false,
    host: true
});

// ---- run-make suites ----

default_test!(RunMake {
    path: "src/test/run-make",
    mode: "run-make",
    suite: "run-make"
});

host_test!(RunMakeFullDeps {
    path: "src/test/run-make-fulldeps",
    mode: "run-make",
    suite: "run-make-fulldeps"
});
/// Shared driver for the `compiletest` tool; every suite declared via the
/// `test_definitions!` macro above delegates to this step.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct Compiletest {
    compiler: Compiler,
    target: Interned<String>,
    mode: &'static str,                 // compiletest --mode
    suite: &'static str,                // compiletest --suite
    path: Option<&'static str>,         // suite path under src/, if any
    compare_mode: Option<&'static str>, // extra comparison mode (e.g. "nll")
}
impl Step for Compiletest {
    type Output = ();

    // Never selected by path directly; the concrete suite steps construct a
    // `Compiletest` and `ensure` it.
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    /// Executes the `compiletest` tool to run a suite of tests.
    ///
    /// Compiles all tests with `compiler` for `target` with the specified
    /// compiletest `mode` and `suite` arguments. For example `mode` can be
    /// "run-pass" or `suite` can be something like `debuginfo`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let suite = self.suite;

        // Path for test suite
        let suite_path = self.path.unwrap_or("");

        // Skip codegen tests if they aren't enabled in configuration.
        if !builder.config.codegen_tests && suite == "codegen" {
            return;
        }

        if suite == "debuginfo" {
            // Skip debuginfo tests on MSVC
            if builder.config.build.contains("msvc") {
                return;
            }

            // Resolve the "debuginfo-XXX" placeholder mode to the debugger
            // appropriate for the build platform, then re-run as that step.
            if mode == "debuginfo-XXX" {
                return if builder.config.build.contains("apple") {
                    builder.ensure(Compiletest {
                        mode: "debuginfo-lldb",
                        ..self
                    });
                } else {
                    builder.ensure(Compiletest {
                        mode: "debuginfo-gdb",
                        ..self
                    });
                };
            }

            builder.ensure(dist::DebuggerScripts {
                sysroot: builder.sysroot(compiler),
                host: target,
            });
        }

        if suite.ends_with("fulldeps") ||
            // FIXME: Does pretty need librustc compiled? Note that there are
            // fulldeps test suites with mode = pretty as well.
            mode == "pretty"
        {
            builder.ensure(compile::Rustc { compiler, target });
        }

        // no_std targets have no libtest to build.
        if builder.no_std(target) != Some(true) {
            builder.ensure(compile::Test { compiler, target });
        }
        builder.ensure(native::TestHelpers { target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        let mut cmd = builder.tool_cmd(Tool::Compiletest);

        // compiletest currently has... a lot of arguments, so let's just pass all
        // of them!
        cmd.arg("--compile-lib-path")
            .arg(builder.rustc_libdir(compiler));
        cmd.arg("--run-lib-path")
            .arg(builder.sysroot_libdir(compiler, target));
        cmd.arg("--rustc-path").arg(builder.rustc(compiler));

        let is_rustdoc_ui = suite.ends_with("rustdoc-ui");

        // Avoid depending on rustdoc when we don't need it.
        if mode == "rustdoc"
            || (mode == "run-make" && suite.ends_with("fulldeps"))
            || (mode == "ui" && is_rustdoc_ui)
        {
            cmd.arg("--rustdoc-path")
                .arg(builder.rustdoc(compiler.host));
        }

        cmd.arg("--src-base")
            .arg(builder.src.join("src/test").join(suite));
        cmd.arg("--build-base")
            .arg(testdir(builder, compiler.host).join(suite));
        cmd.arg("--stage-id")
            .arg(format!("stage{}-{}", compiler.stage, target));
        cmd.arg("--mode").arg(mode);
        cmd.arg("--target").arg(target);
        cmd.arg("--host").arg(&*compiler.host);
        cmd.arg("--llvm-filecheck")
            .arg(builder.llvm_filecheck(builder.config.build));

        if builder.config.cmd.bless() {
            cmd.arg("--bless");
        }

        // Command-line --compare-mode wins over the step's built-in one.
        let compare_mode = builder.config.cmd.compare_mode().or(self.compare_mode);

        if let Some(ref nodejs) = builder.config.nodejs {
            cmd.arg("--nodejs").arg(nodejs);
        }

        // rustdoc-ui tests intentionally omit -Crpath (and the test
        // optimization/debuginfo flags below).
        let mut flags = if is_rustdoc_ui {
            Vec::new()
        } else {
            vec!["-Crpath".to_string()]
        };
        if !is_rustdoc_ui {
            if builder.config.rust_optimize_tests {
                flags.push("-O".to_string());
            }
            if builder.config.rust_debuginfo_tests {
                flags.push("-g".to_string());
            }
        }
        flags.push("-Zunstable-options".to_string());
        flags.push(builder.config.cmd.rustc_args().join(" "));

        if let Some(linker) = builder.linker(target) {
            cmd.arg("--linker").arg(linker);
        }

        let hostflags = flags.clone();
        cmd.arg("--host-rustcflags").arg(hostflags.join(" "));

        // Target builds additionally get -Lnative pointing at the compiled
        // C test helpers.
        let mut targetflags = flags.clone();
        targetflags.push(format!(
            "-Lnative={}",
            builder.test_helpers_out(target).display()
        ));
        cmd.arg("--target-rustcflags").arg(targetflags.join(" "));

        cmd.arg("--docck-python").arg(builder.python());

        if builder.config.build.ends_with("apple-darwin") {
            // Force /usr/bin/python on macOS for LLDB tests because we're loading the
            // LLDB plugin's compiled module which only works with the system python
            // (namely not Homebrew-installed python)
            cmd.arg("--lldb-python").arg("/usr/bin/python");
        } else {
            cmd.arg("--lldb-python").arg(builder.python());
        }

        if let Some(ref gdb) = builder.config.gdb {
            cmd.arg("--gdb").arg(gdb);
        }
        if let Some(ref vers) = builder.lldb_version {
            cmd.arg("--lldb-version").arg(vers);
        }
        if let Some(ref dir) = builder.lldb_python_dir {
            cmd.arg("--lldb-python-dir").arg(dir);
        }

        // Get paths from cmd args
        let paths = match &builder.config.cmd {
            Subcommand::Test { ref paths, .. } => &paths[..],
            _ => &[],
        };

        // Get test-args by stripping suite path: any user-supplied path that
        // points at a file inside this suite becomes a per-test filter.
        let mut test_args: Vec<&str> = paths
            .iter()
            .map(|p| {
                match p.strip_prefix(".") {
                    Ok(path) => path,
                    Err(_) => p,
                }
            })
            .filter(|p| p.starts_with(suite_path) && p.is_file())
            .map(|p| p.strip_prefix(suite_path).unwrap().to_str().unwrap())
            .collect();

        test_args.append(&mut builder.config.cmd.test_args());

        cmd.args(&test_args);

        if builder.is_verbose() {
            cmd.arg("--verbose");
        }

        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }

        if builder.config.llvm_enabled {
            let llvm_config = builder.ensure(native::Llvm {
                target: builder.config.build,
                emscripten: false,
            });
            if !builder.config.dry_run {
                let llvm_version = output(Command::new(&llvm_config).arg("--version"));
                cmd.arg("--llvm-version").arg(llvm_version);
            }
            if !builder.is_rust_llvm(target) {
                cmd.arg("--system-llvm");
            }

            // Only pass correct values for these flags for the `run-make` suite as it
            // requires that a C++ compiler was configured which isn't always the case.
            if !builder.config.dry_run && suite == "run-make-fulldeps" {
                let llvm_components = output(Command::new(&llvm_config).arg("--components"));
                let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags"));
                cmd.arg("--cc")
                    .arg(builder.cc(target))
                    .arg("--cxx")
                    .arg(builder.cxx(target).unwrap())
                    .arg("--cflags")
                    .arg(builder.cflags(target).join(" "))
                    .arg("--llvm-components")
                    .arg(llvm_components.trim())
                    .arg("--llvm-cxxflags")
                    .arg(llvm_cxxflags.trim());
                if let Some(ar) = builder.ar(target) {
                    cmd.arg("--ar").arg(ar);
                }
            }
        }
        if suite == "run-make-fulldeps" && !builder.config.llvm_enabled {
            builder.info(&format!(
                "Ignoring run-make test suite as they generally don't work without LLVM"
            ));
            return;
        }

        // compiletest requires these flags to be present on every invocation,
        // so pass empty values for suites that don't need a C toolchain.
        if suite != "run-make-fulldeps" {
            cmd.arg("--cc")
                .arg("")
                .arg("--cxx")
                .arg("")
                .arg("--cflags")
                .arg("")
                .arg("--llvm-components")
                .arg("")
                .arg("--llvm-cxxflags")
                .arg("");
        }

        if builder.remote_tested(target) {
            cmd.arg("--remote-test-client")
                .arg(builder.tool_exe(Tool::RemoteTestClient));
        }

        // Running a C compiler on MSVC requires a few env vars to be set, to be
        // sure to set them here.
        //
        // Note that if we encounter `PATH` we make sure to append to our own `PATH`
        // rather than stomp over it.
        if target.contains("msvc") {
            for &(ref k, ref v) in builder.cc[&target].env() {
                if k != "PATH" {
                    cmd.env(k, v);
                }
            }
        }
        cmd.env("RUSTC_BOOTSTRAP", "1");
        builder.add_rust_test_threads(&mut cmd);

        if builder.config.sanitizers {
            cmd.env("SANITIZER_SUPPORT", "1");
        }

        if builder.config.profiler {
            cmd.env("PROFILER_SUPPORT", "1");
        }

        cmd.env("RUST_TEST_TMPDIR", builder.out.join("tmp"));

        cmd.arg("--adb-path").arg("adb");
        cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR);
        if target.contains("android") {
            // Assume that cc for this target comes from the android sysroot
            cmd.arg("--android-cross-path")
                .arg(builder.cc(target).parent().unwrap().parent().unwrap());
        } else {
            cmd.arg("--android-cross-path").arg("");
        }

        builder.ci_env.force_coloring_in_ci(&mut cmd);

        let _folder = builder.fold_output(|| format!("test_{}", suite));
        builder.info(&format!(
            "Check compiletest suite={} mode={} ({} -> {})",
            suite, mode, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cmd);

        // Optionally re-run the whole suite under the alternate compare mode
        // (e.g. NLL) in a second compiletest invocation.
        if let Some(compare_mode) = compare_mode {
            cmd.arg("--compare-mode").arg(compare_mode);
            let _folder = builder.fold_output(|| format!("test_{}_{}", suite, compare_mode));
            builder.info(&format!(
                "Check compiletest suite={} mode={} compare_mode={} ({} -> {})",
                suite, mode, compare_mode, &compiler.host, target
            ));
            let _time = util::timeit(&builder);
            try_run(builder, &mut cmd);
        }
    }
}
/// Runs `rustdoc --test` over every markdown file in a documentation tree.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct DocTest {
    compiler: Compiler,
    /// Root directory (relative to the source tree) to scan for `*.md` files.
    path: &'static str,
    /// Short name used in log output and toolstate reporting.
    name: &'static str,
    /// Whether this documentation lives out-of-tree; if so, test results are
    /// recorded in the toolstate file rather than failing the build.
    is_ext_doc: bool,
}
impl Step for DocTest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    // Never selected by path; the `test_book!` steps construct this directly.
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    /// Run `rustdoc --test` for all documentation in `src/doc`.
    ///
    /// This will run all tests in our markdown documentation (e.g. the book)
    /// located in `src/doc`. The `rustdoc` that's run is the one that sits next to
    /// `compiler`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;

        builder.ensure(compile::Test {
            compiler,
            target: compiler.host,
        });

        // Do a breadth-first traversal of the `src/doc` directory and just run
        // tests for all files that end in `*.md`
        let mut stack = vec![builder.src.join(self.path)];
        let _time = util::timeit(&builder);
        let _folder = builder.fold_output(|| format!("test_{}", self.name));

        let mut files = Vec::new();
        while let Some(p) = stack.pop() {
            if p.is_dir() {
                stack.extend(t!(p.read_dir()).map(|p| t!(p).path()));
                continue;
            }

            if p.extension().and_then(|s| s.to_str()) != Some("md") {
                continue;
            }

            // The nostarch directory in the book is for no starch, and so isn't
            // guaranteed to build. We don't care if it doesn't build, so skip it.
            if p.to_str().map_or(false, |p| p.contains("nostarch")) {
                continue;
            }

            files.push(p);
        }

        // Sort for deterministic test ordering across runs.
        files.sort();

        let mut toolstate = ToolState::TestPass;
        for file in files {
            if !markdown_test(builder, compiler, &file) {
                toolstate = ToolState::TestFail;
            }
        }
        // Only external (out-of-tree) books report into the toolstate file.
        if self.is_ext_doc {
            builder.save_toolstate(self.name, toolstate);
        }
    }
}
// Declares one doc-testing step per book: each generated step wraps the
// shared `DocTest` runner with the book's path/name, and `default=false`
// books (out-of-tree ones) report results via toolstate instead of failing.
macro_rules! test_book {
    ($($name:ident, $path:expr, $book_name:expr, default=$default:expr;)+) => {
        $(
            #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
            pub struct $name {
                compiler: Compiler,
            }

            impl Step for $name {
                type Output = ();
                const DEFAULT: bool = $default;
                const ONLY_HOSTS: bool = true;

                fn should_run(run: ShouldRun) -> ShouldRun {
                    run.path($path)
                }

                fn make_run(run: RunConfig) {
                    run.builder.ensure($name {
                        compiler: run.builder.compiler(run.builder.top_stage, run.host),
                    });
                }

                fn run(self, builder: &Builder) {
                    builder.ensure(DocTest {
                        compiler: self.compiler,
                        path: $path,
                        name: $book_name,
                        // Out-of-tree books (non-default) report to toolstate.
                        is_ext_doc: !$default,
                    });
                }
            }
        )+
    }
}
// The set of books whose markdown examples we doc-test; `default=false`
// marks externally-maintained books tracked via toolstate.
test_book!(
    Nomicon, "src/doc/nomicon", "nomicon", default=false;
    Reference, "src/doc/reference", "reference", default=false;
    RustdocBook, "src/doc/rustdoc", "rustdoc", default=true;
    RustcBook, "src/doc/rustc", "rustc", default=true;
    RustByExample, "src/doc/rust-by-example", "rust-by-example", default=false;
    TheBook, "src/doc/book", "book", default=false;
    UnstableBook, "src/doc/unstable-book", "unstable-book", default=true;
);
/// Doc-tests the generated error-index markdown (all `E0xxx` explanations).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ErrorIndex {
    compiler: Compiler,
}
impl Step for ErrorIndex {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/error_index_generator")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(ErrorIndex {
            compiler: run.builder.compiler(run.builder.top_stage, run.host),
        });
    }

    /// Run the error index generator tool to execute the tests located in the error
    /// index.
    ///
    /// The `error_index_generator` tool lives in `src/tools` and is used to
    /// generate a markdown file from the error indexes of the code base which is
    /// then passed to `rustdoc --test`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;

        builder.ensure(compile::Std {
            compiler,
            target: compiler.host,
        });

        let dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&dir));
        let output = dir.join("error-index.md");

        // Generate the markdown file, then doc-test it below.
        let mut tool = builder.tool_cmd(Tool::ErrorIndex);
        tool.arg("markdown")
            .arg(&output)
            .env("CFG_BUILD", &builder.config.build)
            .env("RUSTC_ERROR_METADATA_DST", builder.extended_error_dir());

        let _folder = builder.fold_output(|| "test_error_index");
        builder.info(&format!("Testing error-index stage{}", compiler.stage));
        let _time = util::timeit(&builder);
        builder.run(&mut tool);
        markdown_test(builder, compiler, &output);
    }
}
/// Doc-tests a single markdown file with `rustdoc --test`, returning whether
/// the run succeeded.
fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) -> bool {
    // Fast path: a file without any fenced code blocks contains no doc tests,
    // so skip invoking rustdoc entirely and count it as a pass. If the file
    // cannot be opened we fall through and let rustdoc report the problem.
    if let Ok(mut file) = File::open(markdown) {
        let mut contents = String::new();
        t!(file.read_to_string(&mut contents));
        if !contents.contains("```") {
            return true;
        }
    }

    builder.info(&format!("doc tests for: {}", markdown.display()));

    let mut cmd = builder.rustdoc_cmd(compiler.host);
    builder.add_rust_test_threads(&mut cmd);
    cmd.arg("--test")
        .arg(markdown)
        .env("RUSTC_BOOTSTRAP", "1");

    // Forward any user-supplied test filters to the rustdoc test harness.
    let test_args = builder.config.cmd.test_args().join(" ");
    cmd.arg("--test-args").arg(test_args);

    match builder.config.verbose_tests {
        true => try_run(builder, &mut cmd),
        false => try_run_quiet(builder, &mut cmd),
    }
}
/// Runs `cargo test`/`cargo bench` for a single in-tree compiler crate.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateLibrustc {
    compiler: Compiler,
    target: Interned<String>,
    test_kind: TestKind,
    /// Name of the rustc crate being tested (from the `rustc-main` DAG).
    krate: Interned<String>,
}
impl Step for CrateLibrustc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.krate("rustc-main")
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        // Resolve the user-supplied path to the matching in-tree compiler
        // crate(s) and schedule one step per match.
        for krate in builder.in_tree_crates("rustc-main") {
            if run.path.ends_with(&krate.path) {
                let test_kind = builder.kind.into();

                builder.ensure(CrateLibrustc {
                    compiler,
                    target: run.target,
                    test_kind,
                    krate: krate.name,
                });
            }
        }
    }

    // Thin wrapper: all the actual work happens in the generic `Crate` step,
    // parameterized with `Mode::Rustc`.
    fn run(self, builder: &Builder) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Rustc,
            test_kind: self.test_kind,
            krate: self.krate,
        });
    }
}
/// Tests std-adjacent crates (jemalloc shim, sanitizer runtimes) that are
/// not part of the default test set and must be requested by path.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateNotDefault {
    compiler: Compiler,
    target: Interned<String>,
    test_kind: TestKind,
    krate: &'static str,
}
impl Step for CrateNotDefault {
    type Output = ();

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/liballoc_jemalloc")
            .path("src/librustc_asan")
            .path("src/librustc_lsan")
            .path("src/librustc_msan")
            .path("src/librustc_tsan")
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);
        let test_kind = builder.kind.into();

        builder.ensure(CrateNotDefault {
            compiler,
            target: run.target,
            test_kind,
            // Map the requested source path back to the crate name; must stay
            // in sync with the paths registered in `should_run` above.
            krate: match run.path {
                _ if run.path.ends_with("src/liballoc_jemalloc") => "alloc_jemalloc",
                _ if run.path.ends_with("src/librustc_asan") => "rustc_asan",
                _ if run.path.ends_with("src/librustc_lsan") => "rustc_lsan",
                _ if run.path.ends_with("src/librustc_msan") => "rustc_msan",
                _ if run.path.ends_with("src/librustc_tsan") => "rustc_tsan",
                _ => panic!("unexpected path {:?}", run.path),
            },
        });
    }

    // Delegates to the generic `Crate` step with `Mode::Std`.
    fn run(self, builder: &Builder) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Std,
            test_kind: self.test_kind,
            krate: INTERNER.intern_str(self.krate),
        });
    }
}
/// Generic `cargo test`/`cargo bench` runner for a single in-tree crate
/// (std, test, or rustc family — selected by `mode`).
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Crate {
    pub compiler: Compiler,
    pub target: Interned<String>,
    pub mode: Mode,
    pub test_kind: TestKind,
    pub krate: Interned<String>,
}
impl Step for Crate {
    type Output = ();
    const DEFAULT: bool = true;

    fn should_run(mut run: ShouldRun) -> ShouldRun {
        let builder = run.builder;
        run = run.krate("test");
        // Register a path for every local std-DAG crate, excluding the ones
        // handled by `CrateNotDefault` (jemalloc/sanitizers) and dlmalloc.
        for krate in run.builder.in_tree_crates("std") {
            if krate.is_local(&run.builder)
                && !krate.name.contains("jemalloc")
                && !(krate.name.starts_with("rustc_") && krate.name.ends_with("san"))
                && krate.name != "dlmalloc"
            {
                run = run.path(krate.local_path(&builder).to_str().unwrap());
            }
        }
        run
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        let make = |mode: Mode, krate: &CargoCrate| {
            let test_kind = builder.kind.into();

            builder.ensure(Crate {
                compiler,
                target: run.target,
                mode,
                test_kind,
                krate: krate.name,
            });
        };

        // A crate may live in both DAGs; schedule it under whichever mode(s)
        // its path matches.
        for krate in builder.in_tree_crates("std") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Std, krate);
            }
        }
        for krate in builder.in_tree_crates("test") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Test, krate);
            }
        }
    }

    /// Run all unit tests plus documentation tests for a given crate defined
    /// by a `Cargo.toml` (single manifest)
    ///
    /// This is what runs tests for crates like the standard library, compiler, etc.
    /// It essentially is the driver for running `cargo test`.
    ///
    /// Currently this runs all tests for a DAG by passing a bunch of `-p foo`
    /// arguments, and those arguments are discovered from `cargo metadata`.
    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let test_kind = self.test_kind;
        let krate = self.krate;

        builder.ensure(compile::Test { compiler, target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        // If we're not doing a full bootstrap but we're testing a stage2 version of
        // libstd, then what we're actually testing is the libstd produced in
        // stage1. Reflect that here by updating the compiler that we're working
        // with automatically.
        let compiler = if builder.force_use_stage1(compiler, target) {
            builder.compiler(1, compiler.host)
        } else {
            compiler.clone()
        };

        let mut cargo = builder.cargo(compiler, mode, target, test_kind.subcommand());
        match mode {
            Mode::Std => {
                compile::std_cargo(builder, &compiler, target, &mut cargo);
            }
            Mode::Test => {
                compile::test_cargo(builder, &compiler, target, &mut cargo);
            }
            Mode::Rustc => {
                builder.ensure(compile::Rustc { compiler, target });
                compile::rustc_cargo(builder, &mut cargo);
            }
            _ => panic!("can only test libraries"),
        };

        // Build up the base `cargo test` command.
        //
        // Pass in some standard flags then iterate over the graph we've discovered
        // in `cargo metadata` with the maps above and figure out what `-p`
        // arguments need to get passed.
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }
        match builder.doc_tests {
            DocTests::Only => {
                cargo.arg("--doc");
            }
            DocTests::No => {
                // Every non-doc test target; keeps doc tests excluded.
                cargo.args(&["--lib", "--bins", "--examples", "--tests", "--benches"]);
            }
            DocTests::Yes => {}
        }

        cargo.arg("-p").arg(krate);

        // The tests are going to run with the *target* libraries, so we need to
        // ensure that those libraries show up in the LD_LIBRARY_PATH equivalent.
        //
        // Note that to run the compiler we need to run with the *host* libraries,
        // but our wrapper scripts arrange for that to be the case anyway.
        let mut dylib_path = dylib_path();
        dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target)));
        cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        // Cross/emulated targets need a runner wired up via Cargo's
        // CARGO_TARGET_<triple>_RUNNER environment variable.
        if target.contains("emscripten") {
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                builder
                    .config
                    .nodejs
                    .as_ref()
                    .expect("nodejs not configured"),
            );
        } else if target.starts_with("wasm32") {
            // Warn about running tests without the `wasm_syscall` feature enabled.
            // The javascript shim implements the syscall interface so that test
            // output can be correctly reported.
            if !builder.config.wasm_syscall {
                builder.info(&format!(
                    "Libstd was built without `wasm_syscall` feature enabled: \
                     test output may not be visible."
                ));
            }

            // On the wasm32-unknown-unknown target we're using LTO which is
            // incompatible with `-C prefer-dynamic`, so disable that here
            cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");

            let node = builder
                .config
                .nodejs
                .as_ref()
                .expect("nodejs not configured");
            let runner = format!(
                "{} {}/src/etc/wasm32-shim.js",
                node.display(),
                builder.src.display()
            );
            cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), &runner);
        } else if builder.remote_tested(target) {
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                format!("{} run", builder.tool_exe(Tool::RemoteTestClient).display()),
            );
        }

        let _folder = builder.fold_output(|| {
            format!(
                "{}_stage{}-{}",
                test_kind.subcommand(),
                compiler.stage,
                krate
            )
        });
        builder.info(&format!(
            "{} {} stage{} ({} -> {})",
            test_kind, krate, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cargo);
    }
}
/// Runs `cargo test`/`cargo bench` for rustdoc itself.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateRustdoc {
    host: Interned<String>,
    test_kind: TestKind,
}
impl Step for CrateRustdoc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.paths(&["src/librustdoc", "src/tools/rustdoc"])
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;

        let test_kind = builder.kind.into();

        builder.ensure(CrateRustdoc {
            host: run.host,
            test_kind,
        });
    }

    fn run(self, builder: &Builder) {
        let test_kind = self.test_kind;

        let compiler = builder.compiler(builder.top_stage, self.host);
        // rustdoc is tested for the host it was built on.
        let target = compiler.host;
        builder.ensure(compile::Rustc { compiler, target });

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 Mode::ToolRustc,
                                                 target,
                                                 test_kind.subcommand(),
                                                 "src/tools/rustdoc",
                                                 SourceType::InTree);
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        // Pin the exact package (name:version) so cargo doesn't pick up a
        // similarly-named dependency.
        cargo.arg("-p").arg("rustdoc:0.0.0");

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        let _folder = builder
            .fold_output(|| format!("{}_stage{}-rustdoc", test_kind.subcommand(), compiler.stage));
        builder.info(&format!(
            "{} rustdoc stage{} ({} -> {})",
            test_kind, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cargo);
    }
}
/// Converts a target triple (or similar string) into the form Cargo uses in
/// environment variable names: dashes become underscores and everything is
/// uppercased (e.g. "x86_64-apple-darwin" -> "X86_64_APPLE_DARWIN").
fn envify(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for ch in s.chars() {
        let ch = if ch == '-' { '_' } else { ch };
        // `to_uppercase` yields an iterator because some characters uppercase
        // to more than one char.
        out.extend(ch.to_uppercase());
    }
    out
}
/// Some test suites are run inside emulators or on remote devices, and most
/// of our test binaries are linked dynamically which means we need to ship
/// the standard library and such to the emulator ahead of time. This step
/// represents this and is a dependency of all test suites.
///
/// Most of the time this is a noop. For some steps such as shipping data to
/// QEMU we have to build our own tools so we've got conditional dependencies
/// on those programs as well. Note that the remote test client is built for
/// the build target (us) and the server is built for the target.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct RemoteCopyLibs {
    compiler: Compiler,
    target: Interned<String>,
}
impl Step for RemoteCopyLibs {
    type Output = ();

    // Only ever run as a dependency of the test suites, never by path.
    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    fn run(self, builder: &Builder) {
        let compiler = self.compiler;
        let target = self.target;
        // No-op unless this target is actually tested remotely.
        if !builder.remote_tested(target) {
            return;
        }

        builder.ensure(compile::Test { compiler, target });

        builder.info(&format!("REMOTE copy libs to emulator ({})", target));
        t!(fs::create_dir_all(builder.out.join("tmp")));

        // The test server runs on the target, so it's built with the stage0
        // compiler regardless of what stage we're testing.
        let server = builder.ensure(tool::RemoteTestServer {
            compiler: compiler.with_stage(0),
            target,
        });

        // Spawn the emulator and wait for it to come online
        let tool = builder.tool_exe(Tool::RemoteTestClient);
        let mut cmd = Command::new(&tool);
        cmd.arg("spawn-emulator")
            .arg(target)
            .arg(&server)
            .arg(builder.out.join("tmp"));
        if let Some(rootfs) = builder.qemu_rootfs(target) {
            cmd.arg(rootfs);
        }
        builder.run(&mut cmd);

        // Push all our dylibs to the emulator
        for f in t!(builder.sysroot_libdir(compiler, target).read_dir()) {
            let f = t!(f);
            let name = f.file_name().into_string().unwrap();
            if util::is_dylib(&name) {
                builder.run(Command::new(&tool).arg("push").arg(f.path()));
            }
        }
    }
}
/// Verifies that the distribution tarballs unpack, configure, and build from
/// scratch ("make check" from a tarball).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Distcheck;
impl Step for Distcheck {
    type Output = ();

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("distcheck")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Distcheck);
    }

    /// Run "distcheck", a 'make check' from a tarball
    fn run(self, builder: &Builder) {
        // `format!` with no interpolation just allocates; pass the literal
        // (clippy: useless_format).
        builder.info("Distcheck");
        let dir = builder.out.join("tmp").join("distcheck");
        // Ignore the result: the directory may not exist on a fresh run.
        let _ = fs::remove_dir_all(&dir);
        t!(fs::create_dir_all(&dir));

        // Guarantee that these are built before we begin running.
        builder.ensure(dist::PlainSourceTarball);
        builder.ensure(dist::Src);

        // Unpack the plain source tarball and do a full configure + make check
        // inside it.
        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::PlainSourceTarball))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);
        builder.run(
            Command::new("./configure")
                .args(&builder.config.configure_args)
                .arg("--enable-vendor")
                .current_dir(&dir),
        );
        builder.run(
            Command::new(build_helper::make(&builder.config.build))
                .arg("check")
                .current_dir(&dir),
        );

        // Now make sure that rust-src has all of libstd's dependencies
        builder.info("Distcheck rust-src");
        let dir = builder.out.join("tmp").join("distcheck-src");
        let _ = fs::remove_dir_all(&dir);
        t!(fs::create_dir_all(&dir));

        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::Src))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);

        // Generating a lockfile forces cargo to resolve every libstd
        // dependency; it fails if the rust-src component is missing any.
        let toml = dir.join("rust-src/lib/rustlib/src/rust/src/libstd/Cargo.toml");
        builder.run(
            Command::new(&builder.initial_cargo)
                .arg("generate-lockfile")
                .arg("--manifest-path")
                .arg(&toml)
                .current_dir(&dir),
        );
    }
}
/// Runs the build system's own unit tests (`cargo test` in `src/bootstrap`).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Bootstrap;

impl Step for Bootstrap {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    /// Test the build system itself
    fn run(self, builder: &Builder) {
        let mut cmd = Command::new(&builder.initial_cargo);
        cmd.arg("test")
            .current_dir(builder.src.join("src/bootstrap"))
            .env("RUSTFLAGS", "-Cdebuginfo=2")
            .env("CARGO_TARGET_DIR", builder.out.join("bootstrap"))
            .env("RUSTC_BOOTSTRAP", "1")
            .env("RUSTC", &builder.initial_rustc);
        if let Some(flags) = option_env!("RUSTFLAGS") {
            // Use the same rustc flags for testing as for "normal" compilation,
            // so that Cargo doesn’t recompile the entire dependency graph every time:
            // https://github.com/rust-lang/rust/issues/49215
            cmd.env("RUSTFLAGS", flags);
        }
        if !builder.fail_fast {
            cmd.arg("--no-fail-fast");
        }
        cmd.arg("--").args(&builder.config.cmd.test_args());
        // rustbuild tests are racy on directory creation so just run them one at a time.
        // Since there's not many this shouldn't be a problem.
        cmd.arg("--test-threads=1");
        try_run(builder, &mut cmd);
    }

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/bootstrap")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Bootstrap);
    }
}
|
//! Implementation of the test-related targets of the build system.
//!
//! This file implements the various regression test suites that we execute on
//! our CI.
use std::env;
use std::ffi::OsString;
use std::fmt;
use std::fs;
use std::iter;
use std::path::{Path, PathBuf};
use std::process::Command;
use build_helper::{self, output, t};
use crate::builder::{Builder, Compiler, Kind, RunConfig, ShouldRun, Step};
use crate::cache::{Interned, INTERNER};
use crate::compile;
use crate::dist;
use crate::flags::Subcommand;
use crate::native;
use crate::tool::{self, SourceType, Tool};
use crate::toolstate::ToolState;
use crate::util::{self, add_link_lib_path, dylib_path, dylib_path_var};
use crate::Crate as CargoCrate;
use crate::{envify, DocTests, GitRepo, Mode};
const ADB_TEST_DIR: &str = "/data/tmp/work";
/// The two modes of the test runner; tests or benchmarks.
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, PartialOrd, Ord)]
pub enum TestKind {
    /// Run `cargo test`.
    Test,
    /// Run `cargo bench`.
    Bench,
}
impl From<Kind> for TestKind {
    /// Maps the builder's subcommand kind onto a test kind; only `test` and
    /// `bench` are meaningful here.
    fn from(kind: Kind) -> Self {
        match kind {
            Kind::Bench => TestKind::Bench,
            Kind::Test => TestKind::Test,
            other => panic!("unexpected kind in crate: {:?}", other),
        }
    }
}
impl TestKind {
    /// The cargo subcommand ("test" or "bench") this kind corresponds to.
    fn subcommand(self) -> &'static str {
        if let TestKind::Bench = self {
            "bench"
        } else {
            "test"
        }
    }
}
impl fmt::Display for TestKind {
    /// Human-readable progress verb for log lines ("Testing" / "Benchmarking").
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            TestKind::Test => "Testing",
            TestKind::Bench => "Benchmarking",
        };
        f.write_str(label)
    }
}
/// Runs `cmd`; in fail-fast mode a failure aborts the build, otherwise the
/// failure is recorded for end-of-run reporting and `false` is returned.
fn try_run(builder: &Builder<'_>, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        // `run` panics/aborts on failure, so reaching the return means success.
        builder.run(cmd);
        return true;
    }
    let succeeded = builder.try_run(cmd);
    if !succeeded {
        // Remember the failed command so it can be summarized at the end.
        builder.delayed_failures.borrow_mut().push(format!("{:?}", cmd));
    }
    succeeded
}
/// Like `try_run`, but suppresses the command's output unless it fails.
fn try_run_quiet(builder: &Builder<'_>, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run_quiet(cmd);
        return true;
    }
    let succeeded = builder.try_run_quiet(cmd);
    if !succeeded {
        // Record the failure for the end-of-run summary instead of aborting.
        builder.delayed_failures.borrow_mut().push(format!("{:?}", cmd));
    }
    succeeded
}
/// Runs the `linkchecker` tool over the generated documentation to find
/// broken links.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Linkcheck {
    host: Interned<String>,
}
impl Step for Linkcheck {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;

    /// Runs the `linkchecker` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will verify the validity of all our links in the
    /// documentation to ensure we don't have a bunch of dead ones.
    fn run(self, builder: &Builder<'_>) {
        let host = self.host;

        builder.info(&format!("Linkcheck ({})", host));

        // Docs must exist before their links can be checked.
        builder.default_doc(None);

        let _time = util::timeit(&builder);
        try_run(
            builder,
            builder.tool_cmd(Tool::Linkchecker).arg(builder.out.join(host).join("doc")),
        );
    }

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        let builder = run.builder;
        // Only runs by default when documentation is enabled in the config.
        run.path("src/tools/linkchecker").default_condition(builder.config.docs)
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Linkcheck { host: run.target });
    }
}
/// Runs the `cargotest` tool: checks out a handful of real-world Rust
/// projects and builds/tests them with the freshly built compiler.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargotest {
    stage: u32,
    host: Interned<String>,
}
impl Step for Cargotest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/cargotest")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Cargotest { stage: run.builder.top_stage, host: run.target });
    }

    /// Runs the `cargotest` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will check out a few Rust projects and run `cargo
    /// test` to ensure that we don't regress the test suites there.
    fn run(self, builder: &Builder<'_>) {
        let compiler = builder.compiler(self.stage, self.host);
        builder.ensure(compile::Rustc { compiler, target: compiler.host });

        // Note that this is a short, cryptic, and not scoped directory name. This
        // is currently to minimize the length of path on Windows where we otherwise
        // quickly run into path name limit constraints.
        let out_dir = builder.out.join("ct");
        t!(fs::create_dir_all(&out_dir));

        let _time = util::timeit(&builder);
        let mut cmd = builder.tool_cmd(Tool::CargoTest);
        try_run(
            builder,
            cmd.arg(&builder.initial_cargo)
                .arg(&out_dir)
                .env("RUSTC", builder.rustc(compiler))
                .env("RUSTDOC", builder.rustdoc(compiler)),
        );
    }
}
/// Runs cargo's own test suite against the freshly built compiler.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargo {
    stage: u32,
    host: Interned<String>,
}
impl Step for Cargo {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/cargo")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Cargo { stage: run.builder.top_stage, host: run.target });
    }

    /// Runs `cargo test` for `cargo` packaged with Rust.
    fn run(self, builder: &Builder<'_>) {
        let compiler = builder.compiler(self.stage, self.host);

        // Build cargo itself first so its tests have something to run against.
        builder.ensure(tool::Cargo { compiler, target: self.host });
        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            self.host,
            "test",
            "src/tools/cargo",
            SourceType::Submodule,
            &[],
        );

        if !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        // Don't run cross-compile tests, we may not have cross-compiled libstd libs
        // available.
        cargo.env("CFG_DISABLE_CROSS_TESTS", "1");
        // Disable a test that has issues with mingw.
        cargo.env("CARGO_TEST_DISABLE_GIT_CLI", "1");
        // Forcibly disable tests using nightly features since any changes to
        // those features won't be able to land.
        cargo.env("CARGO_TEST_DISABLE_NIGHTLY", "1");

        cargo.env("PATH", &path_for_cargo(builder, compiler));

        try_run(builder, &mut cargo.into());
    }
}
/// Runs the Rust Language Server's test suite; results feed into toolstate.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rls {
    stage: u32,
    host: Interned<String>,
}
impl Step for Rls {
type Output = ();
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src/tools/rls")
}
fn make_run(run: RunConfig<'_>) {
run.builder.ensure(Rls { stage: run.builder.top_stage, host: run.target });
}
/// Runs `cargo test` for the rls.
fn run(self, builder: &Builder<'_>) {
let stage = self.stage;
let host = self.host;
let compiler = builder.compiler(stage, host);
let build_result =
builder.ensure(tool::Rls { compiler, target: self.host, extra_features: Vec::new() });
if build_result.is_none() {
eprintln!("failed to test rls: could not build");
return;
}
let mut cargo = tool::prepare_tool_cargo(
builder,
compiler,
Mode::ToolRustc,
host,
"test",
"src/tools/rls",
SourceType::Submodule,
&[],
);
builder.add_rustc_lib_path(compiler, &mut cargo);
cargo.arg("--").args(builder.config.cmd.test_args());
if try_run(builder, &mut cargo.into()) {
builder.save_toolstate("rls", ToolState::TestPass);
}
}
}
/// Test step for the rustfmt submodule.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rustfmt {
    stage: u32,
    host: Interned<String>,
}
impl Step for Rustfmt {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/rustfmt")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Rustfmt { stage: run.builder.top_stage, host: run.target });
    }
    /// Runs `cargo test` for rustfmt.
    fn run(self, builder: &Builder<'_>) {
        let compiler = builder.compiler(self.stage, self.host);
        // rustfmt tracks toolstate, so a failed build is reported rather than
        // treated as a hard error.
        if builder
            .ensure(tool::Rustfmt { compiler, target: self.host, extra_features: Vec::new() })
            .is_none()
        {
            eprintln!("failed to test rustfmt: could not build");
            return;
        }
        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            self.host,
            "test",
            "src/tools/rustfmt",
            SourceType::Submodule,
            &[],
        );
        // rustfmt's test suite writes its output under this directory.
        let test_dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&test_dir));
        cargo.env("RUSTFMT_TEST_DIR", test_dir);
        builder.add_rustc_lib_path(compiler, &mut cargo);
        if try_run(builder, &mut cargo.into()) {
            builder.save_toolstate("rustfmt", ToolState::TestPass);
        }
    }
}
/// Test step for the miri submodule (the MIR interpreter).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Miri {
    /// Compiler stage to test against.
    stage: u32,
    /// Host triple the tool is built for and tested on.
    host: Interned<String>,
}
impl Step for Miri {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/miri")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Miri { stage: run.builder.top_stage, host: run.target });
    }
    /// Runs `cargo test` for miri.
    fn run(self, builder: &Builder<'_>) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);
        // Build both miri and cargo-miri; either may fail (toolstate-tracked),
        // in which case we report and skip instead of panicking.
        let miri =
            builder.ensure(tool::Miri { compiler, target: self.host, extra_features: Vec::new() });
        let cargo_miri = builder.ensure(tool::CargoMiri {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let (Some(miri), Some(_cargo_miri)) = (miri, cargo_miri) {
            // # Install xargo, which `cargo miri setup` needs to build a libstd.
            let mut cargo = builder.cargo(compiler, Mode::ToolRustc, host, "install");
            cargo.arg("xargo");
            // Configure `cargo install` path. cargo adds a `bin/`.
            cargo.env("CARGO_INSTALL_ROOT", &builder.out);
            let mut cargo = Command::from(cargo);
            if !try_run(builder, &mut cargo) {
                return;
            }
            // # Run `cargo miri setup`.
            let mut cargo = tool::prepare_tool_cargo(
                builder,
                compiler,
                Mode::ToolRustc,
                host,
                "run",
                "src/tools/miri/cargo-miri",
                SourceType::Submodule,
                &[],
            );
            cargo.arg("--").arg("miri").arg("setup");
            // Tell `cargo miri setup` where to find the sources.
            cargo.env("XARGO_RUST_SRC", builder.src.join("src"));
            // Tell it where to find Miri.
            cargo.env("MIRI", &miri);
            // Debug things.
            cargo.env("RUST_BACKTRACE", "1");
            // Overwrite bootstrap's `rustc` wrapper overwriting our flags.
            cargo.env("RUSTC_DEBUG_ASSERTIONS", "true");
            // Let cargo-miri know where xargo ended up.
            cargo.env("XARGO_CHECK", builder.out.join("bin").join("xargo-check"));
            let mut cargo = Command::from(cargo);
            if !try_run(builder, &mut cargo) {
                return;
            }
            // # Determine where Miri put its sysroot.
            // To this end, we run `cargo miri setup --print-sysroot` and capture the output.
            // (We do this separately from the above so that when the setup actually
            // happens we get some output.)
            // We re-use the `cargo` from above.
            cargo.arg("--print-sysroot");
            // FIXME: Is there a way in which we can re-use the usual `run` helpers?
            let miri_sysroot = if builder.config.dry_run {
                String::new()
            } else {
                builder.verbose(&format!("running: {:?}", cargo));
                let out = cargo
                    .output()
                    .expect("We already ran `cargo miri setup` before and that worked");
                assert!(out.status.success(), "`cargo miri setup` returned with non-0 exit code");
                // Output is "<sysroot>\n".
                let stdout = String::from_utf8(out.stdout)
                    .expect("`cargo miri setup` stdout is not valid UTF-8");
                let sysroot = stdout.trim_end();
                builder.verbose(&format!("`cargo miri setup --print-sysroot` said: {:?}", sysroot));
                sysroot.to_owned()
            };
            // # Run `cargo test`.
            let mut cargo = tool::prepare_tool_cargo(
                builder,
                compiler,
                Mode::ToolRustc,
                host,
                "test",
                "src/tools/miri",
                SourceType::Submodule,
                &[],
            );
            // miri tests need to know about the stage sysroot
            cargo.env("MIRI_SYSROOT", miri_sysroot);
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            cargo.env("MIRI", miri);
            // Forward user-supplied filters to the test harness.
            cargo.arg("--").args(builder.config.cmd.test_args());
            builder.add_rustc_lib_path(compiler, &mut cargo);
            if !try_run(builder, &mut cargo.into()) {
                return;
            }
            // # Done!
            builder.save_toolstate("miri", ToolState::TestPass);
        } else {
            eprintln!("failed to test miri: could not build");
        }
    }
}
/// Test step for compiletest's own unit tests.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CompiletestTest {
    host: Interned<String>,
}
impl Step for CompiletestTest {
    type Output = ();
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/compiletest")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(CompiletestTest { host: run.target });
    }
    /// Runs `cargo test` for compiletest.
    fn run(self, builder: &Builder<'_>) {
        // compiletest is a bootstrap tool, so it is built and tested with the
        // stage0 compiler.
        let compiler = builder.compiler(0, self.host);
        let cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolBootstrap,
            self.host,
            "test",
            "src/tools/compiletest",
            SourceType::InTree,
            &[],
        );
        try_run(builder, &mut cargo.into());
    }
}
/// Test step for clippy; not run by default.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Clippy {
    stage: u32,
    host: Interned<String>,
}
impl Step for Clippy {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = false;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/clippy")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Clippy { stage: run.builder.top_stage, host: run.target });
    }
    /// Runs `cargo test` for clippy.
    fn run(self, builder: &Builder<'_>) {
        let compiler = builder.compiler(self.stage, self.host);
        // Clippy is in-tree, so a failure to build it is a hard error.
        let clippy = builder
            .ensure(tool::Clippy { compiler, target: self.host, extra_features: Vec::new() })
            .expect("in-tree tool");
        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            self.host,
            "test",
            "src/tools/clippy",
            SourceType::InTree,
            &[],
        );
        // clippy tests need to know about the stage sysroot
        cargo.env("SYSROOT", builder.sysroot(compiler));
        cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
        cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
        // Where host-side and target-side build artifacts of the tool live.
        let stage_out = builder.stage_out(compiler, Mode::ToolRustc);
        cargo.env("HOST_LIBS", stage_out.join(builder.cargo_dir()));
        cargo.env("TARGET_LIBS", stage_out.join(&self.host).join(builder.cargo_dir()));
        // clippy tests need to find the driver
        cargo.env("CLIPPY_DRIVER_PATH", clippy);
        cargo.arg("--").args(builder.config.cmd.test_args());
        builder.add_rustc_lib_path(compiler, &mut cargo);
        builder.run(&mut cargo.into());
    }
}
/// Builds a `PATH` value that puts `compiler`'s sysroot `bin/` directory in
/// front of the inherited `PATH`, so that child processes find that `rustc`
/// first.
///
/// # Panics
///
/// Panics if the resulting paths cannot be joined into a single `PATH`
/// value (e.g. a component contains the platform's path separator).
fn path_for_cargo(builder: &Builder<'_>, compiler: Compiler) -> OsString {
    // Configure PATH to find the right rustc. NB. we have to use PATH
    // and not RUSTC because the Cargo test suite has tests that will
    // fail if rustc is not spelled `rustc`.
    let path = builder.sysroot(compiler).join("bin");
    let old_path = env::var_os("PATH").unwrap_or_default();
    // BUGFIX: the previous `.expect("")` panicked with an empty, useless
    // message; state what actually failed.
    env::join_paths(iter::once(path).chain(env::split_paths(&old_path)))
        .expect("failed to prepend sysroot bin dir to PATH")
}
/// Test step validating rustdoc's bundled CSS themes.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocTheme {
    pub compiler: Compiler,
}
impl Step for RustdocTheme {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/rustdoc-themes")
    }
    fn make_run(run: RunConfig<'_>) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);
        run.builder.ensure(RustdocTheme { compiler });
    }
    fn run(self, builder: &Builder<'_>) {
        let compiler = self.compiler;
        // The theme checker drives bootstrap's own rustdoc wrapper binary.
        let rustdoc = builder.out.join("bootstrap/debug/rustdoc");
        let mut cmd = builder.tool_cmd(Tool::RustdocTheme);
        cmd.arg(rustdoc.to_str().unwrap());
        cmd.arg(builder.src.join("src/librustdoc/html/static/themes").to_str().unwrap());
        // Environment the rustdoc wrapper expects.
        cmd.env("RUSTC_STAGE", compiler.stage.to_string());
        cmd.env("RUSTC_SYSROOT", builder.sysroot(compiler));
        cmd.env("RUSTDOC_LIBDIR", builder.sysroot_libdir(compiler, compiler.host));
        cmd.env("CFG_RELEASE_CHANNEL", &builder.config.channel);
        cmd.env("RUSTDOC_REAL", builder.rustdoc(compiler));
        cmd.env("RUSTDOC_CRATE_VERSION", builder.rust_version());
        cmd.env("RUSTC_BOOTSTRAP", "1");
        if let Some(linker) = builder.linker(compiler.host, true) {
            cmd.env("RUSTC_TARGET_LINKER", linker);
        }
        try_run(builder, &mut cmd);
    }
}
/// Test step for the rustdoc-js-std suite (search-index tests against
/// the std docs), driven by nodejs.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocJSStd {
    pub target: Interned<String>,
}
impl Step for RustdocJSStd {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/test/rustdoc-js-std")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(RustdocJSStd { target: run.target });
    }
    fn run(self, builder: &Builder<'_>) {
        // These tests can only run when nodejs is configured.
        let nodejs = match builder.config.nodejs {
            Some(ref nodejs) => nodejs,
            None => {
                builder.info("No nodejs found, skipping \"src/test/rustdoc-js-std\" tests");
                return;
            }
        };
        let mut command = Command::new(nodejs);
        command
            .arg(builder.src.join("src/tools/rustdoc-js/tester.js"))
            .arg("--crate-name")
            .arg("std")
            .arg("--resource-suffix")
            .arg(crate::channel::CFG_RELEASE_NUM)
            .arg("--doc-folder")
            .arg(builder.doc_out(self.target))
            .arg("--test-folder")
            .arg(builder.src.join("src/test/rustdoc-js-std"));
        // The std docs must exist before the tester can inspect them.
        builder.ensure(crate::doc::Std { target: self.target, stage: builder.top_stage });
        builder.run(&mut command);
    }
}
/// Test step for the rustdoc-js suite (non-std search-index tests),
/// run through compiletest's `js-doc-test` mode.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocJSNotStd {
    pub host: Interned<String>,
    pub target: Interned<String>,
    pub compiler: Compiler,
}
impl Step for RustdocJSNotStd {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/test/rustdoc-js")
    }
    fn make_run(run: RunConfig<'_>) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);
        run.builder.ensure(RustdocJSNotStd { host: run.host, target: run.target, compiler });
    }
    fn run(self, builder: &Builder<'_>) {
        // nodejs is required to execute the JS test driver.
        if builder.config.nodejs.is_none() {
            builder.info("No nodejs found, skipping \"src/test/rustdoc-js\" tests");
            return;
        }
        builder.ensure(Compiletest {
            compiler: self.compiler,
            target: self.target,
            mode: "js-doc-test",
            suite: "rustdoc-js",
            path: "src/test/rustdoc-js",
            compare_mode: None,
        });
    }
}
/// Test step for the rustdoc-ui suite, run via compiletest in `ui` mode.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocUi {
    pub host: Interned<String>,
    pub target: Interned<String>,
    pub compiler: Compiler,
}
impl Step for RustdocUi {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/test/rustdoc-ui")
    }
    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);
        builder.ensure(RustdocUi { host: run.host, target: run.target, compiler });
    }
    fn run(self, builder: &Builder<'_>) {
        // Delegate entirely to the shared compiletest runner.
        builder.ensure(Compiletest {
            compiler: self.compiler,
            target: self.target,
            mode: "ui",
            suite: "rustdoc-ui",
            path: "src/test/rustdoc-ui",
            compare_mode: None,
        });
    }
}
/// Test step that runs the in-tree `tidy` style checker.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Tidy;
impl Step for Tidy {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/tidy")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Tidy);
    }
    /// Runs the `tidy` tool.
    ///
    /// This tool in `src/tools` checks up on various bits and pieces of style and
    /// otherwise just implements a few lint-like checks that are specific to the
    /// compiler itself.
    ///
    /// Once tidy passes, this step also runs `fmt --check` if tests are being run
    /// for the `dev` or `nightly` channels.
    fn run(self, builder: &Builder<'_>) {
        let mut tidy = builder.tool_cmd(Tool::Tidy);
        tidy.arg(builder.src.join("src"));
        tidy.arg(&builder.initial_cargo);
        if builder.is_verbose() {
            tidy.arg("--verbose");
        }
        builder.info("tidy check");
        try_run(builder, &mut tidy);
        // Formatting is only enforced on channels where contributors work.
        let channel = &builder.config.channel;
        if channel == "dev" || channel == "nightly" {
            builder.info("fmt check");
            crate::format::format(&builder.build, !builder.config.cmd.bless());
        }
    }
}
/// Test step verifying the checked-in CI config is up to date.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ExpandYamlAnchors;
impl Step for ExpandYamlAnchors {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/expand-yaml-anchors")
    }
    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(ExpandYamlAnchors);
    }
    /// Ensure the `generate-ci-config` tool was run locally.
    ///
    /// The tool in `src/tools` reads the CI definition in `src/ci/builders.yml` and generates the
    /// appropriate configuration for all our CI providers. This step ensures the tool was called
    /// by the user before committing CI changes.
    fn run(self, builder: &Builder<'_>) {
        builder.info("Ensuring the YAML anchors in the GitHub Actions config were expanded");
        let mut check = builder.tool_cmd(Tool::ExpandYamlAnchors);
        check.arg("check").arg(&builder.src);
        try_run(builder, &mut check);
    }
}
/// Returns the per-host scratch directory (`<out>/<host>/test`) used by test
/// suites for build artifacts and output files.
fn testdir(builder: &Builder<'_>, host: Interned<String>) -> PathBuf {
    let mut dir = builder.out.join(host);
    dir.push("test");
    dir
}
/// Declares a compiletest suite that runs by default on all targets
/// (`default: true`, `host: false`), with no compare mode.
macro_rules! default_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: false });
    };
}
/// Like `default_test!`, but additionally wires up a compare mode
/// (e.g. "nll") that the suite is re-run under when enabled.
macro_rules! default_test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr,
                   compare_mode: $compare_mode:expr }) => {
        test_with_compare_mode!($name {
            path: $path,
            mode: $mode,
            suite: $suite,
            default: true,
            host: false,
            compare_mode: $compare_mode
        });
    };
}
/// Declares a compiletest suite that runs by default but only on host
/// targets (`host: true`).
macro_rules! host_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: true });
    };
}
/// Declares a compiletest suite with explicit `default`/`host` flags and
/// no compare mode; expands to `test_definitions!`.
macro_rules! test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr }) => {
        test_definitions!($name {
            path: $path,
            mode: $mode,
            suite: $suite,
            default: $default,
            host: $host,
            compare_mode: None
        });
    };
}
/// Like `test!`, but wraps the given compare mode in `Some(..)` before
/// forwarding to `test_definitions!`.
macro_rules! test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr, compare_mode: $compare_mode:expr }) => {
        test_definitions!($name {
            path: $path,
            mode: $mode,
            suite: $suite,
            default: $default,
            host: $host,
            compare_mode: Some($compare_mode)
        });
    };
}
/// The workhorse behind the `test!` family of macros: generates a unit
/// struct named `$name` and a `Step` impl whose `run` delegates to the
/// shared `Compiletest` step with the given suite parameters.
macro_rules! test_definitions {
    ($name:ident {
        path: $path:expr,
        mode: $mode:expr,
        suite: $suite:expr,
        default: $default:expr,
        host: $host:expr,
        compare_mode: $compare_mode:expr
    }) => {
        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
        pub struct $name {
            pub compiler: Compiler,
            pub target: Interned<String>,
        }
        impl Step for $name {
            type Output = ();
            const DEFAULT: bool = $default;
            const ONLY_HOSTS: bool = $host;
            fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
                // Match on the suite directory so sub-paths select subsets.
                run.suite_path($path)
            }
            fn make_run(run: RunConfig<'_>) {
                let compiler = run.builder.compiler(run.builder.top_stage, run.host);
                run.builder.ensure($name { compiler, target: run.target });
            }
            fn run(self, builder: &Builder<'_>) {
                builder.ensure(Compiletest {
                    compiler: self.compiler,
                    target: self.target,
                    mode: $mode,
                    suite: $suite,
                    path: $path,
                    compare_mode: $compare_mode,
                })
            }
        }
    };
}
// Default (all-target) suites.
default_test_with_compare_mode!(Ui {
    path: "src/test/ui",
    mode: "ui",
    suite: "ui",
    compare_mode: "nll"
});
default_test!(CompileFail {
    path: "src/test/compile-fail",
    mode: "compile-fail",
    suite: "compile-fail"
});
default_test!(RunPassValgrind {
    path: "src/test/run-pass-valgrind",
    mode: "run-pass-valgrind",
    suite: "run-pass-valgrind"
});
default_test!(MirOpt { path: "src/test/mir-opt", mode: "mir-opt", suite: "mir-opt" });
default_test!(Codegen { path: "src/test/codegen", mode: "codegen", suite: "codegen" });
default_test!(CodegenUnits {
    path: "src/test/codegen-units",
    mode: "codegen-units",
    suite: "codegen-units"
});
default_test!(Incremental {
    path: "src/test/incremental",
    mode: "incremental",
    suite: "incremental"
});
default_test!(Debuginfo { path: "src/test/debuginfo", mode: "debuginfo", suite: "debuginfo" });
// Host-only suites.
host_test!(UiFullDeps { path: "src/test/ui-fulldeps", mode: "ui", suite: "ui-fulldeps" });
host_test!(Rustdoc { path: "src/test/rustdoc", mode: "rustdoc", suite: "rustdoc" });
host_test!(Pretty { path: "src/test/pretty", mode: "pretty", suite: "pretty" });
// Opt-in (non-default) pretty-printing pass over the valgrind suite.
test!(RunPassValgrindPretty {
    path: "src/test/run-pass-valgrind/pretty",
    mode: "pretty",
    suite: "run-pass-valgrind",
    default: false,
    host: true
});
default_test!(RunMake { path: "src/test/run-make", mode: "run-make", suite: "run-make" });
host_test!(RunMakeFullDeps {
    path: "src/test/run-make-fulldeps",
    mode: "run-make",
    suite: "run-make-fulldeps"
});
default_test!(Assembly { path: "src/test/assembly", mode: "assembly", suite: "assembly" });
/// Shared parameters for one invocation of the `compiletest` tool; the
/// `test!` macro family funnels every declared suite through this step.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct Compiletest {
    compiler: Compiler,
    target: Interned<String>,
    /// compiletest `--mode` value, e.g. "ui" or "run-make".
    mode: &'static str,
    /// compiletest suite name, e.g. "ui-fulldeps".
    suite: &'static str,
    /// Suite directory relative to the source root.
    path: &'static str,
    /// Optional `--compare-mode` to re-run the suite under.
    compare_mode: Option<&'static str>,
}
impl Step for Compiletest {
    type Output = ();
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        // Never selected directly from the command line; only reached via
        // the `test_definitions!`-generated steps.
        run.never()
    }
    /// Executes the `compiletest` tool to run a suite of tests.
    ///
    /// Compiles all tests with `compiler` for `target` with the specified
    /// compiletest `mode` and `suite` arguments. For example `mode` can be
    /// "run-pass" or `suite` can be something like `debuginfo`.
    fn run(self, builder: &Builder<'_>) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let suite = self.suite;
        // Path for test suite
        let suite_path = self.path;
        // Skip codegen tests if they aren't enabled in configuration.
        if !builder.config.codegen_tests && suite == "codegen" {
            return;
        }
        // --- Phase 1: build everything the suite depends on. ---
        if suite == "debuginfo" {
            builder
                .ensure(dist::DebuggerScripts { sysroot: builder.sysroot(compiler), host: target });
        }
        if suite.ends_with("fulldeps") {
            builder.ensure(compile::Rustc { compiler, target });
        }
        builder.ensure(compile::Std { compiler, target });
        // ensure that `libproc_macro` is available on the host.
        builder.ensure(compile::Std { compiler, target: compiler.host });
        // Also provide `rust_test_helpers` for the host.
        builder.ensure(native::TestHelpers { target: compiler.host });
        // As well as the target, except for plain wasm32, which can't build it
        if !target.contains("wasm32") || target.contains("emscripten") {
            builder.ensure(native::TestHelpers { target });
        }
        builder.ensure(RemoteCopyLibs { compiler, target });
        // --- Phase 2: assemble the compiletest command line. ---
        let mut cmd = builder.tool_cmd(Tool::Compiletest);
        // compiletest currently has... a lot of arguments, so let's just pass all
        // of them!
        cmd.arg("--compile-lib-path").arg(builder.rustc_libdir(compiler));
        cmd.arg("--run-lib-path").arg(builder.sysroot_libdir(compiler, target));
        cmd.arg("--rustc-path").arg(builder.rustc(compiler));
        let is_rustdoc = suite.ends_with("rustdoc-ui") || suite.ends_with("rustdoc-js");
        // Avoid depending on rustdoc when we don't need it.
        if mode == "rustdoc"
            || (mode == "run-make" && suite.ends_with("fulldeps"))
            || (mode == "ui" && is_rustdoc)
            || mode == "js-doc-test"
        {
            cmd.arg("--rustdoc-path").arg(builder.rustdoc(compiler));
        }
        cmd.arg("--src-base").arg(builder.src.join("src/test").join(suite));
        cmd.arg("--build-base").arg(testdir(builder, compiler.host).join(suite));
        cmd.arg("--stage-id").arg(format!("stage{}-{}", compiler.stage, target));
        cmd.arg("--mode").arg(mode);
        cmd.arg("--target").arg(target);
        cmd.arg("--host").arg(&*compiler.host);
        cmd.arg("--llvm-filecheck").arg(builder.llvm_filecheck(builder.config.build));
        if builder.config.cmd.bless() {
            cmd.arg("--bless");
        }
        // CLI-selected compare mode wins; otherwise fall back to the suite's
        // declared one when `test_compare_mode` is configured. Only passed to
        // the second compiletest run at the bottom of this function.
        let compare_mode =
            builder.config.cmd.compare_mode().or_else(|| {
                if builder.config.test_compare_mode { self.compare_mode } else { None }
            });
        if let Some(ref pass) = builder.config.cmd.pass() {
            cmd.arg("--pass");
            cmd.arg(pass);
        }
        if let Some(ref nodejs) = builder.config.nodejs {
            cmd.arg("--nodejs").arg(nodejs);
        }
        // --- Phase 3: rustc flags for host and target compilations. ---
        let mut flags = if is_rustdoc { Vec::new() } else { vec!["-Crpath".to_string()] };
        if !is_rustdoc {
            if builder.config.rust_optimize_tests {
                flags.push("-O".to_string());
            }
        }
        flags.push(format!("-Cdebuginfo={}", builder.config.rust_debuginfo_level_tests));
        flags.push("-Zunstable-options".to_string());
        flags.push(builder.config.cmd.rustc_args().join(" "));
        // Don't use LLD here since we want to test that rustc finds and uses a linker by itself.
        if let Some(linker) = builder.linker(target, false) {
            cmd.arg("--linker").arg(linker);
        }
        let mut hostflags = flags.clone();
        hostflags.push(format!("-Lnative={}", builder.test_helpers_out(compiler.host).display()));
        cmd.arg("--host-rustcflags").arg(hostflags.join(" "));
        let mut targetflags = flags;
        targetflags.push(format!("-Lnative={}", builder.test_helpers_out(target).display()));
        cmd.arg("--target-rustcflags").arg(targetflags.join(" "));
        cmd.arg("--docck-python").arg(builder.python());
        if builder.config.build.ends_with("apple-darwin") {
            // Force /usr/bin/python3 on macOS for LLDB tests because we're loading the
            // LLDB plugin's compiled module which only works with the system python
            // (namely not Homebrew-installed python)
            cmd.arg("--lldb-python").arg("/usr/bin/python3");
        } else {
            cmd.arg("--lldb-python").arg(builder.python());
        }
        // --- Phase 4: probe optional external tools (gdb, lldb, clang). ---
        if let Some(ref gdb) = builder.config.gdb {
            cmd.arg("--gdb").arg(gdb);
        }
        // Helper: run a command and take the first line of its stdout.
        let run = |cmd: &mut Command| {
            cmd.output().map(|output| {
                String::from_utf8_lossy(&output.stdout)
                    .lines()
                    .next()
                    .unwrap_or_else(|| panic!("{:?} failed {:?}", cmd, output))
                    .to_string()
            })
        };
        let lldb_exe = "lldb";
        let lldb_version = Command::new(lldb_exe)
            .arg("--version")
            .output()
            .map(|output| String::from_utf8_lossy(&output.stdout).to_string())
            .ok();
        if let Some(ref vers) = lldb_version {
            cmd.arg("--lldb-version").arg(vers);
            let lldb_python_dir = run(Command::new(lldb_exe).arg("-P")).ok();
            if let Some(ref dir) = lldb_python_dir {
                cmd.arg("--lldb-python-dir").arg(dir);
            }
        }
        if util::forcing_clang_based_tests() {
            let clang_exe = builder.llvm_out(target).join("bin").join("clang");
            cmd.arg("--run-clang-based-tests-with").arg(clang_exe);
        }
        // --- Phase 5: translate user-supplied paths into test filters. ---
        // Get paths from cmd args
        let paths = match &builder.config.cmd {
            Subcommand::Test { ref paths, .. } => &paths[..],
            _ => &[],
        };
        // Get test-args by stripping suite path
        let mut test_args: Vec<&str> = paths
            .iter()
            .map(|p| match p.strip_prefix(".") {
                Ok(path) => path,
                Err(_) => p,
            })
            .filter(|p| p.starts_with(suite_path) && (p.is_dir() || p.is_file()))
            .filter_map(|p| {
                // Since test suite paths are themselves directories, if we don't
                // specify a directory or file, we'll get an empty string here
                // (the result of the test suite directory without its suite prefix).
                // Therefore, we need to filter these out, as only the first --test-args
                // flag is respected, so providing an empty --test-args conflicts with
                // any following it.
                match p.strip_prefix(suite_path).ok().and_then(|p| p.to_str()) {
                    Some(s) if s != "" => Some(s),
                    _ => None,
                }
            })
            .collect();
        test_args.append(&mut builder.config.cmd.test_args());
        cmd.args(&test_args);
        if builder.is_verbose() {
            cmd.arg("--verbose");
        }
        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }
        // --- Phase 6: LLVM-dependent configuration. ---
        if builder.config.llvm_enabled() {
            let llvm_config = builder.ensure(native::Llvm { target: builder.config.build });
            if !builder.config.dry_run {
                let llvm_version = output(Command::new(&llvm_config).arg("--version"));
                // Remove trailing newline from llvm-config output.
                let llvm_version = llvm_version.trim_end();
                cmd.arg("--llvm-version").arg(llvm_version);
            }
            if !builder.is_rust_llvm(target) {
                cmd.arg("--system-llvm");
            }
            // Tests that use compiler libraries may inherit the `-lLLVM` link
            // requirement, but the `-L` library path is not propagated across
            // separate compilations. We can add LLVM's library path to the
            // platform-specific environment variable as a workaround.
            if !builder.config.dry_run && suite.ends_with("fulldeps") {
                let llvm_libdir = output(Command::new(&llvm_config).arg("--libdir"));
                add_link_lib_path(vec![llvm_libdir.trim().into()], &mut cmd);
            }
            // Only pass correct values for these flags for the `run-make` suite as it
            // requires that a C++ compiler was configured which isn't always the case.
            if !builder.config.dry_run && suite == "run-make-fulldeps" {
                let llvm_components = output(Command::new(&llvm_config).arg("--components"));
                cmd.arg("--cc")
                    .arg(builder.cc(target))
                    .arg("--cxx")
                    .arg(builder.cxx(target).unwrap())
                    .arg("--cflags")
                    .arg(builder.cflags(target, GitRepo::Rustc).join(" "))
                    .arg("--llvm-components")
                    .arg(llvm_components.trim());
                if let Some(ar) = builder.ar(target) {
                    cmd.arg("--ar").arg(ar);
                }
                // The llvm/bin directory contains many useful cross-platform
                // tools. Pass the path to run-make tests so they can use them.
                let llvm_bin_path = llvm_config
                    .parent()
                    .expect("Expected llvm-config to be contained in directory");
                assert!(llvm_bin_path.is_dir());
                cmd.arg("--llvm-bin-dir").arg(llvm_bin_path);
                // If LLD is available, add it to the PATH
                if builder.config.lld_enabled {
                    let lld_install_root =
                        builder.ensure(native::Lld { target: builder.config.build });
                    let lld_bin_path = lld_install_root.join("bin");
                    let old_path = env::var_os("PATH").unwrap_or_default();
                    let new_path = env::join_paths(
                        std::iter::once(lld_bin_path).chain(env::split_paths(&old_path)),
                    )
                    .expect("Could not add LLD bin path to PATH");
                    cmd.env("PATH", new_path);
                }
            }
        }
        // compiletest requires these flags to be present on every invocation;
        // pass empty values for suites that don't use them.
        if suite != "run-make-fulldeps" {
            cmd.arg("--cc")
                .arg("")
                .arg("--cxx")
                .arg("")
                .arg("--cflags")
                .arg("")
                .arg("--llvm-components")
                .arg("");
        }
        if builder.remote_tested(target) {
            cmd.arg("--remote-test-client").arg(builder.tool_exe(Tool::RemoteTestClient));
        }
        // --- Phase 7: environment and final invocation(s). ---
        // Running a C compiler on MSVC requires a few env vars to be set, to be
        // sure to set them here.
        //
        // Note that if we encounter `PATH` we make sure to append to our own `PATH`
        // rather than stomp over it.
        if target.contains("msvc") {
            for &(ref k, ref v) in builder.cc[&target].env() {
                if k != "PATH" {
                    cmd.env(k, v);
                }
            }
        }
        cmd.env("RUSTC_BOOTSTRAP", "1");
        builder.add_rust_test_threads(&mut cmd);
        if builder.config.sanitizers {
            cmd.env("RUSTC_SANITIZER_SUPPORT", "1");
        }
        if builder.config.profiler {
            cmd.env("RUSTC_PROFILER_SUPPORT", "1");
        }
        let tmp = builder.out.join("tmp");
        // NOTE(review): other call sites use `t!(..)` for fs errors; plain
        // `unwrap()` here loses the path from the panic message.
        std::fs::create_dir_all(&tmp).unwrap();
        cmd.env("RUST_TEST_TMPDIR", tmp);
        cmd.arg("--adb-path").arg("adb");
        cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR);
        if target.contains("android") {
            // Assume that cc for this target comes from the android sysroot
            cmd.arg("--android-cross-path")
                .arg(builder.cc(target).parent().unwrap().parent().unwrap());
        } else {
            cmd.arg("--android-cross-path").arg("");
        }
        if builder.config.cmd.rustfix_coverage() {
            cmd.arg("--rustfix-coverage");
        }
        builder.ci_env.force_coloring_in_ci(&mut cmd);
        builder.info(&format!(
            "Check compiletest suite={} mode={} ({} -> {})",
            suite, mode, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cmd);
        // Optionally re-run the whole suite under the compare mode.
        if let Some(compare_mode) = compare_mode {
            cmd.arg("--compare-mode").arg(compare_mode);
            builder.info(&format!(
                "Check compiletest suite={} mode={} compare_mode={} ({} -> {})",
                suite, mode, compare_mode, &compiler.host, target
            ));
            let _time = util::timeit(&builder);
            try_run(builder, &mut cmd);
        }
    }
}
/// Parameters for doc-testing one book under `src/doc`.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct BookTest {
    compiler: Compiler,
    /// Book directory relative to the source root.
    path: PathBuf,
    /// Toolstate name used when reporting external-book results.
    name: &'static str,
    /// True for submodule ("external") books tested via rustbook/mdbook;
    /// false for in-tree docs tested via plain `rustdoc --test`.
    is_ext_doc: bool,
}
impl Step for BookTest {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.never()
    }
    /// Runs the documentation tests for a book in `src/doc`.
    ///
    /// This uses the `rustdoc` that sits next to `compiler`.
    fn run(self, builder: &Builder<'_>) {
        // External docs are different from local because:
        // - Some books need pre-processing by mdbook before being tested.
        // - They need to save their state to toolstate.
        // - They are only tested on the "checktools" builders.
        //
        // The local docs are tested by default, and we don't want to pay the
        // cost of building mdbook, so they use `rustdoc --test` directly.
        // Also, the unstable book is special because SUMMARY.md is generated,
        // so it is easier to just run `rustdoc` on its files.
        if !self.is_ext_doc {
            self.run_local_doc(builder);
        } else {
            self.run_ext_doc(builder);
        }
    }
}
impl BookTest {
    /// This runs the equivalent of `mdbook test` (via the rustbook wrapper)
    /// which in turn runs `rustdoc --test` on each file in the book.
    fn run_ext_doc(self, builder: &Builder<'_>) {
        let compiler = self.compiler;
        builder.ensure(compile::Std { compiler, target: compiler.host });
        // mdbook just executes a binary named "rustdoc", so we need to update
        // PATH so that it points to our rustdoc.
        let rustdoc_dir = {
            let mut p = builder.rustdoc(compiler);
            p.pop();
            p
        };
        let old_path = env::var_os("PATH").unwrap_or_default();
        let new_path = env::join_paths(iter::once(rustdoc_dir).chain(env::split_paths(&old_path)))
            .expect("could not add rustdoc to PATH");
        let mut rustbook_cmd = builder.tool_cmd(Tool::Rustbook);
        rustbook_cmd.env("PATH", new_path).arg("test").arg(builder.src.join(&self.path));
        builder.add_rust_test_threads(&mut rustbook_cmd);
        builder.info(&format!("Testing rustbook {}", self.path.display()));
        let _time = util::timeit(&builder);
        // Record pass/fail in toolstate rather than aborting the build.
        builder.save_toolstate(
            self.name,
            if try_run(builder, &mut rustbook_cmd) {
                ToolState::TestPass
            } else {
                ToolState::TestFail
            },
        );
    }
    /// This runs `rustdoc --test` on all `.md` files in the path.
    fn run_local_doc(self, builder: &Builder<'_>) {
        let compiler = self.compiler;
        builder.ensure(compile::Std { compiler, target: compiler.host });
        // Walk the book directory and collect every markdown file. Discovery
        // order doesn't matter: the list is sorted before testing.
        let _time = util::timeit(&builder);
        let mut pending = vec![builder.src.join(self.path)];
        let mut files = Vec::new();
        while let Some(entry) = pending.pop() {
            if entry.is_dir() {
                pending.extend(t!(entry.read_dir()).map(|e| t!(e).path()));
            } else if entry.extension().and_then(|s| s.to_str()) == Some("md") {
                files.push(entry);
            }
        }
        files.sort();
        for file in files {
            markdown_test(builder, compiler, &file);
        }
    }
}
/// Generates one `Step` per book that delegates to `BookTest`.
/// `default=true` marks in-tree books (tested with plain `rustdoc --test`);
/// `default=false` marks external/submodule books (tested via rustbook,
/// hence `is_ext_doc: !$default` below).
macro_rules! test_book {
    ($($name:ident, $path:expr, $book_name:expr, default=$default:expr;)+) => {
        $(
            #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
            pub struct $name {
                compiler: Compiler,
            }
            impl Step for $name {
                type Output = ();
                const DEFAULT: bool = $default;
                const ONLY_HOSTS: bool = true;
                fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
                    run.path($path)
                }
                fn make_run(run: RunConfig<'_>) {
                    run.builder.ensure($name {
                        compiler: run.builder.compiler(run.builder.top_stage, run.host),
                    });
                }
                fn run(self, builder: &Builder<'_>) {
                    builder.ensure(BookTest {
                        compiler: self.compiler,
                        path: PathBuf::from($path),
                        name: $book_name,
                        is_ext_doc: !$default,
                    });
                }
            }
        )+
    }
}
// All doc-testable books; `default=false` entries are external submodules
// whose results are tracked via toolstate.
test_book!(
    Nomicon, "src/doc/nomicon", "nomicon", default=false;
    Reference, "src/doc/reference", "reference", default=false;
    RustdocBook, "src/doc/rustdoc", "rustdoc", default=true;
    RustcBook, "src/doc/rustc", "rustc", default=true;
    RustByExample, "src/doc/rust-by-example", "rust-by-example", default=false;
    EmbeddedBook, "src/doc/embedded-book", "embedded-book", default=false;
    TheBook, "src/doc/book", "book", default=false;
    UnstableBook, "src/doc/unstable-book", "unstable-book", default=true;
    EditionGuide, "src/doc/edition-guide", "edition-guide", default=false;
);
/// Test step that doc-tests the generated compiler error index.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ErrorIndex {
    compiler: Compiler,
}
impl Step for ErrorIndex {
type Output = ();
const DEFAULT: bool = true;
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.path("src/tools/error_index_generator")
}
fn make_run(run: RunConfig<'_>) {
run.builder
.ensure(ErrorIndex { compiler: run.builder.compiler(run.builder.top_stage, run.host) });
}
/// Runs the error index generator tool to execute the tests located in the error
/// index.
///
/// The `error_index_generator` tool lives in `src/tools` and is used to
/// generate a markdown file from the error indexes of the code base which is
/// then passed to `rustdoc --test`.
fn run(self, builder: &Builder<'_>) {
let compiler = self.compiler;
builder.ensure(compile::Std { compiler, target: compiler.host });
let dir = testdir(builder, compiler.host);
t!(fs::create_dir_all(&dir));
let output = dir.join("error-index.md");
let mut tool = tool::ErrorIndex::command(
builder,
builder.compiler(compiler.stage, builder.config.build),
);
tool.arg("markdown").arg(&output).env("CFG_BUILD", &builder.config.build);
builder.info(&format!("Testing error-index stage{}", compiler.stage));
let _time = util::timeit(&builder);
builder.run_quiet(&mut tool);
markdown_test(builder, compiler, &output);
}
}
/// Runs `rustdoc --test` on a single markdown file.
///
/// Files containing no code fences (```` ``` ````) are skipped entirely and
/// count as success. Returns whether the rustdoc invocation passed (always
/// `true` for the skip case).
fn markdown_test(builder: &Builder<'_>, compiler: Compiler, markdown: &Path) -> bool {
    // Fast path: nothing to doctest if the file has no code blocks at all.
    match fs::read_to_string(markdown) {
        Ok(contents) if !contents.contains("```") => return true,
        _ => {}
    }

    builder.info(&format!("doc tests for: {}", markdown.display()));

    let mut cmd = builder.rustdoc_cmd(compiler);
    builder.add_rust_test_threads(&mut cmd);
    cmd.arg("--test").arg(markdown).env("RUSTC_BOOTSTRAP", "1");

    let joined_args = builder.config.cmd.test_args().join(" ");
    cmd.arg("--test-args").arg(joined_args);

    if builder.config.verbose_tests {
        try_run(builder, &mut cmd)
    } else {
        try_run_quiet(builder, &mut cmd)
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct RustcGuide;

impl Step for RustcGuide {
    type Output = ();
    const DEFAULT: bool = false;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/doc/rustc-dev-guide")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(RustcGuide);
    }

    /// Link-checks the rustc dev guide with `rustbook linkcheck` and records
    /// the pass/fail outcome in the toolstate repository.
    fn run(self, builder: &Builder<'_>) {
        let guide_src = builder.src.join("src/doc/rustc-dev-guide");
        let mut linkcheck = builder.tool_cmd(Tool::Rustbook);
        linkcheck.arg("linkcheck").arg(&guide_src);

        let passed = try_run(builder, &mut linkcheck);
        let toolstate = if passed { ToolState::TestPass } else { ToolState::TestFail };
        builder.save_toolstate("rustc-dev-guide", toolstate);
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateLibrustc {
    compiler: Compiler,
    target: Interned<String>,
    test_kind: TestKind,
    krate: Interned<String>,
}

impl Step for CrateLibrustc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.krate("rustc-main")
    }

    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        // Dispatch one step per compiler crate whose local path matches the
        // path the user asked for.
        for krate in builder.in_tree_crates("rustc-main") {
            if run.path.ends_with(&krate.path) {
                let test_kind = builder.kind.into();

                builder.ensure(CrateLibrustc {
                    compiler,
                    target: run.target,
                    test_kind,
                    krate: krate.name,
                });
            }
        }
    }

    /// Thin wrapper: delegates to the generic `Crate` step in `Mode::Rustc`.
    fn run(self, builder: &Builder<'_>) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Rustc,
            test_kind: self.test_kind,
            krate: self.krate,
        });
    }
}
// Runs `cargo test` for the sanitizer runtime crates, which are deliberately
// excluded from the default `Crate` step (see `Crate::should_run`).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateNotDefault {
    compiler: Compiler,
    target: Interned<String>,
    test_kind: TestKind,
    krate: &'static str,
}

impl Step for CrateNotDefault {
    type Output = ();

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/librustc_asan")
            .path("src/librustc_lsan")
            .path("src/librustc_msan")
            .path("src/librustc_tsan")
    }

    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);
        let test_kind = builder.kind.into();

        builder.ensure(CrateNotDefault {
            compiler,
            target: run.target,
            test_kind,
            // Map the requested path back to a crate name; any other path
            // reaching here is a dispatch bug, hence the panic.
            krate: match run.path {
                _ if run.path.ends_with("src/librustc_asan") => "rustc_asan",
                _ if run.path.ends_with("src/librustc_lsan") => "rustc_lsan",
                _ if run.path.ends_with("src/librustc_msan") => "rustc_msan",
                _ if run.path.ends_with("src/librustc_tsan") => "rustc_tsan",
                _ => panic!("unexpected path {:?}", run.path),
            },
        });
    }

    /// Delegates to the generic `Crate` step in `Mode::Std`.
    fn run(self, builder: &Builder<'_>) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Std,
            test_kind: self.test_kind,
            krate: INTERNER.intern_str(self.krate),
        });
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Crate {
    pub compiler: Compiler,
    pub target: Interned<String>,
    pub mode: Mode,
    pub test_kind: TestKind,
    pub krate: Interned<String>,
}

impl Step for Crate {
    type Output = ();
    const DEFAULT: bool = true;

    fn should_run(mut run: ShouldRun<'_>) -> ShouldRun<'_> {
        let builder = run.builder;
        // Register a path for every in-tree crate reachable from `test`,
        // except the sanitizer runtime crates (`rustc_*san`), which are
        // handled by `CrateNotDefault`.
        for krate in run.builder.in_tree_crates("test") {
            if !(krate.name.starts_with("rustc_") && krate.name.ends_with("san")) {
                run = run.path(krate.local_path(&builder).to_str().unwrap());
            }
        }
        run
    }

    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        let make = |mode: Mode, krate: &CargoCrate| {
            let test_kind = builder.kind.into();

            builder.ensure(Crate {
                compiler,
                target: run.target,
                mode,
                test_kind,
                krate: krate.name,
            });
        };

        // Only std-mode crates are dispatched here; compiler crates go
        // through `CrateLibrustc::make_run` instead.
        for krate in builder.in_tree_crates("test") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Std, krate);
            }
        }
    }

    /// Runs all unit tests plus documentation tests for a given crate defined
    /// by a `Cargo.toml` (single manifest)
    ///
    /// This is what runs tests for crates like the standard library, compiler, etc.
    /// It essentially is the driver for running `cargo test`.
    ///
    /// Currently this runs all tests for a DAG by passing a bunch of `-p foo`
    /// arguments, and those arguments are discovered from `cargo metadata`.
    fn run(self, builder: &Builder<'_>) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let test_kind = self.test_kind;
        let krate = self.krate;

        builder.ensure(compile::Std { compiler, target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        // If we're not doing a full bootstrap but we're testing a stage2
        // version of libstd, then what we're actually testing is the libstd
        // produced in stage1. Reflect that here by updating the compiler that
        // we're working with automatically.
        let compiler = builder.compiler_for(compiler.stage, compiler.host, target);

        let mut cargo = builder.cargo(compiler, mode, target, test_kind.subcommand());
        match mode {
            Mode::Std => {
                compile::std_cargo(builder, target, compiler.stage, &mut cargo);
            }
            Mode::Rustc => {
                builder.ensure(compile::Rustc { compiler, target });
                compile::rustc_cargo(builder, &mut cargo, target);
            }
            _ => panic!("can only test libraries"),
        };

        // Build up the base `cargo test` command.
        //
        // Pass in some standard flags then iterate over the graph we've discovered
        // in `cargo metadata` with the maps above and figure out what `-p`
        // arguments need to get passed.
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }
        match builder.doc_tests {
            DocTests::Only => {
                cargo.arg("--doc");
            }
            DocTests::No => {
                // Everything except doctests.
                cargo.args(&["--lib", "--bins", "--examples", "--tests", "--benches"]);
            }
            DocTests::Yes => {}
        }

        cargo.arg("-p").arg(krate);

        // The tests are going to run with the *target* libraries, so we need to
        // ensure that those libraries show up in the LD_LIBRARY_PATH equivalent.
        //
        // Note that to run the compiler we need to run with the *host* libraries,
        // but our wrapper scripts arrange for that to be the case anyway.
        let mut dylib_path = dylib_path();
        dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target)));
        cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        // Cross/remote targets need a custom cargo test runner: node for
        // emscripten/wasm32, or the remote-test-client for remote devices.
        if target.contains("emscripten") {
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                builder.config.nodejs.as_ref().expect("nodejs not configured"),
            );
        } else if target.starts_with("wasm32") {
            let node = builder.config.nodejs.as_ref().expect("nodejs not configured");
            let runner =
                format!("{} {}/src/etc/wasm32-shim.js", node.display(), builder.src.display());
            cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), &runner);
        } else if builder.remote_tested(target) {
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                format!("{} run 0", builder.tool_exe(Tool::RemoteTestClient).display()),
            );
        }

        builder.info(&format!(
            "{} {} stage{} ({} -> {})",
            test_kind, krate, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cargo.into());
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateRustdoc {
    host: Interned<String>,
    test_kind: TestKind,
}

impl Step for CrateRustdoc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.paths(&["src/librustdoc", "src/tools/rustdoc"])
    }

    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;

        let test_kind = builder.kind.into();

        builder.ensure(CrateRustdoc { host: run.host, test_kind });
    }

    /// Runs `cargo test` (or `bench`) for the rustdoc crate itself.
    fn run(self, builder: &Builder<'_>) {
        let test_kind = self.test_kind;

        let compiler = builder.compiler(builder.top_stage, self.host);
        let target = compiler.host;
        // rustdoc links against rustc, so the compiler must be built first.
        builder.ensure(compile::Rustc { compiler, target });

        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            target,
            test_kind.subcommand(),
            "src/tools/rustdoc",
            SourceType::InTree,
            &[],
        );
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        cargo.arg("-p").arg("rustdoc:0.0.0");

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if self.host.contains("musl") {
            // NOTE(review): the surrounding single quotes are passed through
            // literally as part of the argument — looks intentional for flag
            // forwarding on musl hosts, but confirm before relying on it.
            cargo.arg("'-Ctarget-feature=-crt-static'");
        }

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        builder.info(&format!(
            "{} rustdoc stage{} ({} -> {})",
            test_kind, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);

        try_run(builder, &mut cargo.into());
    }
}
/// Some test suites are run inside emulators or on remote devices, and most
/// of our test binaries are linked dynamically which means we need to ship
/// the standard library and such to the emulator ahead of time. This step
/// represents this and is a dependency of all test suites.
///
/// Most of the time this is a no-op. For some steps such as shipping data to
/// QEMU we have to build our own tools so we've got conditional dependencies
/// on those programs as well. Note that the remote test client is built for
/// the build target (us) and the server is built for the target.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct RemoteCopyLibs {
    compiler: Compiler,
    target: Interned<String>,
}

impl Step for RemoteCopyLibs {
    type Output = ();

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        // Never selected by path; only reached via `ensure` from other steps.
        run.never()
    }

    fn run(self, builder: &Builder<'_>) {
        let compiler = self.compiler;
        let target = self.target;
        // No-op unless this target is actually tested remotely.
        if !builder.remote_tested(target) {
            return;
        }

        builder.ensure(compile::Std { compiler, target });

        builder.info(&format!("REMOTE copy libs to emulator ({})", target));
        t!(fs::create_dir_all(builder.out.join("tmp")));

        // The server runs on the device, so it's built at stage 0 for `target`.
        let server =
            builder.ensure(tool::RemoteTestServer { compiler: compiler.with_stage(0), target });

        // Spawn the emulator and wait for it to come online
        let tool = builder.tool_exe(Tool::RemoteTestClient);
        let mut cmd = Command::new(&tool);
        cmd.arg("spawn-emulator").arg(target).arg(&server).arg(builder.out.join("tmp"));
        if let Some(rootfs) = builder.qemu_rootfs(target) {
            cmd.arg(rootfs);
        }
        builder.run(&mut cmd);

        // Push all our dylibs to the emulator
        for f in t!(builder.sysroot_libdir(compiler, target).read_dir()) {
            let f = t!(f);
            let name = f.file_name().into_string().unwrap();
            if util::is_dylib(&name) {
                builder.run(Command::new(&tool).arg("push").arg(f.path()));
            }
        }
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Distcheck;

impl Step for Distcheck {
    type Output = ();

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("distcheck")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Distcheck);
    }

    /// Runs "distcheck", a 'make check' from a tarball
    fn run(self, builder: &Builder<'_>) {
        builder.info("Distcheck");
        let dir = builder.out.join("tmp").join("distcheck");
        // Start from a clean extraction directory each time.
        let _ = fs::remove_dir_all(&dir);
        t!(fs::create_dir_all(&dir));

        // Guarantee that these are built before we begin running.
        builder.ensure(dist::PlainSourceTarball);
        builder.ensure(dist::Src);

        // Extract the plain-source tarball and run `./configure && make check`
        // inside it, i.e. build from the tarball like a downstream packager.
        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::PlainSourceTarball))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);
        builder.run(
            Command::new("./configure")
                .args(&builder.config.configure_args)
                .arg("--enable-vendor")
                .current_dir(&dir),
        );
        builder.run(
            Command::new(build_helper::make(&builder.config.build)).arg("check").current_dir(&dir),
        );

        // Now make sure that rust-src has all of libstd's dependencies
        builder.info("Distcheck rust-src");
        let dir = builder.out.join("tmp").join("distcheck-src");
        let _ = fs::remove_dir_all(&dir);
        t!(fs::create_dir_all(&dir));

        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::Src))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);

        // `generate-lockfile` fails if any libstd dependency is missing from
        // the rust-src component.
        let toml = dir.join("rust-src/lib/rustlib/src/rust/src/libstd/Cargo.toml");
        builder.run(
            Command::new(&builder.initial_cargo)
                .arg("generate-lockfile")
                .arg("--manifest-path")
                .arg(&toml)
                .current_dir(&dir),
        );
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Bootstrap;

impl Step for Bootstrap {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    /// Tests the build system itself.
    fn run(self, builder: &Builder<'_>) {
        let mut cmd = Command::new(&builder.initial_cargo);
        cmd.arg("test")
            .current_dir(builder.src.join("src/bootstrap"))
            .env("RUSTFLAGS", "-Cdebuginfo=2")
            .env("CARGO_TARGET_DIR", builder.out.join("bootstrap"))
            .env("RUSTC_BOOTSTRAP", "1")
            .env("RUSTC", &builder.initial_rustc);
        if let Some(flags) = option_env!("RUSTFLAGS") {
            // Use the same rustc flags for testing as for "normal" compilation,
            // so that Cargo doesn’t recompile the entire dependency graph every time:
            // https://github.com/rust-lang/rust/issues/49215
            //
            // NOTE(review): this replaces (does not append to) the
            // `-Cdebuginfo=2` set above whenever RUSTFLAGS was present at
            // bootstrap's own compile time — confirm that is intended.
            cmd.env("RUSTFLAGS", flags);
        }
        if !builder.fail_fast {
            cmd.arg("--no-fail-fast");
        }
        cmd.arg("--").args(&builder.config.cmd.test_args());
        // rustbuild tests are racy on directory creation so just run them one at a time.
        // Since there's not many this shouldn't be a problem.
        cmd.arg("--test-threads=1");
        try_run(builder, &mut cmd);
    }

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/bootstrap")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Bootstrap);
    }
}
Rollup merge of #73267 - ehuss:cargotest-this-cargo, r=Mark-Simulacrum
Use the built cargo for cargotest.
cargotest was using the beta (bootstrap) cargo. This changes it so that it will use the locally built cargo. This is intended to provide a sort of smoke test to ensure Cargo is functional. This *shouldn't* have any real impact on the CI build time. The cargotest job also happens to run cargo's testsuite, so it should already be building cargo.
Note: This will fail until #73266 is merged.
//! Implementation of the test-related targets of the build system.
//!
//! This file implements the various regression test suites that we execute on
//! our CI.
use std::env;
use std::ffi::OsString;
use std::fmt;
use std::fs;
use std::iter;
use std::path::{Path, PathBuf};
use std::process::Command;
use build_helper::{self, output, t};
use crate::builder::{Builder, Compiler, Kind, RunConfig, ShouldRun, Step};
use crate::cache::{Interned, INTERNER};
use crate::compile;
use crate::dist;
use crate::flags::Subcommand;
use crate::native;
use crate::tool::{self, SourceType, Tool};
use crate::toolstate::ToolState;
use crate::util::{self, add_link_lib_path, dylib_path, dylib_path_var};
use crate::Crate as CargoCrate;
use crate::{envify, DocTests, GitRepo, Mode};
// Scratch directory used on Android devices when tests run under adb.
const ADB_TEST_DIR: &str = "/data/tmp/work";

/// The two modes of the test runner; tests or benchmarks.
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, PartialOrd, Ord)]
pub enum TestKind {
    /// Run `cargo test`.
    Test,
    /// Run `cargo bench`.
    Bench,
}
impl From<Kind> for TestKind {
    /// Maps a build `Kind` onto a `TestKind`.
    ///
    /// Panics for any kind other than `Test` or `Bench`, since nothing else
    /// corresponds to a cargo test invocation.
    fn from(kind: Kind) -> Self {
        match kind {
            Kind::Bench => TestKind::Bench,
            Kind::Test => TestKind::Test,
            other => panic!("unexpected kind in crate: {:?}", other),
        }
    }
}
impl TestKind {
    /// The cargo subcommand corresponding to this kind of test run.
    fn subcommand(self) -> &'static str {
        if self == TestKind::Test { "test" } else { "bench" }
    }
}
impl fmt::Display for TestKind {
    /// Human-readable progress verb ("Testing" / "Benchmarking") used in
    /// status messages.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let verb = match self {
            TestKind::Test => "Testing",
            TestKind::Bench => "Benchmarking",
        };
        f.write_str(verb)
    }
}
/// Runs `cmd`, tolerating failure when `--no-fail-fast` is in effect.
///
/// With fail-fast enabled the command runs via `builder.run` (aborting the
/// build on failure) and `true` is returned. Otherwise a failure is recorded
/// as a delayed failure and `false` is returned so the caller can continue.
fn try_run(builder: &Builder<'_>, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run(cmd);
        return true;
    }

    let ok = builder.try_run(cmd);
    if !ok {
        builder.delayed_failures.borrow_mut().push(format!("{:?}", cmd));
    }
    ok
}
/// Quiet variant of `try_run`: identical failure handling, but the command's
/// output is suppressed unless it fails.
fn try_run_quiet(builder: &Builder<'_>, cmd: &mut Command) -> bool {
    if builder.fail_fast {
        builder.run_quiet(cmd);
        return true;
    }

    let ok = builder.try_run_quiet(cmd);
    if !ok {
        builder.delayed_failures.borrow_mut().push(format!("{:?}", cmd));
    }
    ok
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Linkcheck {
    host: Interned<String>,
}

impl Step for Linkcheck {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        let builder = run.builder;
        run.path("src/tools/linkchecker").default_condition(builder.config.docs)
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Linkcheck { host: run.target });
    }

    /// Runs the `linkchecker` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will verify the validity of all our links in the
    /// documentation to ensure we don't have a bunch of dead ones.
    fn run(self, builder: &Builder<'_>) {
        let host = self.host;
        builder.info(&format!("Linkcheck ({})", host));

        // Build all the documentation the checker will walk.
        builder.default_doc(None);

        let _time = util::timeit(&builder);
        let docs_dir = builder.out.join(host).join("doc");
        try_run(builder, builder.tool_cmd(Tool::Linkchecker).arg(docs_dir));
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargotest {
    stage: u32,
    host: Interned<String>,
}

impl Step for Cargotest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/cargotest")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Cargotest { stage: run.builder.top_stage, host: run.target });
    }

    /// Runs the `cargotest` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will check out a few Rust projects and run `cargo
    /// test` to ensure that we don't regress the test suites there.
    fn run(self, builder: &Builder<'_>) {
        let compiler = builder.compiler(self.stage, self.host);
        builder.ensure(compile::Rustc { compiler, target: compiler.host });
        // Use the locally built cargo (not the bootstrap one) as a smoke test
        // of the freshly built toolchain.
        let cargo = builder.ensure(tool::Cargo { compiler, target: compiler.host });

        // Note that this is a short, cryptic, and not scoped directory name. This
        // is currently to minimize the length of path on Windows where we otherwise
        // quickly run into path name limit constraints.
        let out_dir = builder.out.join("ct");
        t!(fs::create_dir_all(&out_dir));

        let _time = util::timeit(&builder);
        let mut cmd = builder.tool_cmd(Tool::CargoTest);
        try_run(
            builder,
            cmd.arg(&cargo)
                .arg(&out_dir)
                .env("RUSTC", builder.rustc(compiler))
                .env("RUSTDOC", builder.rustdoc(compiler)),
        );
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargo {
    stage: u32,
    host: Interned<String>,
}

impl Step for Cargo {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/cargo")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Cargo { stage: run.builder.top_stage, host: run.target });
    }

    /// Runs `cargo test` for `cargo` packaged with Rust.
    fn run(self, builder: &Builder<'_>) {
        let compiler = builder.compiler(self.stage, self.host);

        builder.ensure(tool::Cargo { compiler, target: self.host });
        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            self.host,
            "test",
            "src/tools/cargo",
            SourceType::Submodule,
            &[],
        );

        if !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        // Don't run cross-compile tests, we may not have cross-compiled libstd libs
        // available.
        cargo.env("CFG_DISABLE_CROSS_TESTS", "1");
        // Disable a test that has issues with mingw.
        cargo.env("CARGO_TEST_DISABLE_GIT_CLI", "1");
        // Forcibly disable tests using nightly features since any changes to
        // those features won't be able to land.
        cargo.env("CARGO_TEST_DISABLE_NIGHTLY", "1");

        // Cargo's suite spawns `rustc` by name, so PATH must resolve to the
        // right sysroot binary (see `path_for_cargo`).
        cargo.env("PATH", &path_for_cargo(builder, compiler));

        try_run(builder, &mut cargo.into());
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rls {
    stage: u32,
    host: Interned<String>,
}

impl Step for Rls {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/rls")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Rls { stage: run.builder.top_stage, host: run.target });
    }

    /// Runs `cargo test` for the rls.
    fn run(self, builder: &Builder<'_>) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // Building the tool may fail (e.g. broken submodule); in that case we
        // skip the tests rather than aborting the whole test run.
        let build_result =
            builder.ensure(tool::Rls { compiler, target: self.host, extra_features: Vec::new() });
        if build_result.is_none() {
            eprintln!("failed to test rls: could not build");
            return;
        }

        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            host,
            "test",
            "src/tools/rls",
            SourceType::Submodule,
            &[],
        );

        builder.add_rustc_lib_path(compiler, &mut cargo);
        cargo.arg("--").args(builder.config.cmd.test_args());

        // Only record a toolstate pass when the tests actually succeeded.
        if try_run(builder, &mut cargo.into()) {
            builder.save_toolstate("rls", ToolState::TestPass);
        }
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rustfmt {
    stage: u32,
    host: Interned<String>,
}

impl Step for Rustfmt {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/rustfmt")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Rustfmt { stage: run.builder.top_stage, host: run.target });
    }

    /// Runs `cargo test` for rustfmt.
    fn run(self, builder: &Builder<'_>) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // Skip (don't abort) if the tool itself failed to build.
        let build_result = builder.ensure(tool::Rustfmt {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if build_result.is_none() {
            eprintln!("failed to test rustfmt: could not build");
            return;
        }

        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            host,
            "test",
            "src/tools/rustfmt",
            SourceType::Submodule,
            &[],
        );

        // rustfmt's suite writes scratch files into this directory.
        let dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&dir));
        cargo.env("RUSTFMT_TEST_DIR", dir);

        builder.add_rustc_lib_path(compiler, &mut cargo);

        if try_run(builder, &mut cargo.into()) {
            builder.save_toolstate("rustfmt", ToolState::TestPass);
        }
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Miri {
    stage: u32,
    host: Interned<String>,
}

impl Step for Miri {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/miri")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Miri { stage: run.builder.top_stage, host: run.target });
    }

    /// Runs `cargo test` for miri.
    ///
    /// Proceeds in phases: install xargo, run `cargo miri setup`, query the
    /// sysroot that setup produced, then run miri's own test suite against it.
    /// Each phase bails out early (without failing the build) on error.
    fn run(self, builder: &Builder<'_>) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        let miri =
            builder.ensure(tool::Miri { compiler, target: self.host, extra_features: Vec::new() });
        let cargo_miri = builder.ensure(tool::CargoMiri {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let (Some(miri), Some(_cargo_miri)) = (miri, cargo_miri) {
            // Phase 1: install xargo (used by `cargo miri setup` to build a
            // libstd with the flags miri needs).
            let mut cargo = builder.cargo(compiler, Mode::ToolRustc, host, "install");
            cargo.arg("xargo");
            // Configure `cargo install` path. cargo adds a `bin/`.
            cargo.env("CARGO_INSTALL_ROOT", &builder.out);

            let mut cargo = Command::from(cargo);
            if !try_run(builder, &mut cargo) {
                return;
            }

            // # Run `cargo miri setup`.
            let mut cargo = tool::prepare_tool_cargo(
                builder,
                compiler,
                Mode::ToolRustc,
                host,
                "run",
                "src/tools/miri/cargo-miri",
                SourceType::Submodule,
                &[],
            );
            cargo.arg("--").arg("miri").arg("setup");

            // Tell `cargo miri setup` where to find the sources.
            cargo.env("XARGO_RUST_SRC", builder.src.join("src"));
            // Tell it where to find Miri.
            cargo.env("MIRI", &miri);
            // Debug things.
            cargo.env("RUST_BACKTRACE", "1");
            // Overwrite bootstrap's `rustc` wrapper overwriting our flags.
            cargo.env("RUSTC_DEBUG_ASSERTIONS", "true");
            // Let cargo-miri know where xargo ended up.
            cargo.env("XARGO_CHECK", builder.out.join("bin").join("xargo-check"));

            let mut cargo = Command::from(cargo);
            if !try_run(builder, &mut cargo) {
                return;
            }

            // # Determine where Miri put its sysroot.
            // To this end, we run `cargo miri setup --print-sysroot` and capture the output.
            // (We do this separately from the above so that when the setup actually
            // happens we get some output.)
            // We re-use the `cargo` from above.
            cargo.arg("--print-sysroot");

            // FIXME: Is there a way in which we can re-use the usual `run` helpers?
            let miri_sysroot = if builder.config.dry_run {
                String::new()
            } else {
                builder.verbose(&format!("running: {:?}", cargo));
                let out = cargo
                    .output()
                    .expect("We already ran `cargo miri setup` before and that worked");
                assert!(out.status.success(), "`cargo miri setup` returned with non-0 exit code");
                // Output is "<sysroot>\n".
                let stdout = String::from_utf8(out.stdout)
                    .expect("`cargo miri setup` stdout is not valid UTF-8");
                let sysroot = stdout.trim_end();
                builder.verbose(&format!("`cargo miri setup --print-sysroot` said: {:?}", sysroot));
                sysroot.to_owned()
            };

            // # Run `cargo test`.
            let mut cargo = tool::prepare_tool_cargo(
                builder,
                compiler,
                Mode::ToolRustc,
                host,
                "test",
                "src/tools/miri",
                SourceType::Submodule,
                &[],
            );

            // miri tests need to know about the stage sysroot
            cargo.env("MIRI_SYSROOT", miri_sysroot);
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            cargo.env("MIRI", miri);

            cargo.arg("--").args(builder.config.cmd.test_args());

            builder.add_rustc_lib_path(compiler, &mut cargo);

            if !try_run(builder, &mut cargo.into()) {
                return;
            }

            // # Done!
            builder.save_toolstate("miri", ToolState::TestPass);
        } else {
            eprintln!("failed to test miri: could not build");
        }
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CompiletestTest {
    host: Interned<String>,
}

impl Step for CompiletestTest {
    type Output = ();

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/compiletest")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(CompiletestTest { host: run.target });
    }

    /// Runs `cargo test` for compiletest itself. Compiletest is a bootstrap
    /// tool, so stage 0 (the beta compiler) is used.
    fn run(self, builder: &Builder<'_>) {
        let host = self.host;
        let stage0 = builder.compiler(0, host);
        let cargo = tool::prepare_tool_cargo(
            builder,
            stage0,
            Mode::ToolBootstrap,
            host,
            "test",
            "src/tools/compiletest",
            SourceType::InTree,
            &[],
        );

        try_run(builder, &mut cargo.into());
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Clippy {
    stage: u32,
    host: Interned<String>,
}

impl Step for Clippy {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = false;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/clippy")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Clippy { stage: run.builder.top_stage, host: run.target });
    }

    /// Runs `cargo test` for clippy.
    fn run(self, builder: &Builder<'_>) {
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        let clippy = builder
            .ensure(tool::Clippy { compiler, target: self.host, extra_features: Vec::new() })
            .expect("in-tree tool");
        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            host,
            "test",
            "src/tools/clippy",
            SourceType::InTree,
            &[],
        );

        // clippy tests need to know about the stage sysroot
        cargo.env("SYSROOT", builder.sysroot(compiler));
        cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
        cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
        let host_libs = builder.stage_out(compiler, Mode::ToolRustc).join(builder.cargo_dir());
        let target_libs =
            builder.stage_out(compiler, Mode::ToolRustc).join(&self.host).join(builder.cargo_dir());
        cargo.env("HOST_LIBS", host_libs);
        cargo.env("TARGET_LIBS", target_libs);
        // clippy tests need to find the driver
        cargo.env("CLIPPY_DRIVER_PATH", clippy);

        cargo.arg("--").args(builder.config.cmd.test_args());

        builder.add_rustc_lib_path(compiler, &mut cargo);

        // Unlike the toolstate-tracked tools above, this uses `builder.run`
        // (not `try_run`): a clippy test failure aborts unconditionally.
        builder.run(&mut cargo.into());
    }
}
/// Builds a `PATH` value whose first entry is the sysroot `bin` directory of
/// `compiler`, followed by the current `PATH`.
fn path_for_cargo(builder: &Builder<'_>, compiler: Compiler) -> OsString {
    // Configure PATH to find the right rustc. NB. we have to use PATH
    // and not RUSTC because the Cargo test suite has tests that will
    // fail if rustc is not spelled `rustc`.
    let path = builder.sysroot(compiler).join("bin");
    let old_path = env::var_os("PATH").unwrap_or_default();
    // `join_paths` only fails if an entry contains the separator character;
    // give the panic a useful message rather than the former empty `expect("")`.
    env::join_paths(iter::once(path).chain(env::split_paths(&old_path)))
        .expect("sysroot bin path contains an invalid PATH character")
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocTheme {
    pub compiler: Compiler,
}

impl Step for RustdocTheme {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/rustdoc-themes")
    }

    fn make_run(run: RunConfig<'_>) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);

        run.builder.ensure(RustdocTheme { compiler });
    }

    /// Runs the rustdoc-themes tool against the bundled rustdoc themes to
    /// check that each theme's CSS parses consistently.
    fn run(self, builder: &Builder<'_>) {
        // NOTE(review): this points at bootstrap's rustdoc *wrapper* binary
        // under a hardcoded `bootstrap/debug` path rather than using
        // `builder.rustdoc(...)` — confirm this is still the intended hook.
        let rustdoc = builder.out.join("bootstrap/debug/rustdoc");
        let mut cmd = builder.tool_cmd(Tool::RustdocTheme);
        cmd.arg(rustdoc.to_str().unwrap())
            .arg(builder.src.join("src/librustdoc/html/static/themes").to_str().unwrap())
            .env("RUSTC_STAGE", self.compiler.stage.to_string())
            .env("RUSTC_SYSROOT", builder.sysroot(self.compiler))
            .env("RUSTDOC_LIBDIR", builder.sysroot_libdir(self.compiler, self.compiler.host))
            .env("CFG_RELEASE_CHANNEL", &builder.config.channel)
            .env("RUSTDOC_REAL", builder.rustdoc(self.compiler))
            .env("RUSTDOC_CRATE_VERSION", builder.rust_version())
            .env("RUSTC_BOOTSTRAP", "1");
        if let Some(linker) = builder.linker(self.compiler.host, true) {
            cmd.env("RUSTC_TARGET_LINKER", linker);
        }
        try_run(builder, &mut cmd);
    }
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocJSStd {
    pub target: Interned<String>,
}

impl Step for RustdocJSStd {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/test/rustdoc-js-std")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(RustdocJSStd { target: run.target });
    }

    /// Runs the rustdoc-js tester against the generated std documentation.
    /// Requires nodejs; without it the suite is skipped with a note.
    fn run(self, builder: &Builder<'_>) {
        let nodejs = match &builder.config.nodejs {
            Some(nodejs) => nodejs,
            None => {
                builder.info("No nodejs found, skipping \"src/test/rustdoc-js-std\" tests");
                return;
            }
        };

        let mut command = Command::new(nodejs);
        command
            .arg(builder.src.join("src/tools/rustdoc-js/tester.js"))
            .arg("--crate-name")
            .arg("std")
            .arg("--resource-suffix")
            .arg(crate::channel::CFG_RELEASE_NUM)
            .arg("--doc-folder")
            .arg(builder.doc_out(self.target))
            .arg("--test-folder")
            .arg(builder.src.join("src/test/rustdoc-js-std"));

        // The std docs must exist before the tester can inspect them.
        builder.ensure(crate::doc::Std { target: self.target, stage: builder.top_stage });
        builder.run(&mut command);
    }
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocJSNotStd {
    pub host: Interned<String>,
    pub target: Interned<String>,
    pub compiler: Compiler,
}

impl Step for RustdocJSNotStd {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/test/rustdoc-js")
    }

    fn make_run(run: RunConfig<'_>) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);
        run.builder.ensure(RustdocJSNotStd { host: run.host, target: run.target, compiler });
    }

    /// Runs the non-std rustdoc-js suite via compiletest's `js-doc-test`
    /// mode. Requires nodejs; without it the suite is skipped with a note.
    fn run(self, builder: &Builder<'_>) {
        if builder.config.nodejs.is_none() {
            builder.info("No nodejs found, skipping \"src/test/rustdoc-js\" tests");
            return;
        }

        builder.ensure(Compiletest {
            compiler: self.compiler,
            target: self.target,
            mode: "js-doc-test",
            suite: "rustdoc-js",
            path: "src/test/rustdoc-js",
            compare_mode: None,
        });
    }
}
/// Runs the `rustdoc-ui` suite: UI-style tests executed through compiletest's
/// "ui" mode against the rustdoc-ui test directory.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocUi {
    pub host: Interned<String>,
    pub target: Interned<String>,
    pub compiler: Compiler,
}
impl Step for RustdocUi {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/test/rustdoc-ui")
    }

    fn make_run(run: RunConfig<'_>) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);
        run.builder.ensure(RustdocUi { host: run.host, target: run.target, compiler });
    }

    /// Thin wrapper: rustdoc-ui is just the "ui" mode pointed at the
    /// rustdoc-ui test directory.
    fn run(self, builder: &Builder<'_>) {
        let RustdocUi { compiler, target, .. } = self;
        builder.ensure(Compiletest {
            compiler,
            target,
            mode: "ui",
            suite: "rustdoc-ui",
            path: "src/test/rustdoc-ui",
            compare_mode: None,
        })
    }
}
/// Step that runs the in-tree `tidy` style checker (see the `Step` impl).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Tidy;
impl Step for Tidy {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    /// Runs the `tidy` tool.
    ///
    /// This tool in `src/tools` checks up on various bits and pieces of style and
    /// otherwise just implements a few lint-like checks that are specific to the
    /// compiler itself.
    ///
    /// Once tidy passes, this step also runs `fmt --check` if tests are being run
    /// for the `dev` or `nightly` channels.
    fn run(self, builder: &Builder<'_>) {
        let mut tidy = builder.tool_cmd(Tool::Tidy);
        tidy.arg(builder.src.join("src")).arg(&builder.initial_cargo);
        if builder.is_verbose() {
            tidy.arg("--verbose");
        }

        builder.info("tidy check");
        try_run(builder, &mut tidy);

        // Formatting is only enforced on the channels developers actually
        // build from source.
        let channel = &builder.config.channel;
        if channel == "dev" || channel == "nightly" {
            builder.info("fmt check");
            crate::format::format(&builder.build, !builder.config.cmd.bless());
        }
    }

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/tidy")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Tidy);
    }
}
/// Step that verifies the expanded GitHub Actions config is up to date with
/// its YAML-anchor source (see the `Step` impl).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ExpandYamlAnchors;
impl Step for ExpandYamlAnchors {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    /// Ensure the `generate-ci-config` tool was run locally.
    ///
    /// The tool in `src/tools` reads the CI definition in `src/ci/builders.yml` and generates the
    /// appropriate configuration for all our CI providers. This step ensures the tool was called
    /// by the user before committing CI changes.
    fn run(self, builder: &Builder<'_>) {
        builder.info("Ensuring the YAML anchors in the GitHub Actions config were expanded");
        let mut check = builder.tool_cmd(Tool::ExpandYamlAnchors);
        check.arg("check").arg(&builder.src);
        try_run(builder, &mut check);
    }

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/expand-yaml-anchors")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(ExpandYamlAnchors);
    }
}
/// Directory under the build output where test artifacts for `host` live.
fn testdir(builder: &Builder<'_>, host: Interned<String>) -> PathBuf {
    let mut dir = builder.out.join(host);
    dir.push("test");
    dir
}
/// Declares a compiletest suite step that runs by default and is not
/// host-only (`default: true`, `host: false`).
macro_rules! default_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: false });
    };
}
/// Like `default_test!`, but additionally records a `--compare-mode` under
/// which the suite is run a second time (see `Compiletest::run`).
macro_rules! default_test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr,
                   compare_mode: $compare_mode:expr }) => {
        test_with_compare_mode!($name {
            path: $path,
            mode: $mode,
            suite: $suite,
            default: true,
            host: false,
            compare_mode: $compare_mode
        });
    };
}
/// Declares a compiletest suite step that runs by default but only on host
/// targets (`default: true`, `host: true`).
macro_rules! host_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: true });
    };
}
/// Declares a compiletest suite step with every flag spelled out and no
/// compare mode; forwards to `test_definitions!`.
macro_rules! test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr }) => {
        test_definitions!($name {
            path: $path,
            mode: $mode,
            suite: $suite,
            default: $default,
            host: $host,
            compare_mode: None
        });
    };
}
/// Declares a compiletest suite step with every flag spelled out plus a
/// compare mode; forwards to `test_definitions!` wrapping it in `Some`.
macro_rules! test_with_compare_mode {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr,
                   host: $host:expr, compare_mode: $compare_mode:expr }) => {
        test_definitions!($name {
            path: $path,
            mode: $mode,
            suite: $suite,
            default: $default,
            host: $host,
            compare_mode: Some($compare_mode)
        });
    };
}
/// Expands to a step struct plus its `Step` impl for one compiletest suite.
/// The generated step resolves the top-stage compiler for the selected host
/// and simply delegates to the shared `Compiletest` driver step.
macro_rules! test_definitions {
    ($name:ident {
        path: $path:expr,
        mode: $mode:expr,
        suite: $suite:expr,
        default: $default:expr,
        host: $host:expr,
        compare_mode: $compare_mode:expr
    }) => {
        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
        pub struct $name {
            pub compiler: Compiler,
            pub target: Interned<String>,
        }

        impl Step for $name {
            type Output = ();
            const DEFAULT: bool = $default;
            const ONLY_HOSTS: bool = $host;

            fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
                // Match any path inside the suite directory, not just its root.
                run.suite_path($path)
            }

            fn make_run(run: RunConfig<'_>) {
                let compiler = run.builder.compiler(run.builder.top_stage, run.host);

                run.builder.ensure($name { compiler, target: run.target });
            }

            fn run(self, builder: &Builder<'_>) {
                builder.ensure(Compiletest {
                    compiler: self.compiler,
                    target: self.target,
                    mode: $mode,
                    suite: $suite,
                    path: $path,
                    compare_mode: $compare_mode,
                })
            }
        }
    };
}
// Concrete compiletest suites. `default_test!` suites are part of the default
// test set; `host_test!` suites only run on host targets; plain `test!` lets
// each flag be spelled out explicitly.
default_test_with_compare_mode!(Ui {
    path: "src/test/ui",
    mode: "ui",
    suite: "ui",
    compare_mode: "nll"
});

default_test!(CompileFail {
    path: "src/test/compile-fail",
    mode: "compile-fail",
    suite: "compile-fail"
});

default_test!(RunPassValgrind {
    path: "src/test/run-pass-valgrind",
    mode: "run-pass-valgrind",
    suite: "run-pass-valgrind"
});

default_test!(MirOpt { path: "src/test/mir-opt", mode: "mir-opt", suite: "mir-opt" });

default_test!(Codegen { path: "src/test/codegen", mode: "codegen", suite: "codegen" });

default_test!(CodegenUnits {
    path: "src/test/codegen-units",
    mode: "codegen-units",
    suite: "codegen-units"
});

default_test!(Incremental {
    path: "src/test/incremental",
    mode: "incremental",
    suite: "incremental"
});

default_test!(Debuginfo { path: "src/test/debuginfo", mode: "debuginfo", suite: "debuginfo" });

// Suites below link against the compiler (fulldeps) or otherwise only make
// sense on the host.
host_test!(UiFullDeps { path: "src/test/ui-fulldeps", mode: "ui", suite: "ui-fulldeps" });

host_test!(Rustdoc { path: "src/test/rustdoc", mode: "rustdoc", suite: "rustdoc" });

host_test!(Pretty { path: "src/test/pretty", mode: "pretty", suite: "pretty" });
test!(RunPassValgrindPretty {
    path: "src/test/run-pass-valgrind/pretty",
    mode: "pretty",
    suite: "run-pass-valgrind",
    default: false,
    host: true
});

default_test!(RunMake { path: "src/test/run-make", mode: "run-make", suite: "run-make" });

host_test!(RunMakeFullDeps {
    path: "src/test/run-make-fulldeps",
    mode: "run-make",
    suite: "run-make-fulldeps"
});

default_test!(Assembly { path: "src/test/assembly", mode: "assembly", suite: "assembly" });
/// Shared driver step for all compiletest-based suites. Never selected from
/// the command line directly (`should_run` returns `never()`); the generated
/// per-suite steps dispatch here.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct Compiletest {
    // Compiler used to build the tests (and whose libraries they run against).
    compiler: Compiler,
    // Target triple the tests are compiled for.
    target: Interned<String>,
    // Compiletest `--mode` argument (e.g. "ui", "run-make").
    mode: &'static str,
    // Suite name; also the directory name under `src/test`.
    suite: &'static str,
    // Suite path, used to strip user-supplied paths down to `--test-args`.
    path: &'static str,
    // Optional `--compare-mode` under which the suite is run a second time.
    compare_mode: Option<&'static str>,
}
impl Step for Compiletest {
    type Output = ();

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        // Only reachable as a dependency of the generated per-suite steps.
        run.never()
    }

    /// Executes the `compiletest` tool to run a suite of tests.
    ///
    /// Compiles all tests with `compiler` for `target` with the specified
    /// compiletest `mode` and `suite` arguments. For example `mode` can be
    /// "run-pass" or `suite` can be something like `debuginfo`.
    fn run(self, builder: &Builder<'_>) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let suite = self.suite;

        // Path for test suite
        let suite_path = self.path;

        // Skip codegen tests if they aren't enabled in configuration.
        if !builder.config.codegen_tests && suite == "codegen" {
            return;
        }

        // Debugger helper scripts must be staged into the sysroot before the
        // debuginfo suite can use them.
        if suite == "debuginfo" {
            builder
                .ensure(dist::DebuggerScripts { sysroot: builder.sysroot(compiler), host: target });
        }

        // "fulldeps" suites link against the compiler itself.
        if suite.ends_with("fulldeps") {
            builder.ensure(compile::Rustc { compiler, target });
        }

        builder.ensure(compile::Std { compiler, target });
        // ensure that `libproc_macro` is available on the host.
        builder.ensure(compile::Std { compiler, target: compiler.host });

        // Also provide `rust_test_helpers` for the host.
        builder.ensure(native::TestHelpers { target: compiler.host });

        // As well as the target, except for plain wasm32, which can't build it
        if !target.contains("wasm32") || target.contains("emscripten") {
            builder.ensure(native::TestHelpers { target });
        }

        builder.ensure(RemoteCopyLibs { compiler, target });

        let mut cmd = builder.tool_cmd(Tool::Compiletest);

        // compiletest currently has... a lot of arguments, so let's just pass all
        // of them!
        cmd.arg("--compile-lib-path").arg(builder.rustc_libdir(compiler));
        cmd.arg("--run-lib-path").arg(builder.sysroot_libdir(compiler, target));
        cmd.arg("--rustc-path").arg(builder.rustc(compiler));

        let is_rustdoc = suite.ends_with("rustdoc-ui") || suite.ends_with("rustdoc-js");

        // Avoid depending on rustdoc when we don't need it.
        if mode == "rustdoc"
            || (mode == "run-make" && suite.ends_with("fulldeps"))
            || (mode == "ui" && is_rustdoc)
            || mode == "js-doc-test"
        {
            cmd.arg("--rustdoc-path").arg(builder.rustdoc(compiler));
        }

        cmd.arg("--src-base").arg(builder.src.join("src/test").join(suite));
        cmd.arg("--build-base").arg(testdir(builder, compiler.host).join(suite));
        cmd.arg("--stage-id").arg(format!("stage{}-{}", compiler.stage, target));
        cmd.arg("--mode").arg(mode);
        cmd.arg("--target").arg(target);
        cmd.arg("--host").arg(&*compiler.host);
        cmd.arg("--llvm-filecheck").arg(builder.llvm_filecheck(builder.config.build));

        if builder.config.cmd.bless() {
            cmd.arg("--bless");
        }

        // An explicit `--compare-mode` on the command line wins over the
        // suite's default compare mode, which itself only applies when
        // `test_compare_mode` is enabled in the configuration.
        let compare_mode =
            builder.config.cmd.compare_mode().or_else(|| {
                if builder.config.test_compare_mode { self.compare_mode } else { None }
            });

        if let Some(ref pass) = builder.config.cmd.pass() {
            cmd.arg("--pass");
            cmd.arg(pass);
        }

        if let Some(ref nodejs) = builder.config.nodejs {
            cmd.arg("--nodejs").arg(nodejs);
        }

        // rustdoc suites don't get `-Crpath`/`-O`; everything else does.
        let mut flags = if is_rustdoc { Vec::new() } else { vec!["-Crpath".to_string()] };
        if !is_rustdoc {
            if builder.config.rust_optimize_tests {
                flags.push("-O".to_string());
            }
        }
        flags.push(format!("-Cdebuginfo={}", builder.config.rust_debuginfo_level_tests));
        flags.push("-Zunstable-options".to_string());
        flags.push(builder.config.cmd.rustc_args().join(" "));

        // Don't use LLD here since we want to test that rustc finds and uses a linker by itself.
        if let Some(linker) = builder.linker(target, false) {
            cmd.arg("--linker").arg(linker);
        }

        // Host and target compilations each additionally get the path to the
        // matching `rust_test_helpers` output built above.
        let mut hostflags = flags.clone();
        hostflags.push(format!("-Lnative={}", builder.test_helpers_out(compiler.host).display()));
        cmd.arg("--host-rustcflags").arg(hostflags.join(" "));

        let mut targetflags = flags;
        targetflags.push(format!("-Lnative={}", builder.test_helpers_out(target).display()));
        cmd.arg("--target-rustcflags").arg(targetflags.join(" "));

        cmd.arg("--docck-python").arg(builder.python());

        if builder.config.build.ends_with("apple-darwin") {
            // Force /usr/bin/python3 on macOS for LLDB tests because we're loading the
            // LLDB plugin's compiled module which only works with the system python
            // (namely not Homebrew-installed python)
            cmd.arg("--lldb-python").arg("/usr/bin/python3");
        } else {
            cmd.arg("--lldb-python").arg(builder.python());
        }

        if let Some(ref gdb) = builder.config.gdb {
            cmd.arg("--gdb").arg(gdb);
        }

        // Helper: run a command and keep only the first line of its stdout;
        // panics if the command produced no output at all.
        let run = |cmd: &mut Command| {
            cmd.output().map(|output| {
                String::from_utf8_lossy(&output.stdout)
                    .lines()
                    .next()
                    .unwrap_or_else(|| panic!("{:?} failed {:?}", cmd, output))
                    .to_string()
            })
        };

        // Probe an installed `lldb` (if any) for its version and python dir;
        // failures simply leave the flags unset.
        let lldb_exe = "lldb";
        let lldb_version = Command::new(lldb_exe)
            .arg("--version")
            .output()
            .map(|output| String::from_utf8_lossy(&output.stdout).to_string())
            .ok();
        if let Some(ref vers) = lldb_version {
            cmd.arg("--lldb-version").arg(vers);
            let lldb_python_dir = run(Command::new(lldb_exe).arg("-P")).ok();
            if let Some(ref dir) = lldb_python_dir {
                cmd.arg("--lldb-python-dir").arg(dir);
            }
        }

        if util::forcing_clang_based_tests() {
            let clang_exe = builder.llvm_out(target).join("bin").join("clang");
            cmd.arg("--run-clang-based-tests-with").arg(clang_exe);
        }

        // Get paths from cmd args
        let paths = match &builder.config.cmd {
            Subcommand::Test { ref paths, .. } => &paths[..],
            _ => &[],
        };

        // Get test-args by stripping suite path
        let mut test_args: Vec<&str> = paths
            .iter()
            .map(|p| match p.strip_prefix(".") {
                Ok(path) => path,
                Err(_) => p,
            })
            .filter(|p| p.starts_with(suite_path) && (p.is_dir() || p.is_file()))
            .filter_map(|p| {
                // Since test suite paths are themselves directories, if we don't
                // specify a directory or file, we'll get an empty string here
                // (the result of the test suite directory without its suite prefix).
                // Therefore, we need to filter these out, as only the first --test-args
                // flag is respected, so providing an empty --test-args conflicts with
                // any following it.
                match p.strip_prefix(suite_path).ok().and_then(|p| p.to_str()) {
                    Some(s) if s != "" => Some(s),
                    _ => None,
                }
            })
            .collect();

        test_args.append(&mut builder.config.cmd.test_args());

        cmd.args(&test_args);

        if builder.is_verbose() {
            cmd.arg("--verbose");
        }

        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }

        if builder.config.llvm_enabled() {
            let llvm_config = builder.ensure(native::Llvm { target: builder.config.build });
            if !builder.config.dry_run {
                let llvm_version = output(Command::new(&llvm_config).arg("--version"));
                // Remove trailing newline from llvm-config output.
                let llvm_version = llvm_version.trim_end();
                cmd.arg("--llvm-version").arg(llvm_version);
            }
            if !builder.is_rust_llvm(target) {
                cmd.arg("--system-llvm");
            }

            // Tests that use compiler libraries may inherit the `-lLLVM` link
            // requirement, but the `-L` library path is not propagated across
            // separate compilations. We can add LLVM's library path to the
            // platform-specific environment variable as a workaround.
            if !builder.config.dry_run && suite.ends_with("fulldeps") {
                let llvm_libdir = output(Command::new(&llvm_config).arg("--libdir"));
                add_link_lib_path(vec![llvm_libdir.trim().into()], &mut cmd);
            }

            // Only pass correct values for these flags for the `run-make` suite as it
            // requires that a C++ compiler was configured which isn't always the case.
            if !builder.config.dry_run && suite == "run-make-fulldeps" {
                let llvm_components = output(Command::new(&llvm_config).arg("--components"));
                cmd.arg("--cc")
                    .arg(builder.cc(target))
                    .arg("--cxx")
                    .arg(builder.cxx(target).unwrap())
                    .arg("--cflags")
                    .arg(builder.cflags(target, GitRepo::Rustc).join(" "))
                    .arg("--llvm-components")
                    .arg(llvm_components.trim());
                if let Some(ar) = builder.ar(target) {
                    cmd.arg("--ar").arg(ar);
                }

                // The llvm/bin directory contains many useful cross-platform
                // tools. Pass the path to run-make tests so they can use them.
                let llvm_bin_path = llvm_config
                    .parent()
                    .expect("Expected llvm-config to be contained in directory");
                assert!(llvm_bin_path.is_dir());
                cmd.arg("--llvm-bin-dir").arg(llvm_bin_path);

                // If LLD is available, add it to the PATH
                if builder.config.lld_enabled {
                    let lld_install_root =
                        builder.ensure(native::Lld { target: builder.config.build });

                    let lld_bin_path = lld_install_root.join("bin");

                    let old_path = env::var_os("PATH").unwrap_or_default();
                    let new_path = env::join_paths(
                        std::iter::once(lld_bin_path).chain(env::split_paths(&old_path)),
                    )
                    .expect("Could not add LLD bin path to PATH");
                    cmd.env("PATH", new_path);
                }
            }
        }

        // Compiletest requires these flags to be present; outside of
        // run-make-fulldeps there are no meaningful values, so pass empties.
        if suite != "run-make-fulldeps" {
            cmd.arg("--cc")
                .arg("")
                .arg("--cxx")
                .arg("")
                .arg("--cflags")
                .arg("")
                .arg("--llvm-components")
                .arg("");
        }

        if builder.remote_tested(target) {
            cmd.arg("--remote-test-client").arg(builder.tool_exe(Tool::RemoteTestClient));
        }

        // Running a C compiler on MSVC requires a few env vars to be set, to be
        // sure to set them here.
        //
        // Note that if we encounter `PATH` we make sure to append to our own `PATH`
        // rather than stomp over it.
        if target.contains("msvc") {
            for &(ref k, ref v) in builder.cc[&target].env() {
                if k != "PATH" {
                    cmd.env(k, v);
                }
            }
        }
        cmd.env("RUSTC_BOOTSTRAP", "1");
        builder.add_rust_test_threads(&mut cmd);

        if builder.config.sanitizers {
            cmd.env("RUSTC_SANITIZER_SUPPORT", "1");
        }

        if builder.config.profiler {
            cmd.env("RUSTC_PROFILER_SUPPORT", "1");
        }

        let tmp = builder.out.join("tmp");
        std::fs::create_dir_all(&tmp).unwrap();
        cmd.env("RUST_TEST_TMPDIR", tmp);

        cmd.arg("--adb-path").arg("adb");
        cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR);
        if target.contains("android") {
            // Assume that cc for this target comes from the android sysroot
            cmd.arg("--android-cross-path")
                .arg(builder.cc(target).parent().unwrap().parent().unwrap());
        } else {
            cmd.arg("--android-cross-path").arg("");
        }

        if builder.config.cmd.rustfix_coverage() {
            cmd.arg("--rustfix-coverage");
        }

        builder.ci_env.force_coloring_in_ci(&mut cmd);

        builder.info(&format!(
            "Check compiletest suite={} mode={} ({} -> {})",
            suite, mode, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cmd);

        // When a compare mode applies, run the whole suite a second time with
        // `--compare-mode` appended.
        if let Some(compare_mode) = compare_mode {
            cmd.arg("--compare-mode").arg(compare_mode);
            builder.info(&format!(
                "Check compiletest suite={} mode={} compare_mode={} ({} -> {})",
                suite, mode, compare_mode, &compiler.host, target
            ));
            let _time = util::timeit(&builder);
            try_run(builder, &mut cmd);
        }
    }
}
/// Runs doc tests for one book under `src/doc` (see the `Step` impl: external
/// books go through mdbook/rustbook, local books use plain `rustdoc --test`).
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct BookTest {
    compiler: Compiler,
    // Book directory, relative to the source root.
    path: PathBuf,
    // Name used when recording toolstate for external books.
    name: &'static str,
    // True for external books that need mdbook and toolstate handling.
    is_ext_doc: bool,
}
impl Step for BookTest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.never()
    }

    /// Runs the documentation tests for a book in `src/doc`.
    ///
    /// This uses the `rustdoc` that sits next to `compiler`.
    fn run(self, builder: &Builder<'_>) {
        // External docs are different from local because:
        // - Some books need pre-processing by mdbook before being tested.
        // - They need to save their state to toolstate.
        // - They are only tested on the "checktools" builders.
        //
        // The local docs are tested by default, and we don't want to pay the
        // cost of building mdbook, so they use `rustdoc --test` directly.
        // Also, the unstable book is special because SUMMARY.md is generated,
        // so it is easier to just run `rustdoc` on its files.
        match self.is_ext_doc {
            true => self.run_ext_doc(builder),
            false => self.run_local_doc(builder),
        }
    }
}
impl BookTest {
    /// This runs the equivalent of `mdbook test` (via the rustbook wrapper)
    /// which in turn runs `rustdoc --test` on each file in the book.
    fn run_ext_doc(self, builder: &Builder<'_>) {
        let compiler = self.compiler;

        builder.ensure(compile::Std { compiler, target: compiler.host });

        // mdbook just executes a binary named "rustdoc", so we need to update
        // PATH so that it points to our rustdoc.
        let mut rustdoc_path = builder.rustdoc(compiler);
        rustdoc_path.pop();
        let old_path = env::var_os("PATH").unwrap_or_default();
        let new_path = env::join_paths(iter::once(rustdoc_path).chain(env::split_paths(&old_path)))
            .expect("could not add rustdoc to PATH");

        let mut rustbook_cmd = builder.tool_cmd(Tool::Rustbook);
        let path = builder.src.join(&self.path);
        rustbook_cmd.env("PATH", new_path).arg("test").arg(path);
        builder.add_rust_test_threads(&mut rustbook_cmd);
        builder.info(&format!("Testing rustbook {}", self.path.display()));
        let _time = util::timeit(&builder);
        // External books record the outcome in toolstate instead of aborting
        // the build on failure.
        let toolstate = if try_run(builder, &mut rustbook_cmd) {
            ToolState::TestPass
        } else {
            ToolState::TestFail
        };
        builder.save_toolstate(self.name, toolstate);
    }

    /// This runs `rustdoc --test` on all `.md` files in the path.
    fn run_local_doc(self, builder: &Builder<'_>) {
        let compiler = self.compiler;

        builder.ensure(compile::Std { compiler, target: compiler.host });

        // Do a breadth-first traversal of the `src/doc` directory and just run
        // tests for all files that end in `*.md`
        let mut stack = vec![builder.src.join(self.path)];
        let _time = util::timeit(&builder);
        let mut files = Vec::new();
        while let Some(p) = stack.pop() {
            if p.is_dir() {
                stack.extend(t!(p.read_dir()).map(|p| t!(p).path()));
                continue;
            }

            if p.extension().and_then(|s| s.to_str()) != Some("md") {
                continue;
            }

            files.push(p);
        }

        // Sort so tests run in a deterministic order across invocations.
        files.sort();

        for file in files {
            markdown_test(builder, compiler, &file);
        }
    }
}
/// Expands to one step per listed book. `default=true` books are tested
/// in-tree with plain `rustdoc --test`; `default=false` marks external books,
/// which are routed through mdbook and toolstate (note `is_ext_doc: !$default`).
macro_rules! test_book {
    ($($name:ident, $path:expr, $book_name:expr, default=$default:expr;)+) => {
        $(
            #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
            pub struct $name {
                compiler: Compiler,
            }

            impl Step for $name {
                type Output = ();
                const DEFAULT: bool = $default;
                const ONLY_HOSTS: bool = true;

                fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
                    run.path($path)
                }

                fn make_run(run: RunConfig<'_>) {
                    run.builder.ensure($name {
                        compiler: run.builder.compiler(run.builder.top_stage, run.host),
                    });
                }

                fn run(self, builder: &Builder<'_>) {
                    builder.ensure(BookTest {
                        compiler: self.compiler,
                        path: PathBuf::from($path),
                        name: $book_name,
                        is_ext_doc: !$default,
                    });
                }
            }
        )+
    }
}
// Book doc-test steps. `default=false` books are external submodules tested
// via mdbook + toolstate; `default=true` books are in-tree.
test_book!(
    Nomicon, "src/doc/nomicon", "nomicon", default=false;
    Reference, "src/doc/reference", "reference", default=false;
    RustdocBook, "src/doc/rustdoc", "rustdoc", default=true;
    RustcBook, "src/doc/rustc", "rustc", default=true;
    RustByExample, "src/doc/rust-by-example", "rust-by-example", default=false;
    EmbeddedBook, "src/doc/embedded-book", "embedded-book", default=false;
    TheBook, "src/doc/book", "book", default=false;
    UnstableBook, "src/doc/unstable-book", "unstable-book", default=true;
    EditionGuide, "src/doc/edition-guide", "edition-guide", default=false;
);
/// Step that doc-tests the error index: a markdown file generated by the
/// `error_index_generator` tool and fed to `rustdoc --test` (see the impl).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ErrorIndex {
    compiler: Compiler,
}
impl Step for ErrorIndex {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/error_index_generator")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder
            .ensure(ErrorIndex { compiler: run.builder.compiler(run.builder.top_stage, run.host) });
    }

    /// Runs the error index generator tool to execute the tests located in the error
    /// index.
    ///
    /// The `error_index_generator` tool lives in `src/tools` and is used to
    /// generate a markdown file from the error indexes of the code base which is
    /// then passed to `rustdoc --test`.
    fn run(self, builder: &Builder<'_>) {
        let compiler = self.compiler;

        builder.ensure(compile::Std { compiler, target: compiler.host });

        let out_dir = testdir(builder, compiler.host);
        t!(fs::create_dir_all(&out_dir));
        let markdown = out_dir.join("error-index.md");

        // First generate the markdown file from the error indexes...
        let mut generator = tool::ErrorIndex::command(
            builder,
            builder.compiler(compiler.stage, builder.config.build),
        );
        generator.arg("markdown").arg(&markdown).env("CFG_BUILD", &builder.config.build);

        builder.info(&format!("Testing error-index stage{}", compiler.stage));
        let _time = util::timeit(&builder);
        builder.run_quiet(&mut generator);

        // ...then run `rustdoc --test` over the generated file.
        markdown_test(builder, compiler, &markdown);
    }
}
/// Runs `rustdoc --test` on `markdown`, returning whether the run succeeded.
///
/// A readable file with no fenced code blocks is skipped and counts as a
/// trivial pass; an unreadable file still gets handed to rustdoc.
fn markdown_test(builder: &Builder<'_>, compiler: Compiler, markdown: &Path) -> bool {
    let has_fences =
        fs::read_to_string(markdown).map(|contents| contents.contains("```")).unwrap_or(true);
    if !has_fences {
        return true;
    }

    builder.info(&format!("doc tests for: {}", markdown.display()));
    let mut cmd = builder.rustdoc_cmd(compiler);
    builder.add_rust_test_threads(&mut cmd);
    cmd.arg("--test").arg(markdown).env("RUSTC_BOOTSTRAP", "1");
    cmd.arg("--test-args").arg(builder.config.cmd.test_args().join(" "));

    match builder.config.verbose_tests {
        true => try_run(builder, &mut cmd),
        false => try_run_quiet(builder, &mut cmd),
    }
}
/// Step that link-checks the rustc-dev-guide book, recording the result in
/// toolstate (see the `Step` impl).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct RustcGuide;
impl Step for RustcGuide {
    type Output = ();
    const DEFAULT: bool = false;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/doc/rustc-dev-guide")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(RustcGuide);
    }

    /// Runs `rustbook linkcheck` over the rustc-dev-guide and saves the
    /// pass/fail outcome to toolstate instead of failing the build.
    fn run(self, builder: &Builder<'_>) {
        let guide_src = builder.src.join("src/doc/rustc-dev-guide");
        let mut linkcheck = builder.tool_cmd(Tool::Rustbook);
        linkcheck.arg("linkcheck").arg(&guide_src);
        let toolstate = if try_run(builder, &mut linkcheck) {
            ToolState::TestPass
        } else {
            ToolState::TestFail
        };
        builder.save_toolstate("rustc-dev-guide", toolstate);
    }
}
/// Selects a compiler ("rustc-main" DAG) crate for testing; its `run`
/// forwards to the generic `Crate` step with `Mode::Rustc`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateLibrustc {
    compiler: Compiler,
    target: Interned<String>,
    test_kind: TestKind,
    krate: Interned<String>,
}
impl Step for CrateLibrustc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.krate("rustc-main")
    }

    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        // Schedule one run for every in-tree compiler crate whose path
        // matches the user-selected path.
        for krate in builder.in_tree_crates("rustc-main") {
            if !run.path.ends_with(&krate.path) {
                continue;
            }
            builder.ensure(CrateLibrustc {
                compiler,
                target: run.target,
                test_kind: builder.kind.into(),
                krate: krate.name,
            });
        }
    }

    /// Compiler crates are tested through the shared `Crate` step in
    /// `Mode::Rustc`.
    fn run(self, builder: &Builder<'_>) {
        let CrateLibrustc { compiler, target, test_kind, krate } = self;
        builder.ensure(Crate { compiler, target, mode: Mode::Rustc, test_kind, krate });
    }
}
/// Test step for the sanitizer runtime crates (`rustc_*san`), which are not
/// part of the default crate test set; forwards to `Crate` with `Mode::Std`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateNotDefault {
    compiler: Compiler,
    target: Interned<String>,
    test_kind: TestKind,
    // Crate name; interned on demand in `run`.
    krate: &'static str,
}
impl Step for CrateNotDefault {
    type Output = ();

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/librustc_asan")
            .path("src/librustc_lsan")
            .path("src/librustc_msan")
            .path("src/librustc_tsan")
    }

    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        // Map the selected sanitizer runtime path to its crate name.
        let krate = if run.path.ends_with("src/librustc_asan") {
            "rustc_asan"
        } else if run.path.ends_with("src/librustc_lsan") {
            "rustc_lsan"
        } else if run.path.ends_with("src/librustc_msan") {
            "rustc_msan"
        } else if run.path.ends_with("src/librustc_tsan") {
            "rustc_tsan"
        } else {
            panic!("unexpected path {:?}", run.path)
        };
        builder.ensure(CrateNotDefault {
            compiler,
            target: run.target,
            test_kind: builder.kind.into(),
            krate,
        });
    }

    fn run(self, builder: &Builder<'_>) {
        let CrateNotDefault { compiler, target, test_kind, krate } = self;
        builder.ensure(Crate {
            compiler,
            target,
            mode: Mode::Std,
            test_kind,
            krate: INTERNER.intern_str(krate),
        });
    }
}
/// Generic `cargo test` driver for a single in-tree crate (std-style or
/// compiler-style), used by `CrateLibrustc`/`CrateNotDefault` and by direct
/// path selection.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Crate {
    pub compiler: Compiler,
    pub target: Interned<String>,
    // Std vs Rustc; selects the cargo setup in `run` (anything else panics).
    pub mode: Mode,
    // Chooses the cargo subcommand via `test_kind.subcommand()`.
    pub test_kind: TestKind,
    pub krate: Interned<String>,
}
impl Step for Crate {
    type Output = ();
    const DEFAULT: bool = true;

    fn should_run(mut run: ShouldRun<'_>) -> ShouldRun<'_> {
        let builder = run.builder;
        // Register every in-tree crate of the "test" DAG as a selectable path,
        // except the sanitizer runtime crates (rustc_*san), which are handled
        // by `CrateNotDefault` instead.
        for krate in run.builder.in_tree_crates("test") {
            if !(krate.name.starts_with("rustc_") && krate.name.ends_with("san")) {
                run = run.path(krate.local_path(&builder).to_str().unwrap());
            }
        }
        run
    }

    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        let make = |mode: Mode, krate: &CargoCrate| {
            let test_kind = builder.kind.into();

            builder.ensure(Crate {
                compiler,
                target: run.target,
                mode,
                test_kind,
                krate: krate.name,
            });
        };

        for krate in builder.in_tree_crates("test") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Std, krate);
            }
        }
    }

    /// Runs all unit tests plus documentation tests for a given crate defined
    /// by a `Cargo.toml` (single manifest)
    ///
    /// This is what runs tests for crates like the standard library, compiler, etc.
    /// It essentially is the driver for running `cargo test`.
    ///
    /// Currently this runs all tests for a DAG by passing a bunch of `-p foo`
    /// arguments, and those arguments are discovered from `cargo metadata`.
    fn run(self, builder: &Builder<'_>) {
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let test_kind = self.test_kind;
        let krate = self.krate;

        builder.ensure(compile::Std { compiler, target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        // If we're not doing a full bootstrap but we're testing a stage2
        // version of libstd, then what we're actually testing is the libstd
        // produced in stage1. Reflect that here by updating the compiler that
        // we're working with automatically.
        let compiler = builder.compiler_for(compiler.stage, compiler.host, target);

        let mut cargo = builder.cargo(compiler, mode, target, test_kind.subcommand());
        match mode {
            Mode::Std => {
                compile::std_cargo(builder, target, compiler.stage, &mut cargo);
            }
            Mode::Rustc => {
                builder.ensure(compile::Rustc { compiler, target });
                compile::rustc_cargo(builder, &mut cargo, target);
            }
            _ => panic!("can only test libraries"),
        };

        // Build up the base `cargo test` command.
        //
        // Pass in some standard flags then iterate over the graph we've discovered
        // in `cargo metadata` with the maps above and figure out what `-p`
        // arguments need to get passed.
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }
        match builder.doc_tests {
            DocTests::Only => {
                cargo.arg("--doc");
            }
            DocTests::No => {
                cargo.args(&["--lib", "--bins", "--examples", "--tests", "--benches"]);
            }
            DocTests::Yes => {}
        }

        cargo.arg("-p").arg(krate);

        // The tests are going to run with the *target* libraries, so we need to
        // ensure that those libraries show up in the LD_LIBRARY_PATH equivalent.
        //
        // Note that to run the compiler we need to run with the *host* libraries,
        // but our wrapper scripts arrange for that to be the case anyway.
        let mut dylib_path = dylib_path();
        dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target)));
        cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        // Emulated / remote targets need a cargo runner so the test binaries
        // built for `target` can actually be executed.
        if target.contains("emscripten") {
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                builder.config.nodejs.as_ref().expect("nodejs not configured"),
            );
        } else if target.starts_with("wasm32") {
            let node = builder.config.nodejs.as_ref().expect("nodejs not configured");
            let runner =
                format!("{} {}/src/etc/wasm32-shim.js", node.display(), builder.src.display());
            cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), &runner);
        } else if builder.remote_tested(target) {
            cargo.env(
                format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                format!("{} run 0", builder.tool_exe(Tool::RemoteTestClient).display()),
            );
        }

        builder.info(&format!(
            "{} {} stage{} ({} -> {})",
            test_kind, krate, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cargo.into());
    }
}
/// Runs `cargo test` for the `rustdoc` tool crate itself (see the `Step` impl).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateRustdoc {
    host: Interned<String>,
    test_kind: TestKind,
}
impl Step for CrateRustdoc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.paths(&["src/librustdoc", "src/tools/rustdoc"])
    }

    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;

        let test_kind = builder.kind.into();

        builder.ensure(CrateRustdoc { host: run.host, test_kind });
    }

    /// Runs the rustdoc crate's own test suite via `cargo test` (or the
    /// subcommand selected by `test_kind`).
    fn run(self, builder: &Builder<'_>) {
        let test_kind = self.test_kind;

        let compiler = builder.compiler(builder.top_stage, self.host);
        let target = compiler.host;
        // rustdoc links against the compiler, so a full rustc build is needed first.
        builder.ensure(compile::Rustc { compiler, target });

        let mut cargo = tool::prepare_tool_cargo(
            builder,
            compiler,
            Mode::ToolRustc,
            target,
            test_kind.subcommand(),
            "src/tools/rustdoc",
            SourceType::InTree,
            &[],
        );
        if test_kind.subcommand() == "test" && !builder.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        cargo.arg("-p").arg("rustdoc:0.0.0");

        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        // NOTE(review): presumably meant to disable static CRT linking on musl
        // hosts, but the literal single quotes are passed through to cargo as
        // part of the argument -- confirm this flag actually takes effect.
        if self.host.contains("musl") {
            cargo.arg("'-Ctarget-feature=-crt-static'");
        }

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        builder.info(&format!(
            "{} rustdoc stage{} ({} -> {})",
            test_kind, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cargo.into());
    }
}
/// Some test suites are run inside emulators or on remote devices, and most
/// of our test binaries are linked dynamically which means we need to ship
/// the standard library and such to the emulator ahead of time. This step
/// represents this and is a dependency of all test suites.
///
/// Most of the time this is a no-op. For some steps such as shipping data to
/// QEMU we have to build our own tools so we've got conditional dependencies
/// on those programs as well. Note that the remote test client is built for
/// the build target (us) and the server is built for the target.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct RemoteCopyLibs {
    // Compiler whose sysroot dylibs get pushed to the device.
    compiler: Compiler,
    // Remote/emulated target being tested.
    target: Interned<String>,
}
impl Step for RemoteCopyLibs {
type Output = ();
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.never()
}
fn run(self, builder: &Builder<'_>) {
let compiler = self.compiler;
let target = self.target;
if !builder.remote_tested(target) {
return;
}
builder.ensure(compile::Std { compiler, target });
builder.info(&format!("REMOTE copy libs to emulator ({})", target));
t!(fs::create_dir_all(builder.out.join("tmp")));
let server =
builder.ensure(tool::RemoteTestServer { compiler: compiler.with_stage(0), target });
// Spawn the emulator and wait for it to come online
let tool = builder.tool_exe(Tool::RemoteTestClient);
let mut cmd = Command::new(&tool);
cmd.arg("spawn-emulator").arg(target).arg(&server).arg(builder.out.join("tmp"));
if let Some(rootfs) = builder.qemu_rootfs(target) {
cmd.arg(rootfs);
}
builder.run(&mut cmd);
// Push all our dylibs to the emulator
for f in t!(builder.sysroot_libdir(compiler, target).read_dir()) {
let f = t!(f);
let name = f.file_name().into_string().unwrap();
if util::is_dylib(&name) {
builder.run(Command::new(&tool).arg("push").arg(f.path()));
}
}
}
}
/// Sanity-checks the distribution tarballs by building and testing from them.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Distcheck;

impl Step for Distcheck {
    type Output = ();

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("distcheck")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Distcheck);
    }

    /// Runs "distcheck", a 'make check' from a tarball
    fn run(self, builder: &Builder<'_>) {
        builder.info("Distcheck");
        // Work in a scratch directory; wipe any leftovers from a prior run.
        let dir = builder.out.join("tmp").join("distcheck");
        let _ = fs::remove_dir_all(&dir); // ignore "not found"
        t!(fs::create_dir_all(&dir));

        // Guarantee that these are built before we begin running.
        builder.ensure(dist::PlainSourceTarball);
        builder.ensure(dist::Src);

        // Unpack the plain source tarball into the scratch dir ...
        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::PlainSourceTarball))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);
        // ... then run a full `./configure && make check` from it.
        builder.run(
            Command::new("./configure")
                .args(&builder.config.configure_args)
                .arg("--enable-vendor")
                .current_dir(&dir),
        );
        builder.run(
            Command::new(build_helper::make(&builder.config.build)).arg("check").current_dir(&dir),
        );

        // Now make sure that rust-src has all of libstd's dependencies
        builder.info("Distcheck rust-src");
        let dir = builder.out.join("tmp").join("distcheck-src");
        let _ = fs::remove_dir_all(&dir); // ignore "not found"
        t!(fs::create_dir_all(&dir));

        let mut cmd = Command::new("tar");
        cmd.arg("-xzf")
            .arg(builder.ensure(dist::Src))
            .arg("--strip-components=1")
            .current_dir(&dir);
        builder.run(&mut cmd);

        // Generating a lockfile forces a full dependency resolution for
        // libstd's manifest, which fails if anything is missing from rust-src.
        let toml = dir.join("rust-src/lib/rustlib/src/rust/src/libstd/Cargo.toml");
        builder.run(
            Command::new(&builder.initial_cargo)
                .arg("generate-lockfile")
                .arg("--manifest-path")
                .arg(&toml)
                .current_dir(&dir),
        );
    }
}
/// Runs the unit tests of rustbuild (`cargo test` in `src/bootstrap`).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Bootstrap;

impl Step for Bootstrap {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    /// Tests the build system itself.
    fn run(self, builder: &Builder<'_>) {
        // Tests are run with the stage0 (snapshot) cargo/rustc.
        let mut cmd = Command::new(&builder.initial_cargo);
        cmd.arg("test")
            .current_dir(builder.src.join("src/bootstrap"))
            .env("RUSTFLAGS", "-Cdebuginfo=2")
            .env("CARGO_TARGET_DIR", builder.out.join("bootstrap"))
            .env("RUSTC_BOOTSTRAP", "1")
            .env("RUSTC", &builder.initial_rustc);
        if let Some(flags) = option_env!("RUSTFLAGS") {
            // Use the same rustc flags for testing as for "normal" compilation,
            // so that Cargo doesn’t recompile the entire dependency graph every time:
            // https://github.com/rust-lang/rust/issues/49215
            // NOTE(review): this replaces the `-Cdebuginfo=2` set above rather
            // than appending to it — confirm this is intended.
            cmd.env("RUSTFLAGS", flags);
        }
        if !builder.fail_fast {
            cmd.arg("--no-fail-fast");
        }
        cmd.arg("--").args(&builder.config.cmd.test_args());
        // rustbuild tests are racy on directory creation so just run them one at a time.
        // Since there's not many this shouldn't be a problem.
        cmd.arg("--test-threads=1");
        try_run(builder, &mut cmd);
    }

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/bootstrap")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Bootstrap);
    }
}
|
use std::collections::HashSet;
use std::env;
use std::fs;
use std::path::PathBuf;
use std::process::{exit, Command};
use build_helper::t;
use crate::builder::{Builder, Cargo as CargoCommand, RunConfig, ShouldRun, Step};
use crate::channel::GitInfo;
use crate::compile;
use crate::config::TargetSelection;
use crate::toolstate::ToolState;
use crate::util::{add_dylib_path, exe};
use crate::Compiler;
use crate::Mode;
/// Where a tool's sources come from.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum SourceType {
    /// The tool's sources are developed in this repository.
    InTree,
    /// The tool's sources are vendored through a git submodule.
    Submodule,
}
/// Shared step that actually compiles a tool under `src/tools`; concrete
/// tool steps delegate to it via `builder.ensure(ToolBuild { .. })`.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
struct ToolBuild {
    // Compiler used to build the tool.
    compiler: Compiler,
    // Target the tool is built for.
    target: TargetSelection,
    // Executable name of the tool (used to locate cargo's output).
    tool: &'static str,
    // Path to the tool's sources, relative to the source root.
    path: &'static str,
    // One of the `Mode::Tool*` modes; decides which libraries to build first.
    mode: Mode,
    // If true, a failed build records toolstate and returns `None`
    // instead of aborting the whole bootstrap.
    is_optional_tool: bool,
    // In-tree sources vs. a submodule.
    source_type: SourceType,
    // Extra cargo features to enable for this build.
    extra_features: Vec<String>,
}
impl Step for ToolBuild {
    type Output = Option<PathBuf>;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        // Never selected from the command line; always reached through
        // `ensure` from a concrete tool step.
        run.never()
    }

    /// Builds a tool in `src/tools`
    ///
    /// This will build the specified tool with the specified `host` compiler in
    /// `stage` into the normal cargo output directory.
    ///
    /// Returns the path to the installed binary, or `None` when an optional
    /// tool failed to build. Required tools abort the process on failure.
    fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
        let compiler = self.compiler;
        let target = self.target;
        let mut tool = self.tool;
        let path = self.path;
        let is_optional_tool = self.is_optional_tool;

        // Make sure the libraries this tool links against exist first.
        match self.mode {
            Mode::ToolRustc => builder.ensure(compile::Rustc { compiler, target }),
            Mode::ToolStd => builder.ensure(compile::Std { compiler, target }),
            Mode::ToolBootstrap => {} // uses downloaded stage0 compiler libs
            _ => panic!("unexpected Mode for tool build"),
        }

        let cargo = prepare_tool_cargo(
            builder,
            compiler,
            self.mode,
            target,
            "build",
            path,
            self.source_type,
            &self.extra_features,
        );

        builder.info(&format!("Building stage{} tool {} ({})", compiler.stage, tool, target));
        // Stream cargo's JSON messages so we can detect when the same crate
        // was compiled twice with different feature sets.
        let mut duplicates = Vec::new();
        let is_expected = compile::stream_cargo(builder, cargo, vec![], &mut |msg| {
            // Only care about big things like the RLS/Cargo for now
            match tool {
                "rls" | "cargo" | "clippy-driver" | "miri" | "rustfmt" => {}
                _ => return,
            }
            let (id, features, filenames) = match msg {
                compile::CargoMessage::CompilerArtifact {
                    package_id,
                    features,
                    filenames,
                    target: _,
                } => (package_id, features, filenames),
                _ => return,
            };
            let features = features.iter().map(|s| s.to_string()).collect::<Vec<_>>();

            for path in filenames {
                let val = (tool, PathBuf::from(&*path), features.clone());
                // we're only interested in deduplicating rlibs for now
                if val.1.extension().and_then(|s| s.to_str()) != Some("rlib") {
                    continue;
                }

                // Don't worry about compiles that turn out to be host
                // dependencies or build scripts. To skip these we look for
                // anything that goes in `.../release/deps` but *doesn't* go in
                // `$target/release/deps`. This ensure that outputs in
                // `$target/release` are still considered candidates for
                // deduplication.
                if let Some(parent) = val.1.parent() {
                    if parent.ends_with("release/deps") {
                        let maybe_target = parent
                            .parent()
                            .and_then(|p| p.parent())
                            .and_then(|p| p.file_name())
                            .and_then(|p| p.to_str())
                            .unwrap();
                        if maybe_target != &*target.triple {
                            continue;
                        }
                    }
                }

                // Record that we've built an artifact for `id`, and if one was
                // already listed then we need to see if we reused the same
                // artifact or produced a duplicate.
                let mut artifacts = builder.tool_artifacts.borrow_mut();
                let prev_artifacts = artifacts.entry(target).or_default();
                let prev = match prev_artifacts.get(&*id) {
                    Some(prev) => prev,
                    None => {
                        prev_artifacts.insert(id.to_string(), val);
                        continue;
                    }
                };
                if prev.1 == val.1 {
                    return; // same path, same artifact
                }

                // If the paths are different and one of them *isn't* inside of
                // `release/deps`, then it means it's probably in
                // `$target/release`, or it's some final artifact like
                // `libcargo.rlib`. In these situations Cargo probably just
                // copied it up from `$target/release/deps/libcargo-xxxx.rlib`,
                // so if the features are equal we can just skip it.
                let prev_no_hash = prev.1.parent().unwrap().ends_with("release/deps");
                let val_no_hash = val.1.parent().unwrap().ends_with("release/deps");
                if prev.2 == val.2 || !prev_no_hash || !val_no_hash {
                    return;
                }

                // ... and otherwise this looks like we duplicated some sort of
                // compilation, so record it to generate an error later.
                duplicates.push((id.to_string(), val, prev.clone()));
            }
        });

        if is_expected && !duplicates.is_empty() {
            println!(
                "duplicate artifacts found when compiling a tool, this \
                 typically means that something was recompiled because \
                 a transitive dependency has different features activated \
                 than in a previous build:\n"
            );
            println!(
                "the following dependencies are duplicated although they \
                 have the same features enabled:"
            );
            let (same, different): (Vec<_>, Vec<_>) =
                duplicates.into_iter().partition(|(_, cur, prev)| cur.2 == prev.2);
            for (id, cur, prev) in same {
                println!(" {}", id);
                // same features
                println!(" `{}` ({:?})\n `{}` ({:?})", cur.0, cur.1, prev.0, prev.1);
            }
            println!("the following dependencies have different features:");
            for (id, cur, prev) in different {
                println!(" {}", id);
                let cur_features: HashSet<_> = cur.2.into_iter().collect();
                let prev_features: HashSet<_> = prev.2.into_iter().collect();
                println!(
                    " `{}` additionally enabled features {:?} at {:?}",
                    cur.0,
                    &cur_features - &prev_features,
                    cur.1
                );
                println!(
                    " `{}` additionally enabled features {:?} at {:?}",
                    prev.0,
                    &prev_features - &cur_features,
                    prev.1
                );
            }
            println!();
            println!(
                "to fix this you will probably want to edit the local \
                 src/tools/rustc-workspace-hack/Cargo.toml crate, as \
                 that will update the dependency graph to ensure that \
                 these crates all share the same feature set"
            );
            panic!("tools should not compile multiple copies of the same crate");
        }

        // Record the outcome for the toolstate tracker. NOTE(review): a
        // successful build is recorded as `TestFail` — presumably meaning
        // "built but not yet tested"; confirm against the toolstate docs.
        builder.save_toolstate(
            tool,
            if is_expected { ToolState::TestFail } else { ToolState::BuildFail },
        );

        if !is_expected {
            if !is_optional_tool {
                // Required tools abort the entire bootstrap on build failure.
                exit(1);
            } else {
                None
            }
        } else {
            // HACK(#82501): on Windows, the tools directory gets added to PATH when running tests, and
            // compiletest confuses HTML tidy with the in-tree tidy. Name the in-tree tidy something
            // different so the problem doesn't come up.
            if tool == "tidy" {
                tool = "rust-tidy";
            }
            // Copy the built binary out of cargo's output dir into the
            // per-compiler tools dir, and hand that installed path back.
            let cargo_out =
                builder.cargo_out(compiler, self.mode, target).join(exe(tool, compiler.host));
            let bin = builder.tools_dir(compiler).join(exe(tool, compiler.host));
            builder.copy(&cargo_out, &bin);
            Some(bin)
        }
    }
}
/// Configures a cargo invocation (`command` is e.g. "build" or "test") for the
/// tool whose manifest lives under `builder.src.join(path)`, setting up the
/// environment variables tools conventionally read.
pub fn prepare_tool_cargo(
    builder: &Builder<'_>,
    compiler: Compiler,
    mode: Mode,
    target: TargetSelection,
    command: &'static str,
    path: &'static str,
    source_type: SourceType,
    extra_features: &[String],
) -> CargoCommand {
    let mut cargo = builder.cargo(compiler, mode, source_type, target, command);
    let dir = builder.src.join(path);
    cargo.arg("--manifest-path").arg(dir.join("Cargo.toml"));

    let mut features = extra_features.to_vec();
    if builder.build.config.cargo_native_static {
        // These tools pull in libz; link it (and the workspace-hack deps)
        // statically when the build asked for static native libraries.
        let wants_static = ["cargo", "rls", "clippy", "miri", "rustfmt"]
            .iter()
            .any(|suffix| path.ends_with(suffix));
        if wants_static {
            cargo.env("LIBZ_SYS_STATIC", "1");
            features.push("rustc-workspace-hack/all-static".to_string());
        }
    }

    // if tools are using lzma we want to force the build script to build its
    // own copy
    cargo.env("LZMA_API_STATIC", "1");

    // CFG_RELEASE is needed by rustfmt (and possibly other tools) which
    // import rustc-ap-rustc_attr which requires this to be set for the
    // `#[cfg(version(...))]` attribute.
    cargo.env("CFG_RELEASE", builder.rust_release());
    cargo.env("CFG_RELEASE_CHANNEL", &builder.config.channel);
    cargo.env("CFG_VERSION", builder.rust_version());
    cargo.env("CFG_RELEASE_NUM", &builder.version);

    // Expose the tool's own git metadata, when available.
    let git_info = GitInfo::new(builder.config.ignore_git, &dir);
    if let Some(sha) = git_info.sha() {
        cargo.env("CFG_COMMIT_HASH", sha);
    }
    if let Some(sha_short) = git_info.sha_short() {
        cargo.env("CFG_SHORT_COMMIT_HASH", sha_short);
    }
    if let Some(date) = git_info.commit_date() {
        cargo.env("CFG_COMMIT_DATE", date);
    }

    if !features.is_empty() {
        cargo.arg("--features").arg(&features.join(", "));
    }
    cargo
}
/// Generates, for each listed tool, a `Step` that builds it with the stage0
/// (snapshot) compiler, plus the `Tool` enum and the `Builder::tool_exe`
/// dispatcher over those entries.
macro_rules! bootstrap_tool {
    ($(
        $name:ident, $path:expr, $tool_name:expr
        $(,is_external_tool = $external:expr)*
        $(,is_unstable_tool = $unstable:expr)*
        $(,features = $features:expr)*
        ;
    )+) => {
        #[derive(Copy, PartialEq, Eq, Clone)]
        pub enum Tool {
            $(
                $name,
            )+
        }

        impl<'a> Builder<'a> {
            /// Builds the given tool (if needed) and returns its executable path.
            pub fn tool_exe(&self, tool: Tool) -> PathBuf {
                match tool {
                    $(Tool::$name =>
                        self.ensure($name {
                            compiler: self.compiler(0, self.config.build),
                            target: self.config.build,
                        }),
                    )+
                }
            }
        }

        $(
            #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
            pub struct $name {
                pub compiler: Compiler,
                pub target: TargetSelection,
            }

            impl Step for $name {
                type Output = PathBuf;

                fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
                    run.path($path)
                }

                fn make_run(run: RunConfig<'_>) {
                    run.builder.ensure($name {
                        // snapshot compiler
                        compiler: run.builder.compiler(0, run.builder.config.build),
                        target: run.target,
                    });
                }

                fn run(self, builder: &Builder<'_>) -> PathBuf {
                    builder.ensure(ToolBuild {
                        compiler: self.compiler,
                        target: self.target,
                        tool: $tool_name,
                        mode: if false $(|| $unstable)* {
                            // use in-tree libraries for unstable features
                            Mode::ToolStd
                        } else {
                            Mode::ToolBootstrap
                        },
                        path: $path,
                        // Bootstrap tools are never optional: failure aborts.
                        is_optional_tool: false,
                        source_type: if false $(|| $external)* {
                            SourceType::Submodule
                        } else {
                            SourceType::InTree
                        },
                        extra_features: {
                            // FIXME(#60643): avoid this lint by using `_`
                            let mut _tmp = Vec::new();
                            $(_tmp.extend($features);)*
                            _tmp
                        },
                    }).expect("expected to build -- essential tool")
                }
            }
        )+
    }
}
// The tools built with the stage0 (snapshot) compiler. Format per entry:
// StepName, source path, executable name [, flags];
bootstrap_tool!(
    Rustbook, "src/tools/rustbook", "rustbook";
    UnstableBookGen, "src/tools/unstable-book-gen", "unstable-book-gen";
    Tidy, "src/tools/tidy", "tidy";
    Linkchecker, "src/tools/linkchecker", "linkchecker";
    CargoTest, "src/tools/cargotest", "cargotest";
    Compiletest, "src/tools/compiletest", "compiletest", is_unstable_tool = true;
    BuildManifest, "src/tools/build-manifest", "build-manifest";
    RemoteTestClient, "src/tools/remote-test-client", "remote-test-client";
    RustInstaller, "src/tools/rust-installer", "fabricate", is_external_tool = true;
    RustdocTheme, "src/tools/rustdoc-themes", "rustdoc-themes";
    ExpandYamlAnchors, "src/tools/expand-yaml-anchors", "expand-yaml-anchors";
    LintDocs, "src/tools/lint-docs", "lint-docs";
    JsonDocCk, "src/tools/jsondocck", "jsondocck";
);
/// Builds the `error_index_generator` tool.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Ord, PartialOrd)]
pub struct ErrorIndex {
    // Compiler whose librustdoc the generator links with (see `command`).
    pub compiler: Compiler,
}

impl ErrorIndex {
    /// Returns a `Command` that runs a freshly built error-index-generator,
    /// with the dylib search path pointing at the matching sysroot libdir.
    pub fn command(builder: &Builder<'_>) -> Command {
        // This uses stage-1 to match the behavior of building rustdoc.
        // Error-index-generator links with the rustdoc library, so we want to
        // use the same librustdoc to avoid building rustdoc twice (and to
        // avoid building the compiler an extra time). This uses
        // saturating_sub to deal with building with stage 0. (Using stage 0
        // isn't recommended, since it will fail if any new error index tests
        // use new syntax, but it should work otherwise.)
        let compiler = builder.compiler(builder.top_stage.saturating_sub(1), builder.config.build);
        let mut cmd = Command::new(builder.ensure(ErrorIndex { compiler }));
        add_dylib_path(
            vec![PathBuf::from(&builder.sysroot_libdir(compiler, compiler.host))],
            &mut cmd,
        );
        cmd
    }
}
impl Step for ErrorIndex {
    type Output = PathBuf;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/error_index_generator")
    }

    fn make_run(run: RunConfig<'_>) {
        // Build the error index with the same stage as rustdoc so rustdoc
        // isn't compiled twice unnecessarily.
        //
        // NOTE: This `make_run` isn't used in normal situations, only if you
        // manually build the tool with `x.py build
        // src/tools/error-index-generator` which almost nobody does.
        // Normally, `x.py test` or `x.py doc` will use the
        // `ErrorIndex::command` function instead.
        let builder = run.builder;
        let stage = builder.top_stage.saturating_sub(1);
        let compiler = builder.compiler(stage, builder.config.build);
        builder.ensure(ErrorIndex { compiler });
    }

    fn run(self, builder: &Builder<'_>) -> PathBuf {
        // This tool links against librustc/librustdoc, hence `ToolRustc`.
        let built = builder.ensure(ToolBuild {
            compiler: self.compiler,
            target: self.compiler.host,
            tool: "error_index_generator",
            mode: Mode::ToolRustc,
            path: "src/tools/error_index_generator",
            is_optional_tool: false,
            source_type: SourceType::InTree,
            extra_features: Vec::new(),
        });
        built.expect("expected to build -- essential tool")
    }
}
/// Builds `remote-test-server`, the binary shipped to emulated/remote targets.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RemoteTestServer {
    pub compiler: Compiler,
    pub target: TargetSelection,
}

impl Step for RemoteTestServer {
    type Output = PathBuf;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/remote-test-server")
    }

    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;
        builder.ensure(RemoteTestServer {
            compiler: builder.compiler(builder.top_stage, builder.config.build),
            target: run.target,
        });
    }

    fn run(self, builder: &Builder<'_>) -> PathBuf {
        // The server runs on the target, so build it against in-tree std.
        let build = ToolBuild {
            compiler: self.compiler,
            target: self.target,
            tool: "remote-test-server",
            mode: Mode::ToolStd,
            path: "src/tools/remote-test-server",
            is_optional_tool: false,
            source_type: SourceType::InTree,
            extra_features: Vec::new(),
        };
        builder.ensure(build).expect("expected to build -- essential tool")
    }
}
/// Builds (or locates) the rustdoc binary paired with a given compiler.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Ord, PartialOrd)]
pub struct Rustdoc {
    /// This should only ever be 0 or 2.
    /// We sometimes want to reference the "bootstrap" rustdoc, which is why this option is here.
    pub compiler: Compiler,
}

impl Step for Rustdoc {
    type Output = PathBuf;
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/rustdoc").path("src/librustdoc")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Rustdoc {
            // Note: this is somewhat unique in that we actually want a *target*
            // compiler here, because rustdoc *is* a compiler. We won't be using
            // this as the compiler to build with, but rather this is "what
            // compiler are we producing"?
            compiler: run.builder.compiler(run.builder.top_stage, run.target),
        });
    }

    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let target_compiler = self.compiler;
        if target_compiler.stage == 0 {
            if !target_compiler.is_snapshot(builder) {
                panic!("rustdoc in stage 0 must be snapshot rustdoc");
            }
            // Stage 0: use the prebuilt rustdoc sitting next to the
            // downloaded snapshot rustc; nothing to build.
            return builder.initial_rustc.with_file_name(exe("rustdoc", target_compiler.host));
        }
        let target = target_compiler.host;
        // Similar to `compile::Assemble`, build with the previous stage's compiler. Otherwise
        // we'd have stageN/bin/rustc and stageN/bin/rustdoc be effectively different stage
        // compilers, which isn't what we want. Rustdoc should be linked in the same way as the
        // rustc compiler it's paired with, so it must be built with the previous stage compiler.
        let build_compiler = builder.compiler(target_compiler.stage - 1, builder.config.build);

        // When using `download-rustc` and a stage0 build_compiler, copying rustc doesn't actually
        // build stage0 libstd (because the libstd in sysroot has the wrong ABI). Explicitly build
        // it.
        builder.ensure(compile::Std { compiler: build_compiler, target: target_compiler.host });
        builder.ensure(compile::Rustc { compiler: build_compiler, target: target_compiler.host });
        // NOTE: this implies that `download-rustc` is pretty useless when compiling with the stage0
        // compiler, since you do just as much work.
        if !builder.config.dry_run && builder.config.download_rustc && build_compiler.stage == 0 {
            println!(
                "warning: `download-rustc` does nothing when building stage1 tools; consider using `--stage 2` instead"
            );
        }

        // The presence of `target_compiler` ensures that the necessary libraries (codegen backends,
        // compiler libraries, ...) are built. Rustdoc does not require the presence of any
        // libraries within sysroot_libdir (i.e., rustlib), though doctests may want it (since
        // they'll be linked to those libraries). As such, don't explicitly `ensure` any additional
        // libraries here. The intuition here is that If we've built a compiler, we should be able
        // to build rustdoc.
        //
        let mut features = Vec::new();
        if builder.config.jemalloc {
            features.push("jemalloc".to_string());
        }

        let cargo = prepare_tool_cargo(
            builder,
            build_compiler,
            Mode::ToolRustc,
            target,
            "build",
            "src/tools/rustdoc",
            SourceType::InTree,
            features.as_slice(),
        );

        builder.info(&format!(
            "Building rustdoc for stage{} ({})",
            target_compiler.stage, target_compiler.host
        ));
        builder.run(&mut cargo.into());

        // Cargo adds a number of paths to the dylib search path on windows, which results in
        // the wrong rustdoc being executed. To avoid the conflicting rustdocs, we name the "tool"
        // rustdoc a different name.
        let tool_rustdoc = builder
            .cargo_out(build_compiler, Mode::ToolRustc, target)
            .join(exe("rustdoc_tool_binary", target_compiler.host));

        // don't create a stage0-sysroot/bin directory.
        if target_compiler.stage > 0 {
            // Install the freshly built binary as `$sysroot/bin/rustdoc`.
            let sysroot = builder.sysroot(target_compiler);
            let bindir = sysroot.join("bin");
            t!(fs::create_dir_all(&bindir));
            let bin_rustdoc = bindir.join(exe("rustdoc", target_compiler.host));
            let _ = fs::remove_file(&bin_rustdoc); // ignore "not found"
            builder.copy(&tool_rustdoc, &bin_rustdoc);
            bin_rustdoc
        } else {
            tool_rustdoc
        }
    }
}
/// Builds the `cargo` binary (plus its experimental credential helpers).
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Cargo {
    pub compiler: Compiler,
    pub target: TargetSelection,
}

impl Step for Cargo {
    type Output = PathBuf;
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        let builder = run.builder;
        // Only built by default for `extended` builds.
        run.path("src/tools/cargo").default_condition(builder.config.extended)
    }

    fn make_run(run: RunConfig<'_>) {
        let builder = run.builder;
        builder.ensure(Cargo {
            compiler: builder.compiler(builder.top_stage, builder.config.build),
            target: run.target,
        });
    }

    fn run(self, builder: &Builder<'_>) -> PathBuf {
        // Cargo itself is required; a failure here aborts the build.
        let cargo_bin_path = builder
            .ensure(ToolBuild {
                compiler: self.compiler,
                target: self.target,
                tool: "cargo",
                mode: Mode::ToolRustc,
                path: "src/tools/cargo",
                is_optional_tool: false,
                source_type: SourceType::Submodule,
                extra_features: Vec::new(),
            })
            .expect("expected to build -- essential tool");

        // The credential helpers are currently experimental, so any build
        // failure is ignored (`is_optional_tool: true`).
        let mut helpers: Vec<(&'static str, &'static str)> = Vec::new();
        if self.target.contains("windows") {
            helpers.push((
                "cargo-credential-wincred",
                "src/tools/cargo/crates/credential/cargo-credential-wincred",
            ));
        }
        if self.target.contains("apple-darwin") {
            helpers.push((
                "cargo-credential-macos-keychain",
                "src/tools/cargo/crates/credential/cargo-credential-macos-keychain",
            ));
        }
        helpers.push((
            "cargo-credential-1password",
            "src/tools/cargo/crates/credential/cargo-credential-1password",
        ));
        for (name, path) in helpers {
            let _ = builder.ensure(ToolBuild {
                compiler: self.compiler,
                target: self.target,
                tool: name,
                mode: Mode::ToolRustc,
                path,
                is_optional_tool: true,
                source_type: SourceType::Submodule,
                extra_features: Vec::new(),
            });
        }
        cargo_bin_path
    }
}
/// Generates a `Step` per "extended" tool (cargo-fmt, clippy, miri, rls, ...).
/// These build with the in-tree compiler (`Mode::ToolRustc`), are optional
/// (a failure records toolstate rather than aborting), and default-run only
/// under the `extended`/`tools` configuration.
macro_rules! tool_extended {
    (($sel:ident, $builder:ident),
       $($name:ident,
       $toolstate:ident,
       $path:expr,
       $tool_name:expr,
       stable = $stable:expr,
       $(in_tree = $in_tree:expr,)*
       $extra_deps:block;)+) => {
        $(
            #[derive(Debug, Clone, Hash, PartialEq, Eq)]
            pub struct $name {
                pub compiler: Compiler,
                pub target: TargetSelection,
                pub extra_features: Vec<String>,
            }

            impl Step for $name {
                type Output = Option<PathBuf>;
                const DEFAULT: bool = true; // Overwritten below
                const ONLY_HOSTS: bool = true;

                fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
                    let builder = run.builder;
                    run.path($path).default_condition(
                        builder.config.extended
                            && builder.config.tools.as_ref().map_or(
                                // By default, on nightly/dev enable all tools, else only
                                // build stable tools.
                                $stable || builder.build.unstable_features(),
                                // If `tools` is set, search list for this tool.
                                |tools| {
                                    tools.iter().any(|tool| match tool.as_ref() {
                                        "clippy" => $tool_name == "clippy-driver",
                                        x => $tool_name == x,
                                    })
                                }),
                    )
                }

                fn make_run(run: RunConfig<'_>) {
                    run.builder.ensure($name {
                        compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build),
                        target: run.target,
                        extra_features: Vec::new(),
                    });
                }

                #[allow(unused_mut)]
                fn run(mut $sel, $builder: &Builder<'_>) -> Option<PathBuf> {
                    // Run the per-tool extra dependency block first (it may
                    // mutate `self`, e.g. pushing extra features).
                    $extra_deps
                    $builder.ensure(ToolBuild {
                        compiler: $sel.compiler,
                        target: $sel.target,
                        tool: $tool_name,
                        mode: Mode::ToolRustc,
                        path: $path,
                        extra_features: $sel.extra_features,
                        is_optional_tool: true,
                        source_type: if false $(|| $in_tree)* {
                            SourceType::InTree
                        } else {
                            SourceType::Submodule
                        },
                    })
                }
            }
        )+
    }
}
// Note: tools need to be also added to `Builder::get_step_descriptions` in `builder.rs`
// to make `./x.py build <tool>` work.
// Format per entry: StepName, toolstate key, source path, executable name,
// stability flag [, in_tree], { extra-dependency block };
tool_extended!((self, builder),
    Cargofmt, rustfmt, "src/tools/rustfmt", "cargo-fmt", stable=true, {};
    CargoClippy, clippy, "src/tools/clippy", "cargo-clippy", stable=true, in_tree=true, {};
    Clippy, clippy, "src/tools/clippy", "clippy-driver", stable=true, in_tree=true, {};
    Miri, miri, "src/tools/miri", "miri", stable=false, {};
    CargoMiri, miri, "src/tools/miri/cargo-miri", "cargo-miri", stable=false, {};
    Rls, rls, "src/tools/rls", "rls", stable=true, {
        // Build clippy-driver first and enable RLS's `clippy` cargo feature.
        builder.ensure(Clippy {
            compiler: self.compiler,
            target: self.target,
            extra_features: Vec::new(),
        });
        self.extra_features.push("clippy".to_owned());
    };
    RustDemangler, rust_demangler, "src/tools/rust-demangler", "rust-demangler", stable=false, in_tree=true, {};
    Rustfmt, rustfmt, "src/tools/rustfmt", "rustfmt", stable=true, {};
    RustAnalyzer, rust_analyzer, "src/tools/rust-analyzer/crates/rust-analyzer", "rust-analyzer", stable=false, {};
);
impl<'a> Builder<'a> {
    /// Gets a `Command` which is ready to run `tool` in `stage` built for
    /// `host`.
    pub fn tool_cmd(&self, tool: Tool) -> Command {
        let mut cmd = Command::new(self.tool_exe(tool));
        // `tool_exe` builds with the stage0 compiler, so set the environment
        // up for that same compiler here.
        let compiler = self.compiler(0, self.config.build);
        let host = &compiler.host;
        // Prepares the `cmd` provided to be able to run the `compiler` provided.
        //
        // Notably this munges the dynamic library lookup path to point to the
        // right location to run `compiler`.
        let mut lib_paths: Vec<PathBuf> = vec![
            self.build.rustc_snapshot_libdir(),
            self.cargo_out(compiler, Mode::ToolBootstrap, *host).join("deps"),
        ];

        // On MSVC a tool may invoke a C compiler (e.g., compiletest in run-make
        // mode) and that C compiler may need some extra PATH modification. Do
        // so here.
        if compiler.host.contains("msvc") {
            let curpaths = env::var_os("PATH").unwrap_or_default();
            let curpaths = env::split_paths(&curpaths).collect::<Vec<_>>();
            for &(ref k, ref v) in self.cc[&compiler.host].env() {
                if k != "PATH" {
                    continue;
                }
                // Append only the PATH entries not already present.
                for path in env::split_paths(v) {
                    if !curpaths.contains(&path) {
                        lib_paths.push(path);
                    }
                }
            }
        }

        add_dylib_path(lib_paths, &mut cmd);

        // Provide a RUSTC for this command to use.
        cmd.env("RUSTC", &self.initial_rustc);
        cmd
    }
}
Allow running `x.py test src/test/linkchecker` with `download-llvm = true`
Previously, the LD_LIBRARY_PATH for the linkchecker contained only
`build/x86_64-unknown-linux-gnu/stage1/lib/rustlib/x86_64-unknown-linux-gnu/lib`, because the linkchecker depends on the freshly built ("master") copy of the standard library. That dependency is real, but the path above omits the directory that holds the compiler's own shared libraries:
```
/home/joshua/src/rust/rust/build/x86_64-unknown-linux-gnu/stage1-tools-bin/error_index_generator: error while loading shared libraries: libLLVM-12-rust-1.53.0-nightly.so: cannot open shared object file: No such file or directory
```
That file is in
`build/x86_64-unknown-linux-gnu/stage1/lib/libLLVM-12-rust-1.53.0-nightly.so`,
which wasn't included in the dynamic path. This adds `build/x86_64-unknown-linux-gnu/stage1/lib` to the dynamic path for the linkchecker.
use std::collections::HashSet;
use std::env;
use std::fs;
use std::path::PathBuf;
use std::process::{exit, Command};
use build_helper::t;
use crate::builder::{Builder, Cargo as CargoCommand, RunConfig, ShouldRun, Step};
use crate::channel::GitInfo;
use crate::compile;
use crate::config::TargetSelection;
use crate::toolstate::ToolState;
use crate::util::{add_dylib_path, exe};
use crate::Compiler;
use crate::Mode;
/// Where a tool's sources come from.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum SourceType {
    /// The tool's sources are developed in this repository.
    InTree,
    /// The tool's sources are vendored through a git submodule.
    Submodule,
}
/// Shared step that actually compiles a tool under `src/tools`; concrete
/// tool steps delegate to it via `builder.ensure(ToolBuild { .. })`.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
struct ToolBuild {
    // Compiler used to build the tool.
    compiler: Compiler,
    // Target the tool is built for.
    target: TargetSelection,
    // Executable name of the tool (used to locate cargo's output).
    tool: &'static str,
    // Path to the tool's sources, relative to the source root.
    path: &'static str,
    // One of the `Mode::Tool*` modes; decides which libraries to build first.
    mode: Mode,
    // If true, a failed build records toolstate and returns `None`
    // instead of aborting the whole bootstrap.
    is_optional_tool: bool,
    // In-tree sources vs. a submodule.
    source_type: SourceType,
    // Extra cargo features to enable for this build.
    extra_features: Vec<String>,
}
impl Step for ToolBuild {
    type Output = Option<PathBuf>;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        // Never selected from the command line; always reached through
        // `ensure` from a concrete tool step.
        run.never()
    }

    /// Builds a tool in `src/tools`
    ///
    /// This will build the specified tool with the specified `host` compiler in
    /// `stage` into the normal cargo output directory.
    ///
    /// Returns the path to the installed binary, or `None` when an optional
    /// tool failed to build. Required tools abort the process on failure.
    fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
        let compiler = self.compiler;
        let target = self.target;
        let mut tool = self.tool;
        let path = self.path;
        let is_optional_tool = self.is_optional_tool;

        // Make sure the libraries this tool links against exist first.
        match self.mode {
            Mode::ToolRustc => builder.ensure(compile::Rustc { compiler, target }),
            Mode::ToolStd => builder.ensure(compile::Std { compiler, target }),
            Mode::ToolBootstrap => {} // uses downloaded stage0 compiler libs
            _ => panic!("unexpected Mode for tool build"),
        }

        let cargo = prepare_tool_cargo(
            builder,
            compiler,
            self.mode,
            target,
            "build",
            path,
            self.source_type,
            &self.extra_features,
        );

        builder.info(&format!("Building stage{} tool {} ({})", compiler.stage, tool, target));
        // Stream cargo's JSON messages so we can detect when the same crate
        // was compiled twice with different feature sets.
        let mut duplicates = Vec::new();
        let is_expected = compile::stream_cargo(builder, cargo, vec![], &mut |msg| {
            // Only care about big things like the RLS/Cargo for now
            match tool {
                "rls" | "cargo" | "clippy-driver" | "miri" | "rustfmt" => {}
                _ => return,
            }
            let (id, features, filenames) = match msg {
                compile::CargoMessage::CompilerArtifact {
                    package_id,
                    features,
                    filenames,
                    target: _,
                } => (package_id, features, filenames),
                _ => return,
            };
            let features = features.iter().map(|s| s.to_string()).collect::<Vec<_>>();

            for path in filenames {
                let val = (tool, PathBuf::from(&*path), features.clone());
                // we're only interested in deduplicating rlibs for now
                if val.1.extension().and_then(|s| s.to_str()) != Some("rlib") {
                    continue;
                }

                // Don't worry about compiles that turn out to be host
                // dependencies or build scripts. To skip these we look for
                // anything that goes in `.../release/deps` but *doesn't* go in
                // `$target/release/deps`. This ensure that outputs in
                // `$target/release` are still considered candidates for
                // deduplication.
                if let Some(parent) = val.1.parent() {
                    if parent.ends_with("release/deps") {
                        let maybe_target = parent
                            .parent()
                            .and_then(|p| p.parent())
                            .and_then(|p| p.file_name())
                            .and_then(|p| p.to_str())
                            .unwrap();
                        if maybe_target != &*target.triple {
                            continue;
                        }
                    }
                }

                // Record that we've built an artifact for `id`, and if one was
                // already listed then we need to see if we reused the same
                // artifact or produced a duplicate.
                let mut artifacts = builder.tool_artifacts.borrow_mut();
                let prev_artifacts = artifacts.entry(target).or_default();
                let prev = match prev_artifacts.get(&*id) {
                    Some(prev) => prev,
                    None => {
                        prev_artifacts.insert(id.to_string(), val);
                        continue;
                    }
                };
                if prev.1 == val.1 {
                    return; // same path, same artifact
                }

                // If the paths are different and one of them *isn't* inside of
                // `release/deps`, then it means it's probably in
                // `$target/release`, or it's some final artifact like
                // `libcargo.rlib`. In these situations Cargo probably just
                // copied it up from `$target/release/deps/libcargo-xxxx.rlib`,
                // so if the features are equal we can just skip it.
                let prev_no_hash = prev.1.parent().unwrap().ends_with("release/deps");
                let val_no_hash = val.1.parent().unwrap().ends_with("release/deps");
                if prev.2 == val.2 || !prev_no_hash || !val_no_hash {
                    return;
                }

                // ... and otherwise this looks like we duplicated some sort of
                // compilation, so record it to generate an error later.
                duplicates.push((id.to_string(), val, prev.clone()));
            }
        });

        if is_expected && !duplicates.is_empty() {
            println!(
                "duplicate artifacts found when compiling a tool, this \
                 typically means that something was recompiled because \
                 a transitive dependency has different features activated \
                 than in a previous build:\n"
            );
            println!(
                "the following dependencies are duplicated although they \
                 have the same features enabled:"
            );
            let (same, different): (Vec<_>, Vec<_>) =
                duplicates.into_iter().partition(|(_, cur, prev)| cur.2 == prev.2);
            for (id, cur, prev) in same {
                println!(" {}", id);
                // same features
                println!(" `{}` ({:?})\n `{}` ({:?})", cur.0, cur.1, prev.0, prev.1);
            }
            println!("the following dependencies have different features:");
            for (id, cur, prev) in different {
                println!(" {}", id);
                let cur_features: HashSet<_> = cur.2.into_iter().collect();
                let prev_features: HashSet<_> = prev.2.into_iter().collect();
                println!(
                    " `{}` additionally enabled features {:?} at {:?}",
                    cur.0,
                    &cur_features - &prev_features,
                    cur.1
                );
                println!(
                    " `{}` additionally enabled features {:?} at {:?}",
                    prev.0,
                    &prev_features - &cur_features,
                    prev.1
                );
            }
            println!();
            println!(
                "to fix this you will probably want to edit the local \
                 src/tools/rustc-workspace-hack/Cargo.toml crate, as \
                 that will update the dependency graph to ensure that \
                 these crates all share the same feature set"
            );
            panic!("tools should not compile multiple copies of the same crate");
        }

        // Record the outcome for the toolstate tracker. NOTE(review): a
        // successful build is recorded as `TestFail` — presumably meaning
        // "built but not yet tested"; confirm against the toolstate docs.
        builder.save_toolstate(
            tool,
            if is_expected { ToolState::TestFail } else { ToolState::BuildFail },
        );

        if !is_expected {
            if !is_optional_tool {
                // Required tools abort the entire bootstrap on build failure.
                exit(1);
            } else {
                None
            }
        } else {
            // HACK(#82501): on Windows, the tools directory gets added to PATH when running tests, and
            // compiletest confuses HTML tidy with the in-tree tidy. Name the in-tree tidy something
            // different so the problem doesn't come up.
            if tool == "tidy" {
                tool = "rust-tidy";
            }
            // Copy the built binary out of cargo's output dir into the
            // per-compiler tools dir, and hand that installed path back.
            let cargo_out =
                builder.cargo_out(compiler, self.mode, target).join(exe(tool, compiler.host));
            let bin = builder.tools_dir(compiler).join(exe(tool, compiler.host));
            builder.copy(&cargo_out, &bin);
            Some(bin)
        }
    }
}
/// Creates the `Cargo` invocation used to build an in-tree tool.
///
/// Wires up the manifest path for `path`, the requested `extra_features`, and
/// the `CFG_*` environment variables (release/version/channel plus the tool's
/// own git commit info) that several tools read at compile time.
pub fn prepare_tool_cargo(
    builder: &Builder<'_>,
    compiler: Compiler,
    mode: Mode,
    target: TargetSelection,
    command: &'static str,
    path: &'static str,
    source_type: SourceType,
    extra_features: &[String],
) -> CargoCommand {
    let mut cargo = builder.cargo(compiler, mode, source_type, target, command);
    let dir = builder.src.join(path);
    cargo.arg("--manifest-path").arg(dir.join("Cargo.toml"));

    let mut features = extra_features.to_vec();
    if builder.build.config.cargo_native_static {
        // These tools pull in `libz-sys`; force a static copy so the produced
        // binaries do not depend on a system zlib.
        let wants_static_libz =
            ["cargo", "rls", "clippy", "miri", "rustfmt"].iter().any(|t| path.ends_with(t));
        if wants_static_libz {
            cargo.env("LIBZ_SYS_STATIC", "1");
            features.push("rustc-workspace-hack/all-static".to_string());
        }
    }

    // if tools are using lzma we want to force the build script to build its
    // own copy
    cargo.env("LZMA_API_STATIC", "1");

    // CFG_RELEASE is needed by rustfmt (and possibly other tools) which
    // import rustc-ap-rustc_attr which requires this to be set for the
    // `#[cfg(version(...))]` attribute.
    cargo.env("CFG_RELEASE", builder.rust_release());
    cargo.env("CFG_RELEASE_CHANNEL", &builder.config.channel);
    cargo.env("CFG_VERSION", builder.rust_version());
    cargo.env("CFG_RELEASE_NUM", &builder.version);

    // Expose the tool's own git information, when available.
    let info = GitInfo::new(builder.config.ignore_git, &dir);
    if let Some(sha) = info.sha() {
        cargo.env("CFG_COMMIT_HASH", sha);
    }
    if let Some(sha_short) = info.sha_short() {
        cargo.env("CFG_SHORT_COMMIT_HASH", sha_short);
    }
    if let Some(date) = info.commit_date() {
        cargo.env("CFG_COMMIT_DATE", date);
    }

    if !features.is_empty() {
        // Cargo accepts a comma/space separated list for `--features`.
        cargo.arg("--features").arg(&features.join(", "));
    }
    cargo
}
// Declares the set of "bootstrap tools": helper binaries compiled with the
// stage0 (snapshot) compiler that the build/test infrastructure itself needs.
//
// For each `Name, "path", "binary-name";` entry this generates:
//   * a variant of the `Tool` enum,
//   * a `Step` struct named `Name` that builds the tool via `ToolBuild`,
//   * an arm of `Builder::tool_exe` mapping `Tool::Name` to the built binary.
//
// Optional flags per entry:
//   * `is_external_tool = true` — sources live in a submodule, not in-tree;
//   * `is_unstable_tool = true` — built against the in-tree std
//     (`Mode::ToolStd`) so it may use unstable features;
//   * `features = expr`        — extra cargo features to enable.
macro_rules! bootstrap_tool {
    ($(
        $name:ident, $path:expr, $tool_name:expr
        $(,is_external_tool = $external:expr)*
        $(,is_unstable_tool = $unstable:expr)*
        $(,features = $features:expr)*
        ;
    )+) => {
        #[derive(Copy, PartialEq, Eq, Clone)]
        pub enum Tool {
            $(
                $name,
            )+
        }

        impl<'a> Builder<'a> {
            /// Returns the path to the executable for the given bootstrap
            /// tool, building it (with the stage0 compiler) if necessary.
            pub fn tool_exe(&self, tool: Tool) -> PathBuf {
                match tool {
                    $(Tool::$name =>
                        self.ensure($name {
                            compiler: self.compiler(0, self.config.build),
                            target: self.config.build,
                        }),
                    )+
                }
            }
        }

        $(
            #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
            pub struct $name {
                pub compiler: Compiler,
                pub target: TargetSelection,
            }

            impl Step for $name {
                type Output = PathBuf;

                fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
                    run.path($path)
                }

                fn make_run(run: RunConfig<'_>) {
                    run.builder.ensure($name {
                        // snapshot compiler
                        compiler: run.builder.compiler(0, run.builder.config.build),
                        target: run.target,
                    });
                }

                fn run(self, builder: &Builder<'_>) -> PathBuf {
                    builder.ensure(ToolBuild {
                        compiler: self.compiler,
                        target: self.target,
                        tool: $tool_name,
                        mode: if false $(|| $unstable)* {
                            // use in-tree libraries for unstable features
                            Mode::ToolStd
                        } else {
                            Mode::ToolBootstrap
                        },
                        path: $path,
                        is_optional_tool: false,
                        source_type: if false $(|| $external)* {
                            SourceType::Submodule
                        } else {
                            SourceType::InTree
                        },
                        extra_features: {
                            // FIXME(#60643): avoid this lint by using `_`
                            let mut _tmp = Vec::new();
                            $(_tmp.extend($features);)*
                            _tmp
                        },
                    }).expect("expected to build -- essential tool")
                }
            }
        )+
    }
}
// The full roster of bootstrap tools. Each line is `StepName, source path,
// produced binary name`; see the `bootstrap_tool!` macro for the optional
// per-entry flags.
bootstrap_tool!(
    Rustbook, "src/tools/rustbook", "rustbook";
    UnstableBookGen, "src/tools/unstable-book-gen", "unstable-book-gen";
    Tidy, "src/tools/tidy", "tidy";
    Linkchecker, "src/tools/linkchecker", "linkchecker";
    CargoTest, "src/tools/cargotest", "cargotest";
    Compiletest, "src/tools/compiletest", "compiletest", is_unstable_tool = true;
    BuildManifest, "src/tools/build-manifest", "build-manifest";
    RemoteTestClient, "src/tools/remote-test-client", "remote-test-client";
    RustInstaller, "src/tools/rust-installer", "fabricate", is_external_tool = true;
    RustdocTheme, "src/tools/rustdoc-themes", "rustdoc-themes";
    ExpandYamlAnchors, "src/tools/expand-yaml-anchors", "expand-yaml-anchors";
    LintDocs, "src/tools/lint-docs", "lint-docs";
    JsonDocCk, "src/tools/jsondocck", "jsondocck";
);
/// Step that builds the `error_index_generator` tool, which produces the
/// extended error-code documentation index.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Ord, PartialOrd)]
pub struct ErrorIndex {
    pub compiler: Compiler,
}
impl ErrorIndex {
    /// Returns a `Command` that runs the error-index generator, building it
    /// first if needed, with the dylib search path set up so it can load the
    /// rustdoc/rustc libraries it links against.
    pub fn command(builder: &Builder<'_>) -> Command {
        // This uses stage-1 to match the behavior of building rustdoc.
        // Error-index-generator links with the rustdoc library, so we want to
        // use the same librustdoc to avoid building rustdoc twice (and to
        // avoid building the compiler an extra time). This uses
        // saturating_sub to deal with building with stage 0. (Using stage 0
        // isn't recommended, since it will fail if any new error index tests
        // use new syntax, but it should work otherwise.)
        let compiler = builder.compiler(builder.top_stage.saturating_sub(1), builder.config.build);
        let mut cmd = Command::new(builder.ensure(ErrorIndex { compiler }));
        // The tool dynamically links librustdoc/librustc; make both findable.
        let dylib_dirs = vec![
            PathBuf::from(&builder.sysroot_libdir(compiler, compiler.host)),
            PathBuf::from(builder.rustc_libdir(compiler)),
        ];
        add_dylib_path(dylib_dirs, &mut cmd);
        cmd
    }
}
impl Step for ErrorIndex {
    type Output = PathBuf;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/error_index_generator")
    }

    fn make_run(run: RunConfig<'_>) {
        // Compile the error-index in the same stage as rustdoc to avoid
        // recompiling rustdoc twice if we can.
        //
        // NOTE: This `make_run` isn't used in normal situations, only if you
        // manually build the tool with `x.py build
        // src/tools/error-index-generator` which almost nobody does.
        // Normally, `x.py test` or `x.py doc` will use the
        // `ErrorIndex::command` function instead.
        let stage = run.builder.top_stage.saturating_sub(1);
        let compiler = run.builder.compiler(stage, run.builder.config.build);
        run.builder.ensure(ErrorIndex { compiler });
    }

    /// Builds the tool against the in-tree rustc/rustdoc libraries.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let tool = ToolBuild {
            compiler: self.compiler,
            target: self.compiler.host,
            tool: "error_index_generator",
            mode: Mode::ToolRustc,
            path: "src/tools/error_index_generator",
            is_optional_tool: false,
            source_type: SourceType::InTree,
            extra_features: Vec::new(),
        };
        builder.ensure(tool).expect("expected to build -- essential tool")
    }
}
/// Step that builds `remote-test-server`, the agent binary that runs on a
/// remote device so the test suite can execute tests there.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RemoteTestServer {
    pub compiler: Compiler,
    pub target: TargetSelection,
}
impl Step for RemoteTestServer {
    type Output = PathBuf;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/remote-test-server")
    }

    fn make_run(run: RunConfig<'_>) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.builder.config.build);
        run.builder.ensure(RemoteTestServer { compiler, target: run.target });
    }

    /// Builds the server against the in-tree std (`Mode::ToolStd`) since it
    /// must run on the (possibly cross-compiled) `target`.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let tool = ToolBuild {
            compiler: self.compiler,
            target: self.target,
            tool: "remote-test-server",
            mode: Mode::ToolStd,
            path: "src/tools/remote-test-server",
            is_optional_tool: false,
            source_type: SourceType::InTree,
            extra_features: Vec::new(),
        };
        builder.ensure(tool).expect("expected to build -- essential tool")
    }
}
/// Step that builds the in-tree rustdoc binary.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Ord, PartialOrd)]
pub struct Rustdoc {
    /// The *target* compiler this rustdoc is being produced for (rustdoc is
    /// itself a compiler — see `make_run` below).
    /// This should only ever be 0 or 2.
    /// We sometimes want to reference the "bootstrap" rustdoc, which is why this option is here.
    pub compiler: Compiler,
}
impl Step for Rustdoc {
    type Output = PathBuf;
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        run.path("src/tools/rustdoc").path("src/librustdoc")
    }

    fn make_run(run: RunConfig<'_>) {
        run.builder.ensure(Rustdoc {
            // Note: this is somewhat unique in that we actually want a *target*
            // compiler here, because rustdoc *is* a compiler. We won't be using
            // this as the compiler to build with, but rather this is "what
            // compiler are we producing"?
            compiler: run.builder.compiler(run.builder.top_stage, run.target),
        });
    }

    /// Builds rustdoc for `self.compiler` and returns the path to the binary:
    /// either the snapshot rustdoc (stage 0), the copy installed into the
    /// sysroot `bin` dir (stage > 0), or the raw cargo output.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let target_compiler = self.compiler;
        if target_compiler.stage == 0 {
            if !target_compiler.is_snapshot(builder) {
                panic!("rustdoc in stage 0 must be snapshot rustdoc");
            }
            // Stage 0 rustdoc is the prebuilt binary shipped next to the
            // snapshot rustc; there is nothing to build.
            return builder.initial_rustc.with_file_name(exe("rustdoc", target_compiler.host));
        }
        let target = target_compiler.host;
        // Similar to `compile::Assemble`, build with the previous stage's compiler. Otherwise
        // we'd have stageN/bin/rustc and stageN/bin/rustdoc be effectively different stage
        // compilers, which isn't what we want. Rustdoc should be linked in the same way as the
        // rustc compiler it's paired with, so it must be built with the previous stage compiler.
        let build_compiler = builder.compiler(target_compiler.stage - 1, builder.config.build);

        // When using `download-rustc` and a stage0 build_compiler, copying rustc doesn't actually
        // build stage0 libstd (because the libstd in sysroot has the wrong ABI). Explicitly build
        // it.
        builder.ensure(compile::Std { compiler: build_compiler, target: target_compiler.host });
        builder.ensure(compile::Rustc { compiler: build_compiler, target: target_compiler.host });
        // NOTE: this implies that `download-rustc` is pretty useless when compiling with the stage0
        // compiler, since you do just as much work.
        if !builder.config.dry_run && builder.config.download_rustc && build_compiler.stage == 0 {
            println!(
                "warning: `download-rustc` does nothing when building stage1 tools; consider using `--stage 2` instead"
            );
        }

        // The presence of `target_compiler` ensures that the necessary libraries (codegen backends,
        // compiler libraries, ...) are built. Rustdoc does not require the presence of any
        // libraries within sysroot_libdir (i.e., rustlib), though doctests may want it (since
        // they'll be linked to those libraries). As such, don't explicitly `ensure` any additional
        // libraries here. The intuition here is that if we've built a compiler, we should be able
        // to build rustdoc.
        //
        let mut features = Vec::new();
        if builder.config.jemalloc {
            features.push("jemalloc".to_string());
        }

        let cargo = prepare_tool_cargo(
            builder,
            build_compiler,
            Mode::ToolRustc,
            target,
            "build",
            "src/tools/rustdoc",
            SourceType::InTree,
            features.as_slice(),
        );

        builder.info(&format!(
            "Building rustdoc for stage{} ({})",
            target_compiler.stage, target_compiler.host
        ));
        builder.run(&mut cargo.into());

        // Cargo adds a number of paths to the dylib search path on windows, which results in
        // the wrong rustdoc being executed. To avoid the conflicting rustdocs, we name the "tool"
        // rustdoc a different name.
        let tool_rustdoc = builder
            .cargo_out(build_compiler, Mode::ToolRustc, target)
            .join(exe("rustdoc_tool_binary", target_compiler.host));

        // don't create a stage0-sysroot/bin directory.
        if target_compiler.stage > 0 {
            // Install the freshly built rustdoc as `bin/rustdoc` in the
            // sysroot of the compiler it is paired with.
            let sysroot = builder.sysroot(target_compiler);
            let bindir = sysroot.join("bin");
            t!(fs::create_dir_all(&bindir));
            let bin_rustdoc = bindir.join(exe("rustdoc", target_compiler.host));
            let _ = fs::remove_file(&bin_rustdoc);
            builder.copy(&tool_rustdoc, &bin_rustdoc);
            bin_rustdoc
        } else {
            tool_rustdoc
        }
    }
}
/// Step that builds the `cargo` binary (from the cargo submodule) and its
/// optional credential-helper binaries.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Cargo {
    pub compiler: Compiler,
    pub target: TargetSelection,
}
impl Step for Cargo {
    type Output = PathBuf;
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
        let builder = run.builder;
        run.path("src/tools/cargo").default_condition(builder.config.extended)
    }

    fn make_run(run: RunConfig<'_>) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.builder.config.build);
        run.builder.ensure(Cargo { compiler, target: run.target });
    }

    /// Builds cargo itself (mandatory), then the platform-appropriate
    /// credential helpers (best-effort), and returns the path to the cargo
    /// binary.
    fn run(self, builder: &Builder<'_>) -> PathBuf {
        let cargo_bin_path = builder
            .ensure(ToolBuild {
                compiler: self.compiler,
                target: self.target,
                tool: "cargo",
                mode: Mode::ToolRustc,
                path: "src/tools/cargo",
                is_optional_tool: false,
                source_type: SourceType::Submodule,
                extra_features: Vec::new(),
            })
            .expect("expected to build -- essential tool");

        // Platform-specific helpers first, then the cross-platform one, so the
        // build order matches the historical behavior.
        let mut credential_helpers: Vec<(&str, &str)> = Vec::new();
        if self.target.contains("windows") {
            credential_helpers.push((
                "cargo-credential-wincred",
                "src/tools/cargo/crates/credential/cargo-credential-wincred",
            ));
        }
        if self.target.contains("apple-darwin") {
            credential_helpers.push((
                "cargo-credential-macos-keychain",
                "src/tools/cargo/crates/credential/cargo-credential-macos-keychain",
            ));
        }
        credential_helpers.push((
            "cargo-credential-1password",
            "src/tools/cargo/crates/credential/cargo-credential-1password",
        ));

        for (name, path) in credential_helpers {
            // These credential helpers are currently experimental.
            // Any build failures will be ignored.
            let _ = builder.ensure(ToolBuild {
                compiler: self.compiler,
                target: self.target,
                tool: name,
                mode: Mode::ToolRustc,
                path,
                is_optional_tool: true,
                source_type: SourceType::Submodule,
                extra_features: Vec::new(),
            });
        }

        cargo_bin_path
    }
}
// Declares the "extended" tools (clippy, miri, rustfmt, ...): distributable
// tools built against the in-tree rustc (`Mode::ToolRustc`) whose build is
// allowed to fail (they return `Option<PathBuf>` and feed toolstate).
//
// Per entry: step name, toolstate field, source path, produced binary name,
// `stable = ...` (whether to build by default outside nightly),
// optionally `in_tree = ...` (in-tree sources vs. submodule), and a block of
// extra build steps run before the tool itself (`$extra_deps`).
macro_rules! tool_extended {
    (($sel:ident, $builder:ident),
    $($name:ident,
    $toolstate:ident,
    $path:expr,
    $tool_name:expr,
    stable = $stable:expr,
    $(in_tree = $in_tree:expr,)*
    $extra_deps:block;)+) => {
        $(
            #[derive(Debug, Clone, Hash, PartialEq, Eq)]
            pub struct $name {
                pub compiler: Compiler,
                pub target: TargetSelection,
                pub extra_features: Vec<String>,
            }

            impl Step for $name {
                type Output = Option<PathBuf>;
                const DEFAULT: bool = true; // Overwritten below
                const ONLY_HOSTS: bool = true;

                fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
                    let builder = run.builder;
                    run.path($path).default_condition(
                        builder.config.extended
                            && builder.config.tools.as_ref().map_or(
                                // By default, on nightly/dev enable all tools, else only
                                // build stable tools.
                                $stable || builder.build.unstable_features(),
                                // If `tools` is set, search list for this tool.
                                |tools| {
                                    tools.iter().any(|tool| match tool.as_ref() {
                                        // The user-facing name "clippy" maps to the
                                        // `clippy-driver` binary.
                                        "clippy" => $tool_name == "clippy-driver",
                                        x => $tool_name == x,
                                    })
                                }),
                    )
                }

                fn make_run(run: RunConfig<'_>) {
                    run.builder.ensure($name {
                        compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build),
                        target: run.target,
                        extra_features: Vec::new(),
                    });
                }

                #[allow(unused_mut)]
                fn run(mut $sel, $builder: &Builder<'_>) -> Option<PathBuf> {
                    // Run any extra dependency steps declared for this tool.
                    $extra_deps
                    $builder.ensure(ToolBuild {
                        compiler: $sel.compiler,
                        target: $sel.target,
                        tool: $tool_name,
                        mode: Mode::ToolRustc,
                        path: $path,
                        extra_features: $sel.extra_features,
                        is_optional_tool: true,
                        source_type: if false $(|| $in_tree)* {
                            SourceType::InTree
                        } else {
                            SourceType::Submodule
                        },
                    })
                }
            }
        )+
    }
}
// Note: tools need to be also added to `Builder::get_step_descriptions` in `builder.rs`
// to make `./x.py build <tool>` work.
tool_extended!((self, builder),
    Cargofmt, rustfmt, "src/tools/rustfmt", "cargo-fmt", stable=true, {};
    CargoClippy, clippy, "src/tools/clippy", "cargo-clippy", stable=true, in_tree=true, {};
    Clippy, clippy, "src/tools/clippy", "clippy-driver", stable=true, in_tree=true, {};
    Miri, miri, "src/tools/miri", "miri", stable=false, {};
    CargoMiri, miri, "src/tools/miri/cargo-miri", "cargo-miri", stable=false, {};
    Rls, rls, "src/tools/rls", "rls", stable=true, {
        // RLS bundles clippy support: build clippy-driver first and turn on
        // the rls `clippy` feature.
        builder.ensure(Clippy {
            compiler: self.compiler,
            target: self.target,
            extra_features: Vec::new(),
        });
        self.extra_features.push("clippy".to_owned());
    };
    RustDemangler, rust_demangler, "src/tools/rust-demangler", "rust-demangler", stable=false, in_tree=true, {};
    Rustfmt, rustfmt, "src/tools/rustfmt", "rustfmt", stable=true, {};
    RustAnalyzer, rust_analyzer, "src/tools/rust-analyzer/crates/rust-analyzer", "rust-analyzer", stable=false, {};
);
impl<'a> Builder<'a> {
    /// Gets a `Command` which is ready to run `tool` in `stage` built for
    /// `host`.
    pub fn tool_cmd(&self, tool: Tool) -> Command {
        let mut cmd = Command::new(self.tool_exe(tool));
        let compiler = self.compiler(0, self.config.build);
        let host = &compiler.host;
        // Prepares the `cmd` provided to be able to run the `compiler` provided.
        //
        // Notably this munges the dynamic library lookup path to point to the
        // right location to run `compiler`.
        let mut lib_paths: Vec<PathBuf> = vec![
            self.build.rustc_snapshot_libdir(),
            self.cargo_out(compiler, Mode::ToolBootstrap, *host).join("deps"),
        ];

        // On MSVC a tool may invoke a C compiler (e.g., compiletest in run-make
        // mode) and that C compiler may need some extra PATH modification. Do
        // so here.
        if compiler.host.contains("msvc") {
            let curpaths = env::var_os("PATH").unwrap_or_default();
            let curpaths = env::split_paths(&curpaths).collect::<Vec<_>>();
            for &(ref k, ref v) in self.cc[&compiler.host].env() {
                if k == "PATH" {
                    // Only add entries not already present on the current PATH.
                    lib_paths.extend(env::split_paths(v).filter(|p| !curpaths.contains(p)));
                }
            }
        }
        add_dylib_path(lib_paths, &mut cmd);

        // Provide a RUSTC for this command to use.
        cmd.env("RUSTC", &self.initial_rustc);
        cmd
    }
}
|
//! This module provides functions to deconstruct and reconstruct patterns into a constructor
//! applied to some fields. This is used by the `_match` module to compute pattern
//! usefulness/exhaustiveness.
use self::Constructor::*;
use self::SliceKind::*;
use super::compare_const_vals;
use super::usefulness::{MatchCheckCtxt, PatCtxt};
use super::{FieldPat, Pat, PatKind, PatRange};
use rustc_data_structures::captures::Captures;
use rustc_index::vec::Idx;
use rustc_attr::{SignedInt, UnsignedInt};
use rustc_hir::def_id::DefId;
use rustc_hir::{HirId, RangeEnd};
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::mir::Field;
use rustc_middle::ty::layout::IntegerExt;
use rustc_middle::ty::{self, Const, Ty, TyCtxt};
use rustc_session::lint;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::{Integer, Size, VariantIdx};
use smallvec::{smallvec, SmallVec};
use std::cmp::{self, max, min, Ordering};
use std::iter::IntoIterator;
use std::ops::RangeInclusive;
/// An inclusive interval, used for precise integer exhaustiveness checking.
/// `IntRange`s always store a contiguous range. This means that values are
/// encoded such that `0` encodes the minimum value for the integer,
/// regardless of the signedness.
/// For example, the pattern `-128..=127i8` is encoded as `0..=255`.
/// This makes comparisons and arithmetic on interval endpoints much more
/// straightforward. See `signed_bias` for details.
///
/// `IntRange` is never used to encode an empty range or a "range" that wraps
/// around the (offset) space: i.e., `range.lo <= range.hi`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(super) struct IntRange {
    // Endpoints are stored bias-encoded (see `signed_bias`): always unsigned,
    // with `lo <= hi`.
    range: RangeInclusive<u128>,
}
impl IntRange {
    /// Whether `ty` is a type we encode as an integer range (`char` and
    /// `bool` included, since both have a total integer encoding).
    #[inline]
    fn is_integral(ty: Ty<'_>) -> bool {
        matches!(ty.kind(), ty::Char | ty::Int(_) | ty::Uint(_) | ty::Bool)
    }

    /// True if the range contains exactly one value.
    fn is_singleton(&self) -> bool {
        self.range.start() == self.range.end()
    }

    /// Returns the (bias-encoded) endpoints `(lo, hi)` of this inclusive range.
    fn boundaries(&self) -> (u128, u128) {
        (*self.range.start(), *self.range.end())
    }

    /// For an integral type, returns its size and the bias used to shift
    /// signed values into the unsigned encoding (see `signed_bias`); `None`
    /// for non-integral types.
    #[inline]
    fn integral_size_and_signed_bias(tcx: TyCtxt<'_>, ty: Ty<'_>) -> Option<(Size, u128)> {
        match *ty.kind() {
            ty::Bool => Some((Size::from_bytes(1), 0)),
            ty::Char => Some((Size::from_bytes(4), 0)),
            ty::Int(ity) => {
                let size = Integer::from_attr(&tcx, SignedInt(ity)).size();
                // Bias for an n-bit signed int is 2^(n-1).
                Some((size, 1u128 << (size.bits() as u128 - 1)))
            }
            ty::Uint(uty) => Some((Integer::from_attr(&tcx, UnsignedInt(uty)).size(), 0)),
            _ => None,
        }
    }

    /// Builds a singleton range from a constant pattern, or `None` if the
    /// constant's type is not integral or its bits cannot be evaluated.
    #[inline]
    fn from_const<'tcx>(
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        value: &Const<'tcx>,
    ) -> Option<IntRange> {
        if let Some((target_size, bias)) = Self::integral_size_and_signed_bias(tcx, value.ty) {
            let ty = value.ty;
            let val = (|| {
                if let ty::ConstKind::Value(ConstValue::Scalar(scalar)) = value.val {
                    // For this specific pattern we can skip a lot of effort and go
                    // straight to the result, after doing a bit of checking. (We
                    // could remove this branch and just fall through, which
                    // is more general but much slower.)
                    if let Ok(bits) = scalar.to_bits_or_ptr(target_size, &tcx) {
                        return Some(bits);
                    }
                }
                // This is a more general form of the previous case.
                value.try_eval_bits(tcx, param_env, ty)
            })()?;
            // XOR with the bias maps the raw bits into the unsigned encoding.
            let val = val ^ bias;
            Some(IntRange { range: val..=val })
        } else {
            None
        }
    }

    /// Builds a range from raw endpoint bits and a range-end kind, or `None`
    /// if `ty` is not integral.
    #[inline]
    fn from_range<'tcx>(
        tcx: TyCtxt<'tcx>,
        lo: u128,
        hi: u128,
        ty: Ty<'tcx>,
        end: &RangeEnd,
    ) -> Option<IntRange> {
        if Self::is_integral(ty) {
            // Perform a shift if the underlying types are signed,
            // which makes the interval arithmetic simpler.
            let bias = IntRange::signed_bias(tcx, ty);
            let (lo, hi) = (lo ^ bias, hi ^ bias);
            let offset = (*end == RangeEnd::Excluded) as u128;
            if lo > hi || (lo == hi && *end == RangeEnd::Excluded) {
                // This should have been caught earlier by E0030.
                bug!("malformed range pattern: {}..={}", lo, (hi - offset));
            }
            Some(IntRange { range: lo..=(hi - offset) })
        } else {
            None
        }
    }

    // The return value of `signed_bias` should be XORed with an endpoint to encode/decode it.
    fn signed_bias(tcx: TyCtxt<'_>, ty: Ty<'_>) -> u128 {
        match *ty.kind() {
            ty::Int(ity) => {
                let bits = Integer::from_attr(&tcx, SignedInt(ity)).size().bits() as u128;
                // Flipping the sign bit (2^(bits-1)) turns two's-complement
                // order into unsigned order.
                1u128 << (bits - 1)
            }
            _ => 0,
        }
    }

    /// Whether `self` is entirely contained in `other`.
    fn is_subrange(&self, other: &Self) -> bool {
        other.range.start() <= self.range.start() && self.range.end() <= other.range.end()
    }

    /// Intersection of the two ranges, or `None` if they are disjoint.
    fn intersection(&self, other: &Self) -> Option<Self> {
        let (lo, hi) = self.boundaries();
        let (other_lo, other_hi) = other.boundaries();
        if lo <= other_hi && other_lo <= hi {
            Some(IntRange { range: max(lo, other_lo)..=min(hi, other_hi) })
        } else {
            None
        }
    }

    /// Whether the two ranges overlap in exactly one endpoint — the shape that
    /// usually indicates an off-by-one in user code (see `OVERLAPPING_PATTERNS`).
    fn suspicious_intersection(&self, other: &Self) -> bool {
        // `false` in the following cases:
        // 1 ---- // 1 ---------- // 1 ---- // 1 ----
        // 2 ---------- // 2 ---- // 2 ---- // 2 ----
        //
        // The following are currently `false`, but could be `true` in the future (#64007):
        // 1 --------- // 1 ---------
        // 2 ---------- // 2 ----------
        //
        // `true` in the following cases:
        // 1 ------- // 1 -------
        // 2 -------- // 2 -------
        let (lo, hi) = self.boundaries();
        let (other_lo, other_hi) = other.boundaries();
        lo == other_hi || hi == other_lo
    }

    /// Converts this (encoded) range back into a user-visible pattern:
    /// a constant for a singleton, an inclusive range pattern otherwise.
    fn to_pat<'tcx>(&self, tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Pat<'tcx> {
        let (lo, hi) = self.boundaries();

        // Decode the endpoints back into the type's real value space.
        let bias = IntRange::signed_bias(tcx, ty);
        let (lo, hi) = (lo ^ bias, hi ^ bias);

        let env = ty::ParamEnv::empty().and(ty);
        let lo_const = ty::Const::from_bits(tcx, lo, env);
        let hi_const = ty::Const::from_bits(tcx, hi, env);

        let kind = if lo == hi {
            PatKind::Constant { value: lo_const }
        } else {
            PatKind::Range(PatRange { lo: lo_const, hi: hi_const, end: RangeEnd::Included })
        };

        Pat { ty, span: DUMMY_SP, kind: Box::new(kind) }
    }

    /// For exhaustive integer matching, some constructors are grouped within other constructors
    /// (namely integer typed values are grouped within ranges). However, when specialising these
    /// constructors, we want to be specialising for the underlying constructors (the integers), not
    /// the groups (the ranges). Thus we need to split the groups up. Splitting them up naïvely would
    /// mean creating a separate constructor for every single value in the range, which is clearly
    /// impractical. However, observe that for some ranges of integers, the specialisation will be
    /// identical across all values in that range (i.e., there are equivalence classes of ranges of
    /// constructors based on their `U(S(c, P), S(c, p))` outcome). These classes are grouped by
    /// the patterns that apply to them (in the matrix `P`). We can split the range whenever the
    /// patterns that apply to that range (specifically: the patterns that *intersect* with that range)
    /// change.
    /// Our solution, therefore, is to split the range constructor into subranges at every single point
    /// the group of intersecting patterns changes (using the method described below).
    /// And voilà! We're testing precisely those ranges that we need to, without any exhaustive matching
    /// on actual integers. The nice thing about this is that the number of subranges is linear in the
    /// number of rows in the matrix (i.e., the number of cases in the `match` statement), so we don't
    /// need to be worried about matching over gargantuan ranges.
    ///
    /// Essentially, given the first column of a matrix representing ranges, looking like the following:
    ///
    /// |------| |----------| |-------| ||
    /// |-------| |-------| |----| ||
    /// |---------|
    ///
    /// We split the ranges up into equivalence classes so the ranges are no longer overlapping:
    ///
    /// |--|--|||-||||--||---|||-------| |-|||| ||
    ///
    /// The logic for determining how to split the ranges is fairly straightforward: we calculate
    /// boundaries for each interval range, sort them, then create constructors for each new interval
    /// between every pair of boundary points. (This essentially sums up to performing the intuitive
    /// merging operation depicted above.)
    fn split<'p, 'tcx>(
        &self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        hir_id: Option<HirId>,
    ) -> SmallVec<[Constructor<'tcx>; 1]> {
        /// Represents a border between 2 integers. Because the intervals spanning borders
        /// must be able to cover every integer, we need to be able to represent
        /// 2^128 + 1 such borders.
        #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
        enum Border {
            JustBefore(u128),
            AfterMax,
        }

        // A function for extracting the borders of an integer interval.
        fn range_borders(r: IntRange) -> impl Iterator<Item = Border> {
            let (lo, hi) = r.range.into_inner();
            let from = Border::JustBefore(lo);
            // `hi` is inclusive, so the closing border is just past it; u128::MAX
            // has no successor, hence the dedicated `AfterMax` border.
            let to = match hi.checked_add(1) {
                Some(m) => Border::JustBefore(m),
                None => Border::AfterMax,
            };
            vec![from, to].into_iter()
        }

        // Collect the span and range of all the intersecting ranges to lint on likely
        // incorrect range patterns. (#63987)
        let mut overlaps = vec![];
        let row_len = pcx.matrix.column_count().unwrap_or(0);
        // `borders` is the set of borders between equivalence classes: each equivalence
        // class lies between 2 borders.
        let row_borders = pcx
            .matrix
            .head_ctors_and_spans(pcx.cx)
            .filter_map(|(ctor, span)| Some((ctor.as_int_range()?, span)))
            .filter_map(|(range, span)| {
                let intersection = self.intersection(&range);
                let should_lint = self.suspicious_intersection(&range);
                if let (Some(range), 1, true) = (&intersection, row_len, should_lint) {
                    // FIXME: for now, only check for overlapping ranges on simple range
                    // patterns. Otherwise with the current logic the following is detected
                    // as overlapping:
                    // ```
                    // match (0u8, true) {
                    // (0 ..= 125, false) => {}
                    // (125 ..= 255, true) => {}
                    // _ => {}
                    // }
                    // ```
                    overlaps.push((range.clone(), span));
                }
                intersection
            })
            .flat_map(range_borders);
        let self_borders = range_borders(self.clone());
        let mut borders: Vec<_> = row_borders.chain(self_borders).collect();
        borders.sort_unstable();

        self.lint_overlapping_patterns(pcx, hir_id, overlaps);

        // We're going to iterate through every adjacent pair of borders, making sure that
        // each represents an interval of nonnegative length, and convert each such
        // interval into a constructor.
        borders
            .array_windows()
            .filter_map(|&pair| match pair {
                [Border::JustBefore(n), Border::JustBefore(m)] => {
                    if n < m {
                        Some(n..=(m - 1))
                    } else {
                        None
                    }
                }
                [Border::JustBefore(n), Border::AfterMax] => Some(n..=u128::MAX),
                [Border::AfterMax, _] => None,
            })
            .map(|range| IntRange { range })
            // Wrap each struct into the `Constructor::IntRange` variant
            // (imported via `use self::Constructor::*`).
            .map(IntRange)
            .collect()
    }

    /// Emits the `OVERLAPPING_PATTERNS` lint for the ranges collected by
    /// `split`, if any (and if we have a `HirId` to attach the lint to).
    fn lint_overlapping_patterns(
        &self,
        pcx: PatCtxt<'_, '_, '_>,
        hir_id: Option<HirId>,
        overlaps: Vec<(IntRange, Span)>,
    ) {
        if let (true, Some(hir_id)) = (!overlaps.is_empty(), hir_id) {
            pcx.cx.tcx.struct_span_lint_hir(
                lint::builtin::OVERLAPPING_PATTERNS,
                hir_id,
                pcx.span,
                |lint| {
                    let mut err = lint.build("multiple patterns covering the same range");
                    err.span_label(pcx.span, "overlapping patterns");
                    for (int_range, span) in overlaps {
                        // Use the real type for user display of the ranges:
                        err.span_label(
                            span,
                            &format!(
                                "this range overlaps on `{}`",
                                int_range.to_pat(pcx.cx.tcx, pcx.ty),
                            ),
                        );
                    }
                    err.emit();
                },
            );
        }
    }

    /// See `Constructor::is_covered_by`
    fn is_covered_by(&self, other: &Self) -> bool {
        if self.intersection(other).is_some() {
            // Constructor splitting should ensure that all intersections we encounter are actually
            // inclusions.
            assert!(self.is_subrange(other));
            true
        } else {
            false
        }
    }
}
/// The shape of a slice/array pattern: fixed length, or open-ended with `..`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum SliceKind {
    /// Patterns of length `n` (`[x, y]`).
    FixedLen(u64),
    /// Patterns using the `..` notation (`[x, .., y]`).
    /// Captures any array constructor of `length >= i + j`.
    /// In the case where `array_len` is `Some(_)`,
    /// this indicates that we only care about the first `i` and the last `j` values of the array,
    /// and everything in between is a wildcard `_`.
    VarLen(u64, u64),
}
impl SliceKind {
    /// Number of element patterns this kind explicitly mentions
    /// (prefix + suffix for the variable-length case).
    fn arity(self) -> u64 {
        match self {
            FixedLen(length) => length,
            VarLen(prefix, suffix) => prefix + suffix,
        }
    }

    /// Whether this pattern includes patterns of length `other_len`.
    fn covers_length(self, other_len: u64) -> bool {
        let mentioned = self.arity();
        match self {
            // A fixed-length pattern matches exactly its own length.
            FixedLen(_) => mentioned == other_len,
            // `[x, .., y]` matches any length long enough for prefix + suffix.
            VarLen(..) => mentioned <= other_len,
        }
    }
}
/// A constructor for array and slice patterns.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(super) struct Slice {
    /// `None` if the matched value is a slice, `Some(n)` if it is an array of size `n`.
    array_len: Option<u64>,
    /// The kind of pattern it is: fixed-length `[x, y]` or variable length `[x, .., y]`.
    kind: SliceKind,
}
impl Slice {
/// Creates a slice constructor, normalizing a `VarLen` whose `..` cannot
/// match anything (prefix + suffix fills a known array length) into the
/// equivalent `FixedLen`.
fn new(array_len: Option<u64>, kind: SliceKind) -> Self {
    if let (Some(len), VarLen(prefix, suffix)) = (array_len, kind) {
        // If the middle `..` is empty, we effectively have a fixed-length pattern.
        if prefix + suffix >= len {
            return Slice { array_len, kind: FixedLen(len) };
        }
    }
    Slice { array_len, kind }
}
fn arity(self) -> u64 {
self.kind.arity()
}
/// The exhaustiveness-checking paper does not include any details on
/// checking variable-length slice patterns. However, they may be
/// matched by an infinite collection of fixed-length array patterns.
///
/// Checking the infinite set directly would take an infinite amount
/// of time. However, it turns out that for each finite set of
/// patterns `P`, all sufficiently large array lengths are equivalent:
///
/// Each slice `s` with a "sufficiently-large" length `l ≥ L` that applies
/// to exactly the subset `Pₜ` of `P` can be transformed to a slice
/// `sₘ` for each sufficiently-large length `m` that applies to exactly
/// the same subset of `P`.
///
/// Because of that, each witness for reachability-checking of one
/// of the sufficiently-large lengths can be transformed to an
/// equally-valid witness of any other length, so we only have
/// to check slices of the "minimal sufficiently-large length"
/// and less.
///
/// Note that the fact that there is a *single* `sₘ` for each `m`
/// not depending on the specific pattern in `P` is important: if
/// you look at the pair of patterns
/// `[true, ..]`
/// `[.., false]`
/// Then any slice of length ≥1 that matches one of these two
/// patterns can be trivially turned to a slice of any
/// other length ≥1 that matches them and vice-versa,
/// but the slice of length 2 `[false, true]` that matches neither
/// of these patterns can't be turned to a slice from length 1 that
/// matches neither of these patterns, so we have to consider
/// slices from length 2 there.
///
/// Now, to see that that length exists and find it, observe that slice
/// patterns are either "fixed-length" patterns (`[_, _, _]`) or
/// "variable-length" patterns (`[_, .., _]`).
///
/// For fixed-length patterns, all slices with lengths *longer* than
/// the pattern's length have the same outcome (of not matching), so
/// as long as `L` is greater than the pattern's length we can pick
/// any `sₘ` from that length and get the same result.
///
/// For variable-length patterns, the situation is more complicated,
/// because as seen above the precise value of `sₘ` matters.
///
/// However, for each variable-length pattern `p` with a prefix of length
/// `plₚ` and suffix of length `slₚ`, only the first `plₚ` and the last
/// `slₚ` elements are examined.
///
/// Therefore, as long as `L` is positive (to avoid concerns about empty
/// types), all elements after the maximum prefix length and before
/// the maximum suffix length are not examined by any variable-length
/// pattern, and therefore can be added/removed without affecting
/// them - creating equivalent patterns from any sufficiently-large
/// length.
///
/// Of course, if fixed-length patterns exist, we must be sure
/// that our length is large enough to miss them all, so
/// we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`
///
/// for example, with the above pair of patterns, all elements
/// but the first and last can be added/removed, so any
/// witness of length ≥2 (say, `[false, false, true]`) can be
/// turned to a witness from any other length ≥2.
fn split<'p, 'tcx>(self, pcx: PatCtxt<'_, 'p, 'tcx>) -> SmallVec<[Constructor<'tcx>; 1]> {
let (self_prefix, self_suffix) = match self.kind {
VarLen(self_prefix, self_suffix) => (self_prefix, self_suffix),
_ => return smallvec![Slice(self)],
};
let head_ctors = pcx.matrix.head_ctors(pcx.cx).filter(|c| !c.is_wildcard());
let mut max_prefix_len = self_prefix;
let mut max_suffix_len = self_suffix;
let mut max_fixed_len = 0;
for ctor in head_ctors {
if let Slice(slice) = ctor {
match slice.kind {
FixedLen(len) => {
max_fixed_len = cmp::max(max_fixed_len, len);
}
VarLen(prefix, suffix) => {
max_prefix_len = cmp::max(max_prefix_len, prefix);
max_suffix_len = cmp::max(max_suffix_len, suffix);
}
}
} else {
bug!("unexpected ctor for slice type: {:?}", ctor);
}
}
// For diagnostics, we keep the prefix and suffix lengths separate, so in the case
// where `max_fixed_len + 1` is the largest, we adapt `max_prefix_len` accordingly,
// so that `L = max_prefix_len + max_suffix_len`.
if max_fixed_len + 1 >= max_prefix_len + max_suffix_len {
// The subtraction can't overflow thanks to the above check.
// The new `max_prefix_len` is also guaranteed to be larger than its previous
// value.
max_prefix_len = max_fixed_len + 1 - max_suffix_len;
}
let final_slice = VarLen(max_prefix_len, max_suffix_len);
let final_slice = Slice::new(self.array_len, final_slice);
match self.array_len {
Some(_) => smallvec![Slice(final_slice)],
None => {
// `self` originally covered the range `(self.arity()..infinity)`. We split that
// range into two: lengths smaller than `final_slice.arity()` are treated
// independently as fixed-lengths slices, and lengths above are captured by
// `final_slice`.
let smaller_lengths = (self.arity()..final_slice.arity()).map(FixedLen);
smaller_lengths
.map(|kind| Slice::new(self.array_len, kind))
.chain(Some(final_slice))
.map(Slice)
.collect()
}
}
}
/// See `Constructor::is_covered_by`
fn is_covered_by(self, other: Self) -> bool {
other.kind.covers_length(self.arity())
}
}
/// A value can be decomposed into a constructor applied to some fields. This struct represents
/// the constructor. See also `Fields`.
///
/// `pat_constructor` retrieves the constructor corresponding to a pattern.
/// `specialize_constructor` returns the list of fields corresponding to a pattern, given a
/// constructor. `Constructor::apply` reconstructs the pattern from a pair of `Constructor` and
/// `Fields`.
#[derive(Clone, Debug, PartialEq)]
pub(super) enum Constructor<'tcx> {
    /// The constructor for patterns that have a single constructor, like tuples, struct patterns
    /// and fixed-length arrays.
    Single,
    /// Enum variants.
    Variant(DefId),
    /// Ranges of integer literal values (`2`, `2..=5` or `2..5`).
    IntRange(IntRange),
    /// Ranges of floating-point literal values (`2.0..=5.2`).
    FloatRange(&'tcx ty::Const<'tcx>, &'tcx ty::Const<'tcx>, RangeEnd),
    /// String literals. Strings are not quite the same as `&[u8]` so we treat them separately.
    Str(&'tcx ty::Const<'tcx>),
    /// Array and slice patterns.
    Slice(Slice),
    /// Constants that must not be matched structurally. They are treated as black
    /// boxes for the purposes of exhaustiveness: we must not inspect them, and they
    /// don't count towards making a match exhaustive.
    Opaque,
    /// Fake extra constructor for enums that aren't allowed to be matched exhaustively. Also used
    /// for those types for which we cannot list constructors explicitly, like `f64` and `str`.
    NonExhaustive,
    /// Wildcard pattern.
    Wildcard,
}
impl<'tcx> Constructor<'tcx> {
    /// Whether this is the `Wildcard` constructor.
    pub(super) fn is_wildcard(&self) -> bool {
        matches!(self, Wildcard)
    }

    /// Returns the contained range if this is the `IntRange` constructor, `None` otherwise.
    fn as_int_range(&self) -> Option<&IntRange> {
        match self {
            IntRange(range) => Some(range),
            _ => None,
        }
    }

    /// Returns the contained slice (by value; `Slice` is `Copy`) if this is the `Slice`
    /// constructor, `None` otherwise.
    fn as_slice(&self) -> Option<Slice> {
        match self {
            Slice(slice) => Some(*slice),
            _ => None,
        }
    }

    /// The index of the variant of `adt` that this constructor selects. Only meaningful for
    /// `Variant` (enums) and `Single` (structs/unions, which have exactly one variant).
    fn variant_index_for_adt(&self, adt: &'tcx ty::AdtDef) -> VariantIdx {
        match *self {
            Variant(id) => adt.variant_index_with_id(id),
            Single => {
                // `Single` only arises for non-enum ADTs, whose sole variant is at index 0.
                assert!(!adt.is_enum());
                VariantIdx::new(0)
            }
            _ => bug!("bad constructor {:?} for adt {:?}", self, adt),
        }
    }

    /// Determines the constructor that the given pattern can be specialized to.
    pub(super) fn from_pat<'p>(cx: &MatchCheckCtxt<'p, 'tcx>, pat: &'p Pat<'tcx>) -> Self {
        match pat.kind.as_ref() {
            PatKind::AscribeUserType { .. } => bug!(), // Handled by `expand_pattern`
            PatKind::Binding { .. } | PatKind::Wild => Wildcard,
            PatKind::Leaf { .. } | PatKind::Deref { .. } => Single,
            &PatKind::Variant { adt_def, variant_index, .. } => {
                Variant(adt_def.variants[variant_index].def_id)
            }
            PatKind::Constant { value } => {
                if let Some(int_range) = IntRange::from_const(cx.tcx, cx.param_env, value) {
                    IntRange(int_range)
                } else {
                    match pat.ty.kind() {
                        ty::Float(_) => FloatRange(value, value, RangeEnd::Included),
                        // In `expand_pattern`, we convert string literals to `&CONST` patterns with
                        // `CONST` a pattern of type `str`. In truth this contains a constant of type
                        // `&str`.
                        ty::Str => Str(value),
                        // All constants that can be structurally matched have already been expanded
                        // into the corresponding `Pat`s by `const_to_pat`. Constants that remain are
                        // opaque.
                        _ => Opaque,
                    }
                }
            }
            &PatKind::Range(PatRange { lo, hi, end }) => {
                let ty = lo.ty;
                if let Some(int_range) = IntRange::from_range(
                    cx.tcx,
                    lo.eval_bits(cx.tcx, cx.param_env, lo.ty),
                    hi.eval_bits(cx.tcx, cx.param_env, hi.ty),
                    ty,
                    &end,
                ) {
                    IntRange(int_range)
                } else {
                    // Non-integer ranges, i.e. floating-point ones.
                    FloatRange(lo, hi, end)
                }
            }
            PatKind::Array { prefix, slice, suffix } | PatKind::Slice { prefix, slice, suffix } => {
                // Arrays have a statically-known length; slices do not.
                let array_len = match pat.ty.kind() {
                    ty::Array(_, length) => Some(length.eval_usize(cx.tcx, cx.param_env)),
                    ty::Slice(_) => None,
                    _ => span_bug!(pat.span, "bad ty {:?} for slice pattern", pat.ty),
                };
                let prefix = prefix.len() as u64;
                let suffix = suffix.len() as u64;
                let kind = if slice.is_some() {
                    // A `..` subslice pattern makes the pattern variable-length.
                    VarLen(prefix, suffix)
                } else {
                    FixedLen(prefix + suffix)
                };
                Slice(Slice::new(array_len, kind))
            }
            PatKind::Or { .. } => bug!("Or-pattern should have been expanded earlier on."),
        }
    }

    /// Some constructors (namely `Wildcard`, `IntRange` and `Slice`) actually stand for a set of actual
    /// constructors (like variants, integers or fixed-sized slices). When specializing for these
    /// constructors, we want to be specialising for the actual underlying constructors.
    /// Naively, we would simply return the list of constructors they correspond to. We instead are
    /// more clever: if there are constructors that we know will behave the same wrt the current
    /// matrix, we keep them grouped. For example, all slices of a sufficiently large length
    /// will either be all useful or all non-useful with a given matrix.
    ///
    /// See the branches for details on how the splitting is done.
    ///
    /// This function may discard some irrelevant constructors if this preserves behavior and
    /// diagnostics. Eg. for the `_` case, we ignore the constructors already present in the
    /// matrix, unless all of them are.
    ///
    /// `hir_id` is `None` when we're evaluating the wildcard pattern. In that case we do not want
    /// to lint for overlapping ranges.
    pub(super) fn split<'p>(
        &self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        hir_id: Option<HirId>,
    ) -> SmallVec<[Self; 1]> {
        debug!("Constructor::split({:#?}, {:#?})", self, pcx.matrix);
        match self {
            Wildcard => Constructor::split_wildcard(pcx),
            // Fast-track if the range is trivial. In particular, we don't do the overlapping
            // ranges check.
            IntRange(ctor_range) if !ctor_range.is_singleton() => ctor_range.split(pcx, hir_id),
            Slice(slice @ Slice { kind: VarLen(..), .. }) => slice.split(pcx),
            // Any other constructor can be used unchanged.
            _ => smallvec![self.clone()],
        }
    }

    /// For wildcards, there are two groups of constructors: there are the constructors actually
    /// present in the matrix (`head_ctors`), and the constructors not present (`missing_ctors`).
    /// Two constructors that are not in the matrix will either both be caught (by a wildcard), or
    /// both not be caught. Therefore we can keep the missing constructors grouped together.
    fn split_wildcard<'p>(pcx: PatCtxt<'_, 'p, 'tcx>) -> SmallVec<[Self; 1]> {
        // Missing constructors are those that are not matched by any non-wildcard patterns in the
        // current column. We only fully construct them on-demand, because they're rarely used and
        // can be big.
        let missing_ctors = MissingConstructors::new(pcx);
        if missing_ctors.is_empty(pcx) {
            // All the constructors are present in the matrix, so we just go through them all.
            // We must also split them first.
            missing_ctors.all_ctors
        } else {
            // Some constructors are missing, thus we can specialize with the wildcard constructor,
            // which will stand for those constructors that are missing, and behaves like any of
            // them.
            smallvec![Wildcard]
        }
    }

    /// Returns whether `self` is covered by `other`, i.e. whether `self` is a subset of `other`.
    /// For the simple cases, this is simply checking for equality. For the "grouped" constructors,
    /// this checks for inclusion.
    pub(super) fn is_covered_by<'p>(&self, pcx: PatCtxt<'_, 'p, 'tcx>, other: &Self) -> bool {
        // This must be kept in sync with `is_covered_by_any`.
        match (self, other) {
            // Wildcards cover anything
            (_, Wildcard) => true,
            // Wildcards are only covered by wildcards
            (Wildcard, _) => false,
            (Single, Single) => true,
            (Variant(self_id), Variant(other_id)) => self_id == other_id,
            (IntRange(self_range), IntRange(other_range)) => self_range.is_covered_by(other_range),
            (
                FloatRange(self_from, self_to, self_end),
                FloatRange(other_from, other_to, other_end),
            ) => {
                // `self ⊆ other` iff `other_from <= self_from` and `self_to <= other_to`
                // (with the upper bound comparison adjusted for range-end inclusiveness).
                match (
                    compare_const_vals(pcx.cx.tcx, self_to, other_to, pcx.cx.param_env, pcx.ty),
                    compare_const_vals(pcx.cx.tcx, self_from, other_from, pcx.cx.param_env, pcx.ty),
                ) {
                    (Some(to), Some(from)) => {
                        (from == Ordering::Greater || from == Ordering::Equal)
                            && (to == Ordering::Less
                                || (other_end == self_end && to == Ordering::Equal))
                    }
                    _ => false,
                }
            }
            (Str(self_val), Str(other_val)) => {
                // FIXME: there's probably a more direct way of comparing for equality
                match compare_const_vals(pcx.cx.tcx, self_val, other_val, pcx.cx.param_env, pcx.ty)
                {
                    Some(comparison) => comparison == Ordering::Equal,
                    None => false,
                }
            }
            (Slice(self_slice), Slice(other_slice)) => self_slice.is_covered_by(*other_slice),
            // We are trying to inspect an opaque constant. Thus we skip the row.
            (Opaque, _) | (_, Opaque) => false,
            // Only a wildcard pattern can match the special extra constructor.
            (NonExhaustive, _) => false,
            _ => span_bug!(
                pcx.span,
                "trying to compare incompatible constructors {:?} and {:?}",
                self,
                other
            ),
        }
    }

    /// Faster version of `is_covered_by` when applied to many constructors. `used_ctors` is
    /// assumed to be built from `matrix.head_ctors()` with wildcards filtered out, and `self` is
    /// assumed to have been split from a wildcard.
    fn is_covered_by_any<'p>(
        &self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        used_ctors: &[Constructor<'tcx>],
    ) -> bool {
        if used_ctors.is_empty() {
            return false;
        }
        // This must be kept in sync with `is_covered_by`.
        match self {
            // If `self` is `Single`, `used_ctors` cannot contain anything else than `Single`s.
            Single => !used_ctors.is_empty(),
            Variant(_) => used_ctors.iter().any(|c| c == self),
            IntRange(range) => used_ctors
                .iter()
                .filter_map(|c| c.as_int_range())
                .any(|other| range.is_covered_by(other)),
            Slice(slice) => used_ctors
                .iter()
                .filter_map(|c| c.as_slice())
                .any(|other| slice.is_covered_by(other)),
            // This constructor is never covered by anything else
            NonExhaustive => false,
            // These cannot result from splitting a wildcard, so they should never occur here.
            Str(..) | FloatRange(..) | Opaque | Wildcard => {
                span_bug!(pcx.span, "found unexpected ctor in all_ctors: {:?}", self)
            }
        }
    }
}
/// This determines the set of all possible constructors of a pattern matching
/// values of type `left_ty`. For vectors, this would normally be an infinite set
/// but is instead bounded by the maximum fixed length of slice patterns in
/// the column of patterns being analyzed.
///
/// We make sure to omit constructors that are statically impossible. E.g., for
/// `Option<!>`, we do not include `Some(_)` in the returned list of constructors.
/// Invariant: this returns an empty `Vec` if and only if the type is uninhabited (as determined by
/// `cx.is_uninhabited()`).
fn all_constructors<'p, 'tcx>(pcx: PatCtxt<'_, 'p, 'tcx>) -> Vec<Constructor<'tcx>> {
    debug!("all_constructors({:?})", pcx.ty);
    let cx = pcx.cx;
    // Helper for building an inclusive integer-range constructor from raw bit values.
    let make_range = |start, end| {
        IntRange(
            // `unwrap()` is ok because we know the type is an integer.
            IntRange::from_range(cx.tcx, start, end, pcx.ty, &RangeEnd::Included).unwrap(),
        )
    };
    match pcx.ty.kind() {
        // `bool` is treated as the integer range `0..=1`.
        ty::Bool => vec![make_range(0, 1)],
        ty::Array(sub_ty, len) if len.try_eval_usize(cx.tcx, cx.param_env).is_some() => {
            let len = len.eval_usize(cx.tcx, cx.param_env);
            if len != 0 && cx.is_uninhabited(sub_ty) {
                // A non-empty array of an uninhabited type has no values at all.
                vec![]
            } else {
                vec![Slice(Slice::new(Some(len), VarLen(0, 0)))]
            }
        }
        // Treat arrays of a constant but unknown length like slices.
        ty::Array(sub_ty, _) | ty::Slice(sub_ty) => {
            // An uninhabited element type means only the empty slice is inhabited.
            let kind = if cx.is_uninhabited(sub_ty) { FixedLen(0) } else { VarLen(0, 0) };
            vec![Slice(Slice::new(None, kind))]
        }
        ty::Adt(def, substs) if def.is_enum() => {
            // If the enum is declared as `#[non_exhaustive]`, we treat it as if it had an
            // additional "unknown" constructor.
            // There is no point in enumerating all possible variants, because the user can't
            // actually match against them all themselves. So we always return only the fictitious
            // constructor.
            // E.g., in an example like:
            //
            // ```
            //     let err: io::ErrorKind = ...;
            //     match err {
            //         io::ErrorKind::NotFound => {},
            //     }
            // ```
            //
            // we don't want to show every possible IO error, but instead have only `_` as the
            // witness.
            let is_declared_nonexhaustive = cx.is_foreign_non_exhaustive_enum(pcx.ty);
            // If `exhaustive_patterns` is disabled and our scrutinee is an empty enum, we treat it
            // as though it had an "unknown" constructor to avoid exposing its emptiness. The
            // exception is if the pattern is at the top level, because we want empty matches to be
            // considered exhaustive.
            let is_secretly_empty = def.variants.is_empty()
                && !cx.tcx.features().exhaustive_patterns
                && !pcx.is_top_level;
            if is_secretly_empty || is_declared_nonexhaustive {
                vec![NonExhaustive]
            } else if cx.tcx.features().exhaustive_patterns {
                // If `exhaustive_patterns` is enabled, we exclude variants known to be
                // uninhabited.
                def.variants
                    .iter()
                    .filter(|v| {
                        !v.uninhabited_from(cx.tcx, substs, def.adt_kind(), cx.param_env)
                            .contains(cx.tcx, cx.module)
                    })
                    .map(|v| Variant(v.def_id))
                    .collect()
            } else {
                def.variants.iter().map(|v| Variant(v.def_id)).collect()
            }
        }
        ty::Char => {
            vec![
                // The valid Unicode Scalar Value ranges.
                make_range('\u{0000}' as u128, '\u{D7FF}' as u128),
                make_range('\u{E000}' as u128, '\u{10FFFF}' as u128),
            ]
        }
        ty::Int(_) | ty::Uint(_)
            if pcx.ty.is_ptr_sized_integral()
                && !cx.tcx.features().precise_pointer_size_matching =>
        {
            // `usize`/`isize` are not allowed to be matched exhaustively unless the
            // `precise_pointer_size_matching` feature is enabled. So we treat those types like
            // `#[non_exhaustive]` enums by returning a special unmatcheable constructor.
            vec![NonExhaustive]
        }
        &ty::Int(ity) => {
            // `min` and `max` are the two's-complement bit representations of the type's
            // minimum and maximum values, as `u128` bit patterns.
            let bits = Integer::from_attr(&cx.tcx, SignedInt(ity)).size().bits() as u128;
            let min = 1u128 << (bits - 1);
            let max = min - 1;
            vec![make_range(min, max)]
        }
        &ty::Uint(uty) => {
            let size = Integer::from_attr(&cx.tcx, UnsignedInt(uty)).size();
            let max = size.truncate(u128::MAX);
            vec![make_range(0, max)]
        }
        // If `exhaustive_patterns` is disabled and our scrutinee is the never type, we cannot
        // expose its emptiness. The exception is if the pattern is at the top level, because we
        // want empty matches to be considered exhaustive.
        ty::Never if !cx.tcx.features().exhaustive_patterns && !pcx.is_top_level => {
            vec![NonExhaustive]
        }
        ty::Never => vec![],
        _ if cx.is_uninhabited(pcx.ty) => vec![],
        ty::Adt(..) | ty::Tuple(..) | ty::Ref(..) => vec![Single],
        // This type is one for which we cannot list constructors, like `str` or `f64`.
        _ => vec![NonExhaustive],
    }
}
// A struct to compute a set of constructors equivalent to `all_ctors \ used_ctors`.
#[derive(Debug)]
pub(super) struct MissingConstructors<'tcx> {
    // All the constructors for the type, pre-split (contains no wildcards).
    all_ctors: SmallVec<[Constructor<'tcx>; 1]>,
    // The non-wildcard constructors found in the head of each matrix row.
    used_ctors: Vec<Constructor<'tcx>>,
}
impl<'tcx> MissingConstructors<'tcx> {
    /// Collects the constructors used in the matrix column and all the possible constructors of
    /// the type, so that the difference can later be iterated lazily via `iter`.
    pub(super) fn new<'p>(pcx: PatCtxt<'_, 'p, 'tcx>) -> Self {
        let used_ctors: Vec<Constructor<'_>> =
            pcx.matrix.head_ctors(pcx.cx).cloned().filter(|c| !c.is_wildcard()).collect();
        // Since `all_ctors` never contains wildcards, this won't recurse further.
        let all_ctors =
            all_constructors(pcx).into_iter().flat_map(|ctor| ctor.split(pcx, None)).collect();
        MissingConstructors { all_ctors, used_ctors }
    }

    /// Whether every possible constructor is already used by the matrix.
    fn is_empty<'p>(&self, pcx: PatCtxt<'_, 'p, 'tcx>) -> bool {
        self.iter(pcx).next().is_none()
    }

    /// Iterate over all_ctors \ used_ctors
    fn iter<'a, 'p>(
        &'a self,
        pcx: PatCtxt<'a, 'p, 'tcx>,
    ) -> impl Iterator<Item = &'a Constructor<'tcx>> + Captures<'p> {
        self.all_ctors.iter().filter(move |ctor| !ctor.is_covered_by_any(pcx, &self.used_ctors))
    }

    /// List the patterns corresponding to the missing constructors. In some cases, instead of
    /// listing all constructors of a given type, we prefer to simply report a wildcard.
    pub(super) fn report_patterns<'p>(
        &self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
    ) -> SmallVec<[Pat<'tcx>; 1]> {
        // There are 2 ways we can report a witness here.
        // Commonly, we can report all the "free"
        // constructors as witnesses, e.g., if we have:
        //
        // ```
        //     enum Direction { N, S, E, W }
        //     let Direction::N = ...;
        // ```
        //
        // we can report 3 witnesses: `S`, `E`, and `W`.
        //
        // However, there is a case where we don't want
        // to do this and instead report a single `_` witness:
        // if the user didn't actually specify a constructor
        // in this arm, e.g., in
        //
        // ```
        //     let x: (Direction, Direction, bool) = ...;
        //     let (_, _, false) = x;
        // ```
        //
        // we don't want to show all 16 possible witnesses
        // `(<direction-1>, <direction-2>, true)` - we are
        // satisfied with `(_, _, true)`. In this case,
        // `used_ctors` is empty.
        // The exception is: if we are at the top-level, for example in an empty match, we
        // sometimes prefer reporting the list of constructors instead of just `_`.
        let report_when_all_missing = pcx.is_top_level && !IntRange::is_integral(pcx.ty);
        if self.used_ctors.is_empty() && !report_when_all_missing {
            // All constructors are unused. Report only a wildcard
            // rather than each individual constructor.
            smallvec![Pat::wildcard_from_ty(pcx.ty)]
        } else {
            // Construct for each missing constructor a "wild" version of this
            // constructor, that matches everything that can be built with
            // it. For example, if `ctor` is a `Constructor::Variant` for
            // `Option::Some`, we get the pattern `Some(_)`.
            self.iter(pcx)
                .map(|missing_ctor| Fields::wildcards(pcx, &missing_ctor).apply(pcx, missing_ctor))
                .collect()
        }
    }
}
/// Some fields need to be explicitly hidden away in certain cases; see the comment above the
/// `Fields` struct. This struct represents such a potentially-hidden field. When a field is hidden
/// we still keep its type around.
#[derive(Debug, Copy, Clone)]
pub(super) enum FilteredField<'p, 'tcx> {
    /// A visible field, carrying its pattern.
    Kept(&'p Pat<'tcx>),
    /// A hidden field; only its type is retained, for reconstructing a wildcard later.
    Hidden(Ty<'tcx>),
}
impl<'p, 'tcx> FilteredField<'p, 'tcx> {
    /// Returns the pattern if this field was kept, or `None` if it was hidden.
    fn kept(self) -> Option<&'p Pat<'tcx>> {
        if let FilteredField::Kept(p) = self { Some(p) } else { None }
    }

    /// Converts this field back into a real pattern; a hidden field becomes a wildcard of the
    /// type that was stored for it.
    fn to_pattern(self) -> Pat<'tcx> {
        match self {
            FilteredField::Hidden(ty) => Pat::wildcard_from_ty(ty),
            FilteredField::Kept(p) => p.clone(),
        }
    }
}
/// A value can be decomposed into a constructor applied to some fields. This struct represents
/// those fields, generalized to allow patterns in each field. See also `Constructor`.
///
/// If a private or `non_exhaustive` field is uninhabited, the code mustn't observe that it is
/// uninhabited. For that, we filter these fields out of the matrix. This is subtle because we
/// still need to have those fields back when going to/from a `Pat`. Most of this is handled
/// automatically in `Fields`, but when constructing or deconstructing `Fields` you need to be
/// careful. As a rule, when going to/from the matrix, use the filtered field list; when going
/// to/from `Pat`, use the full field list.
/// This filtering is uncommon in practice, because uninhabited fields are rarely used, so we avoid
/// it when possible to preserve performance.
#[derive(Debug, Clone)]
pub(super) enum Fields<'p, 'tcx> {
    /// Lists of patterns that don't contain any filtered fields.
    /// `Slice` and `Vec` behave the same; the difference is only to avoid allocating and
    /// triple-dereferences when possible. Frankly this is premature optimization, I (Nadrieril)
    /// have not measured if it really made a difference.
    Slice(&'p [Pat<'tcx>]),
    Vec(SmallVec<[&'p Pat<'tcx>; 2]>),
    /// Patterns where some of the fields need to be hidden. `kept_count` caches the number of
    /// non-hidden fields.
    Filtered {
        fields: SmallVec<[FilteredField<'p, 'tcx>; 2]>,
        kept_count: usize,
    },
}
impl<'p, 'tcx> Fields<'p, 'tcx> {
    /// A field list with no fields at all (arity 0).
    fn empty() -> Self {
        Fields::Slice(&[])
    }

    /// Construct a new `Fields` from the given pattern. Must not be used if the pattern is a field
    /// of a struct/tuple/variant.
    fn from_single_pattern(pat: &'p Pat<'tcx>) -> Self {
        Fields::Slice(std::slice::from_ref(pat))
    }

    /// Convenience; internal use.
    /// Builds an unfiltered field list of wildcards, one per type in `tys`, allocated in the
    /// pattern arena.
    fn wildcards_from_tys(
        cx: &MatchCheckCtxt<'p, 'tcx>,
        tys: impl IntoIterator<Item = Ty<'tcx>>,
    ) -> Self {
        let wilds = tys.into_iter().map(Pat::wildcard_from_ty);
        let pats = cx.pattern_arena.alloc_from_iter(wilds);
        Fields::Slice(pats)
    }

    /// Creates a new list of wildcard fields for a given constructor.
    pub(super) fn wildcards(pcx: PatCtxt<'_, 'p, 'tcx>, constructor: &Constructor<'tcx>) -> Self {
        let ty = pcx.ty;
        let cx = pcx.cx;
        let wildcard_from_ty = |ty| &*cx.pattern_arena.alloc(Pat::wildcard_from_ty(ty));
        let ret = match constructor {
            Single | Variant(_) => match ty.kind() {
                ty::Tuple(ref fs) => {
                    Fields::wildcards_from_tys(cx, fs.into_iter().map(|ty| ty.expect_ty()))
                }
                ty::Ref(_, rty, _) => Fields::from_single_pattern(wildcard_from_ty(rty)),
                ty::Adt(adt, substs) => {
                    if adt.is_box() {
                        // Use T as the sub pattern type of Box<T>.
                        Fields::from_single_pattern(wildcard_from_ty(substs.type_at(0)))
                    } else {
                        let variant = &adt.variants[constructor.variant_index_for_adt(adt)];
                        // Whether we must not match the fields of this variant exhaustively.
                        let is_non_exhaustive =
                            variant.is_field_list_non_exhaustive() && !adt.did.is_local();
                        let field_tys = variant.fields.iter().map(|field| field.ty(cx.tcx, substs));
                        // In the following cases, we don't need to filter out any fields. This is
                        // the vast majority of real cases, since uninhabited fields are uncommon.
                        let has_no_hidden_fields = (adt.is_enum() && !is_non_exhaustive)
                            || !field_tys.clone().any(|ty| cx.is_uninhabited(ty));
                        if has_no_hidden_fields {
                            Fields::wildcards_from_tys(cx, field_tys)
                        } else {
                            // Slow path: build a `Filtered` list, hiding fields as needed.
                            let mut kept_count = 0;
                            let fields = variant
                                .fields
                                .iter()
                                .map(|field| {
                                    let ty = field.ty(cx.tcx, substs);
                                    let is_visible = adt.is_enum()
                                        || field.vis.is_accessible_from(cx.module, cx.tcx);
                                    let is_uninhabited = cx.is_uninhabited(ty);
                                    // In the cases of either a `#[non_exhaustive]` field list
                                    // or a non-public field, we hide uninhabited fields in
                                    // order not to reveal the uninhabitedness of the whole
                                    // variant.
                                    if is_uninhabited && (!is_visible || is_non_exhaustive) {
                                        FilteredField::Hidden(ty)
                                    } else {
                                        kept_count += 1;
                                        FilteredField::Kept(wildcard_from_ty(ty))
                                    }
                                })
                                .collect();
                            Fields::Filtered { fields, kept_count }
                        }
                    }
                }
                _ => bug!("Unexpected type for `Single` constructor: {:?}", ty),
            },
            Slice(slice) => match *ty.kind() {
                ty::Slice(ty) | ty::Array(ty, _) => {
                    // One wildcard per subpattern of the slice constructor, all of element type.
                    let arity = slice.arity();
                    Fields::wildcards_from_tys(cx, (0..arity).map(|_| ty))
                }
                _ => bug!("bad slice pattern {:?} {:?}", constructor, ty),
            },
            // These constructors carry no subpatterns.
            Str(..) | FloatRange(..) | IntRange(..) | NonExhaustive | Opaque | Wildcard => {
                Fields::empty()
            }
        };
        debug!("Fields::wildcards({:?}, {:?}) = {:#?}", constructor, ty, ret);
        ret
    }

    /// Apply a constructor to a list of patterns, yielding a new pattern. `self`
    /// must have as many elements as this constructor's arity.
    ///
    /// This is roughly the inverse of `specialize_constructor`.
    ///
    /// Examples:
    /// `ctor`: `Constructor::Single`
    /// `ty`: `Foo(u32, u32, u32)`
    /// `self`: `[10, 20, _]`
    /// returns `Foo(10, 20, _)`
    ///
    /// `ctor`: `Constructor::Variant(Option::Some)`
    /// `ty`: `Option<bool>`
    /// `self`: `[false]`
    /// returns `Some(false)`
    pub(super) fn apply(self, pcx: PatCtxt<'_, 'p, 'tcx>, ctor: &Constructor<'tcx>) -> Pat<'tcx> {
        // Note: hidden fields are included here, so the reconstructed pattern has the full arity.
        let mut subpatterns = self.all_patterns();
        let pat = match ctor {
            Single | Variant(_) => match pcx.ty.kind() {
                ty::Adt(..) | ty::Tuple(..) => {
                    let subpatterns = subpatterns
                        .enumerate()
                        .map(|(i, p)| FieldPat { field: Field::new(i), pattern: p })
                        .collect();
                    if let ty::Adt(adt, substs) = pcx.ty.kind() {
                        if adt.is_enum() {
                            PatKind::Variant {
                                adt_def: adt,
                                substs,
                                variant_index: ctor.variant_index_for_adt(adt),
                                subpatterns,
                            }
                        } else {
                            PatKind::Leaf { subpatterns }
                        }
                    } else {
                        PatKind::Leaf { subpatterns }
                    }
                }
                // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should
                // be careful to reconstruct the correct constant pattern here. However a string
                // literal pattern will never be reported as a non-exhaustiveness witness, so we
                // can ignore this issue.
                ty::Ref(..) => PatKind::Deref { subpattern: subpatterns.next().unwrap() },
                ty::Slice(_) | ty::Array(..) => bug!("bad slice pattern {:?} {:?}", ctor, pcx.ty),
                _ => PatKind::Wild,
            },
            Slice(slice) => match slice.kind {
                FixedLen(_) => {
                    PatKind::Slice { prefix: subpatterns.collect(), slice: None, suffix: vec![] }
                }
                VarLen(prefix, _) => {
                    let mut prefix: Vec<_> = subpatterns.by_ref().take(prefix as usize).collect();
                    if slice.array_len.is_some() {
                        // Improves diagnostics a bit: if the type is a known-size array, instead
                        // of reporting `[x, _, .., _, y]`, we prefer to report `[x, .., y]`.
                        // This is incorrect if the size is not known, since `[_, ..]` captures
                        // arrays of lengths `>= 1` whereas `[..]` captures any length.
                        while !prefix.is_empty() && prefix.last().unwrap().is_wildcard() {
                            prefix.pop();
                        }
                    }
                    let suffix: Vec<_> = if slice.array_len.is_some() {
                        // Same as above.
                        subpatterns.skip_while(Pat::is_wildcard).collect()
                    } else {
                        subpatterns.collect()
                    };
                    let wild = Pat::wildcard_from_ty(pcx.ty);
                    PatKind::Slice { prefix, slice: Some(wild), suffix }
                }
            },
            &Str(value) => PatKind::Constant { value },
            &FloatRange(lo, hi, end) => PatKind::Range(PatRange { lo, hi, end }),
            IntRange(range) => return range.to_pat(pcx.cx.tcx, pcx.ty),
            NonExhaustive => PatKind::Wild,
            Opaque => bug!("we should not try to apply an opaque constructor"),
            Wildcard => bug!(
                "trying to apply a wildcard constructor; this should have been done in `apply_constructors`"
            ),
        };
        Pat { ty: pcx.ty, span: DUMMY_SP, kind: Box::new(pat) }
    }

    /// Returns the number of patterns from the viewpoint of match-checking, i.e. excluding hidden
    /// fields. This is what we want in most cases in this file, the only exception being
    /// conversion to/from `Pat`.
    pub(super) fn len(&self) -> usize {
        match self {
            Fields::Slice(pats) => pats.len(),
            Fields::Vec(pats) => pats.len(),
            Fields::Filtered { kept_count, .. } => *kept_count,
        }
    }

    /// Returns the complete list of patterns, including hidden fields.
    fn all_patterns(self) -> impl Iterator<Item = Pat<'tcx>> {
        let pats: SmallVec<[_; 2]> = match self {
            Fields::Slice(pats) => pats.iter().cloned().collect(),
            Fields::Vec(pats) => pats.into_iter().cloned().collect(),
            Fields::Filtered { fields, .. } => {
                // We don't skip any fields here.
                fields.into_iter().map(|p| p.to_pattern()).collect()
            }
        };
        pats.into_iter()
    }

    /// Returns the filtered list of patterns, not including hidden fields.
    pub(super) fn filtered_patterns(self) -> SmallVec<[&'p Pat<'tcx>; 2]> {
        match self {
            Fields::Slice(pats) => pats.iter().collect(),
            Fields::Vec(pats) => pats,
            Fields::Filtered { fields, .. } => {
                // We skip hidden fields here
                fields.into_iter().filter_map(|p| p.kept()).collect()
            }
        }
    }

    /// Overrides some of the fields with the provided patterns. Exactly like
    /// `replace_fields_indexed`, except that it takes `FieldPat`s as input.
    fn replace_with_fieldpats(
        &self,
        new_pats: impl IntoIterator<Item = &'p FieldPat<'tcx>>,
    ) -> Self {
        self.replace_fields_indexed(
            new_pats.into_iter().map(|pat| (pat.field.index(), &pat.pattern)),
        )
    }

    /// Overrides some of the fields with the provided patterns. This is used when a pattern
    /// defines some fields but not all, for example `Foo { field1: Some(_), .. }`: here we start with a
    /// `Fields` that is just one wildcard per field of the `Foo` struct, and override the entry
    /// corresponding to `field1` with the pattern `Some(_)`. This is also used for slice patterns
    /// for the same reason.
    fn replace_fields_indexed(
        &self,
        new_pats: impl IntoIterator<Item = (usize, &'p Pat<'tcx>)>,
    ) -> Self {
        let mut fields = self.clone();
        // A borrowed `Slice` can't be mutated in place, so promote it to an owned `Vec` first.
        if let Fields::Slice(pats) = fields {
            fields = Fields::Vec(pats.iter().collect());
        }
        match &mut fields {
            Fields::Vec(pats) => {
                for (i, pat) in new_pats {
                    pats[i] = pat
                }
            }
            Fields::Filtered { fields, .. } => {
                for (i, pat) in new_pats {
                    // Indices normally point into the filtered list, but in practice when a
                    // `Filtered` set is involved, every index refers to a `Kept` field; hidden
                    // fields are simply left untouched.
                    if let FilteredField::Kept(p) = &mut fields[i] {
                        *p = pat
                    }
                }
            }
            // Unreachable: the `Slice` case was converted to `Vec` above.
            Fields::Slice(_) => unreachable!(),
        }
        fields
    }

    /// Replaces contained fields with the given filtered list of patterns, e.g. taken from the
    /// matrix. There must be `len()` patterns in `pats`.
    pub(super) fn replace_fields(
        &self,
        cx: &MatchCheckCtxt<'p, 'tcx>,
        pats: impl IntoIterator<Item = Pat<'tcx>>,
    ) -> Self {
        let pats: &[_] = cx.pattern_arena.alloc_from_iter(pats);
        match self {
            Fields::Filtered { fields, kept_count } => {
                let mut pats = pats.iter();
                let mut fields = fields.clone();
                for f in &mut fields {
                    if let FilteredField::Kept(p) = f {
                        // We take one input pattern for each `Kept` field, in order.
                        *p = pats.next().unwrap();
                    }
                }
                Fields::Filtered { fields, kept_count: *kept_count }
            }
            _ => Fields::Slice(pats),
        }
    }

    /// Replaces contained fields with the arguments of the given pattern. Only use on a pattern
    /// that is compatible with the constructor used to build `self`.
    /// This is meant to be used on the result of `Fields::wildcards()`. The idea is that
    /// `wildcards` constructs a list of fields where all entries are wildcards, and the pattern
    /// provided to this function fills some of the fields with non-wildcards.
    /// In the following example `Fields::wildcards` would return `[_, _, _, _]`. If we call
    /// `replace_with_pattern_arguments` on it with the pattern, the result will be `[Some(0), _,
    /// _, _]`.
    /// ```rust
    /// let x: [Option<u8>; 4] = foo();
    /// match x {
    ///     [Some(0), ..] => {}
    /// }
    /// ```
    /// This is guaranteed to preserve the number of patterns in `self`.
    pub(super) fn replace_with_pattern_arguments(&self, pat: &'p Pat<'tcx>) -> Self {
        match pat.kind.as_ref() {
            PatKind::Deref { subpattern } => {
                assert_eq!(self.len(), 1);
                Fields::from_single_pattern(subpattern)
            }
            PatKind::Leaf { subpatterns } | PatKind::Variant { subpatterns, .. } => {
                self.replace_with_fieldpats(subpatterns)
            }
            PatKind::Array { prefix, suffix, .. } | PatKind::Slice { prefix, suffix, .. } => {
                // Number of subpatterns for the constructor
                let ctor_arity = self.len();
                // Replace the prefix and the suffix with the given patterns, leaving wildcards in
                // the middle if there was a subslice pattern `..`.
                let prefix = prefix.iter().enumerate();
                let suffix =
                    suffix.iter().enumerate().map(|(i, p)| (ctor_arity - suffix.len() + i, p));
                self.replace_fields_indexed(prefix.chain(suffix))
            }
            // Wildcards (and other arity-0 patterns) override nothing.
            _ => self.clone(),
        }
    }
}
// Inline `is_covered_by`
//! This module provides functions to deconstruct and reconstruct patterns into a constructor
//! applied to some fields. This is used by the `_match` module to compute pattern
//! usefulness/exhaustiveness.
use self::Constructor::*;
use self::SliceKind::*;
use super::compare_const_vals;
use super::usefulness::{MatchCheckCtxt, PatCtxt};
use super::{FieldPat, Pat, PatKind, PatRange};
use rustc_data_structures::captures::Captures;
use rustc_index::vec::Idx;
use rustc_attr::{SignedInt, UnsignedInt};
use rustc_hir::def_id::DefId;
use rustc_hir::{HirId, RangeEnd};
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::mir::Field;
use rustc_middle::ty::layout::IntegerExt;
use rustc_middle::ty::{self, Const, Ty, TyCtxt};
use rustc_session::lint;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::{Integer, Size, VariantIdx};
use smallvec::{smallvec, SmallVec};
use std::cmp::{self, max, min, Ordering};
use std::iter::IntoIterator;
use std::ops::RangeInclusive;
/// An inclusive interval, used for precise integer exhaustiveness checking.
/// `IntRange`s always store a contiguous range. This means that values are
/// encoded such that `0` encodes the minimum value for the integer,
/// regardless of the signedness.
/// For example, the pattern `-128..=127i8` is encoded as `0..=255`.
/// This makes comparisons and arithmetic on interval endpoints much more
/// straightforward. See `signed_bias` for details.
///
/// `IntRange` is never used to encode an empty range or a "range" that wraps
/// around the (offset) space: i.e., `range.lo <= range.hi`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(super) struct IntRange {
    // Inclusive endpoints, stored in the bias-encoded (always-unsigned) space
    // described above.
    range: RangeInclusive<u128>,
}
impl IntRange {
    /// Whether values of this type can be represented as an `IntRange`.
    #[inline]
    fn is_integral(ty: Ty<'_>) -> bool {
        matches!(ty.kind(), ty::Char | ty::Int(_) | ty::Uint(_) | ty::Bool)
    }

    /// Whether the range covers exactly one value.
    fn is_singleton(&self) -> bool {
        self.range.start() == self.range.end()
    }

    /// The inclusive endpoints of the range, in the bias-encoded space.
    fn boundaries(&self) -> (u128, u128) {
        (*self.range.start(), *self.range.end())
    }

    /// Returns the byte size of the type and the bias to XOR endpoints with,
    /// or `None` if the type is not an integral type we can range-check.
    #[inline]
    fn integral_size_and_signed_bias(tcx: TyCtxt<'_>, ty: Ty<'_>) -> Option<(Size, u128)> {
        match *ty.kind() {
            ty::Bool => Some((Size::from_bytes(1), 0)),
            ty::Char => Some((Size::from_bytes(4), 0)),
            ty::Int(ity) => {
                let size = Integer::from_attr(&tcx, SignedInt(ity)).size();
                // For signed ints, flipping the sign bit maps the value space
                // onto `0..=2^bits - 1` while preserving order.
                Some((size, 1u128 << (size.bits() as u128 - 1)))
            }
            ty::Uint(uty) => Some((Integer::from_attr(&tcx, UnsignedInt(uty)).size(), 0)),
            _ => None,
        }
    }

    /// Builds a singleton range from a constant, or `None` if the constant is
    /// not of an integral type (or its bits cannot be evaluated).
    #[inline]
    fn from_const<'tcx>(
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        value: &Const<'tcx>,
    ) -> Option<IntRange> {
        if let Some((target_size, bias)) = Self::integral_size_and_signed_bias(tcx, value.ty) {
            let ty = value.ty;
            let val = (|| {
                if let ty::ConstKind::Value(ConstValue::Scalar(scalar)) = value.val {
                    // For this specific pattern we can skip a lot of effort and go
                    // straight to the result, after doing a bit of checking. (We
                    // could remove this branch and just fall through, which
                    // is more general but much slower.)
                    if let Ok(bits) = scalar.to_bits_or_ptr(target_size, &tcx) {
                        return Some(bits);
                    }
                }
                // This is a more general form of the previous case.
                value.try_eval_bits(tcx, param_env, ty)
            })()?;
            let val = val ^ bias;
            Some(IntRange { range: val..=val })
        } else {
            None
        }
    }

    /// Builds a range from raw endpoint bits, or `None` if `ty` is not integral.
    /// An `Excluded` end is normalized into an inclusive endpoint here.
    #[inline]
    fn from_range<'tcx>(
        tcx: TyCtxt<'tcx>,
        lo: u128,
        hi: u128,
        ty: Ty<'tcx>,
        end: &RangeEnd,
    ) -> Option<IntRange> {
        if Self::is_integral(ty) {
            // Perform a shift if the underlying types are signed,
            // which makes the interval arithmetic simpler.
            let bias = IntRange::signed_bias(tcx, ty);
            let (lo, hi) = (lo ^ bias, hi ^ bias);
            let offset = (*end == RangeEnd::Excluded) as u128;
            if lo > hi || (lo == hi && *end == RangeEnd::Excluded) {
                // This should have been caught earlier by E0030.
                bug!("malformed range pattern: {}..={}", lo, (hi - offset));
            }
            Some(IntRange { range: lo..=(hi - offset) })
        } else {
            None
        }
    }

    // The return value of `signed_bias` should be XORed with an endpoint to encode/decode it.
    fn signed_bias(tcx: TyCtxt<'_>, ty: Ty<'_>) -> u128 {
        match *ty.kind() {
            ty::Int(ity) => {
                let bits = Integer::from_attr(&tcx, SignedInt(ity)).size().bits() as u128;
                1u128 << (bits - 1)
            }
            _ => 0,
        }
    }

    /// Whether `self` lies entirely within `other`.
    fn is_subrange(&self, other: &Self) -> bool {
        other.range.start() <= self.range.start() && self.range.end() <= other.range.end()
    }

    /// The overlap of the two ranges, or `None` if they are disjoint.
    fn intersection(&self, other: &Self) -> Option<Self> {
        let (lo, hi) = self.boundaries();
        let (other_lo, other_hi) = other.boundaries();
        if lo <= other_hi && other_lo <= hi {
            Some(IntRange { range: max(lo, other_lo)..=min(hi, other_hi) })
        } else {
            None
        }
    }

    /// Whether the two ranges touch only at a shared endpoint — the kind of
    /// one-value overlap that is most likely an off-by-one mistake in a match.
    fn suspicious_intersection(&self, other: &Self) -> bool {
        // `false` in the following cases:
        // 1     ----      // 1  ----------   // 1 ----        // 1       ----
        // 2  ----------   // 2     ----      // 2       ----  // 2 ----
        //
        // The following are currently `false`, but could be `true` in the future (#64007):
        // 1 ---------       // 1     ---------
        // 2     ----------  // 2 ----------
        //
        // `true` in the following cases:
        // 1 -------          // 1       -------
        // 2       --------   // 2 -------
        let (lo, hi) = self.boundaries();
        let (other_lo, other_hi) = other.boundaries();
        lo == other_hi || hi == other_lo
    }

    /// Converts the (bias-encoded) range back into a user-visible pattern of type `ty`,
    /// decoding endpoints with `signed_bias` before building the constants.
    fn to_pat<'tcx>(&self, tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Pat<'tcx> {
        let (lo, hi) = self.boundaries();
        let bias = IntRange::signed_bias(tcx, ty);
        let (lo, hi) = (lo ^ bias, hi ^ bias);
        let env = ty::ParamEnv::empty().and(ty);
        let lo_const = ty::Const::from_bits(tcx, lo, env);
        let hi_const = ty::Const::from_bits(tcx, hi, env);
        let kind = if lo == hi {
            // A singleton range displays as a plain constant pattern.
            PatKind::Constant { value: lo_const }
        } else {
            PatKind::Range(PatRange { lo: lo_const, hi: hi_const, end: RangeEnd::Included })
        };
        Pat { ty, span: DUMMY_SP, kind: Box::new(kind) }
    }

    /// For exhaustive integer matching, some constructors are grouped within other constructors
    /// (namely integer typed values are grouped within ranges). However, when specialising these
    /// constructors, we want to be specialising for the underlying constructors (the integers), not
    /// the groups (the ranges). Thus we need to split the groups up. Splitting them up naïvely would
    /// mean creating a separate constructor for every single value in the range, which is clearly
    /// impractical. However, observe that for some ranges of integers, the specialisation will be
    /// identical across all values in that range (i.e., there are equivalence classes of ranges of
    /// constructors based on their `U(S(c, P), S(c, p))` outcome). These classes are grouped by
    /// the patterns that apply to them (in the matrix `P`). We can split the range whenever the
    /// patterns that apply to that range (specifically: the patterns that *intersect* with that range)
    /// change.
    /// Our solution, therefore, is to split the range constructor into subranges at every single point
    /// the group of intersecting patterns changes (using the method described below).
    /// And voilà! We're testing precisely those ranges that we need to, without any exhaustive matching
    /// on actual integers. The nice thing about this is that the number of subranges is linear in the
    /// number of rows in the matrix (i.e., the number of cases in the `match` statement), so we don't
    /// need to be worried about matching over gargantuan ranges.
    ///
    /// Essentially, given the first column of a matrix representing ranges, looking like the following:
    ///
    /// |------|  |----------| |-------|    ||
    ///    |-------| |-------|            |----| ||
    ///       |---------|
    ///
    /// We split the ranges up into equivalence classes so the ranges are no longer overlapping:
    ///
    /// |--|--|||-||||--||---|||-------|  |-|||| ||
    ///
    /// The logic for determining how to split the ranges is fairly straightforward: we calculate
    /// boundaries for each interval range, sort them, then create constructors for each new interval
    /// between every pair of boundary points. (This essentially sums up to performing the intuitive
    /// merging operation depicted above.)
    fn split<'p, 'tcx>(
        &self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        hir_id: Option<HirId>,
    ) -> SmallVec<[Constructor<'tcx>; 1]> {
        /// Represents a border between 2 integers. Because the intervals spanning borders
        /// must be able to cover every integer, we need to be able to represent
        /// 2^128 + 1 such borders.
        #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
        enum Border {
            JustBefore(u128),
            AfterMax,
        }

        // A function for extracting the borders of an integer interval.
        fn range_borders(r: IntRange) -> impl Iterator<Item = Border> {
            let (lo, hi) = r.range.into_inner();
            let from = Border::JustBefore(lo);
            let to = match hi.checked_add(1) {
                Some(m) => Border::JustBefore(m),
                // `hi` was `u128::MAX`, so the upper border is past every integer.
                None => Border::AfterMax,
            };
            vec![from, to].into_iter()
        }

        // Collect the span and range of all the intersecting ranges to lint on likely
        // incorrect range patterns. (#63987)
        let mut overlaps = vec![];
        let row_len = pcx.matrix.column_count().unwrap_or(0);
        // `borders` is the set of borders between equivalence classes: each equivalence
        // class lies between 2 borders.
        let row_borders = pcx
            .matrix
            .head_ctors_and_spans(pcx.cx)
            .filter_map(|(ctor, span)| Some((ctor.as_int_range()?, span)))
            .filter_map(|(range, span)| {
                let intersection = self.intersection(&range);
                let should_lint = self.suspicious_intersection(&range);
                if let (Some(range), 1, true) = (&intersection, row_len, should_lint) {
                    // FIXME: for now, only check for overlapping ranges on simple range
                    // patterns. Otherwise with the current logic the following is detected
                    // as overlapping:
                    //   match (0u8, true) {
                    //     (0 ..= 125, false) => {}
                    //     (125 ..= 255, true) => {}
                    //     _ => {}
                    //   }
                    overlaps.push((range.clone(), span));
                }
                intersection
            })
            .flat_map(range_borders);
        let self_borders = range_borders(self.clone());
        let mut borders: Vec<_> = row_borders.chain(self_borders).collect();
        borders.sort_unstable();

        self.lint_overlapping_patterns(pcx, hir_id, overlaps);

        // We're going to iterate through every adjacent pair of borders, making sure that
        // each represents an interval of nonnegative length, and convert each such
        // interval into a constructor.
        borders
            .array_windows()
            .filter_map(|&pair| match pair {
                [Border::JustBefore(n), Border::JustBefore(m)] => {
                    if n < m {
                        Some(n..=(m - 1))
                    } else {
                        None
                    }
                }
                [Border::JustBefore(n), Border::AfterMax] => Some(n..=u128::MAX),
                [Border::AfterMax, _] => None,
            })
            // First build the `IntRange` struct, then wrap it in the
            // `Constructor::IntRange` variant (in scope via `use self::Constructor::*`).
            .map(|range| IntRange { range })
            .map(IntRange)
            .collect()
    }

    /// Emits the `OVERLAPPING_PATTERNS` lint for each suspicious overlap collected by
    /// `split`. Does nothing when `hir_id` is `None` (e.g. when splitting a wildcard).
    fn lint_overlapping_patterns(
        &self,
        pcx: PatCtxt<'_, '_, '_>,
        hir_id: Option<HirId>,
        overlaps: Vec<(IntRange, Span)>,
    ) {
        if let (true, Some(hir_id)) = (!overlaps.is_empty(), hir_id) {
            pcx.cx.tcx.struct_span_lint_hir(
                lint::builtin::OVERLAPPING_PATTERNS,
                hir_id,
                pcx.span,
                |lint| {
                    let mut err = lint.build("multiple patterns covering the same range");
                    err.span_label(pcx.span, "overlapping patterns");
                    for (int_range, span) in overlaps {
                        // Use the real type for user display of the ranges:
                        err.span_label(
                            span,
                            &format!(
                                "this range overlaps on `{}`",
                                int_range.to_pat(pcx.cx.tcx, pcx.ty),
                            ),
                        );
                    }
                    err.emit();
                },
            );
        }
    }

    /// See `Constructor::is_covered_by`
    fn is_covered_by(&self, other: &Self) -> bool {
        if self.intersection(other).is_some() {
            // Constructor splitting should ensure that all intersections we encounter are actually
            // inclusions.
            assert!(self.is_subrange(other));
            true
        } else {
            false
        }
    }
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum SliceKind {
    /// Patterns of length `n` (`[x, y]`).
    FixedLen(u64),
    /// Patterns using the `..` notation (`[x, .., y]`) with `prefix` and `suffix` element
    /// counts (the two fields, in order).
    /// Captures any array constructor of `length >= prefix + suffix`.
    /// In the case where `array_len` is `Some(_)`,
    /// this indicates that we only care about the first `prefix` and the last `suffix` values of
    /// the array, and everything in between is a wildcard `_`.
    VarLen(u64, u64),
}
impl SliceKind {
    /// Number of elements this pattern explicitly mentions (prefix + suffix for `VarLen`).
    fn arity(self) -> u64 {
        match self {
            FixedLen(len) => len,
            VarLen(pre, suf) => pre + suf,
        }
    }

    /// Whether this pattern includes patterns of length `other_len`.
    fn covers_length(self, other_len: u64) -> bool {
        if let FixedLen(len) = self {
            // A fixed-length pattern matches exactly one length.
            len == other_len
        } else {
            // A `[pre, .., suf]` pattern matches any length that fits both ends.
            self.arity() <= other_len
        }
    }
}
/// A constructor for array and slice patterns.
///
/// Invariant: a fixed `array_len` combined with a `VarLen` kind whose middle `..` cannot
/// match anything is normalized to `FixedLen` by `Slice::new`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(super) struct Slice {
    /// `None` if the matched value is a slice, `Some(n)` if it is an array of size `n`.
    array_len: Option<u64>,
    /// The kind of pattern it is: fixed-length `[x, y]` or variable length `[x, .., y]`.
    kind: SliceKind,
}
impl Slice {
    /// Builds a `Slice`, normalizing a `VarLen` that cannot actually vary (because the
    /// array length is known and fully consumed by prefix + suffix) into `FixedLen`.
    fn new(array_len: Option<u64>, kind: SliceKind) -> Self {
        let kind = match (array_len, kind) {
            // If the middle `..` is empty, we effectively have a fixed-length pattern.
            (Some(len), VarLen(prefix, suffix)) if prefix + suffix >= len => FixedLen(len),
            _ => kind,
        };
        Slice { array_len, kind }
    }

    /// Number of subpatterns of this constructor (delegates to the kind).
    fn arity(self) -> u64 {
        self.kind.arity()
    }

    /// The exhaustiveness-checking paper does not include any details on
    /// checking variable-length slice patterns. However, they may be
    /// matched by an infinite collection of fixed-length array patterns.
    ///
    /// Checking the infinite set directly would take an infinite amount
    /// of time. However, it turns out that for each finite set of
    /// patterns `P`, all sufficiently large array lengths are equivalent:
    ///
    /// Each slice `s` with a "sufficiently-large" length `l ≥ L` that applies
    /// to exactly the subset `Pₜ` of `P` can be transformed to a slice
    /// `sₘ` for each sufficiently-large length `m` that applies to exactly
    /// the same subset of `P`.
    ///
    /// Because of that, each witness for reachability-checking of one
    /// of the sufficiently-large lengths can be transformed to an
    /// equally-valid witness of any other length, so we only have
    /// to check slices of the "minimal sufficiently-large length"
    /// and less.
    ///
    /// Note that the fact that there is a *single* `sₘ` for each `m`
    /// not depending on the specific pattern in `P` is important: if
    /// you look at the pair of patterns
    ///     `[true, ..]`
    ///     `[.., false]`
    /// Then any slice of length ≥1 that matches one of these two
    /// patterns can be trivially turned to a slice of any
    /// other length ≥1 that matches them and vice-versa,
    /// but the slice of length 2 `[false, true]` that matches neither
    /// of these patterns can't be turned to a slice from length 1 that
    /// matches neither of these patterns, so we have to consider
    /// slices from length 2 there.
    ///
    /// Now, to see that that length exists and find it, observe that slice
    /// patterns are either "fixed-length" patterns (`[_, _, _]`) or
    /// "variable-length" patterns (`[_, .., _]`).
    ///
    /// For fixed-length patterns, all slices with lengths *longer* than
    /// the pattern's length have the same outcome (of not matching), so
    /// as long as `L` is greater than the pattern's length we can pick
    /// any `sₘ` from that length and get the same result.
    ///
    /// For variable-length patterns, the situation is more complicated,
    /// because as seen above the precise value of `sₘ` matters.
    ///
    /// However, for each variable-length pattern `p` with a prefix of length
    /// `plₚ` and suffix of length `slₚ`, only the first `plₚ` and the last
    /// `slₚ` elements are examined.
    ///
    /// Therefore, as long as `L` is positive (to avoid concerns about empty
    /// types), all elements after the maximum prefix length and before
    /// the maximum suffix length are not examined by any variable-length
    /// pattern, and therefore can be added/removed without affecting
    /// them - creating equivalent patterns from any sufficiently-large
    /// length.
    ///
    /// Of course, if fixed-length patterns exist, we must be sure
    /// that our length is large enough to miss them all, so
    /// we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`
    ///
    /// for example, with the above pair of patterns, all elements
    /// but the first and last can be added/removed, so any
    /// witness of length ≥2 (say, `[false, false, true]`) can be
    /// turned to a witness from any other length ≥2.
    fn split<'p, 'tcx>(self, pcx: PatCtxt<'_, 'p, 'tcx>) -> SmallVec<[Constructor<'tcx>; 1]> {
        // Only variable-length patterns need splitting; fixed-length ones are returned as-is.
        let (self_prefix, self_suffix) = match self.kind {
            VarLen(self_prefix, self_suffix) => (self_prefix, self_suffix),
            _ => return smallvec![Slice(self)],
        };

        let head_ctors = pcx.matrix.head_ctors(pcx.cx).filter(|c| !c.is_wildcard());

        // Compute `L` (see the doc comment above) from the slice patterns in the column.
        let mut max_prefix_len = self_prefix;
        let mut max_suffix_len = self_suffix;
        let mut max_fixed_len = 0;

        for ctor in head_ctors {
            if let Slice(slice) = ctor {
                match slice.kind {
                    FixedLen(len) => {
                        max_fixed_len = cmp::max(max_fixed_len, len);
                    }
                    VarLen(prefix, suffix) => {
                        max_prefix_len = cmp::max(max_prefix_len, prefix);
                        max_suffix_len = cmp::max(max_suffix_len, suffix);
                    }
                }
            } else {
                bug!("unexpected ctor for slice type: {:?}", ctor);
            }
        }

        // For diagnostics, we keep the prefix and suffix lengths separate, so in the case
        // where `max_fixed_len + 1` is the largest, we adapt `max_prefix_len` accordingly,
        // so that `L = max_prefix_len + max_suffix_len`.
        if max_fixed_len + 1 >= max_prefix_len + max_suffix_len {
            // The subtraction can't overflow thanks to the above check.
            // The new `max_prefix_len` is also guaranteed to be larger than its previous
            // value.
            max_prefix_len = max_fixed_len + 1 - max_suffix_len;
        }

        let final_slice = VarLen(max_prefix_len, max_suffix_len);
        let final_slice = Slice::new(self.array_len, final_slice);
        match self.array_len {
            // For arrays the length is fixed, so a single slice constructor suffices.
            Some(_) => smallvec![Slice(final_slice)],
            None => {
                // `self` originally covered the range `(self.arity()..infinity)`. We split that
                // range into two: lengths smaller than `final_slice.arity()` are treated
                // independently as fixed-lengths slices, and lengths above are captured by
                // `final_slice`.
                let smaller_lengths = (self.arity()..final_slice.arity()).map(FixedLen);
                smaller_lengths
                    .map(|kind| Slice::new(self.array_len, kind))
                    .chain(Some(final_slice))
                    .map(Slice)
                    .collect()
            }
        }
    }

    /// See `Constructor::is_covered_by`
    fn is_covered_by(self, other: Self) -> bool {
        other.kind.covers_length(self.arity())
    }
}
/// A value can be decomposed into a constructor applied to some fields. This struct represents
/// the constructor. See also `Fields`.
///
/// `pat_constructor` retrieves the constructor corresponding to a pattern.
/// `specialize_constructor` returns the list of fields corresponding to a pattern, given a
/// constructor. `Constructor::apply` reconstructs the pattern from a pair of `Constructor` and
/// `Fields`.
#[derive(Clone, Debug, PartialEq)]
pub(super) enum Constructor<'tcx> {
    /// The constructor for patterns that have a single constructor, like tuples, struct patterns
    /// and fixed-length arrays.
    Single,
    /// Enum variants, identified by the variant's `DefId`.
    Variant(DefId),
    /// Ranges of integer literal values (`2`, `2..=5` or `2..5`).
    /// NOTE: this variant shares its name with the `IntRange` struct it wraps.
    IntRange(IntRange),
    /// Ranges of floating-point literal values (`2.0..=5.2`).
    FloatRange(&'tcx ty::Const<'tcx>, &'tcx ty::Const<'tcx>, RangeEnd),
    /// String literals. Strings are not quite the same as `&[u8]` so we treat them separately.
    Str(&'tcx ty::Const<'tcx>),
    /// Array and slice patterns.
    Slice(Slice),
    /// Constants that must not be matched structurally. They are treated as black
    /// boxes for the purposes of exhaustiveness: we must not inspect them, and they
    /// don't count towards making a match exhaustive.
    Opaque,
    /// Fake extra constructor for enums that aren't allowed to be matched exhaustively. Also used
    /// for those types for which we cannot list constructors explicitly, like `f64` and `str`.
    NonExhaustive,
    /// Wildcard pattern.
    Wildcard,
}
impl<'tcx> Constructor<'tcx> {
    pub(super) fn is_wildcard(&self) -> bool {
        matches!(self, Wildcard)
    }

    /// Returns the wrapped `IntRange` if this is an integer-range constructor.
    fn as_int_range(&self) -> Option<&IntRange> {
        match self {
            IntRange(range) => Some(range),
            _ => None,
        }
    }

    /// Returns the wrapped `Slice` (it is `Copy`) if this is a slice constructor.
    fn as_slice(&self) -> Option<Slice> {
        match self {
            Slice(slice) => Some(*slice),
            _ => None,
        }
    }

    /// Maps this constructor to the index of the ADT variant it selects.
    /// Panics on constructors that don't correspond to an ADT variant.
    fn variant_index_for_adt(&self, adt: &'tcx ty::AdtDef) -> VariantIdx {
        match *self {
            Variant(id) => adt.variant_index_with_id(id),
            Single => {
                // `Single` only applies to structs/unions, which have exactly one variant.
                assert!(!adt.is_enum());
                VariantIdx::new(0)
            }
            _ => bug!("bad constructor {:?} for adt {:?}", self, adt),
        }
    }

    /// Determines the constructor that the given pattern can be specialized to.
    pub(super) fn from_pat<'p>(cx: &MatchCheckCtxt<'p, 'tcx>, pat: &'p Pat<'tcx>) -> Self {
        match pat.kind.as_ref() {
            PatKind::AscribeUserType { .. } => bug!(), // Handled by `expand_pattern`
            PatKind::Binding { .. } | PatKind::Wild => Wildcard,
            PatKind::Leaf { .. } | PatKind::Deref { .. } => Single,
            &PatKind::Variant { adt_def, variant_index, .. } => {
                Variant(adt_def.variants[variant_index].def_id)
            }
            PatKind::Constant { value } => {
                if let Some(int_range) = IntRange::from_const(cx.tcx, cx.param_env, value) {
                    IntRange(int_range)
                } else {
                    match pat.ty.kind() {
                        // A float constant is modelled as a degenerate (point) float range.
                        ty::Float(_) => FloatRange(value, value, RangeEnd::Included),
                        // In `expand_pattern`, we convert string literals to `&CONST` patterns with
                        // `CONST` a pattern of type `str`. In truth this contains a constant of type
                        // `&str`.
                        ty::Str => Str(value),
                        // All constants that can be structurally matched have already been expanded
                        // into the corresponding `Pat`s by `const_to_pat`. Constants that remain are
                        // opaque.
                        _ => Opaque,
                    }
                }
            }
            &PatKind::Range(PatRange { lo, hi, end }) => {
                let ty = lo.ty;
                if let Some(int_range) = IntRange::from_range(
                    cx.tcx,
                    lo.eval_bits(cx.tcx, cx.param_env, lo.ty),
                    hi.eval_bits(cx.tcx, cx.param_env, hi.ty),
                    ty,
                    &end,
                ) {
                    IntRange(int_range)
                } else {
                    // Non-integral range patterns are treated as float ranges.
                    FloatRange(lo, hi, end)
                }
            }
            PatKind::Array { prefix, slice, suffix } | PatKind::Slice { prefix, slice, suffix } => {
                let array_len = match pat.ty.kind() {
                    ty::Array(_, length) => Some(length.eval_usize(cx.tcx, cx.param_env)),
                    ty::Slice(_) => None,
                    _ => span_bug!(pat.span, "bad ty {:?} for slice pattern", pat.ty),
                };
                let prefix = prefix.len() as u64;
                let suffix = suffix.len() as u64;
                // A `..` in the middle makes the pattern variable-length.
                let kind = if slice.is_some() {
                    VarLen(prefix, suffix)
                } else {
                    FixedLen(prefix + suffix)
                };
                Slice(Slice::new(array_len, kind))
            }
            PatKind::Or { .. } => bug!("Or-pattern should have been expanded earlier on."),
        }
    }

    /// Some constructors (namely `Wildcard`, `IntRange` and `Slice`) actually stand for a set of actual
    /// constructors (like variants, integers or fixed-sized slices). When specializing for these
    /// constructors, we want to be specialising for the actual underlying constructors.
    /// Naively, we would simply return the list of constructors they correspond to. We instead are
    /// more clever: if there are constructors that we know will behave the same wrt the current
    /// matrix, we keep them grouped. For example, all slices of a sufficiently large length
    /// will either be all useful or all non-useful with a given matrix.
    ///
    /// See the branches for details on how the splitting is done.
    ///
    /// This function may discard some irrelevant constructors if this preserves behavior and
    /// diagnostics. Eg. for the `_` case, we ignore the constructors already present in the
    /// matrix, unless all of them are.
    ///
    /// `hir_id` is `None` when we're evaluating the wildcard pattern. In that case we do not want
    /// to lint for overlapping ranges.
    pub(super) fn split<'p>(
        &self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        hir_id: Option<HirId>,
    ) -> SmallVec<[Self; 1]> {
        debug!("Constructor::split({:#?}, {:#?})", self, pcx.matrix);

        match self {
            Wildcard => Constructor::split_wildcard(pcx),
            // Fast-track if the range is trivial. In particular, we don't do the overlapping
            // ranges check.
            IntRange(ctor_range) if !ctor_range.is_singleton() => ctor_range.split(pcx, hir_id),
            Slice(slice @ Slice { kind: VarLen(..), .. }) => slice.split(pcx),
            // Any other constructor can be used unchanged.
            _ => smallvec![self.clone()],
        }
    }

    /// For wildcards, there are two groups of constructors: there are the constructors actually
    /// present in the matrix (`head_ctors`), and the constructors not present (`missing_ctors`).
    /// Two constructors that are not in the matrix will either both be caught (by a wildcard), or
    /// both not be caught. Therefore we can keep the missing constructors grouped together.
    fn split_wildcard<'p>(pcx: PatCtxt<'_, 'p, 'tcx>) -> SmallVec<[Self; 1]> {
        // Missing constructors are those that are not matched by any non-wildcard patterns in the
        // current column. We only fully construct them on-demand, because they're rarely used and
        // can be big.
        let missing_ctors = MissingConstructors::new(pcx);
        if missing_ctors.is_empty(pcx) {
            // All the constructors are present in the matrix, so we just go through them all.
            // We must also split them first.
            missing_ctors.all_ctors
        } else {
            // Some constructors are missing, thus we can specialize with the wildcard constructor,
            // which will stand for those constructors that are missing, and behaves like any of
            // them.
            smallvec![Wildcard]
        }
    }

    /// Returns whether `self` is covered by `other`, i.e. whether `self` is a subset of `other`.
    /// For the simple cases, this is simply checking for equality. For the "grouped" constructors,
    /// this checks for inclusion.
    // We inline because this has a single call site in `Matrix::specialize_constructor`.
    #[inline]
    pub(super) fn is_covered_by<'p>(&self, pcx: PatCtxt<'_, 'p, 'tcx>, other: &Self) -> bool {
        // This must be kept in sync with `is_covered_by_any`.
        match (self, other) {
            // Wildcards cover anything
            (_, Wildcard) => true,
            // Wildcards are only covered by wildcards
            (Wildcard, _) => false,

            (Single, Single) => true,
            (Variant(self_id), Variant(other_id)) => self_id == other_id,

            (IntRange(self_range), IntRange(other_range)) => self_range.is_covered_by(other_range),
            (
                FloatRange(self_from, self_to, self_end),
                FloatRange(other_from, other_to, other_end),
            ) => {
                match (
                    compare_const_vals(pcx.cx.tcx, self_to, other_to, pcx.cx.param_env, pcx.ty),
                    compare_const_vals(pcx.cx.tcx, self_from, other_from, pcx.cx.param_env, pcx.ty),
                ) {
                    (Some(to), Some(from)) => {
                        // Inclusion holds iff `other_from <= self_from` and
                        // `self_to <= other_to` (with end-inclusivity taken into account).
                        (from == Ordering::Greater || from == Ordering::Equal)
                            && (to == Ordering::Less
                                || (other_end == self_end && to == Ordering::Equal))
                    }
                    _ => false,
                }
            }
            (Str(self_val), Str(other_val)) => {
                // FIXME: there's probably a more direct way of comparing for equality
                match compare_const_vals(pcx.cx.tcx, self_val, other_val, pcx.cx.param_env, pcx.ty)
                {
                    Some(comparison) => comparison == Ordering::Equal,
                    None => false,
                }
            }
            (Slice(self_slice), Slice(other_slice)) => self_slice.is_covered_by(*other_slice),

            // We are trying to inspect an opaque constant. Thus we skip the row.
            (Opaque, _) | (_, Opaque) => false,
            // Only a wildcard pattern can match the special extra constructor.
            (NonExhaustive, _) => false,

            _ => span_bug!(
                pcx.span,
                "trying to compare incompatible constructors {:?} and {:?}",
                self,
                other
            ),
        }
    }

    /// Faster version of `is_covered_by` when applied to many constructors. `used_ctors` is
    /// assumed to be built from `matrix.head_ctors()` with wildcards filtered out, and `self` is
    /// assumed to have been split from a wildcard.
    fn is_covered_by_any<'p>(
        &self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        used_ctors: &[Constructor<'tcx>],
    ) -> bool {
        if used_ctors.is_empty() {
            return false;
        }

        // This must be kept in sync with `is_covered_by`.
        match self {
            // If `self` is `Single`, `used_ctors` cannot contain anything else than `Single`s.
            Single => !used_ctors.is_empty(),
            Variant(_) => used_ctors.iter().any(|c| c == self),
            IntRange(range) => used_ctors
                .iter()
                .filter_map(|c| c.as_int_range())
                .any(|other| range.is_covered_by(other)),
            Slice(slice) => used_ctors
                .iter()
                .filter_map(|c| c.as_slice())
                .any(|other| slice.is_covered_by(other)),
            // This constructor is never covered by anything else
            NonExhaustive => false,
            Str(..) | FloatRange(..) | Opaque | Wildcard => {
                span_bug!(pcx.span, "found unexpected ctor in all_ctors: {:?}", self)
            }
        }
    }
}
/// This determines the set of all possible constructors of a pattern matching
/// values of type `left_ty`. For vectors, this would normally be an infinite set
/// but is instead bounded by the maximum fixed length of slice patterns in
/// the column of patterns being analyzed.
///
/// We make sure to omit constructors that are statically impossible. E.g., for
/// `Option<!>`, we do not include `Some(_)` in the returned list of constructors.
/// Invariant: this returns an empty `Vec` if and only if the type is uninhabited (as determined by
/// `cx.is_uninhabited()`).
fn all_constructors<'p, 'tcx>(pcx: PatCtxt<'_, 'p, 'tcx>) -> Vec<Constructor<'tcx>> {
    debug!("all_constructors({:?})", pcx.ty);
    let cx = pcx.cx;
    // Endpoints here are raw (pre-bias) bits; `from_range` applies the signed bias.
    let make_range = |start, end| {
        IntRange(
            // `unwrap()` is ok because we know the type is an integer.
            IntRange::from_range(cx.tcx, start, end, pcx.ty, &RangeEnd::Included).unwrap(),
        )
    };
    match pcx.ty.kind() {
        ty::Bool => vec![make_range(0, 1)],
        ty::Array(sub_ty, len) if len.try_eval_usize(cx.tcx, cx.param_env).is_some() => {
            let len = len.eval_usize(cx.tcx, cx.param_env);
            if len != 0 && cx.is_uninhabited(sub_ty) {
                // A non-empty array of an uninhabited type has no values at all.
                vec![]
            } else {
                vec![Slice(Slice::new(Some(len), VarLen(0, 0)))]
            }
        }
        // Treat arrays of a constant but unknown length like slices.
        ty::Array(sub_ty, _) | ty::Slice(sub_ty) => {
            // Uninhabited element type: only the empty slice `[]` can exist.
            let kind = if cx.is_uninhabited(sub_ty) { FixedLen(0) } else { VarLen(0, 0) };
            vec![Slice(Slice::new(None, kind))]
        }
        ty::Adt(def, substs) if def.is_enum() => {
            // If the enum is declared as `#[non_exhaustive]`, we treat it as if it had an
            // additional "unknown" constructor.
            // There is no point in enumerating all possible variants, because the user can't
            // actually match against them all themselves. So we always return only the fictitious
            // constructor.
            // E.g., in an example like:
            //
            // ```
            //     let err: io::ErrorKind = ...;
            //     match err {
            //         io::ErrorKind::NotFound => {},
            //     }
            // ```
            //
            // we don't want to show every possible IO error, but instead have only `_` as the
            // witness.
            let is_declared_nonexhaustive = cx.is_foreign_non_exhaustive_enum(pcx.ty);

            // If `exhaustive_patterns` is disabled and our scrutinee is an empty enum, we treat it
            // as though it had an "unknown" constructor to avoid exposing its emptiness. The
            // exception is if the pattern is at the top level, because we want empty matches to be
            // considered exhaustive.
            let is_secretly_empty = def.variants.is_empty()
                && !cx.tcx.features().exhaustive_patterns
                && !pcx.is_top_level;

            if is_secretly_empty || is_declared_nonexhaustive {
                vec![NonExhaustive]
            } else if cx.tcx.features().exhaustive_patterns {
                // If `exhaustive_patterns` is enabled, we exclude variants known to be
                // uninhabited.
                def.variants
                    .iter()
                    .filter(|v| {
                        !v.uninhabited_from(cx.tcx, substs, def.adt_kind(), cx.param_env)
                            .contains(cx.tcx, cx.module)
                    })
                    .map(|v| Variant(v.def_id))
                    .collect()
            } else {
                def.variants.iter().map(|v| Variant(v.def_id)).collect()
            }
        }
        ty::Char => {
            vec![
                // The valid Unicode Scalar Value ranges.
                make_range('\u{0000}' as u128, '\u{D7FF}' as u128),
                make_range('\u{E000}' as u128, '\u{10FFFF}' as u128),
            ]
        }
        ty::Int(_) | ty::Uint(_)
            if pcx.ty.is_ptr_sized_integral()
                && !cx.tcx.features().precise_pointer_size_matching =>
        {
            // `usize`/`isize` are not allowed to be matched exhaustively unless the
            // `precise_pointer_size_matching` feature is enabled. So we treat those types like
            // `#[non_exhaustive]` enums by returning a special unmatcheable constructor.
            vec![NonExhaustive]
        }
        &ty::Int(ity) => {
            // Raw two's-complement bit patterns: `min` is `1000...0`, `max` is `0111...1`.
            let bits = Integer::from_attr(&cx.tcx, SignedInt(ity)).size().bits() as u128;
            let min = 1u128 << (bits - 1);
            let max = min - 1;
            vec![make_range(min, max)]
        }
        &ty::Uint(uty) => {
            let size = Integer::from_attr(&cx.tcx, UnsignedInt(uty)).size();
            let max = size.truncate(u128::MAX);
            vec![make_range(0, max)]
        }
        // If `exhaustive_patterns` is disabled and our scrutinee is the never type, we cannot
        // expose its emptiness. The exception is if the pattern is at the top level, because we
        // want empty matches to be considered exhaustive.
        ty::Never if !cx.tcx.features().exhaustive_patterns && !pcx.is_top_level => {
            vec![NonExhaustive]
        }
        ty::Never => vec![],
        _ if cx.is_uninhabited(pcx.ty) => vec![],
        ty::Adt(..) | ty::Tuple(..) | ty::Ref(..) => vec![Single],
        // This type is one for which we cannot list constructors, like `str` or `f64`.
        _ => vec![NonExhaustive],
    }
}
/// A struct to compute a set of constructors equivalent to `all_ctors \ used_ctors`.
#[derive(Debug)]
pub(super) struct MissingConstructors<'tcx> {
    /// All the constructors of the type under scrutiny, after splitting.
    all_ctors: SmallVec<[Constructor<'tcx>; 1]>,
    /// The non-wildcard constructors that appear at the head of the matrix.
    used_ctors: Vec<Constructor<'tcx>>,
}
impl<'tcx> MissingConstructors<'tcx> {
    /// Computes the constructors missing from the column described by `pcx`.
    pub(super) fn new<'p>(pcx: PatCtxt<'_, 'p, 'tcx>) -> Self {
        let seen: Vec<Constructor<'_>> = pcx
            .matrix
            .head_ctors(pcx.cx)
            .cloned()
            .filter(|ctor| !ctor.is_wildcard())
            .collect();
        // `all_constructors` never yields wildcards, so splitting here cannot recurse further.
        let candidates = all_constructors(pcx)
            .into_iter()
            .flat_map(|ctor| ctor.split(pcx, None))
            .collect();
        MissingConstructors { all_ctors: candidates, used_ctors: seen }
    }
    /// Whether every constructor of the type is covered by the matrix.
    fn is_empty<'p>(&self, pcx: PatCtxt<'_, 'p, 'tcx>) -> bool {
        self.iter(pcx).next().is_none()
    }
    /// Iterate over all_ctors \ used_ctors
    fn iter<'a, 'p>(
        &'a self,
        pcx: PatCtxt<'a, 'p, 'tcx>,
    ) -> impl Iterator<Item = &'a Constructor<'tcx>> + Captures<'p> {
        self.all_ctors.iter().filter(move |ctor| !ctor.is_covered_by_any(pcx, &self.used_ctors))
    }
    /// List the patterns corresponding to the missing constructors. In some cases, instead of
    /// listing all constructors of a given type, we prefer to simply report a wildcard.
    pub(super) fn report_patterns<'p>(
        &self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
    ) -> SmallVec<[Pat<'tcx>; 1]> {
        // There are 2 ways we can report a witness here. Commonly, we report every "free"
        // constructor as its own witness, e.g. with
        //
        // ```
        // enum Direction { N, S, E, W }
        // let Direction::N = ...;
        // ```
        //
        // we report the 3 witnesses `S`, `E` and `W`.
        //
        // However, when the user did not name any constructor in this column at all, as in
        //
        // ```
        // let x: (Direction, Direction, bool) = ...;
        // let (_, _, false) = x;
        // ```
        //
        // listing all 16 `(<direction-1>, <direction-2>, true)` witnesses is noise; a single
        // `(_, _, true)` is enough. That situation is exactly `used_ctors.is_empty()`.
        // The exception is the top level (e.g. an empty match), where we sometimes prefer the
        // full list of constructors over a bare `_`.
        let report_when_all_missing = pcx.is_top_level && !IntRange::is_integral(pcx.ty);
        if !self.used_ctors.is_empty() || report_when_all_missing {
            // For each missing constructor, build the "wild" version of it: the pattern
            // matching everything that constructor can produce. E.g. for the
            // `Constructor::Variant` of `Option::Some` this yields `Some(_)`.
            self.iter(pcx)
                .map(|missing| Fields::wildcards(pcx, missing).apply(pcx, missing))
                .collect()
        } else {
            // Every constructor is unused: a single wildcard witness suffices.
            smallvec![Pat::wildcard_from_ty(pcx.ty)]
        }
    }
}
/// Some fields need to be explicitly hidden away in certain cases; see the comment above the
/// `Fields` struct. This struct represents such a potentially-hidden field. When a field is hidden
/// we still keep its type around.
#[derive(Debug, Copy, Clone)]
pub(super) enum FilteredField<'p, 'tcx> {
    /// A visible field, carrying its pattern.
    Kept(&'p Pat<'tcx>),
    /// A hidden field; only the type is retained so wildcards can be rebuilt later.
    Hidden(Ty<'tcx>),
}
impl<'p, 'tcx> FilteredField<'p, 'tcx> {
    /// Returns the pattern when this field was kept, and `None` when it was hidden.
    fn kept(self) -> Option<&'p Pat<'tcx>> {
        if let FilteredField::Kept(pat) = self { Some(pat) } else { None }
    }
    /// Converts back to a pattern; a hidden field materializes as a wildcard of its type.
    fn to_pattern(self) -> Pat<'tcx> {
        match self {
            FilteredField::Hidden(ty) => Pat::wildcard_from_ty(ty),
            FilteredField::Kept(pat) => pat.clone(),
        }
    }
}
/// A value can be decomposed into a constructor applied to some fields. This struct represents
/// those fields, generalized to allow patterns in each field. See also `Constructor`.
///
/// If a private or `non_exhaustive` field is uninhabited, the code mustn't observe that it is
/// uninhabited. For that, we filter these fields out of the matrix. This is subtle because we
/// still need to have those fields back when going to/from a `Pat`. Most of this is handled
/// automatically in `Fields`, but when constructing or deconstructing `Fields` you need to be
/// careful. As a rule, when going to/from the matrix, use the filtered field list; when going
/// to/from `Pat`, use the full field list.
/// This filtering is uncommon in practice, because uninhabited fields are rarely used, so we avoid
/// it when possible to preserve performance.
#[derive(Debug, Clone)]
pub(super) enum Fields<'p, 'tcx> {
    /// Lists of patterns that don't contain any filtered fields.
    /// `Slice` and `Vec` behave the same; the difference is only to avoid allocating and
    /// triple-dereferences when possible. Frankly this is premature optimization, I (Nadrieril)
    /// have not measured if it really made a difference.
    Slice(&'p [Pat<'tcx>]),
    Vec(SmallVec<[&'p Pat<'tcx>; 2]>),
    /// Patterns where some of the fields need to be hidden. `kept_count` caches the number of
    /// non-hidden fields.
    Filtered {
        fields: SmallVec<[FilteredField<'p, 'tcx>; 2]>,
        kept_count: usize,
    },
}
impl<'p, 'tcx> Fields<'p, 'tcx> {
    /// The field list of an arity-0 constructor.
    fn empty() -> Self {
        Fields::Slice(&[])
    }
    /// Construct a new `Fields` from the given pattern. Must not be used if the pattern is a field
    /// of a struct/tuple/variant.
    fn from_single_pattern(pat: &'p Pat<'tcx>) -> Self {
        Fields::Slice(std::slice::from_ref(pat))
    }
    /// Convenience; internal use. Builds one wildcard pattern per type, arena-allocated.
    fn wildcards_from_tys(
        cx: &MatchCheckCtxt<'p, 'tcx>,
        tys: impl IntoIterator<Item = Ty<'tcx>>,
    ) -> Self {
        let wilds = tys.into_iter().map(Pat::wildcard_from_ty);
        let pats = cx.pattern_arena.alloc_from_iter(wilds);
        Fields::Slice(pats)
    }
    /// Creates a new list of wildcard fields for a given constructor.
    pub(super) fn wildcards(pcx: PatCtxt<'_, 'p, 'tcx>, constructor: &Constructor<'tcx>) -> Self {
        let ty = pcx.ty;
        let cx = pcx.cx;
        let wildcard_from_ty = |ty| &*cx.pattern_arena.alloc(Pat::wildcard_from_ty(ty));
        let ret = match constructor {
            Single | Variant(_) => match ty.kind() {
                ty::Tuple(ref fs) => {
                    Fields::wildcards_from_tys(cx, fs.into_iter().map(|ty| ty.expect_ty()))
                }
                ty::Ref(_, rty, _) => Fields::from_single_pattern(wildcard_from_ty(rty)),
                ty::Adt(adt, substs) => {
                    if adt.is_box() {
                        // Use T as the sub pattern type of Box<T>.
                        Fields::from_single_pattern(wildcard_from_ty(substs.type_at(0)))
                    } else {
                        let variant = &adt.variants[constructor.variant_index_for_adt(adt)];
                        // Whether we must not match the fields of this variant exhaustively.
                        let is_non_exhaustive =
                            variant.is_field_list_non_exhaustive() && !adt.did.is_local();
                        let field_tys = variant.fields.iter().map(|field| field.ty(cx.tcx, substs));
                        // In the following cases, we don't need to filter out any fields. This is
                        // the vast majority of real cases, since uninhabited fields are uncommon.
                        let has_no_hidden_fields = (adt.is_enum() && !is_non_exhaustive)
                            || !field_tys.clone().any(|ty| cx.is_uninhabited(ty));
                        if has_no_hidden_fields {
                            Fields::wildcards_from_tys(cx, field_tys)
                        } else {
                            // Slow path: walk the fields and hide the problematic ones, counting
                            // the kept fields as we go so `len()` stays O(1).
                            let mut kept_count = 0;
                            let fields = variant
                                .fields
                                .iter()
                                .map(|field| {
                                    let ty = field.ty(cx.tcx, substs);
                                    let is_visible = adt.is_enum()
                                        || field.vis.is_accessible_from(cx.module, cx.tcx);
                                    let is_uninhabited = cx.is_uninhabited(ty);
                                    // In the cases of either a `#[non_exhaustive]` field list
                                    // or a non-public field, we hide uninhabited fields in
                                    // order not to reveal the uninhabitedness of the whole
                                    // variant.
                                    if is_uninhabited && (!is_visible || is_non_exhaustive) {
                                        FilteredField::Hidden(ty)
                                    } else {
                                        kept_count += 1;
                                        FilteredField::Kept(wildcard_from_ty(ty))
                                    }
                                })
                                .collect();
                            Fields::Filtered { fields, kept_count }
                        }
                    }
                }
                _ => bug!("Unexpected type for `Single` constructor: {:?}", ty),
            },
            Slice(slice) => match *ty.kind() {
                ty::Slice(ty) | ty::Array(ty, _) => {
                    // One wildcard per element matched by the slice pattern.
                    let arity = slice.arity();
                    Fields::wildcards_from_tys(cx, (0..arity).map(|_| ty))
                }
                _ => bug!("bad slice pattern {:?} {:?}", constructor, ty),
            },
            // These constructors take no fields.
            Str(..) | FloatRange(..) | IntRange(..) | NonExhaustive | Opaque | Wildcard => {
                Fields::empty()
            }
        };
        debug!("Fields::wildcards({:?}, {:?}) = {:#?}", constructor, ty, ret);
        ret
    }
    /// Apply a constructor to a list of patterns, yielding a new pattern. `self`
    /// must have as many elements as this constructor's arity.
    ///
    /// This is roughly the inverse of `specialize_constructor`.
    ///
    /// Examples:
    /// `ctor`: `Constructor::Single`
    /// `ty`: `Foo(u32, u32, u32)`
    /// `self`: `[10, 20, _]`
    /// returns `Foo(10, 20, _)`
    ///
    /// `ctor`: `Constructor::Variant(Option::Some)`
    /// `ty`: `Option<bool>`
    /// `self`: `[false]`
    /// returns `Some(false)`
    pub(super) fn apply(self, pcx: PatCtxt<'_, 'p, 'tcx>, ctor: &Constructor<'tcx>) -> Pat<'tcx> {
        // Note: this iterates the *full* field list, including hidden fields (as wildcards).
        let mut subpatterns = self.all_patterns();
        let pat = match ctor {
            Single | Variant(_) => match pcx.ty.kind() {
                ty::Adt(..) | ty::Tuple(..) => {
                    let subpatterns = subpatterns
                        .enumerate()
                        .map(|(i, p)| FieldPat { field: Field::new(i), pattern: p })
                        .collect();
                    if let ty::Adt(adt, substs) = pcx.ty.kind() {
                        if adt.is_enum() {
                            PatKind::Variant {
                                adt_def: adt,
                                substs,
                                variant_index: ctor.variant_index_for_adt(adt),
                                subpatterns,
                            }
                        } else {
                            PatKind::Leaf { subpatterns }
                        }
                    } else {
                        PatKind::Leaf { subpatterns }
                    }
                }
                // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should
                // be careful to reconstruct the correct constant pattern here. However a string
                // literal pattern will never be reported as a non-exhaustiveness witness, so we
                // can ignore this issue.
                ty::Ref(..) => PatKind::Deref { subpattern: subpatterns.next().unwrap() },
                ty::Slice(_) | ty::Array(..) => bug!("bad slice pattern {:?} {:?}", ctor, pcx.ty),
                _ => PatKind::Wild,
            },
            Slice(slice) => match slice.kind {
                FixedLen(_) => {
                    PatKind::Slice { prefix: subpatterns.collect(), slice: None, suffix: vec![] }
                }
                VarLen(prefix, _) => {
                    let mut prefix: Vec<_> = subpatterns.by_ref().take(prefix as usize).collect();
                    if slice.array_len.is_some() {
                        // Improves diagnostics a bit: if the type is a known-size array, instead
                        // of reporting `[x, _, .., _, y]`, we prefer to report `[x, .., y]`.
                        // This is incorrect if the size is not known, since `[_, ..]` captures
                        // arrays of lengths `>= 1` whereas `[..]` captures any length.
                        while !prefix.is_empty() && prefix.last().unwrap().is_wildcard() {
                            prefix.pop();
                        }
                    }
                    let suffix: Vec<_> = if slice.array_len.is_some() {
                        // Same as above.
                        subpatterns.skip_while(Pat::is_wildcard).collect()
                    } else {
                        subpatterns.collect()
                    };
                    let wild = Pat::wildcard_from_ty(pcx.ty);
                    PatKind::Slice { prefix, slice: Some(wild), suffix }
                }
            },
            &Str(value) => PatKind::Constant { value },
            &FloatRange(lo, hi, end) => PatKind::Range(PatRange { lo, hi, end }),
            IntRange(range) => return range.to_pat(pcx.cx.tcx, pcx.ty),
            NonExhaustive => PatKind::Wild,
            Opaque => bug!("we should not try to apply an opaque constructor"),
            Wildcard => bug!(
                "trying to apply a wildcard constructor; this should have been done in `apply_constructors`"
            ),
        };
        Pat { ty: pcx.ty, span: DUMMY_SP, kind: Box::new(pat) }
    }
    /// Returns the number of patterns from the viewpoint of match-checking, i.e. excluding hidden
    /// fields. This is what we want in most cases in this file, the only exception being
    /// conversion to/from `Pat`.
    pub(super) fn len(&self) -> usize {
        match self {
            Fields::Slice(pats) => pats.len(),
            Fields::Vec(pats) => pats.len(),
            Fields::Filtered { kept_count, .. } => *kept_count,
        }
    }
    /// Returns the complete list of patterns, including hidden fields.
    fn all_patterns(self) -> impl Iterator<Item = Pat<'tcx>> {
        let pats: SmallVec<[_; 2]> = match self {
            Fields::Slice(pats) => pats.iter().cloned().collect(),
            Fields::Vec(pats) => pats.into_iter().cloned().collect(),
            Fields::Filtered { fields, .. } => {
                // We don't skip any fields here.
                fields.into_iter().map(|p| p.to_pattern()).collect()
            }
        };
        pats.into_iter()
    }
    /// Returns the filtered list of patterns, not including hidden fields.
    pub(super) fn filtered_patterns(self) -> SmallVec<[&'p Pat<'tcx>; 2]> {
        match self {
            Fields::Slice(pats) => pats.iter().collect(),
            Fields::Vec(pats) => pats,
            Fields::Filtered { fields, .. } => {
                // We skip hidden fields here
                fields.into_iter().filter_map(|p| p.kept()).collect()
            }
        }
    }
    /// Overrides some of the fields with the provided patterns. Exactly like
    /// `replace_fields_indexed`, except that it takes `FieldPat`s as input.
    fn replace_with_fieldpats(
        &self,
        new_pats: impl IntoIterator<Item = &'p FieldPat<'tcx>>,
    ) -> Self {
        self.replace_fields_indexed(
            new_pats.into_iter().map(|pat| (pat.field.index(), &pat.pattern)),
        )
    }
    /// Overrides some of the fields with the provided patterns. This is used when a pattern
    /// defines some fields but not all, for example `Foo { field1: Some(_), .. }`: here we start with a
    /// `Fields` that is just one wildcard per field of the `Foo` struct, and override the entry
    /// corresponding to `field1` with the pattern `Some(_)`. This is also used for slice patterns
    /// for the same reason.
    fn replace_fields_indexed(
        &self,
        new_pats: impl IntoIterator<Item = (usize, &'p Pat<'tcx>)>,
    ) -> Self {
        let mut fields = self.clone();
        // A borrowed `Slice` cannot be mutated in place; upgrade it to an owned `Vec` first.
        if let Fields::Slice(pats) = fields {
            fields = Fields::Vec(pats.iter().collect());
        }
        match &mut fields {
            Fields::Vec(pats) => {
                for (i, pat) in new_pats {
                    pats[i] = pat
                }
            }
            Fields::Filtered { fields, .. } => {
                for (i, pat) in new_pats {
                    // Indices normalized to the full list of fields; hidden fields stay hidden.
                    if let FilteredField::Kept(p) = &mut fields[i] {
                        *p = pat
                    }
                }
            }
            Fields::Slice(_) => unreachable!(),
        }
        fields
    }
    /// Replaces contained fields with the given filtered list of patterns, e.g. taken from the
    /// matrix. There must be `len()` patterns in `pats`.
    pub(super) fn replace_fields(
        &self,
        cx: &MatchCheckCtxt<'p, 'tcx>,
        pats: impl IntoIterator<Item = Pat<'tcx>>,
    ) -> Self {
        let pats: &[_] = cx.pattern_arena.alloc_from_iter(pats);
        match self {
            Fields::Filtered { fields, kept_count } => {
                let mut pats = pats.iter();
                let mut fields = fields.clone();
                for f in &mut fields {
                    if let FilteredField::Kept(p) = f {
                        // We take one input pattern for each `Kept` field, in order.
                        *p = pats.next().unwrap();
                    }
                }
                Fields::Filtered { fields, kept_count: *kept_count }
            }
            _ => Fields::Slice(pats),
        }
    }
    /// Replaces contained fields with the arguments of the given pattern. Only use on a pattern
    /// that is compatible with the constructor used to build `self`.
    /// This is meant to be used on the result of `Fields::wildcards()`. The idea is that
    /// `wildcards` constructs a list of fields where all entries are wildcards, and the pattern
    /// provided to this function fills some of the fields with non-wildcards.
    /// In the following example `Fields::wildcards` would return `[_, _, _, _]`. If we call
    /// `replace_with_pattern_arguments` on it with the pattern, the result will be `[Some(0), _,
    /// _, _]`.
    /// ```rust
    /// let x: [Option<u8>; 4] = foo();
    /// match x {
    ///     [Some(0), ..] => {}
    /// }
    /// ```
    /// This is guaranteed to preserve the number of patterns in `self`.
    pub(super) fn replace_with_pattern_arguments(&self, pat: &'p Pat<'tcx>) -> Self {
        match pat.kind.as_ref() {
            PatKind::Deref { subpattern } => {
                assert_eq!(self.len(), 1);
                Fields::from_single_pattern(subpattern)
            }
            PatKind::Leaf { subpatterns } | PatKind::Variant { subpatterns, .. } => {
                self.replace_with_fieldpats(subpatterns)
            }
            PatKind::Array { prefix, suffix, .. } | PatKind::Slice { prefix, suffix, .. } => {
                // Number of subpatterns for the constructor
                let ctor_arity = self.len();
                // Replace the prefix and the suffix with the given patterns, leaving wildcards in
                // the middle if there was a subslice pattern `..`.
                let prefix = prefix.iter().enumerate();
                let suffix =
                    suffix.iter().enumerate().map(|(i, p)| (ctor_arity - suffix.len() + i, p));
                self.replace_fields_indexed(prefix.chain(suffix))
            }
            _ => self.clone(),
        }
    }
}
|
use std::collections::{HashSet, HashMap};
use std::process::{Command, Stdio};
use std::path::{Path, PathBuf};
use std::io::{self, BufRead, BufReader};
use std::ffi::OsString;
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use std::cell::Cell;
use std::env;
use std::thread;
use std::str;
use std::error;
use std::fmt;
use cargo_metadata;
use serde_json;
mod cargo_output;
mod rustc_diagnostic;
mod diagnostic_formatter;
use self::cargo_output::{CargoOutput, PackageId};
/// Whether artifacts are compiled with (`Release`) or without (`Debug`) optimizations.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum BuildType {
    Debug,
    Release,
}
/// The cargo compilation profile a target is built under.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Profile {
    Main,
    Test,
    Bench,
}
/// The kind of a cargo build target, as reported by `cargo metadata`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum TargetKind {
    Lib,
    CDyLib,
    Bin,
    Example,
    Test,
    Bench,
}
/// A parsed view of a whole cargo workspace, built from `cargo metadata` output.
#[derive(Clone, Debug)]
pub struct CargoProject {
    /// Every known package, including non-workspace dependencies.
    pub packages: Vec<CargoPackage>,
    /// The workspace's `target` directory.
    pub target_directory: String,
}
/// Newtype over `cargo_metadata`'s `PackageId` that adds `Eq` and `Hash`.
#[derive(Clone, Debug)]
pub struct CargoPackageId(PackageId);
// TODO: Fix this upstream.
impl PartialEq for CargoPackageId {
    fn eq(&self, rhs: &CargoPackageId) -> bool {
        // Two ids are equal when name, version and source URL all agree.
        self.0.name() == rhs.0.name()
            && self.0.version() == rhs.0.version()
            && self.0.url() == rhs.0.url()
    }
}
impl Eq for CargoPackageId {}
impl Hash for CargoPackageId {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Must hash exactly the components compared by `PartialEq` above.
        self.0.name().hash(state);
        self.0.version().hash(state);
        self.0.url().hash(state);
    }
}
impl CargoPackageId {
fn new( id: &str ) -> Option< Self > {
let value = serde_json::Value::String( id.to_owned() );
match serde_json::from_value( value ).ok() {
Some( package_id ) => Some( CargoPackageId( package_id ) ),
None => None
}
}
}
impl Deref for CargoPackageId {
type Target = PackageId;
fn deref( &self ) -> &Self::Target {
&self.0
}
}
/// One package from the metadata, with its targets and (resolved) dependencies.
#[derive(Clone, PartialEq, Debug)]
pub struct CargoPackage {
    pub id: CargoPackageId,
    pub name: String,
    /// Path to this package's `Cargo.toml`.
    pub manifest_path: PathBuf,
    /// Directory containing the manifest.
    pub crate_root: PathBuf,
    pub targets: Vec<CargoTarget>,
    pub dependencies: Vec<CargoDependency>,
    /// Whether the package belongs to the current workspace.
    pub is_workspace_member: bool,
    /// Whether this is the package the current working directory points at.
    pub is_default: bool,
}
/// A single buildable target (lib, bin, example, ...) of a package.
#[derive(Clone, PartialEq, Debug)]
pub struct CargoTarget {
    pub name: String,
    pub kind: TargetKind,
    /// Directory holding the target's root source file.
    pub source_directory: PathBuf,
}
/// Which dependency section of `Cargo.toml` an edge comes from.
#[derive(Clone, PartialEq, Debug)]
pub enum CargoDependencyKind {
    Normal,
    Development,
    Build,
}
/// The platform condition attached to a dependency, if any.
#[derive(Clone, PartialEq, Debug)]
pub enum CargoDependencyTarget {
    /// An exact target triplet.
    Target(String),
    Emscripten, // TODO: Remove these hardcodes.
    NonEmscripten,
}
impl CargoDependencyTarget {
    /// Checks whether this dependency applies when compiling for `triplet`.
    fn matches(&self, triplet: &str) -> bool {
        let is_emscripten = triplet.ends_with("-emscripten");
        match *self {
            CargoDependencyTarget::Target(ref target) => target == triplet,
            CargoDependencyTarget::Emscripten => is_emscripten,
            CargoDependencyTarget::NonEmscripten => !is_emscripten,
        }
    }
}
/// A dependency edge declared by a package.
#[derive(Clone, PartialEq, Debug)]
pub struct CargoDependency {
    pub name: String,
    pub kind: CargoDependencyKind,
    /// Platform condition, when the dependency is target-specific.
    pub target: Option<CargoDependencyTarget>,
    /// The concrete package this edge resolved to; filled from the `resolve` graph.
    pub resolved_to: Option<CargoPackageId>,
}
#[derive(Debug)]
pub enum Error {
CannotLaunchCargo( io::Error ),
CargoFailed( String ),
CannotParseCargoOutput( serde_json::Error )
}
impl error::Error for Error {
fn description( &self ) -> &str {
match *self {
Error::CannotLaunchCargo( _ ) => "cannot launch cargo",
Error::CargoFailed( _ ) => "cargo failed",
Error::CannotParseCargoOutput( _ ) => "cannot parse cargo output"
}
}
}
impl fmt::Display for Error {
fn fmt( &self, formatter: &mut fmt::Formatter ) -> fmt::Result {
use std::error::Error as StdError;
match *self {
Error::CannotLaunchCargo( ref err ) => write!( formatter, "{}: {}", self.description(), err ),
Error::CargoFailed( ref err ) => write!( formatter, "{}: {}", self.description(), err ),
Error::CannotParseCargoOutput( ref err ) => write!( formatter, "{}: {}", self.description(), err )
}
}
}
impl CargoProject {
    /// Builds a `CargoProject` by running `cargo metadata` with the given feature flags and
    /// parsing its JSON output. Also resolves each declared dependency to a concrete package
    /// (via the `resolve` section) and marks the package the current directory belongs to as
    /// the default one.
    ///
    /// Returns an `Error` when cargo cannot be launched, exits unsuccessfully, or emits
    /// unparsable output. Panics on malformed metadata (missing `resolve` section, unknown
    /// target kinds, unparsable package ids).
    pub fn new(
        manifest_path: Option< &str >,
        no_default_features: bool,
        enable_all_features: bool,
        features: &[String]
    ) -> Result< CargoProject, Error >
    {
        let cwd = env::current_dir().expect( "cannot get current working directory" );
        // Prefer the binary named by the `CARGO` env var when present; otherwise rely on PATH.
        let cargo = env::var( "CARGO" ).unwrap_or_else( |_|
            if cfg!( windows ) {
                "cargo.exe"
            } else {
                "cargo"
            }.to_owned()
        );
        let mut command = Command::new( cargo );
        command.arg( "metadata" );
        if no_default_features {
            command.arg( "--no-default-features" );
        }
        if enable_all_features {
            command.arg( "--all-features" );
        }
        if !features.is_empty() {
            command.arg( "--features" );
            // Cargo accepts a single space-separated feature list.
            command.arg( &features.join( " " ) );
        }
        command.arg( "--format-version" );
        command.arg( "1" );
        if let Some( manifest_path ) = manifest_path {
            command.arg( "--manifest-path" );
            command.arg( manifest_path );
        }
        if cfg!( unix ) {
            command.arg( "--color" );
            command.arg( "always" );
        }
        debug!( "Launching: {:?}", command );
        let output = command.output().map_err( |err| Error::CannotLaunchCargo( err ) )?;
        if !output.status.success() {
            // Forward cargo's own stderr as the error message.
            return Err( Error::CargoFailed( String::from_utf8_lossy( &output.stderr ).into_owned() ) );
        }
        let metadata = str::from_utf8( &output.stdout ).expect( "cargo output is not valid UTF-8" );
        let metadata: cargo_metadata::Metadata =
            serde_json::from_str( metadata ).map_err( |err| Error::CannotParseCargoOutput( err ) )?;
        // Collect workspace member names so each package can be flagged below.
        let mut workspace_members = HashSet::new();
        for member in metadata.workspace_members {
            workspace_members.insert( member.name().to_owned() );
        }
        let mut project = CargoProject {
            target_directory: metadata.target_directory,
            packages: metadata.packages.into_iter().map( |package| {
                let manifest_path: PathBuf = package.manifest_path.into();
                let is_workspace_member = workspace_members.contains( &*package.name );
                CargoPackage {
                    id: CargoPackageId::new( &package.id ).expect( "unparsable package id" ),
                    name: package.name,
                    crate_root: manifest_path.parent().unwrap().into(),
                    manifest_path: manifest_path,
                    is_workspace_member,
                    // Fixed up after the packages are collected; see below.
                    is_default: false,
                    targets: package.targets.into_iter().filter_map( |target| {
                        Some( CargoTarget {
                            name: target.name,
                            kind: match target.kind[ 0 ].as_str() {
                                "lib" => TargetKind::Lib,
                                "rlib" => TargetKind::Lib,
                                "cdylib" => TargetKind::CDyLib,
                                "dylib" => TargetKind::Lib,
                                "staticlib" => TargetKind::Lib,
                                "bin" => TargetKind::Bin,
                                "example" => TargetKind::Example,
                                "test" => TargetKind::Test,
                                "bench" => TargetKind::Bench,
                                // Build scripts and proc-macros are not buildable targets here.
                                "custom-build" => return None,
                                "proc-macro" => return None,
                                _ => panic!( "Unknown target kind: '{}'", target.kind[ 0 ] )
                            },
                            source_directory: Into::< PathBuf >::into( target.src_path ).parent().unwrap().into()
                        })
                    }).collect(),
                    dependencies: package.dependencies.into_iter().map( |dependency| {
                        // TODO: Make the `target` field public in `cargo_metadata`.
                        // Round-trip the dependency through JSON to read the private `target` field.
                        let json: serde_json::Value = serde_json::from_str( &serde_json::to_string( &dependency ).unwrap() ).unwrap();
                        let target = match json.get( "target" ).unwrap() {
                            &serde_json::Value::Null => None,
                            &serde_json::Value::String( ref target ) => {
                                // Whitespace is stripped so `cfg( ... )` spellings compare equal.
                                let target = match target.replace( " ", "" ).as_str() {
                                    // TODO: Do this properly.
                                    "cfg(target_os=\"emscripten\")" => CargoDependencyTarget::Emscripten,
                                    "cfg(not(target_os=\"emscripten\"))" => CargoDependencyTarget::NonEmscripten,
                                    _ => CargoDependencyTarget::Target( target.clone() )
                                };
                                Some( target )
                            },
                            _ => unreachable!()
                        };
                        CargoDependency {
                            name: dependency.name,
                            kind: match dependency.kind {
                                cargo_metadata::DependencyKind::Normal => CargoDependencyKind::Normal,
                                cargo_metadata::DependencyKind::Development => CargoDependencyKind::Development,
                                cargo_metadata::DependencyKind::Build => CargoDependencyKind::Build,
                                other => panic!( "Unknown dependency kind: {:?}", other )
                            },
                            target,
                            // Filled in from the `resolve` graph below.
                            resolved_to: None
                        }
                    }).collect()
                }
            }).collect()
        };
        // Index packages by id for the resolution pass.
        let mut package_map = HashMap::new();
        for (index, package) in project.packages.iter().enumerate() {
            package_map.insert( package.id.clone(), index );
        }
        // Wire each declared dependency to the concrete package cargo resolved it to.
        for node in metadata.resolve.expect( "missing `resolve` metadata section" ).nodes {
            let id = CargoPackageId::new( &node.id ).expect( "unparsable package id in the `resolve` metadata section" );
            let package_index = *package_map.get( &id ).expect( "extra entry in the `resolve` metadata section" );
            let package = &mut project.packages[ package_index ];
            for dependency_id in node.dependencies {
                let dependency_id = CargoPackageId::new( &dependency_id ).expect( "unparsable dependency package id" );
                let mut dependency_found = false;
                // A name can appear several times (e.g. normal + dev); all entries get resolved.
                for dependency in package.dependencies.iter_mut().filter( |dep| dep.name == dependency_id.name() ) {
                    assert!( dependency.resolved_to.is_none(), "duplicate dependency" );
                    dependency.resolved_to = Some( dependency_id.clone() );
                    dependency_found = true;
                }
                assert!( dependency_found, "dependency missing from packages" );
            }
        }
        // The default package is the workspace member whose directory shares the longest common
        // path prefix with the current working directory.
        let mut default_package: Option< (usize, usize) > = None;
        for (package_index, package) in project.packages.iter().enumerate() {
            if !package.is_workspace_member {
                continue;
            }
            let package_directory = package.manifest_path.parent().unwrap();
            if !cwd.starts_with( package_directory ) {
                continue;
            }
            let common_length = cwd.components().zip( package_directory.components() ).take_while( |&(a, b)| a == b ).count();
            if default_package == None || default_package.unwrap().1 < common_length {
                default_package = Some( (package_index, common_length) );
            }
        }
        if let Some( (default_package_index, _) ) = default_package {
            project.packages[ default_package_index ].is_default = true;
        }
        Ok( project )
    }
    /// Returns the package marked as default by `new`, if any.
    pub fn default_package( &self ) -> Option< &CargoPackage > {
        self.packages.iter().find( |package| package.is_default )
    }
    /// Returns `main_package` plus every package transitively reachable from it through
    /// resolved dependency edges that apply to the given `triplet` and `profile`.
    pub fn used_packages( &self, triplet: &str, main_package: &CargoPackage, profile: Profile ) -> Vec< &CargoPackage > {
        let mut package_map = HashMap::new();
        for (index, package) in self.packages.iter().enumerate() {
            package_map.insert( package.id.clone(), index );
        }
        // Per-package visited flag for the graph traversal below.
        struct Entry< 'a > {
            package: &'a CargoPackage,
            is_used: Cell< bool >
        }
        let mut queue = Vec::new();
        let entries: Vec< Entry > = self.packages.iter().enumerate().map( |(index, package)| {
            let is_main_package = package == main_package;
            if is_main_package {
                queue.push( index );
            }
            Entry {
                package,
                is_used: Cell::new( is_main_package )
            }
        }).collect();
        // Traversal over the dependency graph, starting from the main package.
        while let Some( index ) = queue.pop() {
            for dependency in &entries[ index ].package.dependencies {
                // Skip dependencies whose platform condition excludes this triplet.
                if let Some( ref required_triplet ) = dependency.target {
                    if !required_triplet.matches( triplet ) {
                        continue;
                    }
                }
                // Skip dependency kinds not used by the requested profile.
                match profile {
                    Profile::Main => {
                        match dependency.kind {
                            CargoDependencyKind::Normal => {},
                            CargoDependencyKind::Development |
                            CargoDependencyKind::Build => continue
                        }
                    },
                    Profile::Test |
                    Profile::Bench => {
                        match dependency.kind {
                            CargoDependencyKind::Normal |
                            CargoDependencyKind::Development => {},
                            CargoDependencyKind::Build => continue
                        }
                    }
                }
                let dependency_id = match dependency.resolved_to {
                    Some( ref dependency_id ) => dependency_id,
                    None => continue
                };
                let dependency_index = *package_map.get( dependency_id ).unwrap();
                if entries[ dependency_index ].is_used.get() {
                    continue;
                }
                entries[ dependency_index ].is_used.set( true );
                queue.push( dependency_index );
            }
        }
        entries.into_iter().filter( |entry| entry.is_used.get() ).map( |entry| entry.package ).collect()
    }
}
/// A concrete thing cargo can be asked to compile.
#[derive(Clone, Debug)]
pub enum BuildTarget {
    Lib(String, Profile),
    Bin(String, Profile),
    ExampleBin(String),
    IntegrationTest(String),
    IntegrationBench(String),
}
impl BuildTarget {
    /// Everything except a plain (non-test) library build produces a runnable artifact.
    fn is_executable(&self) -> bool {
        if let BuildTarget::Lib(_, Profile::Main) = *self {
            false
        } else {
            true
        }
    }
}
/// How compiler diagnostics should be presented to the user.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum MessageFormat {
    Human,
    Json,
}
/// Everything needed to assemble a `cargo rustc`/`cargo check` invocation.
#[derive(Clone, Debug)]
pub struct BuildConfig {
    pub build_target: BuildTarget,
    pub build_type: BuildType,
    /// Target triplet to compile for, when cross-compiling.
    pub triplet: Option<String>,
    /// Package to build, when not the default one.
    pub package: Option<String>,
    pub features: Vec<String>,
    pub no_default_features: bool,
    pub enable_all_features: bool,
    /// Extra directories prepended to `PATH` when launching cargo.
    pub extra_paths: Vec<PathBuf>,
    /// Extra flags prepended to `RUSTFLAGS`.
    pub extra_rustflags: Vec<String>,
    /// Extra environment variables set for the cargo process.
    pub extra_environment: Vec<(String, String)>,
    pub message_format: MessageFormat,
    pub is_verbose: bool,
    pub use_color: bool,
}
/// Maps a `Profile` to the value cargo expects after `--profile`.
fn profile_to_arg(profile: Profile) -> &'static str {
    match profile {
        Profile::Bench => "bench",
        Profile::Test => "test",
        Profile::Main => "dev",
    }
}
/// Converts a metadata target into the corresponding `BuildTarget` for the given profile.
pub fn target_to_build_target(target: &CargoTarget, profile: Profile) -> BuildTarget {
    let name = target.name.clone();
    match target.kind {
        // Both library flavors build as a library target.
        TargetKind::Lib | TargetKind::CDyLib => BuildTarget::Lib(name, profile),
        TargetKind::Bin => BuildTarget::Bin(name, profile),
        TargetKind::Example => BuildTarget::ExampleBin(name),
        TargetKind::Test => BuildTarget::IntegrationTest(name),
        TargetKind::Bench => BuildTarget::IntegrationBench(name),
    }
}
impl BuildConfig {
fn as_command( &self, should_build: bool ) -> Command {
let mut command = Command::new( "cargo" );
if should_build {
command.arg( "rustc" );
} else {
command.arg( "check" );
}
command.arg( "--message-format" );
command.arg( "json" );
if cfg!( unix ) && self.use_color {
command.arg( "--color" );
command.arg( "always" );
}
if let Some( ref triplet ) = self.triplet {
command.arg( "--target" ).arg( triplet.as_str() );
}
if let Some( ref package ) = self.package {
command.arg( "--package" ).arg( package.as_str() );
}
match self.build_type {
BuildType::Debug => {},
BuildType::Release => {
command.arg( "--release" );
}
}
match self.build_target {
BuildTarget::Lib( _, _ ) if !should_build => {
command.arg( "--lib" );
},
BuildTarget::Bin( ref name, _ ) if !should_build => {
command.arg( "--bin" ).arg( name.as_str() );
},
BuildTarget::Lib( _, profile ) => {
command
.arg( "--profile" ).arg( profile_to_arg( profile ) )
.arg( "--lib" );
},
BuildTarget::Bin( ref name, profile ) => {
command
.arg( "--profile" ).arg( profile_to_arg( profile ) )
.arg( "--bin" ).arg( name.as_str() );
},
BuildTarget::ExampleBin( ref name ) => {
command.arg( "--example" ).arg( name.as_str() );
},
BuildTarget::IntegrationTest( ref name ) => {
command.arg( "--test" ).arg( name.as_str() );
},
BuildTarget::IntegrationBench( ref name ) => {
command.arg( "--bench" ).arg( name.as_str() );
}
}
if self.no_default_features {
command.arg( "--no-default-features" );
}
if self.enable_all_features {
command.arg( "--all-features" );
}
if !self.features.is_empty() {
command.arg( "--features" );
command.arg( &self.features.join( " " ) );
}
if self.is_verbose {
command.arg( "--verbose" );
}
command
}
pub fn check( &self ) -> CargoResult {
let status = self.launch_cargo( false ).map( |(status, _)| status );
CargoResult {
status,
artifacts: Vec::new()
}
/*
match self.launch_cargo( true ) {
Some( (status, _) ) => {
return CargoResult {
status: Some( status ),
artifacts: Vec::new()
}
},
None => {
return CargoResult {
status: None,
artifacts: Vec::new()
}
}
}
*/
}
pub fn build< F >( &self, mut postprocess: Option< F > ) -> CargoResult
where F: for <'a> FnMut( Vec< PathBuf > ) -> Vec< PathBuf >
{
let mut result = self.build_internal( &mut postprocess );
if result.is_ok() == false {
return result;
}
// HACK: For some reason when you install emscripten for the first time
// the first build is always a dud (it produces no artifacts), so we retry once.
let is_emscripten = self.triplet.as_ref().map( |triplet| {
triplet == "wasm32-unknown-emscripten" || triplet == "asmjs-unknown-emscripten"
}).unwrap_or( false );
if is_emscripten && self.build_target.is_executable() {
let no_js_generated = result
.artifacts()
.iter()
.find( |artifact| artifact.extension().map( |ext| ext == "js" ).unwrap_or( false ) )
.is_none();
if no_js_generated {
debug!( "No artifacts were generated yet build succeeded; retrying..." );
result = self.build_internal( &mut postprocess );
}
}
return result;
}
/// Spawns `cargo` (built by `as_command`) and streams its output.
///
/// Returns `None` if the cargo process could not be spawned; otherwise
/// returns the exit status code together with every artifact cargo
/// reported on stdout. stderr is forwarded to our own stderr on a
/// background thread; stdout is parsed as JSON messages.
fn launch_cargo( &self, should_build: bool ) -> Option< (i32, Vec< cargo_output::Artifact >) > {
    let mut command = self.as_command( should_build );
    // Prepend our extra paths to the inherited PATH so our tools take priority.
    let env_paths = env::var_os( "PATH" )
        .map( |paths| env::split_paths( &paths ).collect() )
        .unwrap_or( Vec::new() );
    let mut paths = Vec::new();
    paths.extend( self.extra_paths.clone().into_iter() );
    paths.extend( env_paths.into_iter() );
    let new_paths = env::join_paths( paths ).unwrap();
    debug!( "Will launch cargo with PATH: {:?}", new_paths );
    command.env( "PATH", new_paths );
    // Merge our extra rustflags with any RUSTFLAGS already set in the
    // environment, space-separated, ours first.
    let mut rustflags = OsString::new();
    for flag in &self.extra_rustflags {
        if !rustflags.is_empty() {
            rustflags.push( " " );
        }
        rustflags.push( flag );
    }
    if let Some( env_rustflags ) = env::var_os( "RUSTFLAGS" ) {
        if !rustflags.is_empty() {
            rustflags.push( " " );
        }
        rustflags.push( env_rustflags );
    }
    debug!( "Will launch cargo with RUSTFLAGS: {:?}", rustflags );
    command.env( "RUSTFLAGS", rustflags );
    for &(ref key, ref value) in &self.extra_environment {
        debug!( "Will launch cargo with variable \"{}\" set to \"{}\"", key, value );
        command.env( key, value );
    }
    command.stdout( Stdio::piped() );
    command.stderr( Stdio::piped() );
    debug!( "Launching cargo: {:?}", command );
    let mut child = match command.spawn() {
        Ok( child ) => child,
        Err( _ ) => return None
    };
    let stderr = BufReader::new( child.stderr.take().unwrap() );
    let stdout = BufReader::new( child.stdout.take().unwrap() );
    let is_verbose = self.is_verbose;
    // Drain cargo's stderr on a separate thread so that reading stdout
    // below cannot deadlock on a full stderr pipe.
    thread::spawn( move || {
        let mut skip = 0;
        for line in stderr.lines() {
            let line = match line {
                Ok( line ) => line,
                Err( _ ) => break
            };
            if skip > 0 {
                skip -= 1;
                continue;
            }
            // This is really ugly, so let's skip it.
            // (Suppresses the "Caused by:" header plus its following line
            // unless verbose output was requested.)
            if line.trim() == "Caused by:" && !is_verbose {
                skip += 1;
                continue;
            }
            eprintln!( "{}", line );
        }
    });
    let mut artifacts = Vec::new();
    for line in stdout.lines() {
        let line = match line {
            Ok( line ) => line,
            Err( _ ) => break
        };
        let line = line.trim();
        if line.is_empty() {
            continue;
        }
        // Re-serialize each line as pretty-printed JSON before handing it to
        // `CargoOutput::parse` — presumably the parser expects that form;
        // confirm against `cargo_output`.
        let json: serde_json::Value = serde_json::from_str( &line ).expect( "failed to parse cargo output" );
        let line = serde_json::to_string_pretty( &json ).unwrap();
        if let Some( output ) = CargoOutput::parse( &line ) {
            match output {
                CargoOutput::Message( message ) => {
                    match self.message_format {
                        MessageFormat::Human => diagnostic_formatter::print( self.use_color, &message ),
                        MessageFormat::Json => {
                            println!( "{}", serde_json::to_string( &message.to_json_value() ).unwrap() );
                        }
                    }
                },
                CargoOutput::Artifact( artifact ) => {
                    for filename in &artifact.filenames {
                        debug!( "Built artifact: {}", filename );
                    }
                    artifacts.push( artifact );
                },
                CargoOutput::BuildScriptExecuted( executed ) => {
                    match self.message_format {
                        // Build-script notifications are only surfaced in JSON mode.
                        MessageFormat::Human => {},
                        MessageFormat::Json => {
                            println!( "{}", serde_json::to_string( &executed.to_json_value() ).unwrap() );
                        }
                    }
                }
            }
        }
    }
    let result = child.wait();
    let status = result.unwrap().code().expect( "failed to grab cargo status code" );
    debug!( "Cargo finished with status: {}", status );
    Some( (status, artifacts) )
}
/// Runs an actual `cargo rustc` build and collects the resulting artifacts.
///
/// Returns `status: None` when cargo could not be launched at all. On
/// success the artifact filenames reported by cargo (optionally rewritten
/// by `postprocess`) are returned as paths.
fn build_internal< F >( &self, postprocess: &mut Option< F > ) -> CargoResult
    where F: for <'a> FnMut( Vec< PathBuf > ) -> Vec< PathBuf >
{
    let (status, mut artifacts) = match self.launch_cargo( true ) {
        Some( result ) => result,
        None => {
            return CargoResult {
                status: None,
                artifacts: Vec::new()
            }
        }
    };
    // True if `path` has the given file extension.
    fn has_extension< P: AsRef< Path > >( path: P, extension: &str ) -> bool {
        path.as_ref().extension().map( |ext| ext == extension ).unwrap_or( false )
    }
    // Finds the first artifact containing a filename with the given
    // extension, returning `(artifact index, filename index)`.
    fn find_artifact( artifacts: &[cargo_output::Artifact], extension: &str ) -> Option< (usize, usize) > {
        artifacts.iter().enumerate().filter_map( |(artifact_index, artifact)| {
            if let Some( filename_index ) = artifact.filenames.iter().position( |filename| has_extension( filename, extension ) ) {
                Some( (artifact_index, filename_index) )
            } else {
                None
            }
        }).next()
    }
    // For some reason when building tests cargo doesn't treat
    // the `.wasm` file as an artifact.
    if status == 0 && self.triplet.as_ref().map( |triplet| triplet == "wasm32-unknown-emscripten" ).unwrap_or( false ) {
        match self.build_target {
            BuildTarget::Bin( _, Profile::Test ) |
            BuildTarget::Lib( _, Profile::Test ) |
            BuildTarget::IntegrationTest( _ ) => {
                if find_artifact( &artifacts, "wasm" ).is_none() {
                    if let Some( (artifact_index, filename_index) ) = find_artifact( &artifacts, "js" ) {
                        // Derive the expected `.wasm` path from the `.js` artifact:
                        // same file stem, inside the `deps` subdirectory.
                        let wasm_path = {
                            let main_artifact = Path::new( &artifacts[ artifact_index ].filenames[ filename_index ] );
                            let filename = main_artifact.file_name().unwrap();
                            main_artifact.parent().unwrap().join( "deps" ).join( filename ).with_extension( "wasm" )
                        };
                        assert!( wasm_path.exists(), "internal error: wasm doesn't exist where I expected it to be" );
                        artifacts[ artifact_index ].filenames.push( wasm_path.to_str().unwrap().to_owned() );
                        debug!( "Found `.wasm` test artifact: {:?}", wasm_path );
                    }
                }
            },
            _ => {}
        }
    }
    let mut artifact_paths = Vec::new();
    // Give the caller's `postprocess` hook a chance to rewrite each
    // artifact's filename list before we report it.
    for mut artifact in &mut artifacts {
        if let Some( ref mut callback ) = postprocess.as_mut() {
            let filenames = artifact.filenames.iter().map( |filename| Path::new( &filename ).to_owned() ).collect();
            let filenames = callback( filenames );
            artifact.filenames = filenames.into_iter().map( |filename| filename.to_str().unwrap().to_owned() ).collect();
        }
    }
    for mut artifact in artifacts {
        if artifact.filenames.is_empty() {
            continue;
        }
        match self.message_format {
            MessageFormat::Human => {},
            MessageFormat::Json => {
                println!( "{}", serde_json::to_string( &artifact.to_json_value() ).unwrap() );
            }
        }
        for filename in artifact.filenames {
            // NOTE: Since we extract the paths from the JSON
            // we get a list of artifacts as `String`s instead of `PathBuf`s.
            artifact_paths.push( filename.into() )
        }
    }
    CargoResult {
        status: Some( status ),
        artifacts: artifact_paths
    }
}
}
/// The outcome of a cargo invocation: its exit status (if cargo could be
/// launched at all) plus the paths of any artifacts it produced.
pub struct CargoResult {
    status: Option< i32 >,
    artifacts: Vec< PathBuf >
}

impl CargoResult {
    /// Whether cargo was launched successfully *and* exited with status zero.
    pub fn is_ok( &self ) -> bool {
        match self.status {
            Some( 0 ) => true,
            _ => false
        }
    }
    /// The artifacts produced by the build, if any.
    pub fn artifacts( &self ) -> &[PathBuf] {
        self.artifacts.as_slice()
    }
}
Remove a comment with some dead code
use std::collections::{HashSet, HashMap};
use std::process::{Command, Stdio};
use std::path::{Path, PathBuf};
use std::io::{self, BufRead, BufReader};
use std::ffi::OsString;
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use std::cell::Cell;
use std::env;
use std::thread;
use std::str;
use std::error;
use std::fmt;
use cargo_metadata;
use serde_json;
mod cargo_output;
mod rustc_diagnostic;
mod diagnostic_formatter;
use self::cargo_output::{CargoOutput, PackageId};
/// Whether to build with or without optimizations (`--release`).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum BuildType {
    Debug,
    Release
}
/// Which cargo profile a target is built under.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Profile {
    Main,
    Test,
    Bench
}
/// The kind of a cargo target, derived from `cargo metadata` output.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum TargetKind {
    Lib,
    CDyLib,
    Bin,
    Example,
    Test,
    Bench
}
/// A whole cargo project: every package reported by `cargo metadata`
/// plus the shared target directory.
#[derive(Clone, Debug)]
pub struct CargoProject {
    pub packages: Vec< CargoPackage >,
    pub target_directory: String
}
/// Newtype over `cargo_output::PackageId` so that `PartialEq`/`Eq`/`Hash`
/// can be implemented on it locally.
#[derive(Clone, Debug)]
pub struct CargoPackageId( PackageId );
// TODO: Fix this upstream.
impl PartialEq for CargoPackageId {
    // Two package ids are equal when their name, version and source URL all match.
    fn eq( &self, rhs: &CargoPackageId ) -> bool {
        self.0.name() == rhs.0.name() &&
        self.0.version() == rhs.0.version() &&
        self.0.url() == rhs.0.url()
    }
}
impl Eq for CargoPackageId {}
impl Hash for CargoPackageId {
    // Hashes exactly the fields that participate in equality (name,
    // version, URL) so the `Hash`/`PartialEq` contract holds.
    fn hash< H: Hasher >( &self, state: &mut H ) {
        self.0.name().hash( state );
        self.0.version().hash( state );
        self.0.url().hash( state );
    }
}
impl CargoPackageId {
fn new( id: &str ) -> Option< Self > {
let value = serde_json::Value::String( id.to_owned() );
match serde_json::from_value( value ).ok() {
Some( package_id ) => Some( CargoPackageId( package_id ) ),
None => None
}
}
}
impl Deref for CargoPackageId {
    type Target = PackageId;
    // Allows calling `PackageId` accessors directly on a `CargoPackageId`.
    fn deref( &self ) -> &Self::Target {
        &self.0
    }
}
/// A single package from `cargo metadata`, with its dependencies resolved.
#[derive(Clone, PartialEq, Debug)]
pub struct CargoPackage {
    pub id: CargoPackageId,
    pub name: String,
    pub manifest_path: PathBuf,
    // The directory containing the package's `Cargo.toml`.
    pub crate_root: PathBuf,
    pub targets: Vec< CargoTarget >,
    pub dependencies: Vec< CargoDependency >,
    // Whether the package is a member of the current workspace.
    pub is_workspace_member: bool,
    // Whether this is the package the current working directory points at.
    pub is_default: bool
}
/// A single buildable target of a package.
#[derive(Clone, PartialEq, Debug)]
pub struct CargoTarget {
    pub name: String,
    pub kind: TargetKind,
    // The directory containing the target's main source file.
    pub source_directory: PathBuf
}
/// Which dependency section of `Cargo.toml` a dependency comes from.
#[derive(Clone, PartialEq, Debug)]
pub enum CargoDependencyKind {
    Normal,
    Development,
    Build
}
/// The `target` filter of a platform-specific dependency.
#[derive(Clone, PartialEq, Debug)]
pub enum CargoDependencyTarget {
    Target( String ),
    Emscripten, // TODO: Remove these hardcodes.
    NonEmscripten
}
impl CargoDependencyTarget {
    /// Checks whether this dependency's `target` filter applies to the
    /// given target triplet.
    fn matches( &self, triplet: &str ) -> bool {
        let is_emscripten_triplet = triplet.ends_with( "-emscripten" );
        match *self {
            CargoDependencyTarget::Target( ref target ) => target == triplet,
            CargoDependencyTarget::Emscripten => is_emscripten_triplet,
            CargoDependencyTarget::NonEmscripten => !is_emscripten_triplet
        }
    }
}
/// A dependency declaration of a package, plus (once resolution has run)
/// the concrete package it resolved to.
#[derive(Clone, PartialEq, Debug)]
pub struct CargoDependency {
    pub name: String,
    pub kind: CargoDependencyKind,
    // `Some` only for platform-specific dependency entries.
    pub target: Option< CargoDependencyTarget >,
    // Filled in from the `resolve` section of `cargo metadata`;
    // `None` when unresolved.
    pub resolved_to: Option< CargoPackageId >
}
/// Errors which can occur while querying `cargo metadata`.
#[derive(Debug)]
pub enum Error {
    // `cargo` couldn't be spawned at all.
    CannotLaunchCargo( io::Error ),
    // `cargo` ran but exited unsuccessfully; carries its stderr output.
    CargoFailed( String ),
    // `cargo` succeeded but its JSON output couldn't be deserialized.
    CannotParseCargoOutput( serde_json::Error )
}
impl error::Error for Error {
    fn description( &self ) -> &str {
        match *self {
            Error::CannotLaunchCargo( _ ) => "cannot launch cargo",
            Error::CargoFailed( _ ) => "cargo failed",
            Error::CannotParseCargoOutput( _ ) => "cannot parse cargo output"
        }
    }
}
impl fmt::Display for Error {
    // Formats every variant as "<description>: <underlying error>".
    fn fmt( &self, formatter: &mut fmt::Formatter ) -> fmt::Result {
        use std::error::Error as StdError;
        match *self {
            Error::CannotLaunchCargo( ref err ) => write!( formatter, "{}: {}", self.description(), err ),
            Error::CargoFailed( ref err ) => write!( formatter, "{}: {}", self.description(), err ),
            Error::CannotParseCargoOutput( ref err ) => write!( formatter, "{}: {}", self.description(), err )
        }
    }
}
impl CargoProject {
/// Queries `cargo metadata` and builds a fully resolved `CargoProject`.
///
/// Runs `cargo metadata --format-version 1` with the given feature flags,
/// parses its output, fills in each dependency's `resolved_to` from the
/// `resolve` section, and marks the workspace member whose directory is
/// closest to the current working directory as the default package.
pub fn new(
    manifest_path: Option< &str >,
    no_default_features: bool,
    enable_all_features: bool,
    features: &[String]
) -> Result< CargoProject, Error >
{
    let cwd = env::current_dir().expect( "cannot get current working directory" );
    // Respect the `CARGO` environment variable when set (e.g. when we
    // are being run as a cargo subcommand).
    let cargo = env::var( "CARGO" ).unwrap_or_else( |_|
        if cfg!( windows ) {
            "cargo.exe"
        } else {
            "cargo"
        }.to_owned()
    );
    let mut command = Command::new( cargo );
    command.arg( "metadata" );
    if no_default_features {
        command.arg( "--no-default-features" );
    }
    if enable_all_features {
        command.arg( "--all-features" );
    }
    if !features.is_empty() {
        command.arg( "--features" );
        command.arg( &features.join( " " ) );
    }
    command.arg( "--format-version" );
    command.arg( "1" );
    if let Some( manifest_path ) = manifest_path {
        command.arg( "--manifest-path" );
        command.arg( manifest_path );
    }
    if cfg!( unix ) {
        command.arg( "--color" );
        command.arg( "always" );
    }
    debug!( "Launching: {:?}", command );
    let output = command.output().map_err( |err| Error::CannotLaunchCargo( err ) )?;
    if !output.status.success() {
        return Err( Error::CargoFailed( String::from_utf8_lossy( &output.stderr ).into_owned() ) );
    }
    let metadata = str::from_utf8( &output.stdout ).expect( "cargo output is not valid UTF-8" );
    let metadata: cargo_metadata::Metadata =
        serde_json::from_str( metadata ).map_err( |err| Error::CannotParseCargoOutput( err ) )?;
    // Remember which packages are workspace members so they can be
    // flagged while converting below.
    let mut workspace_members = HashSet::new();
    for member in metadata.workspace_members {
        workspace_members.insert( member.name().to_owned() );
    }
    let mut project = CargoProject {
        target_directory: metadata.target_directory,
        packages: metadata.packages.into_iter().map( |package| {
            let manifest_path: PathBuf = package.manifest_path.into();
            let is_workspace_member = workspace_members.contains( &*package.name );
            CargoPackage {
                id: CargoPackageId::new( &package.id ).expect( "unparsable package id" ),
                name: package.name,
                crate_root: manifest_path.parent().unwrap().into(),
                manifest_path: manifest_path,
                is_workspace_member,
                is_default: false,
                targets: package.targets.into_iter().filter_map( |target| {
                    Some( CargoTarget {
                        name: target.name,
                        kind: match target.kind[ 0 ].as_str() {
                            "lib" => TargetKind::Lib,
                            "rlib" => TargetKind::Lib,
                            "cdylib" => TargetKind::CDyLib,
                            "dylib" => TargetKind::Lib,
                            "staticlib" => TargetKind::Lib,
                            "bin" => TargetKind::Bin,
                            "example" => TargetKind::Example,
                            "test" => TargetKind::Test,
                            "bench" => TargetKind::Bench,
                            // Build scripts and proc-macros aren't buildable targets for us.
                            "custom-build" => return None,
                            "proc-macro" => return None,
                            _ => panic!( "Unknown target kind: '{}'", target.kind[ 0 ] )
                        },
                        source_directory: Into::< PathBuf >::into( target.src_path ).parent().unwrap().into()
                    })
                }).collect(),
                dependencies: package.dependencies.into_iter().map( |dependency| {
                    // TODO: Make the `target` field public in `cargo_metadata`.
                    // Round-trip the dependency through JSON to get at the
                    // private `target` field.
                    let json: serde_json::Value = serde_json::from_str( &serde_json::to_string( &dependency ).unwrap() ).unwrap();
                    let target = match json.get( "target" ).unwrap() {
                        &serde_json::Value::Null => None,
                        &serde_json::Value::String( ref target ) => {
                            let target = match target.replace( " ", "" ).as_str() {
                                // TODO: Do this properly.
                                "cfg(target_os=\"emscripten\")" => CargoDependencyTarget::Emscripten,
                                "cfg(not(target_os=\"emscripten\"))" => CargoDependencyTarget::NonEmscripten,
                                _ => CargoDependencyTarget::Target( target.clone() )
                            };
                            Some( target )
                        },
                        _ => unreachable!()
                    };
                    CargoDependency {
                        name: dependency.name,
                        kind: match dependency.kind {
                            cargo_metadata::DependencyKind::Normal => CargoDependencyKind::Normal,
                            cargo_metadata::DependencyKind::Development => CargoDependencyKind::Development,
                            cargo_metadata::DependencyKind::Build => CargoDependencyKind::Build,
                            other => panic!( "Unknown dependency kind: {:?}", other )
                        },
                        target,
                        resolved_to: None
                    }
                }).collect()
            }
        }).collect()
    };
    // Fill in `resolved_to` for every dependency using the `resolve` section.
    let mut package_map = HashMap::new();
    for (index, package) in project.packages.iter().enumerate() {
        package_map.insert( package.id.clone(), index );
    }
    for node in metadata.resolve.expect( "missing `resolve` metadata section" ).nodes {
        let id = CargoPackageId::new( &node.id ).expect( "unparsable package id in the `resolve` metadata section" );
        let package_index = *package_map.get( &id ).expect( "extra entry in the `resolve` metadata section" );
        let package = &mut project.packages[ package_index ];
        for dependency_id in node.dependencies {
            let dependency_id = CargoPackageId::new( &dependency_id ).expect( "unparsable dependency package id" );
            let mut dependency_found = false;
            for dependency in package.dependencies.iter_mut().filter( |dep| dep.name == dependency_id.name() ) {
                assert!( dependency.resolved_to.is_none(), "duplicate dependency" );
                dependency.resolved_to = Some( dependency_id.clone() );
                dependency_found = true;
            }
            assert!( dependency_found, "dependency missing from packages" );
        }
    }
    // Pick the default package: the workspace member whose directory shares
    // the longest component prefix with the current working directory.
    let mut default_package: Option< (usize, usize) > = None;
    for (package_index, package) in project.packages.iter().enumerate() {
        if !package.is_workspace_member {
            continue;
        }
        let package_directory = package.manifest_path.parent().unwrap();
        if !cwd.starts_with( package_directory ) {
            continue;
        }
        let common_length = cwd.components().zip( package_directory.components() ).take_while( |&(a, b)| a == b ).count();
        if default_package == None || default_package.unwrap().1 < common_length {
            default_package = Some( (package_index, common_length) );
        }
    }
    if let Some( (default_package_index, _) ) = default_package {
        project.packages[ default_package_index ].is_default = true;
    }
    Ok( project )
}
/// Returns the package detected as the default one, if any.
pub fn default_package( &self ) -> Option< &CargoPackage > {
    for package in &self.packages {
        if package.is_default {
            return Some( package );
        }
    }
    None
}
/// Computes the set of packages actually used when building `main_package`
/// for the given `triplet` under the given `profile`.
///
/// Walks the resolved dependency graph starting from `main_package`,
/// skipping dependencies whose platform filter doesn't match the triplet,
/// build dependencies always, and dev dependencies unless testing/benching.
pub fn used_packages( &self, triplet: &str, main_package: &CargoPackage, profile: Profile ) -> Vec< &CargoPackage > {
    // Index packages by id for O(1) edge lookups during the traversal.
    let mut package_map = HashMap::new();
    for (index, package) in self.packages.iter().enumerate() {
        package_map.insert( package.id.clone(), index );
    }
    struct Entry< 'a > {
        package: &'a CargoPackage,
        // Set to `true` once the package is reached from `main_package`.
        is_used: Cell< bool >
    }
    let mut queue = Vec::new();
    let entries: Vec< Entry > = self.packages.iter().enumerate().map( |(index, package)| {
        let is_main_package = package == main_package;
        if is_main_package {
            queue.push( index );
        }
        Entry {
            package,
            is_used: Cell::new( is_main_package )
        }
    }).collect();
    // Stack-based traversal over the dependency edges.
    while let Some( index ) = queue.pop() {
        for dependency in &entries[ index ].package.dependencies {
            // Skip platform-specific dependencies for other triplets.
            if let Some( ref required_triplet ) = dependency.target {
                if !required_triplet.matches( triplet ) {
                    continue;
                }
            }
            // Skip dependency kinds irrelevant for this profile.
            match profile {
                Profile::Main => {
                    match dependency.kind {
                        CargoDependencyKind::Normal => {},
                        CargoDependencyKind::Development |
                        CargoDependencyKind::Build => continue
                    }
                },
                Profile::Test |
                Profile::Bench => {
                    match dependency.kind {
                        CargoDependencyKind::Normal |
                        CargoDependencyKind::Development => {},
                        CargoDependencyKind::Build => continue
                    }
                }
            }
            let dependency_id = match dependency.resolved_to {
                Some( ref dependency_id ) => dependency_id,
                None => continue
            };
            let dependency_index = *package_map.get( dependency_id ).unwrap();
            if entries[ dependency_index ].is_used.get() {
                continue;
            }
            entries[ dependency_index ].is_used.set( true );
            queue.push( dependency_index );
        }
    }
    entries.into_iter().filter( |entry| entry.is_used.get() ).map( |entry| entry.package ).collect()
}
}
/// A single thing `cargo` can be asked to build.
#[derive(Clone, Debug)]
pub enum BuildTarget {
    Lib( String, Profile ),
    Bin( String, Profile ),
    ExampleBin( String ),
    IntegrationTest( String ),
    IntegrationBench( String )
}
impl BuildTarget {
    /// Whether building this target produces a runnable binary.
    /// Only a plain (non-test, non-bench) library build does not.
    fn is_executable( &self ) -> bool {
        if let BuildTarget::Lib( _, Profile::Main ) = *self {
            false
        } else {
            true
        }
    }
}
/// How diagnostics and build messages are printed to the user.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum MessageFormat {
    Human,
    Json
}
/// Everything needed to construct and launch a `cargo rustc` /
/// `cargo check` invocation for a single build target.
#[derive(Clone, Debug)]
pub struct BuildConfig {
    pub build_target: BuildTarget,
    pub build_type: BuildType,
    // Target triplet to cross-compile for, if any.
    pub triplet: Option< String >,
    pub package: Option< String >,
    pub features: Vec< String >,
    pub no_default_features: bool,
    pub enable_all_features: bool,
    // Extra directories prepended to PATH when launching cargo.
    pub extra_paths: Vec< PathBuf >,
    // Extra flags prepended to RUSTFLAGS when launching cargo.
    pub extra_rustflags: Vec< String >,
    // Extra environment variables set when launching cargo.
    pub extra_environment: Vec< (String, String) >,
    pub message_format: MessageFormat,
    pub is_verbose: bool,
    pub use_color: bool
}
/// Maps a `Profile` to the value expected by cargo's `--profile` argument.
fn profile_to_arg( profile: Profile ) -> &'static str {
    match profile {
        Profile::Main => "dev",
        Profile::Test => "test",
        Profile::Bench => "bench"
    }
}
/// Converts a metadata `CargoTarget` into the `BuildTarget` to build
/// under the given `profile`.
///
/// NOTE(review): the profile only flows through for lib/bin targets;
/// example/test/bench targets discard it.
pub fn target_to_build_target( target: &CargoTarget, profile: Profile ) -> BuildTarget {
    match target.kind {
        TargetKind::Lib => BuildTarget::Lib( target.name.clone(), profile ),
        TargetKind::CDyLib => BuildTarget::Lib( target.name.clone(), profile ),
        TargetKind::Bin => BuildTarget::Bin( target.name.clone(), profile ),
        TargetKind::Example => BuildTarget::ExampleBin( target.name.clone() ),
        TargetKind::Test => BuildTarget::IntegrationTest( target.name.clone() ),
        TargetKind::Bench => BuildTarget::IntegrationBench( target.name.clone() )
    }
}
impl BuildConfig {
/// Builds (but does not spawn) the `cargo` command line for this config.
///
/// `should_build == true` produces a `cargo rustc` invocation, `false` a
/// `cargo check` one. `--profile` is only passed when actually building
/// — presumably because `cargo check` doesn't accept it; confirm.
fn as_command( &self, should_build: bool ) -> Command {
    let mut command = Command::new( "cargo" );
    if should_build {
        command.arg( "rustc" );
    } else {
        command.arg( "check" );
    }
    command.arg( "--message-format" );
    command.arg( "json" );
    if cfg!( unix ) && self.use_color {
        command.arg( "--color" );
        command.arg( "always" );
    }
    if let Some( ref triplet ) = self.triplet {
        command.arg( "--target" ).arg( triplet.as_str() );
    }
    if let Some( ref package ) = self.package {
        command.arg( "--package" ).arg( package.as_str() );
    }
    match self.build_type {
        BuildType::Debug => {},
        BuildType::Release => {
            command.arg( "--release" );
        }
    }
    match self.build_target {
        // The first two arms (check mode) intentionally omit `--profile`.
        BuildTarget::Lib( _, _ ) if !should_build => {
            command.arg( "--lib" );
        },
        BuildTarget::Bin( ref name, _ ) if !should_build => {
            command.arg( "--bin" ).arg( name.as_str() );
        },
        BuildTarget::Lib( _, profile ) => {
            command
                .arg( "--profile" ).arg( profile_to_arg( profile ) )
                .arg( "--lib" );
        },
        BuildTarget::Bin( ref name, profile ) => {
            command
                .arg( "--profile" ).arg( profile_to_arg( profile ) )
                .arg( "--bin" ).arg( name.as_str() );
        },
        BuildTarget::ExampleBin( ref name ) => {
            command.arg( "--example" ).arg( name.as_str() );
        },
        BuildTarget::IntegrationTest( ref name ) => {
            command.arg( "--test" ).arg( name.as_str() );
        },
        BuildTarget::IntegrationBench( ref name ) => {
            command.arg( "--bench" ).arg( name.as_str() );
        }
    }
    if self.no_default_features {
        command.arg( "--no-default-features" );
    }
    if self.enable_all_features {
        command.arg( "--all-features" );
    }
    if !self.features.is_empty() {
        command.arg( "--features" );
        command.arg( &self.features.join( " " ) );
    }
    if self.is_verbose {
        command.arg( "--verbose" );
    }
    command
}
/// Runs `cargo check` and reports only the exit status.
///
/// A check never produces artifacts, so the artifact list is always
/// empty; `status` is `None` when cargo could not be spawned at all.
pub fn check( &self ) -> CargoResult {
    CargoResult {
        status: self.launch_cargo( false ).map( |(status, _)| status ),
        artifacts: Vec::new()
    }
}
/// Builds the configured target and collects its artifacts.
///
/// `postprocess`, if provided, receives the artifact paths of every
/// built artifact and returns the (possibly rewritten) list to keep.
///
/// HACK: a freshly installed emscripten toolchain is known to produce a
/// dud first build (success status but no `.js` artifact), so for
/// emscripten executable targets a successful-yet-empty build is
/// retried once.
pub fn build< F >( &self, mut postprocess: Option< F > ) -> CargoResult
    where F: for <'a> FnMut( Vec< PathBuf > ) -> Vec< PathBuf >
{
    let mut result = self.build_internal( &mut postprocess );
    if !result.is_ok() {
        return result;
    }
    let is_emscripten = self.triplet.as_ref().map( |triplet| {
        triplet == "wasm32-unknown-emscripten" || triplet == "asmjs-unknown-emscripten"
    }).unwrap_or( false );
    if is_emscripten && self.build_target.is_executable() {
        // A successful emscripten build of an executable should emit a `.js`
        // file; its absence means we hit the dud-first-build problem.
        let no_js_generated = !result
            .artifacts()
            .iter()
            .any( |artifact| artifact.extension().map( |ext| ext == "js" ).unwrap_or( false ) );
        if no_js_generated {
            debug!( "No artifacts were generated yet build succeeded; retrying..." );
            result = self.build_internal( &mut postprocess );
        }
    }
    result
}
/// Spawns `cargo` (built by `as_command`) and streams its output.
///
/// Returns `None` if the cargo process could not be spawned; otherwise
/// returns the exit status code together with every artifact cargo
/// reported on stdout. stderr is forwarded to our own stderr on a
/// background thread; stdout is parsed as JSON messages.
fn launch_cargo( &self, should_build: bool ) -> Option< (i32, Vec< cargo_output::Artifact >) > {
    let mut command = self.as_command( should_build );
    // Prepend our extra paths to the inherited PATH so our tools take priority.
    let env_paths = env::var_os( "PATH" )
        .map( |paths| env::split_paths( &paths ).collect() )
        .unwrap_or( Vec::new() );
    let mut paths = Vec::new();
    paths.extend( self.extra_paths.clone().into_iter() );
    paths.extend( env_paths.into_iter() );
    let new_paths = env::join_paths( paths ).unwrap();
    debug!( "Will launch cargo with PATH: {:?}", new_paths );
    command.env( "PATH", new_paths );
    // Merge our extra rustflags with any RUSTFLAGS already set in the
    // environment, space-separated, ours first.
    let mut rustflags = OsString::new();
    for flag in &self.extra_rustflags {
        if !rustflags.is_empty() {
            rustflags.push( " " );
        }
        rustflags.push( flag );
    }
    if let Some( env_rustflags ) = env::var_os( "RUSTFLAGS" ) {
        if !rustflags.is_empty() {
            rustflags.push( " " );
        }
        rustflags.push( env_rustflags );
    }
    debug!( "Will launch cargo with RUSTFLAGS: {:?}", rustflags );
    command.env( "RUSTFLAGS", rustflags );
    for &(ref key, ref value) in &self.extra_environment {
        debug!( "Will launch cargo with variable \"{}\" set to \"{}\"", key, value );
        command.env( key, value );
    }
    command.stdout( Stdio::piped() );
    command.stderr( Stdio::piped() );
    debug!( "Launching cargo: {:?}", command );
    let mut child = match command.spawn() {
        Ok( child ) => child,
        Err( _ ) => return None
    };
    let stderr = BufReader::new( child.stderr.take().unwrap() );
    let stdout = BufReader::new( child.stdout.take().unwrap() );
    let is_verbose = self.is_verbose;
    // Drain cargo's stderr on a separate thread so that reading stdout
    // below cannot deadlock on a full stderr pipe.
    thread::spawn( move || {
        let mut skip = 0;
        for line in stderr.lines() {
            let line = match line {
                Ok( line ) => line,
                Err( _ ) => break
            };
            if skip > 0 {
                skip -= 1;
                continue;
            }
            // This is really ugly, so let's skip it.
            // (Suppresses the "Caused by:" header plus its following line
            // unless verbose output was requested.)
            if line.trim() == "Caused by:" && !is_verbose {
                skip += 1;
                continue;
            }
            eprintln!( "{}", line );
        }
    });
    let mut artifacts = Vec::new();
    for line in stdout.lines() {
        let line = match line {
            Ok( line ) => line,
            Err( _ ) => break
        };
        let line = line.trim();
        if line.is_empty() {
            continue;
        }
        // Re-serialize each line as pretty-printed JSON before handing it to
        // `CargoOutput::parse` — presumably the parser expects that form;
        // confirm against `cargo_output`.
        let json: serde_json::Value = serde_json::from_str( &line ).expect( "failed to parse cargo output" );
        let line = serde_json::to_string_pretty( &json ).unwrap();
        if let Some( output ) = CargoOutput::parse( &line ) {
            match output {
                CargoOutput::Message( message ) => {
                    match self.message_format {
                        MessageFormat::Human => diagnostic_formatter::print( self.use_color, &message ),
                        MessageFormat::Json => {
                            println!( "{}", serde_json::to_string( &message.to_json_value() ).unwrap() );
                        }
                    }
                },
                CargoOutput::Artifact( artifact ) => {
                    for filename in &artifact.filenames {
                        debug!( "Built artifact: {}", filename );
                    }
                    artifacts.push( artifact );
                },
                CargoOutput::BuildScriptExecuted( executed ) => {
                    match self.message_format {
                        // Build-script notifications are only surfaced in JSON mode.
                        MessageFormat::Human => {},
                        MessageFormat::Json => {
                            println!( "{}", serde_json::to_string( &executed.to_json_value() ).unwrap() );
                        }
                    }
                }
            }
        }
    }
    let result = child.wait();
    let status = result.unwrap().code().expect( "failed to grab cargo status code" );
    debug!( "Cargo finished with status: {}", status );
    Some( (status, artifacts) )
}
/// Runs an actual `cargo rustc` build and collects the resulting artifacts.
///
/// Returns `status: None` when cargo could not be launched at all. On
/// success the artifact filenames reported by cargo (optionally rewritten
/// by `postprocess`) are returned as paths.
fn build_internal< F >( &self, postprocess: &mut Option< F > ) -> CargoResult
    where F: for <'a> FnMut( Vec< PathBuf > ) -> Vec< PathBuf >
{
    let (status, mut artifacts) = match self.launch_cargo( true ) {
        Some( result ) => result,
        None => {
            return CargoResult {
                status: None,
                artifacts: Vec::new()
            }
        }
    };
    // True if `path` has the given file extension.
    fn has_extension< P: AsRef< Path > >( path: P, extension: &str ) -> bool {
        path.as_ref().extension().map( |ext| ext == extension ).unwrap_or( false )
    }
    // Finds the first artifact containing a filename with the given
    // extension, returning `(artifact index, filename index)`.
    fn find_artifact( artifacts: &[cargo_output::Artifact], extension: &str ) -> Option< (usize, usize) > {
        artifacts.iter().enumerate().filter_map( |(artifact_index, artifact)| {
            if let Some( filename_index ) = artifact.filenames.iter().position( |filename| has_extension( filename, extension ) ) {
                Some( (artifact_index, filename_index) )
            } else {
                None
            }
        }).next()
    }
    // For some reason when building tests cargo doesn't treat
    // the `.wasm` file as an artifact.
    if status == 0 && self.triplet.as_ref().map( |triplet| triplet == "wasm32-unknown-emscripten" ).unwrap_or( false ) {
        match self.build_target {
            BuildTarget::Bin( _, Profile::Test ) |
            BuildTarget::Lib( _, Profile::Test ) |
            BuildTarget::IntegrationTest( _ ) => {
                if find_artifact( &artifacts, "wasm" ).is_none() {
                    if let Some( (artifact_index, filename_index) ) = find_artifact( &artifacts, "js" ) {
                        // Derive the expected `.wasm` path from the `.js` artifact:
                        // same file stem, inside the `deps` subdirectory.
                        let wasm_path = {
                            let main_artifact = Path::new( &artifacts[ artifact_index ].filenames[ filename_index ] );
                            let filename = main_artifact.file_name().unwrap();
                            main_artifact.parent().unwrap().join( "deps" ).join( filename ).with_extension( "wasm" )
                        };
                        assert!( wasm_path.exists(), "internal error: wasm doesn't exist where I expected it to be" );
                        artifacts[ artifact_index ].filenames.push( wasm_path.to_str().unwrap().to_owned() );
                        debug!( "Found `.wasm` test artifact: {:?}", wasm_path );
                    }
                }
            },
            _ => {}
        }
    }
    let mut artifact_paths = Vec::new();
    // Give the caller's `postprocess` hook a chance to rewrite each
    // artifact's filename list before we report it.
    for mut artifact in &mut artifacts {
        if let Some( ref mut callback ) = postprocess.as_mut() {
            let filenames = artifact.filenames.iter().map( |filename| Path::new( &filename ).to_owned() ).collect();
            let filenames = callback( filenames );
            artifact.filenames = filenames.into_iter().map( |filename| filename.to_str().unwrap().to_owned() ).collect();
        }
    }
    for mut artifact in artifacts {
        if artifact.filenames.is_empty() {
            continue;
        }
        match self.message_format {
            MessageFormat::Human => {},
            MessageFormat::Json => {
                println!( "{}", serde_json::to_string( &artifact.to_json_value() ).unwrap() );
            }
        }
        for filename in artifact.filenames {
            // NOTE: Since we extract the paths from the JSON
            // we get a list of artifacts as `String`s instead of `PathBuf`s.
            artifact_paths.push( filename.into() )
        }
    }
    CargoResult {
        status: Some( status ),
        artifacts: artifact_paths
    }
}
}
/// The outcome of a cargo invocation: its exit status (if cargo could be
/// launched at all) plus the paths of any artifacts it produced.
pub struct CargoResult {
    // `None` when cargo couldn't be spawned at all.
    status: Option< i32 >,
    artifacts: Vec< PathBuf >
}
impl CargoResult {
    /// Whether cargo was launched successfully *and* exited with status zero.
    pub fn is_ok( &self ) -> bool {
        self.status == Some( 0 )
    }
    /// The artifacts produced by the build, if any.
    pub fn artifacts( &self ) -> &[PathBuf] {
        &self.artifacts
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrHelpers, AttrValue};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::DocumentBinding;
use dom::bindings::codegen::Bindings::DocumentBinding::{DocumentMethods, DocumentReadyState};
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::Bindings::EventTargetBinding::EventTargetMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::Bindings::NodeFilterBinding::NodeFilter;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::codegen::InheritTypes::{DocumentDerived, EventCast, HTMLElementCast};
use dom::bindings::codegen::InheritTypes::{HTMLHeadElementCast, TextCast, ElementCast};
use dom::bindings::codegen::InheritTypes::{DocumentTypeCast, HTMLHtmlElementCast, NodeCast};
use dom::bindings::codegen::InheritTypes::{EventTargetCast, HTMLAnchorElementCast};
use dom::bindings::codegen::InheritTypes::{HTMLAnchorElementDerived, HTMLAppletElementDerived};
use dom::bindings::codegen::InheritTypes::{HTMLAreaElementDerived, HTMLEmbedElementDerived};
use dom::bindings::codegen::InheritTypes::{HTMLFormElementDerived, HTMLImageElementDerived};
use dom::bindings::codegen::InheritTypes::{HTMLScriptElementDerived};
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::error::Error::{NotSupported, InvalidCharacter, Security};
use dom::bindings::error::Error::{HierarchyRequest, NamespaceError};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{MutNullableJS, JS, JSRef, LayoutJS, Temporary, TemporaryPushable};
use dom::bindings::js::{OptionalRootable, RootedReference};
use dom::bindings::refcounted::Trusted;
use dom::bindings::utils::reflect_dom_object;
use dom::bindings::utils::xml_name_type;
use dom::bindings::utils::XMLName::{QName, Name, InvalidXMLName};
use dom::comment::Comment;
use dom::customevent::CustomEvent;
use dom::documentfragment::DocumentFragment;
use dom::documenttype::DocumentType;
use dom::domimplementation::DOMImplementation;
use dom::element::{Element, ElementCreator, AttributeHandlers, get_attribute_parts};
use dom::element::{ElementTypeId, ActivationElementHelpers};
use dom::event::{Event, EventBubbles, EventCancelable, EventHelpers};
use dom::eventtarget::{EventTarget, EventTargetTypeId, EventTargetHelpers};
use dom::htmlanchorelement::HTMLAnchorElement;
use dom::htmlcollection::{HTMLCollection, CollectionFilter};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::htmlheadelement::HTMLHeadElement;
use dom::htmlhtmlelement::HTMLHtmlElement;
use dom::htmltitleelement::HTMLTitleElement;
use dom::location::Location;
use dom::mouseevent::MouseEvent;
use dom::keyboardevent::KeyboardEvent;
use dom::messageevent::MessageEvent;
use dom::node::{self, Node, NodeHelpers, NodeTypeId, CloneChildrenFlag, NodeDamage, window_from_node};
use dom::nodelist::NodeList;
use dom::text::Text;
use dom::processinginstruction::ProcessingInstruction;
use dom::range::Range;
use dom::treewalker::TreeWalker;
use dom::uievent::UIEvent;
use dom::window::{Window, WindowHelpers};
use net::resource_task::ControlMsg::{SetCookiesForUrl, GetCookiesForUrl};
use net::cookie_storage::CookieSource::NonHTTP;
use script_task::Runnable;
use util::namespace;
use util::str::{DOMString, split_html_space_chars};
use layout_interface::{ReflowGoal, ReflowQueryType};
use geom::point::Point2D;
use html5ever::tree_builder::{QuirksMode, NoQuirks, LimitedQuirks, Quirks};
use layout_interface::{LayoutChan, Msg};
use string_cache::{Atom, QualName};
use url::Url;
use js::jsapi::JSRuntime;
use std::borrow::ToOwned;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::ascii::AsciiExt;
use std::cell::{Cell, Ref};
use std::default::Default;
use std::sync::mpsc::channel;
use time;
/// Whether a document is an HTML document or some other kind of document.
#[derive(PartialEq)]
#[jstraceable]
pub enum IsHTMLDocument {
    HTMLDocument,
    NonHTMLDocument,
}
/// The DOM `Document` node.
#[dom_struct]
pub struct Document {
    node: Node,
    window: JS<Window>,
    // Elements indexed by their `id` attribute; maintained via
    // `register_named_element` / `unregister_named_element`.
    idmap: DOMRefCell<HashMap<Atom, Vec<JS<Element>>>>,
    implementation: MutNullableJS<DOMImplementation>,
    location: MutNullableJS<Location>,
    content_type: DOMString,
    last_modified: DOMRefCell<Option<DOMString>>,
    encoding_name: DOMRefCell<DOMString>,
    is_html_document: bool,
    url: Url,
    quirks_mode: Cell<QuirksMode>,
    // Lazily-created cached collections — presumably backing the
    // `document.images`/`.embeds`/… accessors; confirm in DocumentMethods.
    images: MutNullableJS<HTMLCollection>,
    embeds: MutNullableJS<HTMLCollection>,
    links: MutNullableJS<HTMLCollection>,
    forms: MutNullableJS<HTMLCollection>,
    scripts: MutNullableJS<HTMLCollection>,
    anchors: MutNullableJS<HTMLCollection>,
    applets: MutNullableJS<HTMLCollection>,
    ready_state: Cell<DocumentReadyState>,
    /// The element that has most recently requested focus for itself.
    possibly_focused: MutNullableJS<Element>,
    /// The element that currently has the document focus context.
    focused: MutNullableJS<Element>,
}
impl DocumentDerived for EventTarget {
    // An event target is a document iff its type id is the Document node type.
    fn is_document(&self) -> bool {
        *self.type_id() == EventTargetTypeId::Node(NodeTypeId::Document)
    }
}
#[jstraceable]
struct ImagesFilter;
impl CollectionFilter for ImagesFilter {
fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
elem.is_htmlimageelement()
}
}
/// `HTMLCollection` filter keeping only `<embed>` elements
/// (backs `document.embeds` and `document.plugins`).
#[jstraceable]
struct EmbedsFilter;
impl CollectionFilter for EmbedsFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        elem.is_htmlembedelement()
    }
}
/// `HTMLCollection` filter keeping `<a>` and `<area>` elements that carry an
/// `href` attribute (backs `document.links`).
#[jstraceable]
struct LinksFilter;
impl CollectionFilter for LinksFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        let is_link_element = elem.is_htmlanchorelement() || elem.is_htmlareaelement();
        is_link_element && elem.has_attribute(&atom!("href"))
    }
}
/// `HTMLCollection` filter keeping only `<form>` elements
/// (backs `document.forms`).
#[jstraceable]
struct FormsFilter;
impl CollectionFilter for FormsFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        elem.is_htmlformelement()
    }
}
/// `HTMLCollection` filter keeping only `<script>` elements
/// (backs `document.scripts`).
#[jstraceable]
struct ScriptsFilter;
impl CollectionFilter for ScriptsFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        elem.is_htmlscriptelement()
    }
}
/// `HTMLCollection` filter keeping `<a>` elements that carry an `href`
/// attribute (backs `document.anchors`).
#[jstraceable]
struct AnchorsFilter;
impl CollectionFilter for AnchorsFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        // Guard on the element type first, then require an href.
        if !elem.is_htmlanchorelement() {
            return false;
        }
        elem.has_attribute(&atom!("href"))
    }
}
/// `HTMLCollection` filter keeping only `<applet>` elements
/// (backs `document.applets`).
#[jstraceable]
struct AppletsFilter;
impl CollectionFilter for AppletsFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        elem.is_htmlappletelement()
    }
}
/// Internal (non-IDL) helper operations on a rooted `Document`.
pub trait DocumentHelpers<'a> {
    /// Returns an unrooted handle to this document's window.
    fn window(self) -> Temporary<Window>;
    /// Borrows the document's encoding name.
    fn encoding_name(self) -> Ref<'a, DOMString>;
    /// Whether this document was created as an HTML document.
    fn is_html_document(self) -> bool;
    /// Returns a clone of the document's URL.
    fn url(self) -> Url;
    fn quirks_mode(self) -> QuirksMode;
    /// Sets the quirks mode and notifies layout when entering full quirks.
    fn set_quirks_mode(self, mode: QuirksMode);
    fn set_last_modified(self, value: DOMString);
    fn set_encoding_name(self, name: DOMString);
    /// Marks `node` dirty for layout after a content change.
    fn content_changed(self, node: JSRef<Node>, damage: NodeDamage);
    /// Marks `node` and its ancestors dirty for layout.
    fn content_and_heritage_changed(self, node: JSRef<Node>, damage: NodeDamage);
    /// Removes `to_unregister` from the id map entry for `id`.
    fn unregister_named_element(self, to_unregister: JSRef<Element>, id: Atom);
    /// Records `element` under `id` in the id map (tree order preserved).
    fn register_named_element(self, element: JSRef<Element>, id: Atom);
    /// Navigates the window to `href` (anchor activation).
    fn load_anchor_href(self, href: DOMString);
    /// Finds the element indicated by a fragment identifier, if any.
    fn find_fragment_node(self, fragid: DOMString) -> Option<Temporary<Element>>;
    /// Updates the readiness state and fires `readystatechange`.
    fn set_ready_state(self, state: DocumentReadyState);
    /// Returns the element that currently has focus, if any.
    fn get_focused_element(self) -> Option<Temporary<Element>>;
    fn begin_focus_transaction(self);
    fn request_focus(self, elem: JSRef<Element>);
    fn commit_focus_transaction(self);
    /// Sends this document's title to the compositor.
    fn send_title_to_compositor(self);
    /// Marks every node in the document dirty for layout.
    fn dirty_all_nodes(self);
    /// Hit-tests `point` and runs click activation on the hit element.
    fn handle_click_event(self, js_runtime: *mut JSRuntime, _button: uint, point: Point2D<f32>);
}
impl<'a> DocumentHelpers<'a> for JSRef<'a, Document> {
    /// Returns an unrooted handle to this document's window.
    #[inline]
    fn window(self) -> Temporary<Window> {
        Temporary::new(self.window)
    }
    /// Borrows the document's encoding name.
    #[inline]
    fn encoding_name(self) -> Ref<'a, DOMString> {
        self.extended_deref().encoding_name.borrow()
    }
    /// Whether this document was created as an HTML document.
    #[inline]
    fn is_html_document(self) -> bool {
        self.is_html_document
    }
    // http://dom.spec.whatwg.org/#dom-document-url
    fn url(self) -> Url {
        self.url.clone()
    }
    fn quirks_mode(self) -> QuirksMode {
        self.quirks_mode.get()
    }
    /// Sets the quirks mode; entering full quirks also notifies the layout
    /// task so it can adjust its style handling.
    fn set_quirks_mode(self, mode: QuirksMode) {
        self.quirks_mode.set(mode);
        match mode {
            Quirks => {
                let window = self.window.root();
                let window = window.r();
                let LayoutChan(ref layout_chan) = window.page().layout_chan;
                layout_chan.send(Msg::SetQuirksMode).unwrap();
            }
            NoQuirks | LimitedQuirks => {}
        }
    }
    fn set_last_modified(self, value: DOMString) {
        *self.last_modified.borrow_mut() = Some(value);
    }
    fn set_encoding_name(self, name: DOMString) {
        *self.encoding_name.borrow_mut() = name;
    }
    /// Marks `node` dirty for layout after a content change.
    fn content_changed(self, node: JSRef<Node>, damage: NodeDamage) {
        node.dirty(damage);
    }
    /// Marks `node` and all of its ancestors dirty for layout.
    fn content_and_heritage_changed(self, node: JSRef<Node>, damage: NodeDamage) {
        debug!("content_and_heritage_changed on {}", node.debug_str());
        node.force_dirty_ancestors(damage);
        node.dirty(damage);
    }
    /// Remove any existing association between the provided id and any elements in this document.
    fn unregister_named_element(self,
                                to_unregister: JSRef<Element>,
                                id: Atom) {
        let mut idmap = self.idmap.borrow_mut();
        let is_empty = match idmap.get_mut(&id) {
            None => false,
            Some(elements) => {
                // The element must be present: registration and
                // unregistration are expected to be balanced.
                let position = elements.iter()
                                       .map(|elem| elem.root())
                                       .position(|element| element.r() == to_unregister)
                                       .expect("This element should be registered.");
                elements.remove(position);
                elements.is_empty()
            }
        };
        // Drop the map entry entirely once no element carries this id.
        if is_empty {
            idmap.remove(&id);
        }
    }
    /// Associate an element present in this document with the provided id.
    fn register_named_element(self,
                              element: JSRef<Element>,
                              id: Atom) {
        assert!({
            let node: JSRef<Node> = NodeCast::from_ref(element);
            node.is_in_doc()
        });
        assert!(!id.as_slice().is_empty());
        let mut idmap = self.idmap.borrow_mut();
        let root = self.GetDocumentElement().expect("The element is in the document, so there must be a document element.").root();
        match idmap.entry(id) {
            Vacant(entry) => {
                entry.insert(vec!(element.unrooted()));
            }
            Occupied(entry) => {
                let elements = entry.into_mut();
                // Walk the tree in preorder to find the insertion point that
                // keeps `elements` in tree order.
                let new_node: JSRef<Node> = NodeCast::from_ref(element);
                let mut head: uint = 0u;
                let root: JSRef<Node> = NodeCast::from_ref(root.r());
                for node in root.traverse_preorder() {
                    let elem: Option<JSRef<Element>> = ElementCast::to_ref(node);
                    match elem {
                        None => {},
                        Some(elem) => {
                            if (*elements)[head].root().r() == elem {
                                head += 1;
                            }
                            if new_node == node || head == elements.len() {
                                break;
                            }
                        }
                    }
                }
                elements.insert_unrooted(head, &element);
            }
        }
    }
    /// Navigates this document's window to `href`.
    fn load_anchor_href(self, href: DOMString) {
        let window = self.window.root();
        window.r().load_url(href);
    }
    /// Attempt to find a named element in this page's document.
    /// https://html.spec.whatwg.org/multipage/#the-indicated-part-of-the-document
    fn find_fragment_node(self, fragid: DOMString) -> Option<Temporary<Element>> {
        // Prefer an element whose id matches; fall back to an anchor whose
        // name attribute matches the fragment.
        self.GetElementById(fragid.clone()).or_else(|| {
            let check_anchor = |&:&node: &JSRef<HTMLAnchorElement>| {
                let elem: JSRef<Element> = ElementCast::from_ref(node);
                elem.get_attribute(ns!(""), &atom!("name")).root().map_or(false, |attr| {
                    attr.r().value().as_slice() == fragid.as_slice()
                })
            };
            let doc_node: JSRef<Node> = NodeCast::from_ref(self);
            doc_node.traverse_preorder()
                    .filter_map(|node| HTMLAnchorElementCast::to_ref(node))
                    .find(check_anchor)
                    .map(|node| Temporary::from_rooted(ElementCast::from_ref(node)))
        })
    }
    // https://html.spec.whatwg.org/multipage/dom.html#current-document-readiness
    fn set_ready_state(self, state: DocumentReadyState) {
        self.ready_state.set(state);
        // Fire a non-bubbling, non-cancelable readystatechange event at the
        // document itself.
        let window = self.window.root();
        let event = Event::new(GlobalRef::Window(window.r()), "readystatechange".to_owned(),
                               EventBubbles::DoesNotBubble,
                               EventCancelable::NotCancelable).root();
        let target: JSRef<EventTarget> = EventTargetCast::from_ref(self);
        let _ = event.r().fire(target);
    }
    /// Return the element that currently has focus.
    // https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#events-focusevent-doc-focus
    fn get_focused_element(self) -> Option<Temporary<Element>> {
        self.focused.get()
    }
    /// Initiate a new round of checking for elements requesting focus. The last element to call
    /// `request_focus` before `commit_focus_transaction` is called will receive focus.
    fn begin_focus_transaction(self) {
        self.possibly_focused.clear();
    }
    /// Request that the given element receive focus once the current transaction is complete.
    fn request_focus(self, elem: JSRef<Element>) {
        self.possibly_focused.assign(Some(elem))
    }
    /// Reassign the focus context to the element that last requested focus during this
    /// transaction, or none if no elements requested it.
    fn commit_focus_transaction(self) {
        //TODO: dispatch blur, focus, focusout, and focusin events
        self.focused.assign(self.possibly_focused.get());
    }
    /// Sends this document's title to the compositor.
    fn send_title_to_compositor(self) {
        let window = self.window().root();
        window.r().page().send_title_to_compositor();
    }
    /// Marks every node in the document dirty for layout.
    fn dirty_all_nodes(self) {
        let root: JSRef<Node> = NodeCast::from_ref(self);
        for node in root.traverse_preorder() {
            node.dirty(NodeDamage::OtherNodeDamage)
        }
    }
    /// Hit-tests `point` against layout, then runs authentic click
    /// activation on the hit element (or its nearest element ancestor),
    /// wrapped in a focus transaction.
    fn handle_click_event(self, js_runtime: *mut JSRuntime, _button: uint, point: Point2D<f32>) {
        debug!("ClickEvent: clicked at {:?}", point);
        let window = self.window.root();
        let window = window.r();
        let page = window.page();
        // Ask layout which node is under the point; bail if nothing is hit.
        let node = match page.hit_test(&point) {
            Some(node_address) => {
                debug!("node address is {:?}", node_address.0);
                node::from_untrusted_node_address(js_runtime, node_address)
            },
            None => return,
        }.root();
        // The hit node may be e.g. a text node; climb to the nearest element.
        let el = match ElementCast::to_ref(node.r()) {
            Some(el) => el,
            None => {
                let ancestor = node.r()
                                   .ancestors()
                                   .filter_map(ElementCast::to_ref)
                                   .next();
                match ancestor {
                    Some(ancestor) => ancestor,
                    None => return,
                }
            },
        };
        let node: JSRef<Node> = NodeCast::from_ref(el);
        debug!("clicked on {:?}", node.debug_str());
        // Prevent click event if form control element is disabled.
        if node.click_event_filter_by_disabled_state() {
            return;
        }
        match *page.frame() {
            Some(ref frame) => {
                let window = frame.window.root();
                let doc = window.r().Document().root();
                doc.r().begin_focus_transaction();
                let event = Event::new(GlobalRef::Window(window.r()),
                                       "click".to_owned(),
                                       EventBubbles::Bubbles,
                                       EventCancelable::Cancelable).root();
                // https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#trusted-events
                event.r().set_trusted(true);
                // https://html.spec.whatwg.org/multipage/interaction.html#run-authentic-click-activation-steps
                el.authentic_click_activation(event.r());
                doc.r().commit_focus_transaction();
                // Reflow so any DOM changes from the click become visible.
                window.r().flush_layout(ReflowGoal::ForDisplay, ReflowQueryType::NoQuery);
            }
            None => {}
        }
    }
}
/// Whether a document came from the HTML parser; parser-created documents
/// start in the "loading" readiness state.
#[derive(PartialEq)]
pub enum DocumentSource {
    FromParser,
    NotFromParser,
}
/// Document accessors usable from the layout task, which may not touch the
/// DOM through the usual rooted interfaces.
pub trait LayoutDocumentHelpers {
    /// Whether this is an HTML document. Unsafe: reads the DOM object
    /// directly, bypassing rooting.
    unsafe fn is_html_document_for_layout(&self) -> bool;
}
impl LayoutDocumentHelpers for LayoutJS<Document> {
    #[allow(unrooted_must_root)]
    #[inline]
    unsafe fn is_html_document_for_layout(&self) -> bool {
        // Direct field read via the unsafe layout pointer.
        (*self.unsafe_get()).is_html_document
    }
}
impl Document {
    /// Builds the plain (not-yet-reflected) `Document` value.
    fn new_inherited(window: JSRef<Window>,
                     url: Option<Url>,
                     is_html_document: IsHTMLDocument,
                     content_type: Option<DOMString>,
                     source: DocumentSource) -> Document {
        // Documents with no explicit URL default to about:blank.
        let url = url.unwrap_or_else(|| Url::parse("about:blank").unwrap());
        // Parser-created documents start "loading"; synthetic documents are
        // immediately "complete".
        let ready_state = if source == DocumentSource::FromParser {
            DocumentReadyState::Loading
        } else {
            DocumentReadyState::Complete
        };
        Document {
            node: Node::new_without_doc(NodeTypeId::Document),
            window: JS::from_rooted(window),
            idmap: DOMRefCell::new(HashMap::new()),
            implementation: Default::default(),
            location: Default::default(),
            // Explicit content type wins; otherwise derive it from the
            // document flavor.
            content_type: match content_type {
                Some(string) => string.clone(),
                None => match is_html_document {
                    // http://dom.spec.whatwg.org/#dom-domimplementation-createhtmldocument
                    IsHTMLDocument::HTMLDocument => "text/html".to_owned(),
                    // http://dom.spec.whatwg.org/#concept-document-content-type
                    IsHTMLDocument::NonHTMLDocument => "application/xml".to_owned()
                }
            },
            last_modified: DOMRefCell::new(None),
            url: url,
            // http://dom.spec.whatwg.org/#concept-document-quirks
            quirks_mode: Cell::new(NoQuirks),
            // http://dom.spec.whatwg.org/#concept-document-encoding
            encoding_name: DOMRefCell::new("UTF-8".to_owned()),
            is_html_document: is_html_document == IsHTMLDocument::HTMLDocument,
            images: Default::default(),
            embeds: Default::default(),
            links: Default::default(),
            forms: Default::default(),
            scripts: Default::default(),
            anchors: Default::default(),
            applets: Default::default(),
            ready_state: Cell::new(ready_state),
            possibly_focused: Default::default(),
            focused: Default::default(),
        }
    }
    // http://dom.spec.whatwg.org/#dom-document
    pub fn Constructor(global: GlobalRef) -> Fallible<Temporary<Document>> {
        Ok(Document::new(global.as_window(), None,
                         IsHTMLDocument::NonHTMLDocument, None,
                         DocumentSource::NotFromParser))
    }
    /// Creates and reflects a new document, then makes it its own owner
    /// document.
    pub fn new(window: JSRef<Window>,
               url: Option<Url>,
               doctype: IsHTMLDocument,
               content_type: Option<DOMString>,
               source: DocumentSource) -> Temporary<Document> {
        let document = reflect_dom_object(box Document::new_inherited(window, url, doctype,
                                                                      content_type, source),
                                          GlobalRef::Window(window),
                                          DocumentBinding::Wrap).root();
        let node: JSRef<Node> = NodeCast::from_ref(document.r());
        // A document node is its own owner document.
        node.set_owner_doc(document.r());
        Temporary::from_rooted(document.r())
    }
}
/// Module-private helpers for `Document`.
trait PrivateDocumentHelpers {
    /// Builds a static `NodeList` of all nodes matching `callback`.
    fn createNodeList<F: Fn(JSRef<Node>) -> bool>(self, callback: F) -> Temporary<NodeList>;
    /// Returns the root element if it is an `<html>` element.
    fn get_html_element(self) -> Option<Temporary<HTMLHtmlElement>>;
}
impl<'a> PrivateDocumentHelpers for JSRef<'a, Document> {
    /// Collects, in preorder, every node under the document element for
    /// which `callback` returns true, as a simple (non-live) NodeList.
    fn createNodeList<F: Fn(JSRef<Node>) -> bool>(self, callback: F) -> Temporary<NodeList> {
        let window = self.window.root();
        let document_element = self.GetDocumentElement().root();
        // No document element means an empty list.
        let nodes = match document_element {
            None => vec!(),
            Some(ref root) => {
                let root: JSRef<Node> = NodeCast::from_ref(root.r());
                root.traverse_preorder().filter(|&node| callback(node)).collect()
            }
        };
        NodeList::new_simple_list(window.r(), nodes)
    }
    /// Returns the document element, but only when it is an `<html>`
    /// element; None otherwise.
    fn get_html_element(self) -> Option<Temporary<HTMLHtmlElement>> {
        self.GetDocumentElement()
            .root()
            .r()
            .and_then(HTMLHtmlElementCast::to_ref)
            .map(Temporary::from_rooted)
    }
}
/// Helper for suppressing click events on disabled form controls.
trait PrivateClickEventHelpers {
    /// True when this node is a disabled form-control element, in which case
    /// the click event must not be dispatched.
    fn click_event_filter_by_disabled_state(&self) -> bool;
}
impl<'a> PrivateClickEventHelpers for JSRef<'a, Node> {
    /// Returns true when this node is a form-control element that is
    /// currently disabled; such elements must not receive click events.
    fn click_event_filter_by_disabled_state(&self) -> bool {
        // First classify the node, then consult the disabled flag; only the
        // listed control types can suppress clicks.
        let is_clickable_control = match self.type_id() {
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLButtonElement)) |
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLInputElement)) |
            // NodeTypeId::Element(ElementTypeId::HTMLKeygenElement) |
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLOptionElement)) |
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLSelectElement)) |
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTextAreaElement)) => true,
            _ => false
        };
        is_clickable_control && self.get_disabled_state()
    }
}
/// WebIDL-generated `Document` interface methods.
impl<'a> DocumentMethods for JSRef<'a, Document> {
    // http://dom.spec.whatwg.org/#dom-document-implementation
    fn Implementation(self) -> Temporary<DOMImplementation> {
        self.implementation.or_init(|| DOMImplementation::new(self))
    }
    // http://dom.spec.whatwg.org/#dom-document-url
    fn URL(self) -> DOMString {
        self.url().serialize()
    }
    // http://dom.spec.whatwg.org/#dom-document-documenturi
    fn DocumentURI(self) -> DOMString {
        self.URL()
    }
    // http://dom.spec.whatwg.org/#dom-document-compatmode
    fn CompatMode(self) -> DOMString {
        match self.quirks_mode.get() {
            LimitedQuirks | NoQuirks => "CSS1Compat".to_owned(),
            Quirks => "BackCompat".to_owned()
        }
    }
    // http://dom.spec.whatwg.org/#dom-document-characterset
    fn CharacterSet(self) -> DOMString {
        self.encoding_name.borrow().clone()
    }
    // http://dom.spec.whatwg.org/#dom-document-inputencoding
    fn InputEncoding(self) -> DOMString {
        self.encoding_name.borrow().clone()
    }
    // http://dom.spec.whatwg.org/#dom-document-content_type
    fn ContentType(self) -> DOMString {
        self.content_type.clone()
    }
    // http://dom.spec.whatwg.org/#dom-document-doctype
    fn GetDoctype(self) -> Option<Temporary<DocumentType>> {
        // The doctype is the first DocumentType child of the document node.
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.children()
            .filter_map(DocumentTypeCast::to_ref)
            .next()
            .map(Temporary::from_rooted)
    }
    // http://dom.spec.whatwg.org/#dom-document-documentelement
    fn GetDocumentElement(self) -> Option<Temporary<Element>> {
        // The document element is the first element child of the document.
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.child_elements().next().map(Temporary::from_rooted)
    }
    // http://dom.spec.whatwg.org/#dom-document-getelementsbytagname
    fn GetElementsByTagName(self, tag_name: DOMString) -> Temporary<HTMLCollection> {
        let window = self.window.root();
        HTMLCollection::by_tag_name(window.r(), NodeCast::from_ref(self), tag_name)
    }
    // http://dom.spec.whatwg.org/#dom-document-getelementsbytagnamens
    fn GetElementsByTagNameNS(self, maybe_ns: Option<DOMString>, tag_name: DOMString) -> Temporary<HTMLCollection> {
        let window = self.window.root();
        HTMLCollection::by_tag_name_ns(window.r(), NodeCast::from_ref(self), tag_name, maybe_ns)
    }
    // http://dom.spec.whatwg.org/#dom-document-getelementsbyclassname
    fn GetElementsByClassName(self, classes: DOMString) -> Temporary<HTMLCollection> {
        let window = self.window.root();
        HTMLCollection::by_class_name(window.r(), NodeCast::from_ref(self), classes)
    }
    // http://dom.spec.whatwg.org/#dom-nonelementparentnode-getelementbyid
    fn GetElementById(self, id: DOMString) -> Option<Temporary<Element>> {
        let id = Atom::from_slice(id.as_slice());
        // The idmap keeps elements in tree order, so the first entry is the
        // one getElementById must return.
        match self.idmap.borrow().get(&id) {
            None => None,
            Some(ref elements) => Some(Temporary::new((*elements)[0].clone())),
        }
    }
    // http://dom.spec.whatwg.org/#dom-document-createelement
    fn CreateElement(self, local_name: DOMString) -> Fallible<Temporary<Element>> {
        if xml_name_type(local_name.as_slice()) == InvalidXMLName {
            debug!("Not a valid element name");
            return Err(InvalidCharacter);
        }
        // HTML documents lowercase the requested name.
        let local_name = if self.is_html_document {
            local_name.as_slice().to_ascii_lowercase()
        } else {
            local_name
        };
        let name = QualName::new(ns!(HTML), Atom::from_slice(local_name.as_slice()));
        Ok(Element::create(name, None, self, ElementCreator::ScriptCreated))
    }
    // http://dom.spec.whatwg.org/#dom-document-createelementns
    fn CreateElementNS(self,
                       namespace: Option<DOMString>,
                       qualified_name: DOMString) -> Fallible<Temporary<Element>> {
        let ns = namespace::from_domstring(namespace);
        match xml_name_type(qualified_name.as_slice()) {
            InvalidXMLName => {
                debug!("Not a valid element name");
                return Err(InvalidCharacter);
            },
            Name => {
                debug!("Not a valid qualified element name");
                return Err(NamespaceError);
            },
            QName => {}
        }
        let (prefix_from_qname, local_name_from_qname)
            = get_attribute_parts(qualified_name.as_slice());
        // Validate the namespace/prefix combination per the spec.
        match (&ns, prefix_from_qname, local_name_from_qname) {
            // throw if prefix is not null and namespace is null
            (&ns!(""), Some(_), _) => {
                debug!("Namespace can't be null with a non-null prefix");
                return Err(NamespaceError);
            },
            // throw if prefix is "xml" and namespace is not the XML namespace
            (_, Some(ref prefix), _) if "xml" == prefix.as_slice() && ns != ns!(XML) => {
                debug!("Namespace must be the xml namespace if the prefix is 'xml'");
                return Err(NamespaceError);
            },
            // throw if namespace is the XMLNS namespace and neither qualifiedName nor prefix is "xmlns"
            (&ns!(XMLNS), Some(ref prefix), _) if "xmlns" == prefix.as_slice() => {},
            (&ns!(XMLNS), _, "xmlns") => {},
            (&ns!(XMLNS), _, _) => {
                debug!("The prefix or the qualified name must be 'xmlns' if namespace is the XMLNS namespace ");
                return Err(NamespaceError);
            },
            _ => {}
        }
        let name = QualName::new(ns, Atom::from_slice(local_name_from_qname));
        Ok(Element::create(name, prefix_from_qname.map(|s| s.to_owned()), self,
                           ElementCreator::ScriptCreated))
    }
    // http://dom.spec.whatwg.org/#dom-document-createattribute
    fn CreateAttribute(self, local_name: DOMString) -> Fallible<Temporary<Attr>> {
        if xml_name_type(local_name.as_slice()) == InvalidXMLName {
            debug!("Not a valid element name");
            return Err(InvalidCharacter);
        }
        let window = self.window.root();
        let name = Atom::from_slice(local_name.as_slice());
        // repetition used because string_cache::atom::Atom is non-copyable
        let l_name = Atom::from_slice(local_name.as_slice());
        let value = AttrValue::String("".to_owned());
        Ok(Attr::new(window.r(), name, value, l_name, ns!(""), None, None))
    }
    // http://dom.spec.whatwg.org/#dom-document-createdocumentfragment
    fn CreateDocumentFragment(self) -> Temporary<DocumentFragment> {
        DocumentFragment::new(self)
    }
    // http://dom.spec.whatwg.org/#dom-document-createtextnode
    fn CreateTextNode(self, data: DOMString)
                      -> Temporary<Text> {
        Text::new(data, self)
    }
    // http://dom.spec.whatwg.org/#dom-document-createcomment
    fn CreateComment(self, data: DOMString) -> Temporary<Comment> {
        Comment::new(data, self)
    }
    // http://dom.spec.whatwg.org/#dom-document-createprocessinginstruction
    fn CreateProcessingInstruction(self, target: DOMString,
                                   data: DOMString) -> Fallible<Temporary<ProcessingInstruction>> {
        // Step 1.
        if xml_name_type(target.as_slice()) == InvalidXMLName {
            return Err(InvalidCharacter);
        }
        // Step 2.
        if data.as_slice().contains("?>") {
            return Err(InvalidCharacter);
        }
        // Step 3.
        Ok(ProcessingInstruction::new(target, data, self))
    }
    // http://dom.spec.whatwg.org/#dom-document-importnode
    fn ImportNode(self, node: JSRef<Node>, deep: bool) -> Fallible<Temporary<Node>> {
        // Step 1.
        if node.is_document() {
            return Err(NotSupported);
        }
        // Step 2.
        let clone_children = match deep {
            true => CloneChildrenFlag::CloneChildren,
            false => CloneChildrenFlag::DoNotCloneChildren
        };
        Ok(Node::clone(node, Some(self), clone_children))
    }
    // http://dom.spec.whatwg.org/#dom-document-adoptnode
    fn AdoptNode(self, node: JSRef<Node>) -> Fallible<Temporary<Node>> {
        // Step 1.
        if node.is_document() {
            return Err(NotSupported);
        }
        // Step 2.
        Node::adopt(node, self);
        // Step 3.
        Ok(Temporary::from_rooted(node))
    }
    // http://dom.spec.whatwg.org/#dom-document-createevent
    fn CreateEvent(self, interface: DOMString) -> Fallible<Temporary<Event>> {
        let window = self.window.root();
        // Interface names are matched case-insensitively.
        match interface.as_slice().to_ascii_lowercase().as_slice() {
            "uievents" | "uievent" => Ok(EventCast::from_temporary(
                UIEvent::new_uninitialized(window.r()))),
            "mouseevents" | "mouseevent" => Ok(EventCast::from_temporary(
                MouseEvent::new_uninitialized(window.r()))),
            "customevent" => Ok(EventCast::from_temporary(
                CustomEvent::new_uninitialized(GlobalRef::Window(window.r())))),
            "htmlevents" | "events" | "event" => Ok(Event::new_uninitialized(
                GlobalRef::Window(window.r()))),
            "keyboardevent" | "keyevents" => Ok(EventCast::from_temporary(
                KeyboardEvent::new_uninitialized(window.r()))),
            "messageevent" => Ok(EventCast::from_temporary(
                MessageEvent::new_uninitialized(GlobalRef::Window(window.r())))),
            _ => Err(NotSupported)
        }
    }
    // http://www.whatwg.org/html/#dom-document-lastmodified
    fn LastModified(self) -> DOMString {
        // Falls back to the current time when no Last-Modified value is set.
        match *self.last_modified.borrow() {
            Some(ref t) => t.clone(),
            None => format!("{}", time::now().strftime("%m/%d/%Y %H:%M:%S").unwrap()),
        }
    }
    // http://dom.spec.whatwg.org/#dom-document-createrange
    fn CreateRange(self) -> Temporary<Range> {
        Range::new(self)
    }
    // http://dom.spec.whatwg.org/#dom-document-createtreewalker
    fn CreateTreeWalker(self, root: JSRef<Node>, whatToShow: u32, filter: Option<NodeFilter>)
                        -> Temporary<TreeWalker> {
        TreeWalker::new(self, root, whatToShow, filter)
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#document.title
    fn Title(self) -> DOMString {
        // Concatenate the text children of the first <title> element, then
        // collapse/normalize HTML whitespace.
        let mut title = String::new();
        self.GetDocumentElement().root().map(|root| {
            let root: JSRef<Node> = NodeCast::from_ref(root.r());
            root.traverse_preorder()
                .find(|node| node.type_id() == NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
                .map(|title_elem| {
                    let children = title_elem.children().filter_map(|n| {
                        let t: Option<JSRef<Text>> = TextCast::to_ref(n);
                        t
                    });
                    for text in children {
                        title.push_str(text.characterdata().data().as_slice());
                    }
                });
        });
        let v: Vec<&str> = split_html_space_chars(title.as_slice()).collect();
        v.connect(" ")
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#document.title
    fn SetTitle(self, title: DOMString) -> ErrorResult {
        self.GetDocumentElement().root().map(|root| {
            let root: JSRef<Node> = NodeCast::from_ref(root.r());
            let head_node = root.traverse_preorder().find(|child| {
                child.type_id() == NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLHeadElement))
            });
            head_node.map(|head| {
                let title_node = head.children().find(|child| {
                    child.type_id() == NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement))
                });
                match title_node {
                    Some(ref title_node) => {
                        // Reuse the existing <title>: clear it, then append
                        // the new text (if any).
                        for title_child in title_node.children() {
                            assert!(title_node.RemoveChild(title_child).is_ok());
                        }
                        if !title.is_empty() {
                            let new_text = self.CreateTextNode(title.clone()).root();
                            assert!(title_node.AppendChild(NodeCast::from_ref(new_text.r())).is_ok());
                        }
                    },
                    None => {
                        // No <title> yet: create one under <head>.
                        let new_title = HTMLTitleElement::new("title".to_owned(), None, self).root();
                        let new_title: JSRef<Node> = NodeCast::from_ref(new_title.r());
                        if !title.is_empty() {
                            let new_text = self.CreateTextNode(title.clone()).root();
                            assert!(new_title.AppendChild(NodeCast::from_ref(new_text.r())).is_ok());
                        }
                        assert!(head.AppendChild(new_title).is_ok());
                    },
                }
            });
        });
        Ok(())
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#dom-document-head
    fn GetHead(self) -> Option<Temporary<HTMLHeadElement>> {
        self.get_html_element().and_then(|root| {
            let root = root.root();
            let node: JSRef<Node> = NodeCast::from_ref(root.r());
            node.children().filter_map(HTMLHeadElementCast::to_ref).next().map(Temporary::from_rooted)
        })
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#dom-document-body
    fn GetBody(self) -> Option<Temporary<HTMLElement>> {
        // The body is the first <body> or <frameset> child of the root.
        self.get_html_element().and_then(|root| {
            let root = root.root();
            let node: JSRef<Node> = NodeCast::from_ref(root.r());
            node.children().find(|child| {
                match child.type_id() {
                    NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLBodyElement)) |
                    NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLFrameSetElement)) => true,
                    _ => false
                }
            }).map(|node| {
                Temporary::from_rooted(HTMLElementCast::to_ref(node).unwrap())
            })
        })
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#dom-document-body
    fn SetBody(self, new_body: Option<JSRef<HTMLElement>>) -> ErrorResult {
        // Step 1.
        let new_body = match new_body {
            Some(new_body) => new_body,
            None => return Err(HierarchyRequest),
        };
        // Only <body> or <frameset> may become the body.
        let node: JSRef<Node> = NodeCast::from_ref(new_body);
        match node.type_id() {
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLBodyElement)) |
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLFrameSetElement)) => {}
            _ => return Err(HierarchyRequest)
        }
        // Step 2.
        let old_body = self.GetBody().root();
        if old_body.as_ref().map(|body| body.r()) == Some(new_body) {
            return Ok(());
        }
        // Step 3.
        match self.get_html_element().root() {
            // Step 4.
            None => return Err(HierarchyRequest),
            Some(ref root) => {
                // Replace the existing body, or append when there is none.
                let new_body: JSRef<Node> = NodeCast::from_ref(new_body);
                let root: JSRef<Node> = NodeCast::from_ref(root.r());
                match old_body {
                    Some(ref child) => {
                        let child: JSRef<Node> = NodeCast::from_ref(child.r());
                        assert!(root.ReplaceChild(new_body, child).is_ok())
                    }
                    None => assert!(root.AppendChild(new_body).is_ok())
                };
            }
        }
        Ok(())
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#dom-document-getelementsbyname
    fn GetElementsByName(self, name: DOMString) -> Temporary<NodeList> {
        self.createNodeList(|node| {
            let element: JSRef<Element> = match ElementCast::to_ref(node) {
                Some(element) => element,
                None => return false,
            };
            element.get_attribute(ns!(""), &atom!("name")).root().map_or(false, |attr| {
                attr.r().value().as_slice() == name.as_slice()
            })
        })
    }
    // https://html.spec.whatwg.org/multipage/#dom-document-images
    fn Images(self) -> Temporary<HTMLCollection> {
        self.images.or_init(|| {
            let window = self.window.root();
            let root = NodeCast::from_ref(self);
            let filter = box ImagesFilter;
            HTMLCollection::create(window.r(), root, filter)
        })
    }
    // https://html.spec.whatwg.org/multipage/#dom-document-embeds
    fn Embeds(self) -> Temporary<HTMLCollection> {
        self.embeds.or_init(|| {
            let window = self.window.root();
            let root = NodeCast::from_ref(self);
            let filter = box EmbedsFilter;
            HTMLCollection::create(window.r(), root, filter)
        })
    }
    // document.plugins is an alias for document.embeds.
    fn Plugins(self) -> Temporary<HTMLCollection> {
        self.Embeds()
    }
    // https://html.spec.whatwg.org/multipage/#dom-document-links
    fn Links(self) -> Temporary<HTMLCollection> {
        self.links.or_init(|| {
            let window = self.window.root();
            let root = NodeCast::from_ref(self);
            let filter = box LinksFilter;
            HTMLCollection::create(window.r(), root, filter)
        })
    }
    // https://html.spec.whatwg.org/multipage/#dom-document-forms
    fn Forms(self) -> Temporary<HTMLCollection> {
        self.forms.or_init(|| {
            let window = self.window.root();
            let root = NodeCast::from_ref(self);
            let filter = box FormsFilter;
            HTMLCollection::create(window.r(), root, filter)
        })
    }
    // https://html.spec.whatwg.org/multipage/#dom-document-scripts
    fn Scripts(self) -> Temporary<HTMLCollection> {
        self.scripts.or_init(|| {
            let window = self.window.root();
            let root = NodeCast::from_ref(self);
            let filter = box ScriptsFilter;
            HTMLCollection::create(window.r(), root, filter)
        })
    }
    // https://html.spec.whatwg.org/multipage/#dom-document-anchors
    fn Anchors(self) -> Temporary<HTMLCollection> {
        self.anchors.or_init(|| {
            let window = self.window.root();
            let root = NodeCast::from_ref(self);
            let filter = box AnchorsFilter;
            HTMLCollection::create(window.r(), root, filter)
        })
    }
    fn Applets(self) -> Temporary<HTMLCollection> {
        // FIXME: This should return the OBJECT elements containing applets.
        self.applets.or_init(|| {
            let window = self.window.root();
            let root = NodeCast::from_ref(self);
            let filter = box AppletsFilter;
            HTMLCollection::create(window.r(), root, filter)
        })
    }
    // Lazily creates the Location object for this document's window/page.
    fn Location(self) -> Temporary<Location> {
        let window = self.window.root();
        let window = window.r();
        self.location.or_init(|| Location::new(window, window.page_clone()))
    }
    // http://dom.spec.whatwg.org/#dom-parentnode-children
    fn Children(self) -> Temporary<HTMLCollection> {
        let window = self.window.root();
        HTMLCollection::children(window.r(), NodeCast::from_ref(self))
    }
    // http://dom.spec.whatwg.org/#dom-parentnode-queryselector
    fn QuerySelector(self, selectors: DOMString) -> Fallible<Option<Temporary<Element>>> {
        let root: JSRef<Node> = NodeCast::from_ref(self);
        root.query_selector(selectors)
    }
    // http://dom.spec.whatwg.org/#dom-parentnode-queryselectorall
    fn QuerySelectorAll(self, selectors: DOMString) -> Fallible<Temporary<NodeList>> {
        let root: JSRef<Node> = NodeCast::from_ref(self);
        root.query_selector_all(selectors)
    }
    // https://html.spec.whatwg.org/multipage/dom.html#dom-document-readystate
    fn ReadyState(self) -> DocumentReadyState {
        self.ready_state.get()
    }
    // https://html.spec.whatwg.org/multipage/browsers.html#dom-document-defaultview
    fn DefaultView(self) -> Temporary<Window> {
        Temporary::new(self.window)
    }
    // https://html.spec.whatwg.org/multipage/dom.html#dom-document-cookie
    fn GetCookie(self) -> Fallible<DOMString> {
        //TODO: return empty string for cookie-averse Document
        let url = self.url();
        if !is_scheme_host_port_tuple(&url) {
            return Err(Security);
        }
        // Fetch the cookie string synchronously from the resource task.
        let window = self.window.root();
        let window = window.r();
        let page = window.page();
        let (tx, rx) = channel();
        let _ = page.resource_task.send(GetCookiesForUrl(url, tx, NonHTTP));
        let cookies = rx.recv().unwrap();
        Ok(cookies.unwrap_or("".to_owned()))
    }
    // https://html.spec.whatwg.org/multipage/dom.html#dom-document-cookie
    fn SetCookie(self, cookie: DOMString) -> ErrorResult {
        //TODO: ignore for cookie-averse Document
        let url = self.url();
        if !is_scheme_host_port_tuple(&url) {
            return Err(Security);
        }
        let window = self.window.root();
        let window = window.r();
        let page = window.page();
        let _ = page.resource_task.send(SetCookiesForUrl(url, cookie, NonHTTP));
        Ok(())
    }
    global_event_handlers!();
    event_handler!(readystatechange, GetOnreadystatechange, SetOnreadystatechange);
}
/// Whether `url` forms a scheme/host/port tuple, i.e. has both a host and a
/// (possibly scheme-default) port — a precondition for cookie access.
fn is_scheme_host_port_tuple(url: &Url) -> bool {
    // Check the host first; only consult the port when a host exists.
    match url.host() {
        None => false,
        Some(_) => url.port_or_default().is_some(),
    }
}
/// Which document-progress milestone a queued `DocumentProgressHandler`
/// should process.
pub enum DocumentProgressTask {
    DOMContentLoaded,
    Load,
}
/// A runnable that fires a document-progress event (`DOMContentLoaded` or
/// `load`) for the document referenced by `addr`.
pub struct DocumentProgressHandler {
    // Trusted reference so the handler can cross task boundaries.
    addr: Trusted<Document>,
    task: DocumentProgressTask,
}
impl DocumentProgressHandler {
    /// Creates a handler for the given document and progress task.
    pub fn new(addr: Trusted<Document>, task: DocumentProgressTask) -> DocumentProgressHandler {
        DocumentProgressHandler {
            addr: addr,
            task: task,
        }
    }
    /// Fires `DOMContentLoaded` (non-bubbling, non-cancelable) at the
    /// document.
    fn dispatch_dom_content_loaded(&self) {
        let document = self.addr.to_temporary().root();
        let window = document.r().window().root();
        let event = Event::new(GlobalRef::Window(window.r()), "DOMContentLoaded".to_owned(),
                               EventBubbles::DoesNotBubble,
                               EventCancelable::NotCancelable).root();
        let doctarget: JSRef<EventTarget> = EventTargetCast::from_ref(document.r());
        let _ = doctarget.DispatchEvent(event.r());
    }
    /// Moves the document's readiness state to "complete".
    fn set_ready_state_complete(&self) {
        let document = self.addr.to_temporary().root();
        document.r().set_ready_state(DocumentReadyState::Complete);
    }
    /// Fires the `load` event at the window (with the document as target),
    /// then a `load` event at the containing frame element, if any.
    fn dispatch_load(&self) {
        let document = self.addr.to_temporary().root();
        let window = document.r().window().root();
        let event = Event::new(GlobalRef::Window(window.r()), "load".to_owned(),
                               EventBubbles::DoesNotBubble,
                               EventCancelable::NotCancelable).root();
        // The window is the dispatch target, but the document is the event's
        // target object.
        let wintarget: JSRef<EventTarget> = EventTargetCast::from_ref(window.r());
        let doctarget: JSRef<EventTarget> = EventTargetCast::from_ref(document.r());
        event.r().set_trusted(true);
        let _ = wintarget.dispatch_event_with_target(doctarget, event.r());
        let window_ref = window.r();
        let browser_context = window_ref.browser_context();
        let browser_context = browser_context.as_ref().unwrap();
        // If this document lives in an iframe/frame, also fire `load` on the
        // frame element in the parent document.
        browser_context.frame_element().map(|frame_element| {
            let frame_element = frame_element.root();
            let frame_window = window_from_node(frame_element.r()).root();
            let event = Event::new(GlobalRef::Window(frame_window.r()), "load".to_owned(),
                                   EventBubbles::DoesNotBubble,
                                   EventCancelable::NotCancelable).root();
            let target: JSRef<EventTarget> = EventTargetCast::from_ref(frame_element.r());
            event.r().fire(target);
        });
    }
}
impl Runnable for DocumentProgressHandler {
    /// Runs the queued progress task on the script task: either fires
    /// `DOMContentLoaded`, or completes readiness and fires `load`.
    fn handler(self: Box<DocumentProgressHandler>) {
        if let DocumentProgressTask::DOMContentLoaded = self.task {
            self.dispatch_dom_content_loaded();
        } else {
            self.set_ready_state_complete();
            self.dispatch_load();
        }
    }
}
The click event is now dispatched as a MouseEvent.
The original commit is https://github.com/servo/servo/pull/4718.
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrHelpers, AttrValue};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::DocumentBinding;
use dom::bindings::codegen::Bindings::DocumentBinding::{DocumentMethods, DocumentReadyState};
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::Bindings::EventTargetBinding::EventTargetMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::Bindings::NodeFilterBinding::NodeFilter;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::codegen::InheritTypes::{DocumentDerived, EventCast, HTMLElementCast};
use dom::bindings::codegen::InheritTypes::{HTMLHeadElementCast, TextCast, ElementCast};
use dom::bindings::codegen::InheritTypes::{DocumentTypeCast, HTMLHtmlElementCast, NodeCast};
use dom::bindings::codegen::InheritTypes::{EventTargetCast, HTMLAnchorElementCast};
use dom::bindings::codegen::InheritTypes::{HTMLAnchorElementDerived, HTMLAppletElementDerived};
use dom::bindings::codegen::InheritTypes::{HTMLAreaElementDerived, HTMLEmbedElementDerived};
use dom::bindings::codegen::InheritTypes::{HTMLFormElementDerived, HTMLImageElementDerived};
use dom::bindings::codegen::InheritTypes::{HTMLScriptElementDerived};
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::error::Error::{NotSupported, InvalidCharacter, Security};
use dom::bindings::error::Error::{HierarchyRequest, NamespaceError};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{MutNullableJS, JS, JSRef, LayoutJS, Temporary, TemporaryPushable};
use dom::bindings::js::{OptionalRootable, RootedReference};
use dom::bindings::refcounted::Trusted;
use dom::bindings::utils::reflect_dom_object;
use dom::bindings::utils::xml_name_type;
use dom::bindings::utils::XMLName::{QName, Name, InvalidXMLName};
use dom::comment::Comment;
use dom::customevent::CustomEvent;
use dom::documentfragment::DocumentFragment;
use dom::documenttype::DocumentType;
use dom::domimplementation::DOMImplementation;
use dom::element::{Element, ElementCreator, AttributeHandlers, get_attribute_parts};
use dom::element::{ElementTypeId, ActivationElementHelpers};
use dom::event::{Event, EventBubbles, EventCancelable, EventHelpers};
use dom::eventtarget::{EventTarget, EventTargetTypeId, EventTargetHelpers};
use dom::htmlanchorelement::HTMLAnchorElement;
use dom::htmlcollection::{HTMLCollection, CollectionFilter};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::htmlheadelement::HTMLHeadElement;
use dom::htmlhtmlelement::HTMLHtmlElement;
use dom::htmltitleelement::HTMLTitleElement;
use dom::location::Location;
use dom::mouseevent::MouseEvent;
use dom::keyboardevent::KeyboardEvent;
use dom::messageevent::MessageEvent;
use dom::node::{self, Node, NodeHelpers, NodeTypeId, CloneChildrenFlag, NodeDamage, window_from_node};
use dom::nodelist::NodeList;
use dom::text::Text;
use dom::processinginstruction::ProcessingInstruction;
use dom::range::Range;
use dom::treewalker::TreeWalker;
use dom::uievent::UIEvent;
use dom::window::{Window, WindowHelpers};
use net::resource_task::ControlMsg::{SetCookiesForUrl, GetCookiesForUrl};
use net::cookie_storage::CookieSource::NonHTTP;
use script_task::Runnable;
use util::namespace;
use util::str::{DOMString, split_html_space_chars};
use layout_interface::{ReflowGoal, ReflowQueryType};
use geom::point::Point2D;
use html5ever::tree_builder::{QuirksMode, NoQuirks, LimitedQuirks, Quirks};
use layout_interface::{LayoutChan, Msg};
use string_cache::{Atom, QualName};
use url::Url;
use js::jsapi::JSRuntime;
use std::borrow::ToOwned;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::ascii::AsciiExt;
use std::cell::{Cell, Ref};
use std::default::Default;
use std::sync::mpsc::channel;
use time;
/// Whether a document is an HTML document (as opposed to, e.g., XML).
#[derive(PartialEq)]
#[jstraceable]
pub enum IsHTMLDocument {
    HTMLDocument,
    NonHTMLDocument,
}
/// The DOM `Document` node.
#[dom_struct]
pub struct Document {
    node: Node,
    window: JS<Window>,
    // Live map from `id` attribute values to the elements carrying them,
    // kept in document order (see `register_named_element`).
    idmap: DOMRefCell<HashMap<Atom, Vec<JS<Element>>>>,
    // Lazily-created `document.implementation` object.
    implementation: MutNullableJS<DOMImplementation>,
    location: MutNullableJS<Location>,
    // MIME type, e.g. "text/html" or "application/xml".
    content_type: DOMString,
    last_modified: DOMRefCell<Option<DOMString>>,
    // http://dom.spec.whatwg.org/#concept-document-encoding
    encoding_name: DOMRefCell<DOMString>,
    is_html_document: bool,
    url: Url,
    // http://dom.spec.whatwg.org/#concept-document-quirks
    quirks_mode: Cell<QuirksMode>,
    // Lazily-initialized live collections backing document.images,
    // document.embeds, etc.
    images: MutNullableJS<HTMLCollection>,
    embeds: MutNullableJS<HTMLCollection>,
    links: MutNullableJS<HTMLCollection>,
    forms: MutNullableJS<HTMLCollection>,
    scripts: MutNullableJS<HTMLCollection>,
    anchors: MutNullableJS<HTMLCollection>,
    applets: MutNullableJS<HTMLCollection>,
    // https://html.spec.whatwg.org/multipage/dom.html#current-document-readiness
    ready_state: Cell<DocumentReadyState>,
    /// The element that has most recently requested focus for itself.
    possibly_focused: MutNullableJS<Element>,
    /// The element that currently has the document focus context.
    focused: MutNullableJS<Element>,
}
impl DocumentDerived for EventTarget {
    /// An `EventTarget` is a `Document` iff its type id is the
    /// `Node(Document)` variant.
    fn is_document(&self) -> bool {
        match *self.type_id() {
            EventTargetTypeId::Node(NodeTypeId::Document) => true,
            _ => false,
        }
    }
}
/// Filter backing `document.images`: accepts HTML image elements.
#[jstraceable]
struct ImagesFilter;
impl CollectionFilter for ImagesFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        elem.is_htmlimageelement()
    }
}
/// Filter backing `document.embeds` (and `document.plugins`): accepts HTML
/// embed elements.
#[jstraceable]
struct EmbedsFilter;
impl CollectionFilter for EmbedsFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        elem.is_htmlembedelement()
    }
}
/// Filter backing `document.links`: accepts anchor or area elements that
/// carry an `href` attribute.
#[jstraceable]
struct LinksFilter;
impl CollectionFilter for LinksFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        (elem.is_htmlanchorelement() || elem.is_htmlareaelement()) &&
            elem.has_attribute(&atom!("href"))
    }
}
/// Filter backing `document.forms`: accepts HTML form elements.
#[jstraceable]
struct FormsFilter;
impl CollectionFilter for FormsFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        elem.is_htmlformelement()
    }
}
/// Filter backing `document.scripts`: accepts HTML script elements.
#[jstraceable]
struct ScriptsFilter;
impl CollectionFilter for ScriptsFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        elem.is_htmlscriptelement()
    }
}
/// Filter backing `document.anchors`: accepts anchor elements that carry an
/// `href` attribute.
#[jstraceable]
struct AnchorsFilter;
impl CollectionFilter for AnchorsFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        elem.is_htmlanchorelement() && elem.has_attribute(&atom!("href"))
    }
}
/// Filter backing `document.applets`: accepts HTML applet elements.
#[jstraceable]
struct AppletsFilter;
impl CollectionFilter for AppletsFilter {
    fn filter(&self, elem: JSRef<Element>, _root: JSRef<Node>) -> bool {
        elem.is_htmlappletelement()
    }
}
/// Internal helper methods on `Document`, beyond the WebIDL-generated
/// `DocumentMethods` interface.
pub trait DocumentHelpers<'a> {
    fn window(self) -> Temporary<Window>;
    fn encoding_name(self) -> Ref<'a, DOMString>;
    fn is_html_document(self) -> bool;
    fn url(self) -> Url;
    fn quirks_mode(self) -> QuirksMode;
    fn set_quirks_mode(self, mode: QuirksMode);
    fn set_last_modified(self, value: DOMString);
    fn set_encoding_name(self, name: DOMString);
    /// Mark `node` dirty after a content change.
    fn content_changed(self, node: JSRef<Node>, damage: NodeDamage);
    /// Mark `node` and its ancestors dirty.
    fn content_and_heritage_changed(self, node: JSRef<Node>, damage: NodeDamage);
    /// Remove `to_unregister` from the id->elements map entry for `id`.
    fn unregister_named_element(self, to_unregister: JSRef<Element>, id: Atom);
    /// Record `element` under `id`, keeping entries in document order.
    fn register_named_element(self, element: JSRef<Element>, id: Atom);
    fn load_anchor_href(self, href: DOMString);
    /// Find the element indicated by a URL fragment identifier.
    fn find_fragment_node(self, fragid: DOMString) -> Option<Temporary<Element>>;
    /// Set the readiness state and fire `readystatechange`.
    fn set_ready_state(self, state: DocumentReadyState);
    fn get_focused_element(self) -> Option<Temporary<Element>>;
    fn begin_focus_transaction(self);
    fn request_focus(self, elem: JSRef<Element>);
    fn commit_focus_transaction(self);
    fn send_title_to_compositor(self);
    fn dirty_all_nodes(self);
    /// Dispatch a trusted `click` event at the element under `point`.
    fn handle_click_event(self, js_runtime: *mut JSRuntime, _button: uint, point: Point2D<f32>);
}
impl<'a> DocumentHelpers<'a> for JSRef<'a, Document> {
    #[inline]
    fn window(self) -> Temporary<Window> {
        Temporary::new(self.window)
    }

    #[inline]
    fn encoding_name(self) -> Ref<'a, DOMString> {
        self.extended_deref().encoding_name.borrow()
    }

    #[inline]
    fn is_html_document(self) -> bool {
        self.is_html_document
    }

    // http://dom.spec.whatwg.org/#dom-document-url
    fn url(self) -> Url {
        self.url.clone()
    }

    fn quirks_mode(self) -> QuirksMode {
        self.quirks_mode.get()
    }

    /// Records the quirks mode; layout is notified when the document enters
    /// full quirks mode so it can adjust its computations.
    fn set_quirks_mode(self, mode: QuirksMode) {
        self.quirks_mode.set(mode);
        match mode {
            Quirks => {
                let window = self.window.root();
                let window = window.r();
                let LayoutChan(ref layout_chan) = window.page().layout_chan;
                layout_chan.send(Msg::SetQuirksMode).unwrap();
            }
            NoQuirks | LimitedQuirks => {}
        }
    }

    fn set_last_modified(self, value: DOMString) {
        *self.last_modified.borrow_mut() = Some(value);
    }

    fn set_encoding_name(self, name: DOMString) {
        *self.encoding_name.borrow_mut() = name;
    }

    /// Marks `node` dirty for restyle/reflow after a content change.
    fn content_changed(self, node: JSRef<Node>, damage: NodeDamage) {
        node.dirty(damage);
    }

    /// Marks `node` and all of its ancestors dirty.
    fn content_and_heritage_changed(self, node: JSRef<Node>, damage: NodeDamage) {
        debug!("content_and_heritage_changed on {}", node.debug_str());
        node.force_dirty_ancestors(damage);
        node.dirty(damage);
    }

    /// Remove any existing association between the provided id and any elements in this document.
    fn unregister_named_element(self,
                                to_unregister: JSRef<Element>,
                                id: Atom) {
        let mut idmap = self.idmap.borrow_mut();
        // Remove the element from its entry; remember whether the entry is
        // now empty so the whole key can be dropped afterwards.
        let is_empty = match idmap.get_mut(&id) {
            None => false,
            Some(elements) => {
                let position = elements.iter()
                                       .map(|elem| elem.root())
                                       .position(|element| element.r() == to_unregister)
                                       .expect("This element should be in registered.");
                elements.remove(position);
                elements.is_empty()
            }
        };
        if is_empty {
            idmap.remove(&id);
        }
    }

    /// Associate an element present in this document with the provided id.
    fn register_named_element(self,
                              element: JSRef<Element>,
                              id: Atom) {
        // The element must already be in this document's tree.
        assert!({
            let node: JSRef<Node> = NodeCast::from_ref(element);
            node.is_in_doc()
        });
        assert!(!id.as_slice().is_empty());
        let mut idmap = self.idmap.borrow_mut();
        let root = self.GetDocumentElement().expect("The element is in the document, so there must be a document element.").root();
        match idmap.entry(id) {
            Vacant(entry) => {
                entry.insert(vec!(element.unrooted()));
            }
            Occupied(entry) => {
                let elements = entry.into_mut();
                let new_node: JSRef<Node> = NodeCast::from_ref(element);
                // Walk the tree in preorder, counting how many already-registered
                // elements precede the new node, so the insertion keeps
                // `elements` sorted in document order.
                let mut head: uint = 0u;
                let root: JSRef<Node> = NodeCast::from_ref(root.r());
                for node in root.traverse_preorder() {
                    let elem: Option<JSRef<Element>> = ElementCast::to_ref(node);
                    match elem {
                        None => {},
                        Some(elem) => {
                            if (*elements)[head].root().r() == elem {
                                head += 1;
                            }
                            if new_node == node || head == elements.len() {
                                break;
                            }
                        }
                    }
                }
                elements.insert_unrooted(head, &element);
            }
        }
    }

    /// Navigates this document's window to `href` (anchor activation).
    fn load_anchor_href(self, href: DOMString) {
        let window = self.window.root();
        window.r().load_url(href);
    }

    /// Attempt to find a named element in this page's document.
    /// https://html.spec.whatwg.org/multipage/#the-indicated-part-of-the-document
    fn find_fragment_node(self, fragid: DOMString) -> Option<Temporary<Element>> {
        // Prefer an element with a matching id; otherwise fall back to the
        // first anchor whose `name` attribute equals the fragment.
        self.GetElementById(fragid.clone()).or_else(|| {
            let check_anchor = |&:&node: &JSRef<HTMLAnchorElement>| {
                let elem: JSRef<Element> = ElementCast::from_ref(node);
                elem.get_attribute(ns!(""), &atom!("name")).root().map_or(false, |attr| {
                    attr.r().value().as_slice() == fragid.as_slice()
                })
            };
            let doc_node: JSRef<Node> = NodeCast::from_ref(self);
            doc_node.traverse_preorder()
                    .filter_map(|node| HTMLAnchorElementCast::to_ref(node))
                    .find(check_anchor)
                    .map(|node| Temporary::from_rooted(ElementCast::from_ref(node)))
        })
    }

    // https://html.spec.whatwg.org/multipage/dom.html#current-document-readiness
    /// Updates the readiness state and fires a non-bubbling, non-cancelable
    /// `readystatechange` event at the document.
    fn set_ready_state(self, state: DocumentReadyState) {
        self.ready_state.set(state);
        let window = self.window.root();
        let event = Event::new(GlobalRef::Window(window.r()), "readystatechange".to_owned(),
                               EventBubbles::DoesNotBubble,
                               EventCancelable::NotCancelable).root();
        let target: JSRef<EventTarget> = EventTargetCast::from_ref(self);
        let _ = event.r().fire(target);
    }

    /// Return the element that currently has focus.
    // https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#events-focusevent-doc-focus
    fn get_focused_element(self) -> Option<Temporary<Element>> {
        self.focused.get()
    }

    /// Initiate a new round of checking for elements requesting focus. The last element to call
    /// `request_focus` before `commit_focus_transaction` is called will receive focus.
    fn begin_focus_transaction(self) {
        self.possibly_focused.clear();
    }

    /// Request that the given element receive focus once the current transaction is complete.
    fn request_focus(self, elem: JSRef<Element>) {
        self.possibly_focused.assign(Some(elem))
    }

    /// Reassign the focus context to the element that last requested focus during this
    /// transaction, or none if no elements requested it.
    fn commit_focus_transaction(self) {
        //TODO: dispatch blur, focus, focusout, and focusin events
        self.focused.assign(self.possibly_focused.get());
    }

    /// Sends this document's title to the compositor.
    fn send_title_to_compositor(self) {
        let window = self.window().root();
        window.r().page().send_title_to_compositor();
    }

    /// Marks every node in the document dirty.
    fn dirty_all_nodes(self) {
        let root: JSRef<Node> = NodeCast::from_ref(self);
        for node in root.traverse_preorder() {
            node.dirty(NodeDamage::OtherNodeDamage)
        }
    }

    /// Hit-tests `point`, resolves the nearest enclosing element, and
    /// dispatches a trusted `click` MouseEvent through the authentic click
    /// activation steps, committing any focus change it caused.
    fn handle_click_event(self, js_runtime: *mut JSRuntime, _button: uint, point: Point2D<f32>) {
        debug!("ClickEvent: clicked at {:?}", point);
        let window = self.window.root();
        let window = window.r();
        let page = window.page();
        // Ask layout which node lies under the point; nothing to do if none.
        let node = match page.hit_test(&point) {
            Some(node_address) => {
                debug!("node address is {:?}", node_address.0);
                node::from_untrusted_node_address(js_runtime, node_address)
            },
            None => return,
        }.root();
        // If the hit node is not itself an element, walk up to the nearest
        // ancestor element.
        let el = match ElementCast::to_ref(node.r()) {
            Some(el) => el,
            None => {
                let ancestor = node.r()
                                   .ancestors()
                                   .filter_map(ElementCast::to_ref)
                                   .next();
                match ancestor {
                    Some(ancestor) => ancestor,
                    None => return,
                }
            },
        };
        let node: JSRef<Node> = NodeCast::from_ref(el);
        debug!("clicked on {:?}", node.debug_str());
        // Prevent click event if form control element is disabled.
        if node.click_event_filter_by_disabled_state() {
            return;
        }
        match *page.frame() {
            Some(ref frame) => {
                let window = frame.window.root();
                let doc = window.r().Document().root();
                doc.r().begin_focus_transaction();
                // https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#event-type-click
                let x = point.x as i32;
                let y = point.y as i32;
                let event = MouseEvent::new(window.r(),
                                            "click".to_owned(),
                                            true,
                                            true,
                                            Some(window.r()),
                                            0i32,
                                            x, y, x, y,
                                            false, false, false, false,
                                            0i16,
                                            None).root();
                let event: JSRef<Event> = EventCast::from_ref(event.r());
                // https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#trusted-events
                event.set_trusted(true);
                // https://html.spec.whatwg.org/multipage/interaction.html#run-authentic-click-activation-steps
                el.authentic_click_activation(event);
                doc.r().commit_focus_transaction();
                window.r().flush_layout(ReflowGoal::ForDisplay, ReflowQueryType::NoQuery);
            }
            None => {}
        }
    }
}
/// How a document came into existence; affects its initial readiness state.
#[derive(PartialEq)]
pub enum DocumentSource {
    FromParser,
    NotFromParser,
}
/// Document accessors callable from layout, which cannot root JS objects.
pub trait LayoutDocumentHelpers {
    unsafe fn is_html_document_for_layout(&self) -> bool;
}
impl LayoutDocumentHelpers for LayoutJS<Document> {
    #[allow(unrooted_must_root)]
    #[inline]
    // NOTE(review): reads an immutable field through `unsafe_get()` without
    // rooting — presumably sound because layout holds the document alive
    // during reflow; confirm against the rooting rules for LayoutJS.
    unsafe fn is_html_document_for_layout(&self) -> bool {
        (*self.unsafe_get()).is_html_document
    }
}
impl Document {
    /// Builds the plain (not-yet-reflected) `Document` value. `url` defaults
    /// to "about:blank" when absent.
    fn new_inherited(window: JSRef<Window>,
                     url: Option<Url>,
                     is_html_document: IsHTMLDocument,
                     content_type: Option<DOMString>,
                     source: DocumentSource) -> Document {
        let url = url.unwrap_or_else(|| Url::parse("about:blank").unwrap());
        // Parser-created documents start out loading; synthetic documents
        // are complete from the start.
        let ready_state = if source == DocumentSource::FromParser {
            DocumentReadyState::Loading
        } else {
            DocumentReadyState::Complete
        };
        Document {
            node: Node::new_without_doc(NodeTypeId::Document),
            window: JS::from_rooted(window),
            idmap: DOMRefCell::new(HashMap::new()),
            implementation: Default::default(),
            location: Default::default(),
            content_type: match content_type {
                Some(string) => string.clone(),
                None => match is_html_document {
                    // http://dom.spec.whatwg.org/#dom-domimplementation-createhtmldocument
                    IsHTMLDocument::HTMLDocument => "text/html".to_owned(),
                    // http://dom.spec.whatwg.org/#concept-document-content-type
                    IsHTMLDocument::NonHTMLDocument => "application/xml".to_owned()
                }
            },
            last_modified: DOMRefCell::new(None),
            url: url,
            // http://dom.spec.whatwg.org/#concept-document-quirks
            quirks_mode: Cell::new(NoQuirks),
            // http://dom.spec.whatwg.org/#concept-document-encoding
            encoding_name: DOMRefCell::new("UTF-8".to_owned()),
            is_html_document: is_html_document == IsHTMLDocument::HTMLDocument,
            images: Default::default(),
            embeds: Default::default(),
            links: Default::default(),
            forms: Default::default(),
            scripts: Default::default(),
            anchors: Default::default(),
            applets: Default::default(),
            ready_state: Cell::new(ready_state),
            possibly_focused: Default::default(),
            focused: Default::default(),
        }
    }

    // http://dom.spec.whatwg.org/#dom-document
    pub fn Constructor(global: GlobalRef) -> Fallible<Temporary<Document>> {
        Ok(Document::new(global.as_window(), None,
                         IsHTMLDocument::NonHTMLDocument, None,
                         DocumentSource::NotFromParser))
    }

    /// Creates and reflects a new `Document`, making the document its own
    /// owner document.
    pub fn new(window: JSRef<Window>,
               url: Option<Url>,
               doctype: IsHTMLDocument,
               content_type: Option<DOMString>,
               source: DocumentSource) -> Temporary<Document> {
        let document = reflect_dom_object(box Document::new_inherited(window, url, doctype,
                                                                      content_type, source),
                                          GlobalRef::Window(window),
                                          DocumentBinding::Wrap).root();
        let node: JSRef<Node> = NodeCast::from_ref(document.r());
        node.set_owner_doc(document.r());
        Temporary::from_rooted(document.r())
    }
}
/// Module-private helpers used by the `DocumentMethods` implementation.
trait PrivateDocumentHelpers {
    /// Build a static `NodeList` of all nodes for which `callback` is true.
    fn createNodeList<F: Fn(JSRef<Node>) -> bool>(self, callback: F) -> Temporary<NodeList>;
    /// The root element, if it is an `<html>` element.
    fn get_html_element(self) -> Option<Temporary<HTMLHtmlElement>>;
}
impl<'a> PrivateDocumentHelpers for JSRef<'a, Document> {
    /// Collects, in preorder, every node under the document element matching
    /// `callback` into a simple (non-live) `NodeList`.
    fn createNodeList<F: Fn(JSRef<Node>) -> bool>(self, callback: F) -> Temporary<NodeList> {
        let window = self.window.root();
        let document_element = self.GetDocumentElement().root();
        let nodes = match document_element {
            None => vec!(),
            Some(ref root) => {
                let root: JSRef<Node> = NodeCast::from_ref(root.r());
                root.traverse_preorder().filter(|&node| callback(node)).collect()
            }
        };
        NodeList::new_simple_list(window.r(), nodes)
    }

    /// Returns the document element, but only when it is an HTML `html`
    /// element; `None` otherwise.
    fn get_html_element(self) -> Option<Temporary<HTMLHtmlElement>> {
        self.GetDocumentElement()
            .root()
            .r()
            .and_then(HTMLHtmlElementCast::to_ref)
            .map(Temporary::from_rooted)
    }
}
/// Helper for suppressing click events on disabled form controls.
trait PrivateClickEventHelpers {
    /// True when this node is a form-control element in the disabled state.
    fn click_event_filter_by_disabled_state(&self) -> bool;
}
impl<'a> PrivateClickEventHelpers for JSRef<'a, Node> {
    /// True for disabled button/input/option/select/textarea elements, which
    /// must not receive click events.
    fn click_event_filter_by_disabled_state(&self) -> bool {
        match self.type_id() {
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLButtonElement)) |
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLInputElement)) |
            // NodeTypeId::Element(ElementTypeId::HTMLKeygenElement) |
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLOptionElement)) |
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLSelectElement)) |
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTextAreaElement)) if self.get_disabled_state() => true,
            _ => false
        }
    }
}
impl<'a> DocumentMethods for JSRef<'a, Document> {
    // http://dom.spec.whatwg.org/#dom-document-implementation
    /// Lazily creates and caches the `DOMImplementation` object.
    fn Implementation(self) -> Temporary<DOMImplementation> {
        self.implementation.or_init(|| DOMImplementation::new(self))
    }
    // http://dom.spec.whatwg.org/#dom-document-url
    /// The document's URL, serialized to a string.
    fn URL(self) -> DOMString {
        self.url().serialize()
    }

    // http://dom.spec.whatwg.org/#dom-document-documenturi
    /// Alias of `URL`.
    fn DocumentURI(self) -> DOMString {
        self.URL()
    }
    // http://dom.spec.whatwg.org/#dom-document-compatmode
    /// "BackCompat" in full quirks mode, "CSS1Compat" otherwise.
    fn CompatMode(self) -> DOMString {
        match self.quirks_mode.get() {
            LimitedQuirks | NoQuirks => "CSS1Compat".to_owned(),
            Quirks => "BackCompat".to_owned()
        }
    }
    // http://dom.spec.whatwg.org/#dom-document-characterset
    /// The document's encoding name (e.g. "UTF-8").
    fn CharacterSet(self) -> DOMString {
        self.encoding_name.borrow().clone()
    }

    // http://dom.spec.whatwg.org/#dom-document-inputencoding
    /// Legacy alias of `CharacterSet`.
    fn InputEncoding(self) -> DOMString {
        self.encoding_name.borrow().clone()
    }

    // http://dom.spec.whatwg.org/#dom-document-content_type
    /// The document's MIME type.
    fn ContentType(self) -> DOMString {
        self.content_type.clone()
    }
    // http://dom.spec.whatwg.org/#dom-document-doctype
    /// The first `DocumentType` child of the document, if any.
    fn GetDoctype(self) -> Option<Temporary<DocumentType>> {
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.children()
            .filter_map(DocumentTypeCast::to_ref)
            .next()
            .map(Temporary::from_rooted)
    }
    // http://dom.spec.whatwg.org/#dom-document-documentelement
    /// The first element child of the document (the root element), if any.
    fn GetDocumentElement(self) -> Option<Temporary<Element>> {
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.child_elements().next().map(Temporary::from_rooted)
    }
    // http://dom.spec.whatwg.org/#dom-document-getelementsbytagname
    /// Live collection of elements with the given tag name.
    fn GetElementsByTagName(self, tag_name: DOMString) -> Temporary<HTMLCollection> {
        let window = self.window.root();
        HTMLCollection::by_tag_name(window.r(), NodeCast::from_ref(self), tag_name)
    }

    // http://dom.spec.whatwg.org/#dom-document-getelementsbytagnamens
    /// Live collection of elements with the given tag name in `maybe_ns`.
    fn GetElementsByTagNameNS(self, maybe_ns: Option<DOMString>, tag_name: DOMString) -> Temporary<HTMLCollection> {
        let window = self.window.root();
        HTMLCollection::by_tag_name_ns(window.r(), NodeCast::from_ref(self), tag_name, maybe_ns)
    }

    // http://dom.spec.whatwg.org/#dom-document-getelementsbyclassname
    /// Live collection of elements with all of the given class names.
    fn GetElementsByClassName(self, classes: DOMString) -> Temporary<HTMLCollection> {
        let window = self.window.root();
        HTMLCollection::by_class_name(window.r(), NodeCast::from_ref(self), classes)
    }
    // http://dom.spec.whatwg.org/#dom-nonelementparentnode-getelementbyid
    /// Looks up the first (in document order) element registered under `id`.
    fn GetElementById(self, id: DOMString) -> Option<Temporary<Element>> {
        let id = Atom::from_slice(id.as_slice());
        match self.idmap.borrow().get(&id) {
            None => None,
            Some(ref elements) => Some(Temporary::new((*elements)[0].clone())),
        }
    }
    // http://dom.spec.whatwg.org/#dom-document-createelement
    /// Creates an element with the given local name, lowercased first for
    /// HTML documents. Returns `InvalidCharacter` for invalid XML names.
    fn CreateElement(self, local_name: DOMString) -> Fallible<Temporary<Element>> {
        if xml_name_type(local_name.as_slice()) == InvalidXMLName {
            debug!("Not a valid element name");
            return Err(InvalidCharacter);
        }
        let local_name = if self.is_html_document {
            local_name.as_slice().to_ascii_lowercase()
        } else {
            local_name
        };
        // NOTE(review): always uses the HTML namespace, even for non-HTML
        // documents — confirm this matches the intended createElement
        // namespace rules for this document type.
        let name = QualName::new(ns!(HTML), Atom::from_slice(local_name.as_slice()));
        Ok(Element::create(name, None, self, ElementCreator::ScriptCreated))
    }
    // http://dom.spec.whatwg.org/#dom-document-createelementns
    /// Creates an element with the given namespace and qualified name,
    /// validating the name/namespace combination per the spec.
    fn CreateElementNS(self,
                       namespace: Option<DOMString>,
                       qualified_name: DOMString) -> Fallible<Temporary<Element>> {
        let ns = namespace::from_domstring(namespace);
        // Validate the qualified name: must be a legal XML QName.
        match xml_name_type(qualified_name.as_slice()) {
            InvalidXMLName => {
                debug!("Not a valid element name");
                return Err(InvalidCharacter);
            },
            Name => {
                debug!("Not a valid qualified element name");
                return Err(NamespaceError);
            },
            QName => {}
        }
        let (prefix_from_qname, local_name_from_qname)
            = get_attribute_parts(qualified_name.as_slice());
        match (&ns, prefix_from_qname, local_name_from_qname) {
            // throw if prefix is not null and namespace is null
            (&ns!(""), Some(_), _) => {
                debug!("Namespace can't be null with a non-null prefix");
                return Err(NamespaceError);
            },
            // throw if prefix is "xml" and namespace is not the XML namespace
            (_, Some(ref prefix), _) if "xml" == prefix.as_slice() && ns != ns!(XML) => {
                debug!("Namespace must be the xml namespace if the prefix is 'xml'");
                return Err(NamespaceError);
            },
            // throw if namespace is the XMLNS namespace and neither qualifiedName nor prefix is "xmlns"
            (&ns!(XMLNS), Some(ref prefix), _) if "xmlns" == prefix.as_slice() => {},
            (&ns!(XMLNS), _, "xmlns") => {},
            (&ns!(XMLNS), _, _) => {
                debug!("The prefix or the qualified name must be 'xmlns' if namespace is the XMLNS namespace ");
                return Err(NamespaceError);
            },
            _ => {}
        }
        let name = QualName::new(ns, Atom::from_slice(local_name_from_qname));
        Ok(Element::create(name, prefix_from_qname.map(|s| s.to_owned()), self,
                           ElementCreator::ScriptCreated))
    }
    // http://dom.spec.whatwg.org/#dom-document-createattribute
    /// Creates an attribute node with an empty string value and no namespace.
    fn CreateAttribute(self, local_name: DOMString) -> Fallible<Temporary<Attr>> {
        if xml_name_type(local_name.as_slice()) == InvalidXMLName {
            debug!("Not a valid element name");
            return Err(InvalidCharacter);
        }
        let window = self.window.root();
        let name = Atom::from_slice(local_name.as_slice());
        // repetition used because string_cache::atom::Atom is non-copyable
        let l_name = Atom::from_slice(local_name.as_slice());
        let value = AttrValue::String("".to_owned());
        Ok(Attr::new(window.r(), name, value, l_name, ns!(""), None, None))
    }
    // http://dom.spec.whatwg.org/#dom-document-createdocumentfragment
    /// Creates an empty document fragment owned by this document.
    fn CreateDocumentFragment(self) -> Temporary<DocumentFragment> {
        DocumentFragment::new(self)
    }

    // http://dom.spec.whatwg.org/#dom-document-createtextnode
    /// Creates a text node containing `data`.
    fn CreateTextNode(self, data: DOMString)
                      -> Temporary<Text> {
        Text::new(data, self)
    }

    // http://dom.spec.whatwg.org/#dom-document-createcomment
    /// Creates a comment node containing `data`.
    fn CreateComment(self, data: DOMString) -> Temporary<Comment> {
        Comment::new(data, self)
    }
    // http://dom.spec.whatwg.org/#dom-document-createprocessinginstruction
    /// Creates a processing instruction node; rejects invalid targets and
    /// data containing the "?>" terminator.
    fn CreateProcessingInstruction(self, target: DOMString,
                                   data: DOMString) -> Fallible<Temporary<ProcessingInstruction>> {
        // Step 1.
        if xml_name_type(target.as_slice()) == InvalidXMLName {
            return Err(InvalidCharacter);
        }
        // Step 2.
        if data.as_slice().contains("?>") {
            return Err(InvalidCharacter);
        }
        // Step 3.
        Ok(ProcessingInstruction::new(target, data, self))
    }
    // http://dom.spec.whatwg.org/#dom-document-importnode
    /// Clones `node` into this document; `deep` controls whether children
    /// are cloned too. Documents themselves cannot be imported.
    fn ImportNode(self, node: JSRef<Node>, deep: bool) -> Fallible<Temporary<Node>> {
        // Step 1.
        if node.is_document() {
            return Err(NotSupported);
        }
        // Step 2.
        let clone_children = match deep {
            true => CloneChildrenFlag::CloneChildren,
            false => CloneChildrenFlag::DoNotCloneChildren
        };
        Ok(Node::clone(node, Some(self), clone_children))
    }
    // http://dom.spec.whatwg.org/#dom-document-adoptnode
    /// Moves `node` (and its subtree) into this document. Documents
    /// themselves cannot be adopted.
    fn AdoptNode(self, node: JSRef<Node>) -> Fallible<Temporary<Node>> {
        // Step 1.
        if node.is_document() {
            return Err(NotSupported);
        }
        // Step 2.
        Node::adopt(node, self);
        // Step 3.
        Ok(Temporary::from_rooted(node))
    }
    // http://dom.spec.whatwg.org/#dom-document-createevent
    /// Creates an uninitialized event of the kind named by `interface`
    /// (matched case-insensitively); unknown names yield `NotSupported`.
    fn CreateEvent(self, interface: DOMString) -> Fallible<Temporary<Event>> {
        let window = self.window.root();
        match interface.as_slice().to_ascii_lowercase().as_slice() {
            "uievents" | "uievent" => Ok(EventCast::from_temporary(
                UIEvent::new_uninitialized(window.r()))),
            "mouseevents" | "mouseevent" => Ok(EventCast::from_temporary(
                MouseEvent::new_uninitialized(window.r()))),
            "customevent" => Ok(EventCast::from_temporary(
                CustomEvent::new_uninitialized(GlobalRef::Window(window.r())))),
            "htmlevents" | "events" | "event" => Ok(Event::new_uninitialized(
                GlobalRef::Window(window.r()))),
            "keyboardevent" | "keyevents" => Ok(EventCast::from_temporary(
                KeyboardEvent::new_uninitialized(window.r()))),
            "messageevent" => Ok(EventCast::from_temporary(
                MessageEvent::new_uninitialized(GlobalRef::Window(window.r())))),
            _ => Err(NotSupported)
        }
    }
    // http://www.whatwg.org/html/#dom-document-lastmodified
    /// The recorded last-modified string, or the current local time
    /// formatted as "MM/DD/YYYY hh:mm:ss" when none was recorded.
    fn LastModified(self) -> DOMString {
        match *self.last_modified.borrow() {
            Some(ref t) => t.clone(),
            None => format!("{}", time::now().strftime("%m/%d/%Y %H:%M:%S").unwrap()),
        }
    }
    // http://dom.spec.whatwg.org/#dom-document-createrange
    /// Creates a new range associated with this document.
    fn CreateRange(self) -> Temporary<Range> {
        Range::new(self)
    }

    // http://dom.spec.whatwg.org/#dom-document-createtreewalker
    /// Creates a tree walker over `root` with the given filter settings.
    fn CreateTreeWalker(self, root: JSRef<Node>, whatToShow: u32, filter: Option<NodeFilter>)
                        -> Temporary<TreeWalker> {
        TreeWalker::new(self, root, whatToShow, filter)
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#document.title
    /// Concatenates the text children of the first `title` element and
    /// collapses runs of HTML whitespace into single spaces.
    fn Title(self) -> DOMString {
        let mut title = String::new();
        self.GetDocumentElement().root().map(|root| {
            let root: JSRef<Node> = NodeCast::from_ref(root.r());
            root.traverse_preorder()
                .find(|node| node.type_id() == NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
                .map(|title_elem| {
                    let children = title_elem.children().filter_map(|n| {
                        let t: Option<JSRef<Text>> = TextCast::to_ref(n);
                        t
                    });
                    for text in children {
                        title.push_str(text.characterdata().data().as_slice());
                    }
                });
        });
        // Normalize whitespace: split on HTML space characters, rejoin.
        let v: Vec<&str> = split_html_space_chars(title.as_slice()).collect();
        v.connect(" ")
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#document.title
    /// Replaces the contents of the `title` element under `head` with
    /// `title`, creating the element if `head` exists without one.
    fn SetTitle(self, title: DOMString) -> ErrorResult {
        self.GetDocumentElement().root().map(|root| {
            let root: JSRef<Node> = NodeCast::from_ref(root.r());
            let head_node = root.traverse_preorder().find(|child| {
                child.type_id() == NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLHeadElement))
            });
            head_node.map(|head| {
                let title_node = head.children().find(|child| {
                    child.type_id() == NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement))
                });
                match title_node {
                    Some(ref title_node) => {
                        // Reuse the existing title element: empty it, then
                        // insert the new text (if any).
                        for title_child in title_node.children() {
                            assert!(title_node.RemoveChild(title_child).is_ok());
                        }
                        if !title.is_empty() {
                            let new_text = self.CreateTextNode(title.clone()).root();
                            assert!(title_node.AppendChild(NodeCast::from_ref(new_text.r())).is_ok());
                        }
                    },
                    None => {
                        // No title element yet: create one and append it to head.
                        let new_title = HTMLTitleElement::new("title".to_owned(), None, self).root();
                        let new_title: JSRef<Node> = NodeCast::from_ref(new_title.r());
                        if !title.is_empty() {
                            let new_text = self.CreateTextNode(title.clone()).root();
                            assert!(new_title.AppendChild(NodeCast::from_ref(new_text.r())).is_ok());
                        }
                        assert!(head.AppendChild(new_title).is_ok());
                    },
                }
            });
        });
        Ok(())
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#dom-document-head
    /// The first `head` child of the `html` root element, if any.
    fn GetHead(self) -> Option<Temporary<HTMLHeadElement>> {
        self.get_html_element().and_then(|root| {
            let root = root.root();
            let node: JSRef<Node> = NodeCast::from_ref(root.r());
            node.children().filter_map(HTMLHeadElementCast::to_ref).next().map(Temporary::from_rooted)
        })
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#dom-document-body
    /// The first `body` or `frameset` child of the `html` root element.
    fn GetBody(self) -> Option<Temporary<HTMLElement>> {
        self.get_html_element().and_then(|root| {
            let root = root.root();
            let node: JSRef<Node> = NodeCast::from_ref(root.r());
            node.children().find(|child| {
                match child.type_id() {
                    NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLBodyElement)) |
                    NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLFrameSetElement)) => true,
                    _ => false
                }
            }).map(|node| {
                Temporary::from_rooted(HTMLElementCast::to_ref(node).unwrap())
            })
        })
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#dom-document-body
    /// Replaces (or appends) the document body. Only `body`/`frameset`
    /// elements are accepted; a missing root yields `HierarchyRequest`.
    fn SetBody(self, new_body: Option<JSRef<HTMLElement>>) -> ErrorResult {
        // Step 1.
        let new_body = match new_body {
            Some(new_body) => new_body,
            None => return Err(HierarchyRequest),
        };
        let node: JSRef<Node> = NodeCast::from_ref(new_body);
        match node.type_id() {
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLBodyElement)) |
            NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLFrameSetElement)) => {}
            _ => return Err(HierarchyRequest)
        }
        // Step 2: nothing to do if the new body is already the body.
        let old_body = self.GetBody().root();
        if old_body.as_ref().map(|body| body.r()) == Some(new_body) {
            return Ok(());
        }
        // Step 3.
        match self.get_html_element().root() {
            // Step 4.
            None => return Err(HierarchyRequest),
            Some(ref root) => {
                let new_body: JSRef<Node> = NodeCast::from_ref(new_body);
                let root: JSRef<Node> = NodeCast::from_ref(root.r());
                // Replace the existing body, or append when there is none.
                match old_body {
                    Some(ref child) => {
                        let child: JSRef<Node> = NodeCast::from_ref(child.r());
                        assert!(root.ReplaceChild(new_body, child).is_ok())
                    }
                    None => assert!(root.AppendChild(new_body).is_ok())
                };
            }
        }
        Ok(())
    }
    // http://www.whatwg.org/specs/web-apps/current-work/#dom-document-getelementsbyname
    /// Static node list of elements whose `name` attribute equals `name`.
    fn GetElementsByName(self, name: DOMString) -> Temporary<NodeList> {
        self.createNodeList(|node| {
            let element: JSRef<Element> = match ElementCast::to_ref(node) {
                Some(element) => element,
                None => return false,
            };
            element.get_attribute(ns!(""), &atom!("name")).root().map_or(false, |attr| {
                attr.r().value().as_slice() == name.as_slice()
            })
        })
    }
// https://html.spec.whatwg.org/multipage/#dom-document-images
fn Images(self) -> Temporary<HTMLCollection> {
    // Lazily created, cached live collection filtered by ImagesFilter.
    self.images.or_init(|| {
        let window = self.window.root();
        let root = NodeCast::from_ref(self);
        let filter = box ImagesFilter;
        HTMLCollection::create(window.r(), root, filter)
    })
}
// https://html.spec.whatwg.org/multipage/#dom-document-embeds
fn Embeds(self) -> Temporary<HTMLCollection> {
    // Lazily created, cached live collection filtered by EmbedsFilter.
    self.embeds.or_init(|| {
        let window = self.window.root();
        let root = NodeCast::from_ref(self);
        let filter = box EmbedsFilter;
        HTMLCollection::create(window.r(), root, filter)
    })
}
// https://html.spec.whatwg.org/multipage/#dom-document-plugins
// Per spec, document.plugins returns the same collection as
// document.embeds.
fn Plugins(self) -> Temporary<HTMLCollection> {
    self.Embeds()
}
// https://html.spec.whatwg.org/multipage/#dom-document-links
fn Links(self) -> Temporary<HTMLCollection> {
    // Lazily created, cached live collection filtered by LinksFilter.
    self.links.or_init(|| {
        let window = self.window.root();
        let root = NodeCast::from_ref(self);
        let filter = box LinksFilter;
        HTMLCollection::create(window.r(), root, filter)
    })
}
// https://html.spec.whatwg.org/multipage/#dom-document-forms
fn Forms(self) -> Temporary<HTMLCollection> {
    // Lazily created, cached live collection filtered by FormsFilter.
    self.forms.or_init(|| {
        let window = self.window.root();
        let root = NodeCast::from_ref(self);
        let filter = box FormsFilter;
        HTMLCollection::create(window.r(), root, filter)
    })
}
// https://html.spec.whatwg.org/multipage/#dom-document-scripts
fn Scripts(self) -> Temporary<HTMLCollection> {
    // Lazily created, cached live collection filtered by ScriptsFilter.
    self.scripts.or_init(|| {
        let window = self.window.root();
        let root = NodeCast::from_ref(self);
        let filter = box ScriptsFilter;
        HTMLCollection::create(window.r(), root, filter)
    })
}
// https://html.spec.whatwg.org/multipage/#dom-document-anchors
fn Anchors(self) -> Temporary<HTMLCollection> {
    // Lazily created, cached live collection filtered by AnchorsFilter.
    self.anchors.or_init(|| {
        let window = self.window.root();
        let root = NodeCast::from_ref(self);
        let filter = box AnchorsFilter;
        HTMLCollection::create(window.r(), root, filter)
    })
}
// https://html.spec.whatwg.org/multipage/#dom-document-applets
fn Applets(self) -> Temporary<HTMLCollection> {
    // FIXME: This should return OBJECT elements containing applets.
    self.applets.or_init(|| {
        let window = self.window.root();
        let root = NodeCast::from_ref(self);
        let filter = box AppletsFilter;
        HTMLCollection::create(window.r(), root, filter)
    })
}
// https://html.spec.whatwg.org/multipage/#dom-document-location
fn Location(self) -> Temporary<Location> {
    // Lazily create the Location object tied to this document's window.
    let window = self.window.root();
    let window = window.r();
    self.location.or_init(|| Location::new(window, window.page_clone()))
}
// http://dom.spec.whatwg.org/#dom-parentnode-children
// Live collection of this document's element children.
fn Children(self) -> Temporary<HTMLCollection> {
    let window = self.window.root();
    HTMLCollection::children(window.r(), NodeCast::from_ref(self))
}
// http://dom.spec.whatwg.org/#dom-parentnode-queryselector
// Delegates selector matching to the generic Node implementation, with
// this document as the root.
fn QuerySelector(self, selectors: DOMString) -> Fallible<Option<Temporary<Element>>> {
    let root: JSRef<Node> = NodeCast::from_ref(self);
    root.query_selector(selectors)
}
// http://dom.spec.whatwg.org/#dom-parentnode-queryselectorall
// As QuerySelector, but returns every match rather than the first.
fn QuerySelectorAll(self, selectors: DOMString) -> Fallible<Temporary<NodeList>> {
    let root: JSRef<Node> = NodeCast::from_ref(self);
    root.query_selector_all(selectors)
}
// https://html.spec.whatwg.org/multipage/dom.html#dom-document-readystate
// Current loading state ("loading" / "interactive" / "complete").
fn ReadyState(self) -> DocumentReadyState {
    self.ready_state.get()
}
// https://html.spec.whatwg.org/multipage/browsers.html#dom-document-defaultview
// The window associated with this document.
fn DefaultView(self) -> Temporary<Window> {
    Temporary::new(self.window)
}
// https://html.spec.whatwg.org/multipage/dom.html#dom-document-cookie
fn GetCookie(self) -> Fallible<DOMString> {
    //TODO: return empty string for cookie-averse Document

    // Cookie access is only allowed for origins that are
    // scheme/host/port tuples.
    let url = self.url();
    if !is_scheme_host_port_tuple(&url) {
        return Err(Security);
    }
    let window = self.window.root();
    let window = window.r();
    let page = window.page();

    // Synchronously ask the resource task for the cookie string; the
    // channel send is fire-and-forget, the recv blocks for the reply.
    let (tx, rx) = channel();
    let _ = page.resource_task.send(GetCookiesForUrl(url, tx, NonHTTP));
    let cookies = rx.recv().unwrap();
    Ok(cookies.unwrap_or("".to_owned()))
}
// https://html.spec.whatwg.org/multipage/dom.html#dom-document-cookie
fn SetCookie(self, cookie: DOMString) -> ErrorResult {
    //TODO: ignore for cookie-averse Document

    // Same origin restriction as GetCookie.
    let url = self.url();
    if !is_scheme_host_port_tuple(&url) {
        return Err(Security);
    }
    let window = self.window.root();
    let window = window.r();
    let page = window.page();
    // Fire-and-forget: the resource task stores the cookie asynchronously.
    let _ = page.resource_task.send(SetCookiesForUrl(url, cookie, NonHTTP));
    Ok(())
}
    // Generated event-handler IDL attributes (onclick, onload, ...) plus
    // the document-specific onreadystatechange handler.
    global_event_handlers!();
    event_handler!(readystatechange, GetOnreadystatechange, SetOnreadystatechange);
}
/// Returns whether `url`'s origin is a scheme/host/port tuple, i.e.
/// whether it has both a host and a (possibly scheme-default) port.
fn is_scheme_host_port_tuple(url: &Url) -> bool {
    match (url.host(), url.port_or_default()) {
        (Some(_), Some(_)) => true,
        _ => false,
    }
}
/// The document-loading milestones that are queued back onto the script
/// task as runnables: firing DOMContentLoaded, and firing the load event.
pub enum DocumentProgressTask {
    DOMContentLoaded,
    Load,
}
/// A runnable that dispatches one document-progress milestone for a
/// particular document.
pub struct DocumentProgressHandler {
    // Trusted reference so the document can be reached from another task.
    addr: Trusted<Document>,
    // Which milestone to dispatch when run.
    task: DocumentProgressTask,
}
impl DocumentProgressHandler {
    /// Creates a handler that will dispatch `task` for the given document.
    pub fn new(addr: Trusted<Document>, task: DocumentProgressTask) -> DocumentProgressHandler {
        DocumentProgressHandler {
            addr: addr,
            task: task,
        }
    }

    /// Fires a non-bubbling, non-cancelable DOMContentLoaded event at the
    /// document.
    fn dispatch_dom_content_loaded(&self) {
        let document = self.addr.to_temporary().root();
        let window = document.r().window().root();
        let event = Event::new(GlobalRef::Window(window.r()), "DOMContentLoaded".to_owned(),
                               EventBubbles::DoesNotBubble,
                               EventCancelable::NotCancelable).root();
        let doctarget: JSRef<EventTarget> = EventTargetCast::from_ref(document.r());
        let _ = doctarget.DispatchEvent(event.r());
    }

    /// Flips the document's ready state to "complete".
    fn set_ready_state_complete(&self) {
        let document = self.addr.to_temporary().root();
        document.r().set_ready_state(DocumentReadyState::Complete);
    }

    /// Fires the trusted `load` event at the window (with the document as
    /// target), then fires a `load` event at the containing frame element,
    /// if the browser context has one.
    fn dispatch_load(&self) {
        let document = self.addr.to_temporary().root();
        let window = document.r().window().root();
        let event = Event::new(GlobalRef::Window(window.r()), "load".to_owned(),
                               EventBubbles::DoesNotBubble,
                               EventCancelable::NotCancelable).root();
        let wintarget: JSRef<EventTarget> = EventTargetCast::from_ref(window.r());
        let doctarget: JSRef<EventTarget> = EventTargetCast::from_ref(document.r());
        // Mark the event as trusted (browser-generated) before dispatch.
        event.r().set_trusted(true);
        let _ = wintarget.dispatch_event_with_target(doctarget, event.r());

        let window_ref = window.r();
        let browser_context = window_ref.browser_context();
        let browser_context = browser_context.as_ref().unwrap();

        browser_context.frame_element().map(|frame_element| {
            let frame_element = frame_element.root();
            let frame_window = window_from_node(frame_element.r()).root();
            let event = Event::new(GlobalRef::Window(frame_window.r()), "load".to_owned(),
                                   EventBubbles::DoesNotBubble,
                                   EventCancelable::NotCancelable).root();
            let target: JSRef<EventTarget> = EventTargetCast::from_ref(frame_element.r());
            event.r().fire(target);
        });
    }
}
impl Runnable for DocumentProgressHandler {
    /// Runs the queued document-progress milestone on the script task.
    fn handler(self: Box<DocumentProgressHandler>) {
        match self.task {
            DocumentProgressTask::DOMContentLoaded => self.dispatch_dom_content_loaded(),
            DocumentProgressTask::Load => {
                // The ready state must read "complete" before the load
                // event observers run.
                self.set_ready_state_complete();
                self.dispatch_load();
            }
        }
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! Wrapper definitions on top of Gecko types in order to be used in the style
//! system.
//!
//! This really follows the Servo pattern in
//! `components/script/layout_wrapper.rs`.
//!
//! This theoretically should live in its own crate, but now it lives in the
//! style system it's kind of pointless in the Stylo case, and only Servo forces
//! the separation between the style system implementation and everything else.
use CaseSensitivityExt;
use app_units::Au;
use applicable_declarations::ApplicableDeclarationBlock;
use atomic_refcell::{AtomicRefCell, AtomicRefMut};
use context::{QuirksMode, SharedStyleContext, PostAnimationTasks, UpdateAnimationsTasks};
use data::ElementData;
use dom::{LayoutIterator, NodeInfo, TElement, TNode};
use dom::{OpaqueNode, PresentationalHintsSynthesizer};
use element_state::{ElementState, DocumentState, NS_DOCUMENT_STATE_WINDOW_INACTIVE};
use error_reporting::ParseErrorReporter;
use font_metrics::{FontMetrics, FontMetricsProvider, FontMetricsQueryResult};
use gecko::data::PerDocumentStyleData;
use gecko::global_style_data::GLOBAL_STYLE_DATA;
use gecko::selector_parser::{SelectorImpl, NonTSPseudoClass, PseudoElement};
use gecko::snapshot_helpers;
use gecko_bindings::bindings;
use gecko_bindings::bindings::{Gecko_ConstructStyleChildrenIterator, Gecko_DestroyStyleChildrenIterator};
use gecko_bindings::bindings::{Gecko_DocumentState, Gecko_ElementState, Gecko_GetDocumentLWTheme};
use gecko_bindings::bindings::{Gecko_GetLastChild, Gecko_GetNextStyleChild};
use gecko_bindings::bindings::{Gecko_IsRootElement, Gecko_MatchesElement, Gecko_Namespace};
use gecko_bindings::bindings::{Gecko_SetNodeFlags, Gecko_UnsetNodeFlags};
use gecko_bindings::bindings::Gecko_ClassOrClassList;
use gecko_bindings::bindings::Gecko_ElementHasAnimations;
use gecko_bindings::bindings::Gecko_ElementHasCSSAnimations;
use gecko_bindings::bindings::Gecko_ElementHasCSSTransitions;
use gecko_bindings::bindings::Gecko_GetActiveLinkAttrDeclarationBlock;
use gecko_bindings::bindings::Gecko_GetAnimationRule;
use gecko_bindings::bindings::Gecko_GetExtraContentStyleDeclarations;
use gecko_bindings::bindings::Gecko_GetHTMLPresentationAttrDeclarationBlock;
use gecko_bindings::bindings::Gecko_GetSMILOverrideDeclarationBlock;
use gecko_bindings::bindings::Gecko_GetStyleAttrDeclarationBlock;
use gecko_bindings::bindings::Gecko_GetUnvisitedLinkAttrDeclarationBlock;
use gecko_bindings::bindings::Gecko_GetVisitedLinkAttrDeclarationBlock;
use gecko_bindings::bindings::Gecko_IsSignificantChild;
use gecko_bindings::bindings::Gecko_MatchLang;
use gecko_bindings::bindings::Gecko_MatchStringArgPseudo;
use gecko_bindings::bindings::Gecko_UnsetDirtyStyleAttr;
use gecko_bindings::bindings::Gecko_UpdateAnimations;
use gecko_bindings::structs;
use gecko_bindings::structs::{RawGeckoElement, RawGeckoNode, RawGeckoXBLBinding};
use gecko_bindings::structs::{nsIAtom, nsIContent, nsINode_BooleanFlag};
use gecko_bindings::structs::ELEMENT_HANDLED_SNAPSHOT;
use gecko_bindings::structs::ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO;
use gecko_bindings::structs::ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO;
use gecko_bindings::structs::ELEMENT_HAS_SNAPSHOT;
use gecko_bindings::structs::EffectCompositor_CascadeLevel as CascadeLevel;
use gecko_bindings::structs::NODE_DESCENDANTS_NEED_FRAMES;
use gecko_bindings::structs::NODE_NEEDS_FRAME;
use gecko_bindings::structs::nsChangeHint;
use gecko_bindings::structs::nsIDocument_DocumentTheme as DocumentTheme;
use gecko_bindings::structs::nsRestyleHint;
use gecko_bindings::sugar::ownership::{HasArcFFI, HasSimpleFFI};
use hash::FnvHashMap;
use logical_geometry::WritingMode;
use media_queries::Device;
use properties::{ComputedValues, LonghandId, parse_style_attribute};
use properties::{Importance, PropertyDeclaration, PropertyDeclarationBlock};
use properties::animated_properties::{AnimationValue, AnimationValueMap};
use properties::animated_properties::TransitionProperty;
use properties::style_structs::Font;
use rule_tree::CascadeLevel as ServoCascadeLevel;
use selector_parser::{AttrValue, ElementExt, PseudoClassStringArg};
use selectors::{Element, OpaqueElement};
use selectors::attr::{AttrSelectorOperation, AttrSelectorOperator, CaseSensitivity, NamespaceConstraint};
use selectors::matching::{ElementSelectorFlags, LocalMatchingContext, MatchingContext};
use selectors::matching::{RelevantLinkStatus, VisitedHandlingMode};
use selectors::sink::Push;
use servo_arc::{Arc, ArcBorrow, RawOffsetArc};
use shared_lock::Locked;
use std::cell::RefCell;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::DerefMut;
use std::ptr;
use string_cache::{Atom, Namespace, WeakAtom, WeakNamespace};
use stylesheets::UrlExtraData;
use stylist::Stylist;
/// A simple wrapper over a non-null Gecko node (`nsINode`) pointer.
///
/// The `'ln` lifetime ties the wrapper to the borrow of the underlying
/// Gecko node.
///
/// Important: We don't currently refcount the DOM, because the wrapper lifetime
/// magic guarantees that our LayoutFoo references won't outlive the root, and
/// we don't mutate any of the references on the Gecko side during restyle.
///
/// We could implement refcounting if need be (at a potentially non-trivial
/// performance cost) by implementing Drop and making LayoutFoo non-Copy.
#[derive(Clone, Copy)]
pub struct GeckoNode<'ln>(pub &'ln RawGeckoNode);
impl<'ln> fmt::Debug for GeckoNode<'ln> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Elements delegate to the element formatter; other nodes just
        // print their kind along with the opaque address.
        match self.as_element() {
            Some(el) => el.fmt(f),
            None if self.is_text_node() => write!(f, "<text node> ({:#x})", self.opaque().0),
            None => write!(f, "<non-text node> ({:#x})", self.opaque().0),
        }
    }
}
impl<'ln> GeckoNode<'ln> {
    /// Wraps an `nsIContent`; `_base` is the embedded `nsINode`.
    #[inline]
    fn from_content(content: &'ln nsIContent) -> Self {
        GeckoNode(&content._base)
    }

    /// Raw nsINode flag bits.
    #[inline]
    fn flags(&self) -> u32 {
        (self.0)._base._base_1.mFlags
    }

    #[inline]
    fn node_info(&self) -> &structs::NodeInfo {
        debug_assert!(!self.0.mNodeInfo.mRawPtr.is_null());
        unsafe { &*self.0.mNodeInfo.mRawPtr }
    }

    // These live in different locations depending on processor architecture.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    fn bool_flags(&self) -> u32 {
        (self.0)._base._base_1.mBoolFlags
    }

    #[cfg(target_pointer_width = "32")]
    #[inline]
    fn bool_flags(&self) -> u32 {
        (self.0).mBoolFlags
    }

    /// Tests a single nsINode boolean flag bit.
    #[inline]
    fn get_bool_flag(&self, flag: nsINode_BooleanFlag) -> bool {
        self.bool_flags() & (1u32 << flag as u32) != 0
    }

    /// The document owning this node; asserted non-null in debug builds.
    fn owner_doc(&self) -> &structs::nsIDocument {
        debug_assert!(!self.node_info().mDocument.is_null());
        unsafe { &*self.node_info().mDocument }
    }

    #[inline]
    fn first_child(&self) -> Option<GeckoNode<'ln>> {
        unsafe { self.0.mFirstChild.as_ref().map(GeckoNode::from_content) }
    }

    #[inline]
    fn last_child(&self) -> Option<GeckoNode<'ln>> {
        unsafe { Gecko_GetLastChild(self.0).map(GeckoNode) }
    }

    #[inline]
    fn prev_sibling(&self) -> Option<GeckoNode<'ln>> {
        unsafe { self.0.mPreviousSibling.as_ref().map(GeckoNode::from_content) }
    }

    #[inline]
    fn next_sibling(&self) -> Option<GeckoNode<'ln>> {
        unsafe { self.0.mNextSibling.as_ref().map(GeckoNode::from_content) }
    }

    /// Simple iterator over all this node's children. Unlike `.children()`, this iterator does
    /// not filter out nodes that don't need layout.
    fn dom_children(self) -> GeckoChildrenIterator<'ln> {
        GeckoChildrenIterator::Current(self.first_child())
    }

    /// WARNING: This logic is duplicated in Gecko's FlattenedTreeParentIsParent.
    /// Make sure to mirror any modifications in both places.
    fn flattened_tree_parent_is_parent(&self) -> bool {
        use gecko_bindings::structs::*;
        let flags = self.flags();
        // Nodes that may be bound via XBL or live in a shadow tree need
        // the slow path.
        if flags & (NODE_MAY_BE_IN_BINDING_MNGR as u32 |
                    NODE_IS_IN_SHADOW_TREE as u32) != 0 {
            return false;
        }

        let parent = unsafe { self.0.mParent.as_ref() }.map(GeckoNode);
        let parent_el = parent.and_then(|p| p.as_element());
        // NAC roots hanging off the document root also need the slow path.
        if flags & (NODE_IS_NATIVE_ANONYMOUS_ROOT as u32) != 0 &&
           parent_el.map_or(false, |el| el.is_root())
        {
            return false;
        }

        if parent_el.map_or(false, |el| el.has_shadow_root()) {
            return false;
        }

        true
    }

    /// Parent in the flattened (composed) tree; takes the cheap DOM-parent
    /// path when `flattened_tree_parent_is_parent` says it is safe.
    fn flattened_tree_parent(&self) -> Option<Self> {
        let fast_path = self.flattened_tree_parent_is_parent();
        debug_assert!(fast_path == unsafe { bindings::Gecko_FlattenedTreeParentIsParent(self.0) });
        if fast_path {
            unsafe { self.0.mParent.as_ref().map(GeckoNode) }
        } else {
            unsafe { bindings::Gecko_GetFlattenedTreeParentNode(self.0).map(GeckoNode) }
        }
    }

    fn contains_non_whitespace_content(&self) -> bool {
        unsafe { Gecko_IsSignificantChild(self.0, true, false) }
    }

    #[inline]
    fn may_have_anonymous_children(&self) -> bool {
        self.get_bool_flag(nsINode_BooleanFlag::ElementMayHaveAnonymousChildren)
    }
}
impl<'ln> NodeInfo for GeckoNode<'ln> {
    #[inline]
    fn is_element(&self) -> bool {
        self.get_bool_flag(nsINode_BooleanFlag::NodeIsElement)
    }

    fn is_text_node(&self) -> bool {
        // This is a DOM constant that isn't going to change.
        const TEXT_NODE: u16 = 3;
        self.node_info().mInner.mNodeType == TEXT_NODE
    }
}
impl<'ln> TNode for GeckoNode<'ln> {
    type ConcreteElement = GeckoElement<'ln>;
    type ConcreteChildrenIterator = GeckoChildrenIterator<'ln>;

    /// The DOM (light-tree) parent, not the flattened-tree parent.
    fn parent_node(&self) -> Option<Self> {
        unsafe { self.0.mParent.as_ref().map(GeckoNode) }
    }

    fn children(&self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
        LayoutIterator(self.dom_children())
    }

    fn traversal_parent(&self) -> Option<GeckoElement<'ln>> {
        self.flattened_tree_parent().and_then(|n| n.as_element())
    }

    fn traversal_children(&self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
        if let Some(element) = self.as_element() {
            // This condition is similar to the check that
            // StyleChildrenIterator::IsNeeded does, except that it might return
            // true if we used to (but no longer) have anonymous content from
            // ::before/::after, XBL bindings, or nsIAnonymousContentCreators.
            if element.is_in_anonymous_subtree() ||
               element.has_xbl_binding_with_content() ||
               self.may_have_anonymous_children() {
                unsafe {
                    // The Gecko-side iterator is constructed in place and
                    // torn down in GeckoChildrenIterator's Drop impl.
                    let mut iter: structs::StyleChildrenIterator = ::std::mem::zeroed();
                    Gecko_ConstructStyleChildrenIterator(element.0, &mut iter);
                    return LayoutIterator(GeckoChildrenIterator::GeckoIterator(iter));
                }
            }
        }

        LayoutIterator(self.dom_children())
    }

    /// The node's address, usable as a stable identity token.
    fn opaque(&self) -> OpaqueNode {
        let ptr: usize = self.0 as *const _ as usize;
        OpaqueNode(ptr)
    }

    fn debug_id(self) -> usize {
        unimplemented!()
    }

    #[inline]
    fn as_element(&self) -> Option<GeckoElement<'ln>> {
        if self.is_element() {
            unsafe { Some(GeckoElement(&*(self.0 as *const _ as *const RawGeckoElement))) }
        } else {
            None
        }
    }

    fn can_be_fragmented(&self) -> bool {
        // FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
        // Maybe this isn’t useful for Gecko?
        false
    }

    unsafe fn set_can_be_fragmented(&self, _value: bool) {
        // FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
        // Maybe this isn’t useful for Gecko?
    }

    fn is_in_doc(&self) -> bool {
        unsafe { bindings::Gecko_IsInDocument(self.0) }
    }
}
/// A wrapper on top of two kind of iterators, depending on the parent being
/// iterated.
///
/// We generally iterate children by traversing the light-tree siblings of the
/// first child like Servo does.
///
/// However, for nodes with anonymous children, we use a custom (heavier-weight)
/// Gecko-implemented iterator.
///
/// FIXME(emilio): If we take into account shadow DOM, we're going to need the
/// flat tree pretty much always. We can try to optimize the case where there's
/// no shadow root sibling, probably.
pub enum GeckoChildrenIterator<'a> {
    /// A simple iterator that tracks the current node being iterated and
    /// replaces it with the next sibling when requested.
    Current(Option<GeckoNode<'a>>),
    /// A Gecko-implemented iterator we need to drop appropriately.
    GeckoIterator(structs::StyleChildrenIterator),
}
impl<'a> Drop for GeckoChildrenIterator<'a> {
    fn drop(&mut self) {
        // Only the Gecko-side iterator owns FFI resources that need
        // explicit teardown; the sibling-walking variant is plain data.
        match *self {
            GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
                Gecko_DestroyStyleChildrenIterator(it);
            },
            GeckoChildrenIterator::Current(..) => {}
        }
    }
}
impl<'a> Iterator for GeckoChildrenIterator<'a> {
    type Item = GeckoNode<'a>;
    fn next(&mut self) -> Option<GeckoNode<'a>> {
        match *self {
            // Light-tree walk: yield the current node, advance to its
            // next sibling.
            GeckoChildrenIterator::Current(curr) => {
                let next = curr.and_then(|node| node.next_sibling());
                *self = GeckoChildrenIterator::Current(next);
                curr
            },
            GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
                // We do this unsafe lengthening of the lifetime here because
                // structs::StyleChildrenIterator is actually StyleChildrenIterator<'a>,
                // however we can't express this easily with bindgen, and it would
                // introduce functions with two input lifetimes into bindgen,
                // which would be out of scope for elision.
                Gecko_GetNextStyleChild(&mut * (it as *mut _)).map(GeckoNode)
            }
        }
    }
}
/// A Simple wrapper over a non-null Gecko `nsXBLBinding` pointer.
#[derive(Clone, Copy)]
pub struct GeckoXBLBinding<'lb>(pub &'lb RawGeckoXBLBinding);
impl<'lb> GeckoXBLBinding<'lb> {
    /// The binding this one derives from, if any.
    fn base_binding(&self) -> Option<Self> {
        unsafe { self.0.mNextBinding.mRawPtr.as_ref().map(GeckoXBLBinding) }
    }

    /// Raw pointer to this binding's anonymous content; may be null.
    fn anon_content(&self) -> *const nsIContent {
        unsafe { self.0.mContent.raw::<nsIContent>() }
    }

    fn inherits_style(&self) -> bool {
        unsafe { bindings::Gecko_XBLBinding_InheritsStyle(self.0) }
    }

    // This duplicates the logic in Gecko's
    // nsBindingManager::GetBindingWithContent.
    fn get_binding_with_content(&self) -> Option<Self> {
        // Walk the base-binding chain looking for the first binding that
        // actually carries anonymous content.
        let mut binding = *self;
        loop {
            if !binding.anon_content().is_null() {
                return Some(binding);
            }
            binding = match binding.base_binding() {
                Some(b) => b,
                None => return None,
            };
        }
    }

    /// Calls `f` with each stylist in the binding chain, base-most first.
    fn each_xbl_stylist<F>(self, f: &mut F)
    where
        F: FnMut(&Stylist),
    {
        if let Some(base) = self.base_binding() {
            base.each_xbl_stylist(f);
        }

        let raw_data = unsafe {
            bindings::Gecko_XBLBinding_GetRawServoStyleSet(self.0)
        };
        if let Some(raw_data) = raw_data {
            let data = PerDocumentStyleData::from_ffi(&*raw_data).borrow();
            f(&data.stylist);
        }
    }
}
/// A simple wrapper over a non-null Gecko `Element` pointer.
///
/// Like `GeckoNode`, this is `Copy` and unrefcounted; the `'le` lifetime
/// ties it to the borrow of the underlying element.
#[derive(Clone, Copy)]
pub struct GeckoElement<'le>(pub &'le RawGeckoElement);
impl<'le> fmt::Debug for GeckoElement<'le> {
    /// Formats the element as a pseudo-HTML open tag, e.g.
    /// `<div id=foo class="a b"> (0xdeadbeef)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "<{}", self.get_local_name())?;
        if let Some(id) = self.get_id() {
            write!(f, " id={}", id)?;
        }

        // `first` doubles as the "have we opened the class attribute?"
        // flag: it flips to false on the first class we see, so `!first`
        // after the loop tells us whether the closing quote is needed.
        // (The original kept a separate `any` flag that always mirrored
        // `!first`; it was redundant.)
        let mut first = true;
        self.each_class(|c| {
            if first {
                first = false;
                let _ = f.write_str(" class=\"");
            } else {
                let _ = f.write_str(" ");
            }
            let _ = write!(f, "{}", c);
        });

        if !first {
            f.write_str("\"")?;
        }

        write!(f, "> ({:#x})", self.as_node().opaque().0)
    }
}
impl<'le> GeckoElement<'le> {
/// Parse the style attribute of an element.
pub fn parse_style_attribute<R>(
value: &str,
url_data: &UrlExtraData,
quirks_mode: QuirksMode,
reporter: &R,
) -> PropertyDeclarationBlock
where
R: ParseErrorReporter,
{
parse_style_attribute(value, url_data, reporter, quirks_mode)
}
fn flags(&self) -> u32 {
self.raw_node()._base._base_1.mFlags
}
fn raw_node(&self) -> &RawGeckoNode {
&(self.0)._base._base._base
}
// FIXME: We can implement this without OOL calls, but we can't easily given
// GeckoNode is a raw reference.
//
// We can use a Cell<T>, but that's a bit of a pain.
fn set_flags(&self, flags: u32) {
unsafe { Gecko_SetNodeFlags(self.as_node().0, flags) }
}
fn unset_flags(&self, flags: u32) {
unsafe { Gecko_UnsetNodeFlags(self.as_node().0, flags) }
}
/// Returns true if this element has descendants for lazy frame construction.
pub fn descendants_need_frames(&self) -> bool {
self.flags() & (NODE_DESCENDANTS_NEED_FRAMES as u32) != 0
}
/// Returns true if this element needs lazy frame construction.
pub fn needs_frame(&self) -> bool {
self.flags() & (NODE_NEEDS_FRAME as u32) != 0
}
/// Returns true if this element has a shadow root.
fn has_shadow_root(&self) -> bool {
self.get_extended_slots()
.map_or(false, |slots| !slots.mShadowRoot.mRawPtr.is_null())
}
/// Returns a reference to the DOM slots for this Element, if they exist.
fn get_dom_slots(&self) -> Option<&structs::FragmentOrElement_nsDOMSlots> {
let slots = self.as_node().0.mSlots as *const structs::FragmentOrElement_nsDOMSlots;
unsafe { slots.as_ref() }
}
/// Returns a reference to the extended DOM slots for this Element.
fn get_extended_slots(
&self,
) -> Option<&structs::FragmentOrElement_nsExtendedDOMSlots> {
self.get_dom_slots()
.and_then(|s| unsafe { s.mExtendedSlots.mPtr.as_ref() })
}
#[inline]
fn get_xbl_binding(&self) -> Option<GeckoXBLBinding> {
if self.flags() & (structs::NODE_MAY_BE_IN_BINDING_MNGR as u32) == 0 {
return None;
}
unsafe { bindings::Gecko_GetXBLBinding(self.0).map(GeckoXBLBinding) }
}
#[inline]
fn get_xbl_binding_with_content(&self) -> Option<GeckoXBLBinding> {
self.get_xbl_binding()
.and_then(|b| b.get_binding_with_content())
}
#[inline]
fn has_xbl_binding_with_content(&self) -> bool {
!self.get_xbl_binding_with_content().is_none()
}
/// This and has_xbl_binding_parent duplicate the logic in Gecko's virtual
/// nsINode::GetBindingParent function, which only has two implementations:
/// one for XUL elements, and one for other elements. We just hard code in
/// our knowledge of those two implementations here.
fn get_xbl_binding_parent(&self) -> Option<Self> {
if self.is_xul_element() {
// FIXME(heycam): Having trouble with bindgen on nsXULElement,
// where the binding parent is stored in a member variable
// rather than in slots. So just get it through FFI for now.
unsafe {
bindings::Gecko_GetBindingParent(self.0).map(GeckoElement)
}
} else {
let binding_parent = unsafe {
self.get_non_xul_xbl_binding_parent_raw_content().as_ref()
}.map(GeckoNode::from_content)
.and_then(|n| n.as_element());
debug_assert!(binding_parent == unsafe { bindings::Gecko_GetBindingParent(self.0).map(GeckoElement) });
binding_parent
}
}
fn get_non_xul_xbl_binding_parent_raw_content(&self) -> *mut nsIContent {
debug_assert!(!self.is_xul_element());
self.get_extended_slots()
.map_or(ptr::null_mut(), |slots| slots.mBindingParent)
}
fn has_xbl_binding_parent(&self) -> bool {
if self.is_xul_element() {
// FIXME(heycam): Having trouble with bindgen on nsXULElement,
// where the binding parent is stored in a member variable
// rather than in slots. So just get it through FFI for now.
unsafe { bindings::Gecko_GetBindingParent(self.0).is_some() }
} else {
!self.get_non_xul_xbl_binding_parent_raw_content().is_null()
}
}
fn namespace_id(&self) -> i32 {
self.as_node().node_info().mInner.mNamespaceID
}
fn is_html_element(&self) -> bool {
self.namespace_id() == (structs::root::kNameSpaceID_XHTML as i32)
}
fn is_xul_element(&self) -> bool {
self.namespace_id() == (structs::root::kNameSpaceID_XUL as i32)
}
/// Sets the specified element data, return any existing data.
///
/// Like `ensure_data`, only safe to call with exclusive access to the
/// element.
pub unsafe fn set_data(&self, replace_data: Option<ElementData>) -> Option<ElementData> {
match (self.get_data(), replace_data) {
(Some(old), Some(replace_data)) => {
Some(mem::replace(old.borrow_mut().deref_mut(), replace_data))
}
(Some(old), None) => {
let old_data = mem::replace(old.borrow_mut().deref_mut(), ElementData::default());
self.0.mServoData.set(ptr::null_mut());
Some(old_data)
}
(None, Some(replace_data)) => {
let ptr = Box::into_raw(Box::new(AtomicRefCell::new(replace_data)));
self.0.mServoData.set(ptr);
None
}
(None, None) => None,
}
}
#[inline]
fn has_id(&self) -> bool {
self.as_node().get_bool_flag(nsINode_BooleanFlag::ElementHasID)
}
#[inline]
fn get_state_internal(&self) -> u64 {
if !self.as_node().get_bool_flag(nsINode_BooleanFlag::ElementHasLockedStyleStates) {
return self.0.mState.mStates;
}
unsafe { Gecko_ElementState(self.0) }
}
fn document_state(&self) -> DocumentState {
let node = self.as_node();
unsafe {
let states = Gecko_DocumentState(node.owner_doc());
DocumentState::from_bits_truncate(states)
}
}
#[inline]
fn may_have_class(&self) -> bool {
self.as_node()
.get_bool_flag(nsINode_BooleanFlag::ElementMayHaveClass)
}
#[inline]
fn has_properties(&self) -> bool {
use gecko_bindings::structs::NODE_HAS_PROPERTIES;
(self.flags() & NODE_HAS_PROPERTIES as u32) != 0
}
#[inline]
fn get_before_or_after_pseudo(&self, is_before: bool) -> Option<Self> {
if !self.has_properties() {
return None;
}
unsafe { bindings::Gecko_GetBeforeOrAfterPseudo(self.0, is_before).map(GeckoElement) }
}
#[inline]
fn may_have_style_attribute(&self) -> bool {
self.as_node()
.get_bool_flag(nsINode_BooleanFlag::ElementMayHaveStyle)
}
#[inline]
fn get_document_theme(&self) -> DocumentTheme {
let node = self.as_node();
unsafe { Gecko_GetDocumentLWTheme(node.owner_doc()) }
}
/// Owner document quirks mode getter.
pub fn owner_document_quirks_mode(&self) -> QuirksMode {
self.as_node().owner_doc().mCompatMode.into()
}
/// Only safe to call on the main thread, with exclusive access to the element and
/// its ancestors.
/// This function is also called after display property changed for SMIL animation.
///
/// Also this function schedules style flush.
unsafe fn maybe_restyle<'a>(
&self,
data: &'a mut ElementData,
animation_only: bool,
) -> bool {
if !data.has_styles() {
return false;
}
// Propagate the bit up the chain.
if animation_only {
bindings::Gecko_NoteAnimationOnlyDirtyElement(self.0);
} else {
bindings::Gecko_NoteDirtyElement(self.0);
}
// Ensure and return the RestyleData.
true
}
/// Set restyle and change hints to the element data.
pub fn note_explicit_hints(
&self,
restyle_hint: nsRestyleHint,
change_hint: nsChangeHint,
) {
use gecko::restyle_damage::GeckoRestyleDamage;
use invalidation::element::restyle_hints::RestyleHint;
let damage = GeckoRestyleDamage::new(change_hint);
debug!("note_explicit_hints: {:?}, restyle_hint={:?}, change_hint={:?}",
self, restyle_hint, change_hint);
let restyle_hint: RestyleHint = restyle_hint.into();
debug_assert!(!(restyle_hint.has_animation_hint() &&
restyle_hint.has_non_animation_hint()),
"Animation restyle hints should not appear with non-animation restyle hints");
let mut maybe_data = self.mutate_data();
let should_restyle = maybe_data.as_mut().map_or(false, |d| unsafe {
self.maybe_restyle(d, restyle_hint.has_animation_hint())
});
if should_restyle {
maybe_data
.as_mut()
.unwrap()
.hint
.insert(restyle_hint.into());
maybe_data.as_mut().unwrap().damage |= damage;
} else {
debug!("(Element not styled, discarding hints)");
}
}
/// This logic is duplicated in Gecko's nsIContent::IsRootOfAnonymousSubtree.
#[inline]
fn is_root_of_anonymous_subtree(&self) -> bool {
use gecko_bindings::structs::NODE_IS_ANONYMOUS_ROOT;
self.flags() & (NODE_IS_ANONYMOUS_ROOT as u32) != 0
}
/// This logic is duplicated in Gecko's nsIContent::IsRootOfNativeAnonymousSubtree.
#[inline]
fn is_root_of_native_anonymous_subtree(&self) -> bool {
use gecko_bindings::structs::NODE_IS_NATIVE_ANONYMOUS_ROOT;
return self.flags() & (NODE_IS_NATIVE_ANONYMOUS_ROOT as u32) != 0
}
/// This logic is duplicated in Gecko's nsINode::IsInNativeAnonymousSubtree.
#[inline]
fn is_in_native_anonymous_subtree(&self) -> bool {
use gecko_bindings::structs::NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE;
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) != 0
}
/// This logic is duplicate in Gecko's nsIContent::IsInShadowTree().
#[inline]
fn is_in_shadow_tree(&self) -> bool {
use gecko_bindings::structs::NODE_IS_IN_SHADOW_TREE;
self.flags() & (NODE_IS_IN_SHADOW_TREE as u32) != 0
}
/// This logic is duplicated in Gecko's nsIContent::IsInAnonymousSubtree.
#[inline]
fn is_in_anonymous_subtree(&self) -> bool {
self.is_in_native_anonymous_subtree() ||
(!self.is_in_shadow_tree() && self.has_xbl_binding_parent())
}
}
/// Converts flags from the layout used by rust-selectors to the layout used
/// by Gecko. We could align these and then do this without conditionals, but
/// it's probably not worth the trouble.
/// Converts flags from the layout used by rust-selectors to the layout used
/// by Gecko. We could align these and then do this without conditionals, but
/// it's probably not worth the trouble.
fn selector_flags_to_node_flags(flags: ElementSelectorFlags) -> u32 {
    use gecko_bindings::structs::*;
    use selectors::matching::*;
    // Pair each rust-selectors flag with its Gecko node-flag counterpart
    // and OR together the ones that are set.
    let mapping = [
        (HAS_SLOW_SELECTOR, NODE_HAS_SLOW_SELECTOR as u32),
        (HAS_SLOW_SELECTOR_LATER_SIBLINGS, NODE_HAS_SLOW_SELECTOR_LATER_SIBLINGS as u32),
        (HAS_EDGE_CHILD_SELECTOR, NODE_HAS_EDGE_CHILD_SELECTOR as u32),
        (HAS_EMPTY_SELECTOR, NODE_HAS_EMPTY_SELECTOR as u32),
    ];
    let mut gecko_flags = 0u32;
    for &(rust_flag, gecko_flag) in mapping.iter() {
        if flags.contains(rust_flag) {
            gecko_flags |= gecko_flag;
        }
    }
    gecko_flags
}
/// Builds a declaration block from the element's current animation values
/// at the given cascade level, or returns `None` when the element has no
/// relevant animation values.
fn get_animation_rule(
    element: &GeckoElement,
    cascade_level: CascadeLevel,
) -> Option<Arc<Locked<PropertyDeclarationBlock>>> {
    use gecko_bindings::sugar::ownership::HasSimpleFFI;
    // Also, we should try to reuse the PDB, to avoid creating extra rule nodes.
    let mut animation_values = AnimationValueMap::default();
    // Gecko fills `animation_values` through the FFI out-parameter and
    // returns whether anything was written.
    if unsafe { Gecko_GetAnimationRule(element.0,
                                       cascade_level,
                                       AnimationValueMap::as_ffi_mut(&mut animation_values)) } {
        let shared_lock = &GLOBAL_STYLE_DATA.shared_lock;
        Some(Arc::new(shared_lock.wrap(
            PropertyDeclarationBlock::from_animation_value_map(&animation_values))))
    } else {
        None
    }
}
#[derive(Debug)]
/// Gecko font metrics provider
pub struct GeckoFontMetricsProvider {
    /// Cache of base font sizes for each language
    ///
    /// Usually will have 1 element.
    ///
    // This may be slow on pages using more languages, might be worth optimizing
    // by caching lang->group mapping separately and/or using a hashmap on larger
    // loads.
    pub font_size_cache: RefCell<Vec<(Atom, ::gecko_bindings::structs::FontSizePrefs)>>,
}
impl GeckoFontMetricsProvider {
    /// Construct a provider with an empty font-size cache.
    pub fn new() -> Self {
        GeckoFontMetricsProvider {
            font_size_cache: RefCell::new(Vec::new()),
        }
    }
}
impl FontMetricsProvider for GeckoFontMetricsProvider {
    fn create_from(_: &SharedStyleContext) -> GeckoFontMetricsProvider {
        GeckoFontMetricsProvider::new()
    }

    /// Base font size for `font_name` (a language atom) and the given
    /// generic family id, consulting the per-language cache first.
    fn get_size(&self, font_name: &Atom, font_family: u8) -> Au {
        use gecko_bindings::bindings::Gecko_GetBaseSize;
        let mut cache = self.font_size_cache.borrow_mut();
        if let Some(sizes) = cache.iter().find(|el| el.0 == *font_name) {
            return sizes.1.size_for_generic(font_family);
        }
        // Cache miss: fetch the prefs from Gecko and remember them.
        let sizes = unsafe { Gecko_GetBaseSize(font_name.as_ptr()) };
        cache.push((font_name.clone(), sizes));
        sizes.size_for_generic(font_family)
    }

    /// Queries Gecko for the font metrics (x-height and ch advance) of
    /// `font` at `font_size`.
    fn query(
        &self,
        font: &Font,
        font_size: Au,
        wm: WritingMode,
        in_media_query: bool,
        device: &Device,
    ) -> FontMetricsQueryResult {
        use gecko_bindings::bindings::Gecko_GetFontMetrics;
        let gecko_metrics = unsafe {
            Gecko_GetFontMetrics(
                device.pres_context(),
                wm.is_vertical() && !wm.is_sideways(),
                font.gecko(),
                font_size.0,
                // we don't use the user font set in a media query
                !in_media_query,
            )
        };
        let metrics = FontMetrics {
            x_height: Au(gecko_metrics.mXSize),
            zero_advance_measure: Au(gecko_metrics.mChSize),
        };
        FontMetricsQueryResult::Available(metrics)
    }
}
impl structs::FontSizePrefs {
    /// Maps a Gecko generic-font id to the corresponding default size stored
    /// in these prefs. Panics (unreachable!) on ids not covered below, which
    /// callers are expected never to pass.
    fn size_for_generic(&self, font_family: u8) -> Au {
        Au(match font_family {
            structs::kPresContext_DefaultVariableFont_ID => self.mDefaultVariableSize,
            structs::kPresContext_DefaultFixedFont_ID => self.mDefaultFixedSize,
            structs::kGenericFont_serif => self.mDefaultSerifSize,
            structs::kGenericFont_sans_serif => self.mDefaultSansSerifSize,
            structs::kGenericFont_monospace => self.mDefaultMonospaceSize,
            structs::kGenericFont_cursive => self.mDefaultCursiveSize,
            structs::kGenericFont_fantasy => self.mDefaultFantasySize,
            x => unreachable!("Unknown generic ID {}", x),
        })
    }
}
impl<'le> TElement for GeckoElement<'le> {
    type ConcreteNode = GeckoNode<'le>;
    type FontMetricsProvider = GeckoFontMetricsProvider;

    /// The element we inherit computed style from: the closest non-native-
    /// anonymous ancestor for NAC, otherwise the flattened-tree parent element.
    fn inheritance_parent(&self) -> Option<Self> {
        if self.is_native_anonymous() {
            self.closest_non_native_anonymous_ancestor()
        } else {
            self.as_node()
                .flattened_tree_parent()
                .and_then(|n| n.as_element())
        }
    }

    fn before_pseudo_element(&self) -> Option<Self> {
        self.get_before_or_after_pseudo(/* is_before = */ true)
    }

    fn after_pseudo_element(&self) -> Option<Self> {
        self.get_before_or_after_pseudo(/* is_before = */ false)
    }

    /// Execute `f` for each anonymous content child element (apart from
    /// ::before and ::after) whose originating element is `self`.
    fn each_anonymous_content_child<F>(&self, mut f: F)
    where
        F: FnMut(Self),
    {
        // Gecko allocates the list; we must destroy it below once done.
        let array: *mut structs::nsTArray<*mut nsIContent> =
            unsafe { bindings::Gecko_GetAnonymousContentForElement(self.0) };
        if array.is_null() {
            return;
        }
        for content in unsafe { &**array } {
            let node = GeckoNode::from_content(unsafe { &**content });
            let element = match node.as_element() {
                Some(e) => e,
                None => continue,
            };
            f(element);
        }
        unsafe { bindings::Gecko_DestroyAnonymousContentList(array) };
    }

    /// Walks traversal parents until the first ancestor that is not native-
    /// anonymous content. Only valid to call on NAC itself.
    fn closest_non_native_anonymous_ancestor(&self) -> Option<Self> {
        debug_assert!(self.is_native_anonymous());
        let mut parent = match self.traversal_parent() {
            Some(e) => e,
            None => return None,
        };
        loop {
            if !parent.is_native_anonymous() {
                return Some(parent);
            }
            parent = match parent.traversal_parent() {
                Some(p) => p,
                None => return None,
            };
        }
    }

    fn as_node(&self) -> Self::ConcreteNode {
        // SAFETY-style note: reinterprets the element pointer as its base
        // node type, mirroring the Gecko class layout.
        unsafe { GeckoNode(&*(self.0 as *const _ as *const RawGeckoNode)) }
    }

    // Compares our owner document against the device's pres-context document
    // by pointer; used by debug assertions only.
    fn owner_doc_matches_for_testing(&self, device: &Device) -> bool {
        self.as_node().owner_doc() as *const structs::nsIDocument ==
            device.pres_context().mDocument.raw::<structs::nsIDocument>()
    }

    /// The element's `style` attribute declaration block, if any. The
    /// `may_have_style_attribute` check avoids the FFI call in the common case.
    fn style_attribute(&self) -> Option<ArcBorrow<Locked<PropertyDeclarationBlock>>> {
        if !self.may_have_style_attribute() {
            return None;
        }
        let declarations = unsafe { Gecko_GetStyleAttrDeclarationBlock(self.0) };
        let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
            declarations.and_then(|s| s.as_arc_opt());
        declarations.map(|s| s.borrow_arc())
    }

    fn unset_dirty_style_attribute(&self) {
        if !self.may_have_style_attribute() {
            return;
        }
        unsafe { Gecko_UnsetDirtyStyleAttr(self.0) };
    }

    /// The SMIL override declaration block, if the element has one.
    fn get_smil_override(&self) -> Option<ArcBorrow<Locked<PropertyDeclarationBlock>>> {
        let declarations = unsafe { Gecko_GetSMILOverrideDeclarationBlock(self.0) };
        let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
            declarations.and_then(|s| s.as_arc_opt());
        declarations.map(|s| s.borrow_arc())
    }

    // Dispatches to the animation or transition rule; any other cascade level
    // is a caller bug.
    fn get_animation_rule_by_cascade(&self, cascade_level: ServoCascadeLevel)
                                     -> Option<Arc<Locked<PropertyDeclarationBlock>>> {
        match cascade_level {
            ServoCascadeLevel::Animations => self.get_animation_rule(),
            ServoCascadeLevel::Transitions => self.get_transition_rule(),
            _ => panic!("Unsupported cascade level for getting the animation rule")
        }
    }

    fn get_animation_rule(
        &self,
    ) -> Option<Arc<Locked<PropertyDeclarationBlock>>> {
        get_animation_rule(self, CascadeLevel::Animations)
    }

    fn get_transition_rule(
        &self,
    ) -> Option<Arc<Locked<PropertyDeclarationBlock>>> {
        get_animation_rule(self, CascadeLevel::Transitions)
    }

    fn get_state(&self) -> ElementState {
        // Unknown Gecko state bits are silently dropped (truncate).
        ElementState::from_bits_truncate(self.get_state_internal())
    }

    #[inline]
    fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool {
        unsafe {
            bindings::Gecko_HasAttr(self.0, namespace.0.as_ptr(), attr.as_ptr())
        }
    }

    /// The element's id atom, if it has an `id` attribute with a value.
    fn get_id(&self) -> Option<Atom> {
        if !self.has_id() {
            return None;
        }
        let ptr = unsafe {
            bindings::Gecko_AtomAttrValue(self.0, atom!("id").as_ptr())
        };
        if ptr.is_null() {
            None
        } else {
            Some(Atom::from(ptr))
        }
    }

    fn each_class<F>(&self, callback: F)
    where
        F: FnMut(&Atom),
    {
        snapshot_helpers::each_class(self.0, callback, Gecko_ClassOrClassList)
    }

    // --- Restyle bookkeeping flags, stored as bits on the Gecko node. ---

    #[inline]
    fn has_snapshot(&self) -> bool {
        self.flags() & (ELEMENT_HAS_SNAPSHOT as u32) != 0
    }

    #[inline]
    fn handled_snapshot(&self) -> bool {
        self.flags() & (ELEMENT_HANDLED_SNAPSHOT as u32) != 0
    }

    unsafe fn set_handled_snapshot(&self) {
        debug_assert!(self.get_data().is_some());
        self.set_flags(ELEMENT_HANDLED_SNAPSHOT as u32)
    }

    #[inline]
    fn has_dirty_descendants(&self) -> bool {
        self.flags() & (ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
    }

    unsafe fn set_dirty_descendants(&self) {
        debug_assert!(self.get_data().is_some());
        debug!("Setting dirty descendants: {:?}", self);
        self.set_flags(ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
    }

    unsafe fn unset_dirty_descendants(&self) {
        self.unset_flags(ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
    }

    #[inline]
    fn has_animation_only_dirty_descendants(&self) -> bool {
        self.flags() & (ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
    }

    unsafe fn set_animation_only_dirty_descendants(&self) {
        self.set_flags(ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO as u32)
    }

    unsafe fn unset_animation_only_dirty_descendants(&self) {
        self.unset_flags(ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO as u32)
    }

    // Clears the descendant-related dirty bits only.
    unsafe fn clear_descendant_bits(&self) {
        self.unset_flags(ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32 |
                         ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO as u32 |
                         NODE_DESCENDANTS_NEED_FRAMES as u32)
    }

    // Clears descendant bits plus the element's own needs-frame bit.
    #[inline]
    unsafe fn clear_dirty_bits(&self) {
        self.unset_flags(ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32 |
                         ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO as u32 |
                         NODE_DESCENDANTS_NEED_FRAMES as u32 |
                         NODE_NEEDS_FRAME as u32)
    }

    fn is_visited_link(&self) -> bool {
        use element_state::IN_VISITED_STATE;
        self.get_state().intersects(IN_VISITED_STATE)
    }

    #[inline]
    fn is_native_anonymous(&self) -> bool {
        use gecko_bindings::structs::NODE_IS_NATIVE_ANONYMOUS;
        self.flags() & (NODE_IS_NATIVE_ANONYMOUS as u32) != 0
    }

    /// If this element is the NAC implementation of a pseudo-element
    /// (e.g. ::before), returns that pseudo-element.
    fn implemented_pseudo_element(&self) -> Option<PseudoElement> {
        if !self.is_native_anonymous() {
            return None;
        }
        if !self.has_properties() {
            return None;
        }
        let pseudo_type =
            unsafe { bindings::Gecko_GetImplementedPseudo(self.0) };
        PseudoElement::from_pseudo_type(pseudo_type)
    }

    fn store_children_to_process(&self, _: isize) {
        // This is only used for bottom-up traversal, and is thus a no-op for Gecko.
    }

    fn did_process_child(&self) -> isize {
        panic!("Atomic child count not implemented in Gecko");
    }

    #[inline(always)]
    fn get_data(&self) -> Option<&AtomicRefCell<ElementData>> {
        unsafe { self.0.mServoData.get().as_ref() }
    }

    /// Lazily allocates the per-element style data, then returns a mutable
    /// borrow of it.
    unsafe fn ensure_data(&self) -> AtomicRefMut<ElementData> {
        if self.get_data().is_none() {
            debug!("Creating ElementData for {:?}", self);
            let ptr = Box::into_raw(Box::new(AtomicRefCell::new(ElementData::default())));
            self.0.mServoData.set(ptr);
        }
        self.mutate_data().unwrap()
    }

    /// Drops the per-element style data (if any) and clears the Servo-owned
    /// restyle bits, regardless of whether data was present.
    unsafe fn clear_data(&self) {
        let ptr = self.0.mServoData.get();
        unsafe {
            self.unset_flags(ELEMENT_HAS_SNAPSHOT as u32 |
                             ELEMENT_HANDLED_SNAPSHOT as u32 |
                             structs::Element_kAllServoDescendantBits |
                             NODE_NEEDS_FRAME as u32);
        }
        if !ptr.is_null() {
            debug!("Dropping ElementData for {:?}", self);
            // Reconstitute the Box so the data is dropped here.
            let data = unsafe { Box::from_raw(self.0.mServoData.get()) };
            self.0.mServoData.set(ptr::null_mut());
            // Perform a mutable borrow of the data in debug builds. This
            // serves as an assertion that there are no outstanding borrows
            // when we destroy the data.
            debug_assert!({ let _ = data.borrow_mut(); true });
        }
    }

    #[inline]
    fn skip_root_and_item_based_display_fixup(&self) -> bool {
        // Only NAC may skip the fixup; pseudo-elements decide for themselves.
        if !self.is_native_anonymous() {
            return false;
        }
        if let Some(p) = self.implemented_pseudo_element() {
            return p.skip_item_based_display_fixup();
        }
        self.is_root_of_native_anonymous_subtree()
    }

    unsafe fn set_selector_flags(&self, flags: ElementSelectorFlags) {
        debug_assert!(!flags.is_empty());
        self.set_flags(selector_flags_to_node_flags(flags));
    }

    // True only if *all* the requested flags are set on the node.
    fn has_selector_flags(&self, flags: ElementSelectorFlags) -> bool {
        let node_flags = selector_flags_to_node_flags(flags);
        (self.flags() & node_flags) == node_flags
    }

    /// Whether the element may have animations. For ::before/::after pseudos
    /// the answer is delegated to the originating (parent) element's flag.
    #[inline]
    fn may_have_animations(&self) -> bool {
        if let Some(pseudo) = self.implemented_pseudo_element() {
            if !pseudo.is_before_or_after() {
                return false;
            }
            return self.parent_element()
                       .map_or(false, |p| {
                           p.as_node()
                            .get_bool_flag(nsINode_BooleanFlag::ElementHasAnimations)
                       });
        }
        self.as_node().get_bool_flag(nsINode_BooleanFlag::ElementHasAnimations)
    }

    /// Process various tasks that are a result of animation-only restyle.
    fn process_post_animation(&self,
                              tasks: PostAnimationTasks) {
        use context::DISPLAY_CHANGED_FROM_NONE_FOR_SMIL;
        use gecko_bindings::structs::nsChangeHint_nsChangeHint_Empty;
        use gecko_bindings::structs::nsRestyleHint_eRestyle_Subtree;
        debug_assert!(!tasks.is_empty(), "Should be involved a task");
        // If display style was changed from none to other, we need to resolve
        // the descendants in the display:none subtree. Instead of resolving
        // those styles in animation-only restyle, we defer it to a subsequent
        // normal restyle.
        if tasks.intersects(DISPLAY_CHANGED_FROM_NONE_FOR_SMIL) {
            debug_assert!(self.implemented_pseudo_element()
                              .map_or(true, |p| !p.is_before_or_after()),
                          "display property animation shouldn't run on pseudo elements \
                           since it's only for SMIL");
            self.note_explicit_hints(nsRestyleHint_eRestyle_Subtree,
                                     nsChangeHint_nsChangeHint_Empty);
        }
    }

    /// Update various animation-related state on a given (pseudo-)element as
    /// results of normal restyle.
    fn update_animations(&self,
                         before_change_style: Option<Arc<ComputedValues>>,
                         tasks: UpdateAnimationsTasks) {
        // We have to update animations even if the element has no computed
        // style since it means the element is in a display:none subtree, we
        // should destroy all CSS animations in display:none subtree.
        let computed_data = self.borrow_data();
        let computed_values =
            computed_data.as_ref().map(|d| d.styles.primary());
        let before_change_values =
            before_change_style.as_ref().map(|x| &**x);
        let computed_values_opt = computed_values.as_ref().map(|x| &***x);
        unsafe {
            Gecko_UpdateAnimations(self.0,
                                   before_change_values,
                                   computed_values_opt,
                                   tasks.bits());
        }
    }

    // The `may_have_animations` pre-check avoids the FFI call when possible.
    fn has_animations(&self) -> bool {
        self.may_have_animations() && unsafe { Gecko_ElementHasAnimations(self.0) }
    }

    fn has_css_animations(&self) -> bool {
        self.may_have_animations() && unsafe { Gecko_ElementHasCSSAnimations(self.0) }
    }

    fn has_css_transitions(&self) -> bool {
        self.may_have_animations() && unsafe { Gecko_ElementHasCSSTransitions(self.0) }
    }

    /// Visits each XBL stylist along the binding scope chain. Returns true
    /// iff the walk was cut off before running out of scopes.
    fn each_xbl_stylist<F>(&self, mut f: F) -> bool
    where
        F: FnMut(&Stylist),
    {
        // Walk the binding scope chain, starting with the binding attached to
        // our content, up till we run out of scopes or we get cut off.
        //
        // If we are a NAC pseudo-element, we want to get rules from our
        // rule_hash_target, that is, our originating element.
        let mut current = Some(self.rule_hash_target());
        while let Some(element) = current {
            if let Some(binding) = element.get_xbl_binding() {
                binding.each_xbl_stylist(&mut f);
                // If we're not looking at our original element, allow the
                // binding to cut off style inheritance.
                if element != *self {
                    if !binding.inherits_style() {
                        // Go no further; we're not inheriting style from
                        // anything above here.
                        break;
                    }
                }
            }
            if element.is_root_of_native_anonymous_subtree() {
                // Deliberately cut off style inheritance here.
                break;
            }
            current = element.get_xbl_binding_parent();
        }
        // If current has something, this means we cut off inheritance at some
        // point in the loop.
        current.is_some()
    }

    fn xbl_binding_anonymous_content(&self) -> Option<GeckoNode<'le>> {
        self.get_xbl_binding_with_content()
            .map(|b| unsafe { b.anon_content().as_ref() }.unwrap())
            .map(GeckoNode::from_content)
    }

    /// Snapshot of the element's currently-running CSS transitions: a map
    /// from transitioned longhand to its end value.
    fn get_css_transitions_info(
        &self,
    ) -> FnvHashMap<LonghandId, Arc<AnimationValue>> {
        use gecko_bindings::bindings::Gecko_ElementTransitions_EndValueAt;
        use gecko_bindings::bindings::Gecko_ElementTransitions_Length;
        use gecko_bindings::bindings::Gecko_ElementTransitions_PropertyAt;
        let collection_length =
            unsafe { Gecko_ElementTransitions_Length(self.0) } as usize;
        let mut map = FnvHashMap::with_capacity_and_hasher(
            collection_length,
            Default::default()
        );
        for i in 0..collection_length {
            let property = unsafe {
                Gecko_ElementTransitions_PropertyAt(self.0, i as usize)
            };
            let property = LonghandId::from_nscsspropertyid(property)
                .expect("Only longhands should be in the element transitions");
            let raw_end_value = unsafe {
                Gecko_ElementTransitions_EndValueAt(self.0, i)
            };
            let end_value = AnimationValue::arc_from_borrowed(&raw_end_value);
            debug_assert!(end_value.is_some());
            map.insert(property, end_value.unwrap().clone_arc());
        }
        map
    }

    /// Cheap pre-filter for transition updates; `needs_transitions_update`
    /// below is the thorough check.
    fn might_need_transitions_update(
        &self,
        old_values: Option<&ComputedValues>,
        new_values: &ComputedValues,
    ) -> bool {
        use properties::longhands::display::computed_value as display;
        let old_values = match old_values {
            Some(v) => v,
            None => return false,
        };
        let new_box_style = new_values.get_box();
        // No transition is (or could be) running: nothing active, exactly one
        // declared property, and its combined duration is non-positive.
        let transition_not_running = !self.has_css_transitions() &&
                                     new_box_style.transition_property_count() == 1 &&
                                     new_box_style.transition_combined_duration_at(0) <= 0.0f32;
        let new_display_style = new_box_style.clone_display();
        let old_display_style = old_values.get_box().clone_display();
        // display:none on either side means no transitions.
        new_box_style.transition_property_count() > 0 &&
            !transition_not_running &&
            (new_display_style != display::T::none &&
             old_display_style != display::T::none)
    }

    // Detect if there are any changes that require us to update transitions.
    // This is used as a more thoroughgoing check than the, cheaper
    // might_need_transitions_update check.
    //
    // The following logic shadows the logic used on the Gecko side
    // (nsTransitionManager::DoUpdateTransitions) where we actually perform the
    // update.
    //
    // https://drafts.csswg.org/css-transitions/#starting
    fn needs_transitions_update(
        &self,
        before_change_style: &ComputedValues,
        after_change_style: &ComputedValues
    ) -> bool {
        use gecko_bindings::structs::nsCSSPropertyID;
        use properties::LonghandIdSet;
        debug_assert!(self.might_need_transitions_update(Some(before_change_style),
                                                         after_change_style),
                      "We should only call needs_transitions_update if \
                       might_need_transitions_update returns true");
        let after_change_box_style = after_change_style.get_box();
        let transitions_count = after_change_box_style.transition_property_count();
        let existing_transitions = self.get_css_transitions_info();
        // Check if this property is none, custom or unknown.
        let is_none_or_custom_property = |property: nsCSSPropertyID| -> bool {
            return property == nsCSSPropertyID::eCSSPropertyExtra_no_properties ||
                   property == nsCSSPropertyID::eCSSPropertyExtra_variable ||
                   property == nsCSSPropertyID::eCSSProperty_UNKNOWN;
        };
        let mut transitions_to_keep = LonghandIdSet::new();
        for i in 0..transitions_count {
            let property = after_change_box_style.transition_nscsspropertyid_at(i);
            let combined_duration = after_change_box_style.transition_combined_duration_at(i);
            // We don't need to update transition for none/custom properties.
            if is_none_or_custom_property(property) {
                continue;
            }
            let transition_property: TransitionProperty = property.into();
            // Records the longhand in `transitions_to_keep` and checks whether
            // it needs a per-property update; shared by all variants below.
            let mut property_check_helper = |property: &LonghandId| -> bool {
                transitions_to_keep.insert(*property);
                self.needs_transitions_update_per_property(
                    property,
                    combined_duration,
                    before_change_style,
                    after_change_style,
                    &existing_transitions
                )
            };
            match transition_property {
                TransitionProperty::All => {
                    if TransitionProperty::any(property_check_helper) {
                        return true;
                    }
                },
                TransitionProperty::Unsupported(..) => {},
                TransitionProperty::Shorthand(ref shorthand) => {
                    if shorthand.longhands().iter().any(property_check_helper) {
                        return true;
                    }
                },
                TransitionProperty::Longhand(ref longhand_id) => {
                    if property_check_helper(longhand_id) {
                        return true;
                    }
                },
            }
        }
        // Check if we have to cancel the running transition because this is not
        // a matching transition-property value.
        existing_transitions.keys().any(|property| {
            !transitions_to_keep.contains(*property)
        })
    }

    /// Per-longhand half of `needs_transitions_update`: decides whether the
    /// transition for `longhand_id` must be (re)started.
    fn needs_transitions_update_per_property(
        &self,
        longhand_id: &LonghandId,
        combined_duration: f32,
        before_change_style: &ComputedValues,
        after_change_style: &ComputedValues,
        existing_transitions: &FnvHashMap<LonghandId, Arc<AnimationValue>>,
    ) -> bool {
        use values::animated::{Animate, Procedure};
        // If there is an existing transition, update only if the end value
        // differs.
        //
        // If the end value has not changed, we should leave the currently
        // running transition as-is since we don't want to interrupt its timing
        // function.
        if let Some(ref existing) = existing_transitions.get(longhand_id) {
            let after_value =
                AnimationValue::from_computed_values(
                    longhand_id,
                    after_change_style
                ).unwrap();
            return ***existing != after_value
        }
        let from = AnimationValue::from_computed_values(
            &longhand_id,
            before_change_style,
        );
        let to = AnimationValue::from_computed_values(
            &longhand_id,
            after_change_style,
        );
        debug_assert_eq!(to.is_some(), from.is_some());
        // Start a transition only for a positive duration, a real change in
        // value, and a pair of values that can actually be interpolated.
        combined_duration > 0.0f32 &&
            from != to &&
            from.unwrap().animate(
                to.as_ref().unwrap(),
                Procedure::Interpolate { progress: 0.5 }
            ).is_ok()
    }

    /// The element's `lang` attribute value as an atom, if present.
    #[inline]
    fn lang_attr(&self) -> Option<AttrValue> {
        let ptr = unsafe { bindings::Gecko_LangValue(self.0) };
        if ptr.is_null() {
            None
        } else {
            // Gecko hands us an addrefed pointer; take ownership of the ref.
            Some(unsafe { Atom::from_addrefed(ptr) })
        }
    }

    fn match_element_lang(
        &self,
        override_lang: Option<Option<AttrValue>>,
        value: &PseudoClassStringArg
    ) -> bool {
        // Gecko supports :lang() from CSS Selectors 3, which only accepts a
        // single language tag, and which performs simple dash-prefix matching
        // on it.
        debug_assert!(value.len() > 0 && value[value.len() - 1] == 0,
                      "expected value to be null terminated");
        let override_lang_ptr = match &override_lang {
            &Some(Some(ref atom)) => atom.as_ptr(),
            _ => ptr::null_mut(),
        };
        unsafe {
            Gecko_MatchLang(self.0, override_lang_ptr, override_lang.is_some(), value.as_ptr())
        }
    }

    /// True when this is the <body> element of an HTML document (checked on
    /// the Gecko side).
    fn is_html_document_body_element(&self) -> bool {
        if self.get_local_name() != &*local_name!("body") {
            return false;
        }
        if !self.is_html_element() {
            return false;
        }
        unsafe { bindings::Gecko_IsDocumentBody(self.0) }
    }
}
impl<'le> PartialEq for GeckoElement<'le> {
fn eq(&self, other: &Self) -> bool {
self.0 as *const _ == other.0 as *const _
}
}
// Pointer identity is reflexive, so the `PartialEq` impl above satisfies the
// full `Eq` contract.
impl<'le> Eq for GeckoElement<'le> {}
impl<'le> Hash for GeckoElement<'le> {
    /// Hashes the wrapped element's address, keeping the hash consistent with
    /// the pointer-identity `PartialEq` impl.
    fn hash<H: Hasher>(&self, state: &mut H) {
        let address = self.0 as *const _ as usize;
        address.hash(state);
    }
}
impl<'le> PresentationalHintsSynthesizer for GeckoElement<'le> {
    /// Pushes onto `hints` every presentational-hint declaration block that
    /// applies to this element: fixed per-tag rules (<th>, quirks-mode
    /// <table>, SVG <text>, MathML <math>), Gecko's HTML presentation
    /// attribute mappings, extra content style declarations, body link-color
    /// attributes, and xml:lang. Push order matters for cascade precedence
    /// (see the lang-related comments below).
    fn synthesize_presentational_hints_for_legacy_attributes<V>(
        &self,
        visited_handling: VisitedHandlingMode,
        hints: &mut V
    )
    where
        V: Push<ApplicableDeclarationBlock>,
    {
        use properties::longhands::_x_lang::SpecifiedValue as SpecifiedLang;
        use properties::longhands::_x_text_zoom::SpecifiedValue as SpecifiedZoom;
        use properties::longhands::color::SpecifiedValue as SpecifiedColor;
        use properties::longhands::text_align::SpecifiedValue as SpecifiedTextAlign;
        use values::specified::color::Color;
        // Shared, lazily-built singleton rules for the fixed per-tag hints.
        lazy_static! {
            static ref TH_RULE: ApplicableDeclarationBlock = {
                let global_style_data = &*GLOBAL_STYLE_DATA;
                let pdb = PropertyDeclarationBlock::with_one(
                    PropertyDeclaration::TextAlign(SpecifiedTextAlign::MozCenterOrInherit),
                    Importance::Normal
                );
                let arc = Arc::new(global_style_data.shared_lock.wrap(pdb));
                ApplicableDeclarationBlock::from_declarations(arc, ServoCascadeLevel::PresHints)
            };
            static ref TABLE_COLOR_RULE: ApplicableDeclarationBlock = {
                let global_style_data = &*GLOBAL_STYLE_DATA;
                let pdb = PropertyDeclarationBlock::with_one(
                    PropertyDeclaration::Color(SpecifiedColor(Color::InheritFromBodyQuirk.into())),
                    Importance::Normal
                );
                let arc = Arc::new(global_style_data.shared_lock.wrap(pdb));
                ApplicableDeclarationBlock::from_declarations(arc, ServoCascadeLevel::PresHints)
            };
            static ref MATHML_LANG_RULE: ApplicableDeclarationBlock = {
                let global_style_data = &*GLOBAL_STYLE_DATA;
                let pdb = PropertyDeclarationBlock::with_one(
                    PropertyDeclaration::XLang(SpecifiedLang(atom!("x-math"))),
                    Importance::Normal
                );
                let arc = Arc::new(global_style_data.shared_lock.wrap(pdb));
                ApplicableDeclarationBlock::from_declarations(arc, ServoCascadeLevel::PresHints)
            };
            static ref SVG_TEXT_DISABLE_ZOOM_RULE: ApplicableDeclarationBlock = {
                let global_style_data = &*GLOBAL_STYLE_DATA;
                let pdb = PropertyDeclarationBlock::with_one(
                    PropertyDeclaration::XTextZoom(SpecifiedZoom(false)),
                    Importance::Normal
                );
                let arc = Arc::new(global_style_data.shared_lock.wrap(pdb));
                ApplicableDeclarationBlock::from_declarations(arc, ServoCascadeLevel::PresHints)
            };
        };
        let ns = self.namespace_id();
        // <th> elements get a default MozCenterOrInherit which may get overridden
        if ns == structs::kNameSpaceID_XHTML as i32 {
            if self.get_local_name().as_ptr() == atom!("th").as_ptr() {
                hints.push(TH_RULE.clone());
            } else if self.get_local_name().as_ptr() == atom!("table").as_ptr() &&
                      self.as_node().owner_doc().mCompatMode == structs::nsCompatibility::eCompatibility_NavQuirks {
                // Quirks-mode <table>s inherit color from <body>.
                hints.push(TABLE_COLOR_RULE.clone());
            }
        }
        if ns == structs::kNameSpaceID_SVG as i32 {
            if self.get_local_name().as_ptr() == atom!("text").as_ptr() {
                hints.push(SVG_TEXT_DISABLE_ZOOM_RULE.clone());
            }
        }
        // Declarations mapped from HTML presentation attributes (width,
        // bgcolor, ...).
        let declarations = unsafe { Gecko_GetHTMLPresentationAttrDeclarationBlock(self.0) };
        let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
            declarations.and_then(|s| s.as_arc_opt());
        if let Some(decl) = declarations {
            hints.push(
                ApplicableDeclarationBlock::from_declarations(decl.clone_arc(), ServoCascadeLevel::PresHints)
            );
        }
        let declarations = unsafe { Gecko_GetExtraContentStyleDeclarations(self.0) };
        let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
            declarations.and_then(|s| s.as_arc_opt());
        if let Some(decl) = declarations {
            hints.push(
                ApplicableDeclarationBlock::from_declarations(decl.clone_arc(), ServoCascadeLevel::PresHints)
            );
        }
        // Support for link, vlink, and alink presentation hints on <body>
        if self.is_link() {
            // Unvisited vs. visited styles are computed up-front based on the
            // visited mode (not the element's actual state).
            let declarations = match visited_handling {
                VisitedHandlingMode::AllLinksVisitedAndUnvisited => {
                    unreachable!("We should never try to selector match with \
                                 AllLinksVisitedAndUnvisited");
                },
                VisitedHandlingMode::AllLinksUnvisited => unsafe {
                    Gecko_GetUnvisitedLinkAttrDeclarationBlock(self.0)
                },
                VisitedHandlingMode::RelevantLinkVisited => unsafe {
                    Gecko_GetVisitedLinkAttrDeclarationBlock(self.0)
                },
            };
            let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
                declarations.and_then(|s| s.as_arc_opt());
            if let Some(decl) = declarations {
                hints.push(
                    ApplicableDeclarationBlock::from_declarations(decl.clone_arc(), ServoCascadeLevel::PresHints)
                );
            }
            // The alink color only applies while the link is active.
            let active = self.get_state().intersects(NonTSPseudoClass::Active.state_flag());
            if active {
                let declarations = unsafe { Gecko_GetActiveLinkAttrDeclarationBlock(self.0) };
                let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
                    declarations.and_then(|s| s.as_arc_opt());
                if let Some(decl) = declarations {
                    hints.push(
                        ApplicableDeclarationBlock::from_declarations(decl.clone_arc(), ServoCascadeLevel::PresHints)
                    );
                }
            }
        }
        // xml:lang has precedence over lang, which can be
        // set by Gecko_GetHTMLPresentationAttrDeclarationBlock
        //
        // http://www.whatwg.org/specs/web-apps/current-work/multipage/elements.html#language
        let ptr = unsafe {
            bindings::Gecko_GetXMLLangValue(self.0)
        };
        if !ptr.is_null() {
            let global_style_data = &*GLOBAL_STYLE_DATA;
            let pdb = PropertyDeclarationBlock::with_one(
                PropertyDeclaration::XLang(SpecifiedLang(unsafe { Atom::from_addrefed(ptr) })),
                Importance::Normal
            );
            let arc = Arc::new(global_style_data.shared_lock.wrap(pdb));
            hints.push(ApplicableDeclarationBlock::from_declarations(arc, ServoCascadeLevel::PresHints))
        }
        // MathML's default lang has precedence over both `lang` and `xml:lang`
        if ns == structs::kNameSpaceID_MathML as i32 {
            if self.get_local_name().as_ptr() == atom!("math").as_ptr() {
                hints.push(MATHML_LANG_RULE.clone());
            }
        }
    }
}
impl<'le> ::selectors::Element for GeckoElement<'le> {
type Impl = SelectorImpl;
fn opaque(&self) -> OpaqueElement {
OpaqueElement::new(self.0)
}
fn parent_element(&self) -> Option<Self> {
// FIXME(emilio): This will need to jump across if the parent node is a
// shadow root to get the shadow host.
let parent_node = self.as_node().parent_node();
parent_node.and_then(|n| n.as_element())
}
fn pseudo_element_originating_element(&self) -> Option<Self> {
debug_assert!(self.implemented_pseudo_element().is_some());
self.closest_non_native_anonymous_ancestor()
}
fn first_child_element(&self) -> Option<Self> {
let mut child = self.as_node().first_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.next_sibling();
}
None
}
fn last_child_element(&self) -> Option<Self> {
let mut child = self.as_node().last_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.prev_sibling();
}
None
}
fn prev_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().prev_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.prev_sibling();
}
None
}
fn next_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().next_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.next_sibling();
}
None
}
fn attr_matches(
&self,
ns: &NamespaceConstraint<&Namespace>,
local_name: &Atom,
operation: &AttrSelectorOperation<&Atom>
) -> bool {
unsafe {
match *operation {
AttrSelectorOperation::Exists => {
bindings::Gecko_HasAttr(self.0,
ns.atom_or_null(),
local_name.as_ptr())
}
AttrSelectorOperation::WithValue { operator, case_sensitivity, expected_value } => {
let ignore_case = match case_sensitivity {
CaseSensitivity::CaseSensitive => false,
CaseSensitivity::AsciiCaseInsensitive => true,
};
// FIXME: case sensitivity for operators other than Equal
match operator {
AttrSelectorOperator::Equal => bindings::Gecko_AttrEquals(
self.0,
ns.atom_or_null(),
local_name.as_ptr(),
expected_value.as_ptr(),
ignore_case
),
AttrSelectorOperator::Includes => bindings::Gecko_AttrIncludes(
self.0,
ns.atom_or_null(),
local_name.as_ptr(),
expected_value.as_ptr(),
ignore_case,
),
AttrSelectorOperator::DashMatch => bindings::Gecko_AttrDashEquals(
self.0,
ns.atom_or_null(),
local_name.as_ptr(),
expected_value.as_ptr(),
ignore_case,
),
AttrSelectorOperator::Prefix => bindings::Gecko_AttrHasPrefix(
self.0,
ns.atom_or_null(),
local_name.as_ptr(),
expected_value.as_ptr(),
ignore_case,
),
AttrSelectorOperator::Suffix => bindings::Gecko_AttrHasSuffix(
self.0,
ns.atom_or_null(),
local_name.as_ptr(),
expected_value.as_ptr(),
ignore_case,
),
AttrSelectorOperator::Substring => bindings::Gecko_AttrHasSubstring(
self.0,
ns.atom_or_null(),
local_name.as_ptr(),
expected_value.as_ptr(),
ignore_case,
),
}
}
}
}
}
fn is_root(&self) -> bool {
unsafe {
Gecko_IsRootElement(self.0)
}
}
fn is_empty(&self) -> bool {
!self.as_node().dom_children().any(|child| unsafe {
Gecko_IsSignificantChild(child.0, true, true)
})
}
fn get_local_name(&self) -> &WeakAtom {
unsafe {
WeakAtom::new(self.as_node().node_info().mInner.mName)
}
}
fn get_namespace(&self) -> &WeakNamespace {
unsafe {
WeakNamespace::new(Gecko_Namespace(self.0))
}
}
fn match_non_ts_pseudo_class<F>(
&self,
pseudo_class: &NonTSPseudoClass,
context: &mut LocalMatchingContext<Self::Impl>,
relevant_link: &RelevantLinkStatus,
flags_setter: &mut F,
) -> bool
where
F: FnMut(&Self, ElementSelectorFlags),
{
use selectors::matching::*;
match *pseudo_class {
NonTSPseudoClass::Focus |
NonTSPseudoClass::Enabled |
NonTSPseudoClass::Disabled |
NonTSPseudoClass::Checked |
NonTSPseudoClass::Fullscreen |
NonTSPseudoClass::MozFullScreen |
NonTSPseudoClass::Indeterminate |
NonTSPseudoClass::PlaceholderShown |
NonTSPseudoClass::Target |
NonTSPseudoClass::Valid |
NonTSPseudoClass::Invalid |
NonTSPseudoClass::MozUIValid |
NonTSPseudoClass::MozBroken |
NonTSPseudoClass::MozUserDisabled |
NonTSPseudoClass::MozSuppressed |
NonTSPseudoClass::MozLoading |
NonTSPseudoClass::MozHandlerBlocked |
NonTSPseudoClass::MozHandlerDisabled |
NonTSPseudoClass::MozHandlerCrashed |
NonTSPseudoClass::Required |
NonTSPseudoClass::Optional |
NonTSPseudoClass::MozReadOnly |
NonTSPseudoClass::MozReadWrite |
NonTSPseudoClass::Unresolved |
NonTSPseudoClass::FocusWithin |
NonTSPseudoClass::MozDragOver |
NonTSPseudoClass::MozDevtoolsHighlighted |
NonTSPseudoClass::MozStyleeditorTransitioning |
NonTSPseudoClass::MozFocusRing |
NonTSPseudoClass::MozHandlerClickToPlay |
NonTSPseudoClass::MozHandlerVulnerableUpdatable |
NonTSPseudoClass::MozHandlerVulnerableNoUpdate |
NonTSPseudoClass::MozMathIncrementScriptLevel |
NonTSPseudoClass::InRange |
NonTSPseudoClass::OutOfRange |
NonTSPseudoClass::Default |
NonTSPseudoClass::MozSubmitInvalid |
NonTSPseudoClass::MozUIInvalid |
NonTSPseudoClass::MozMeterOptimum |
NonTSPseudoClass::MozMeterSubOptimum |
NonTSPseudoClass::MozMeterSubSubOptimum |
NonTSPseudoClass::MozHasDirAttr |
NonTSPseudoClass::MozDirAttrLTR |
NonTSPseudoClass::MozDirAttrRTL |
NonTSPseudoClass::MozDirAttrLikeAuto |
NonTSPseudoClass::MozAutofill |
NonTSPseudoClass::MozAutofillPreview => {
self.get_state().intersects(pseudo_class.state_flag())
},
NonTSPseudoClass::AnyLink => self.is_link(),
NonTSPseudoClass::Link => relevant_link.is_unvisited(self, context.shared),
NonTSPseudoClass::Visited => relevant_link.is_visited(self, context.shared),
NonTSPseudoClass::Active |
NonTSPseudoClass::Hover => {
if context.active_hover_quirk_matches() && !self.is_link() {
false
} else {
self.get_state().contains(pseudo_class.state_flag())
}
},
NonTSPseudoClass::MozFirstNode => {
flags_setter(self, HAS_EDGE_CHILD_SELECTOR);
let mut elem = self.as_node();
while let Some(prev) = elem.prev_sibling() {
if prev.contains_non_whitespace_content() {
return false
}
elem = prev;
}
true
}
NonTSPseudoClass::MozLastNode => {
flags_setter(self, HAS_EDGE_CHILD_SELECTOR);
let mut elem = self.as_node();
while let Some(next) = elem.next_sibling() {
if next.contains_non_whitespace_content() {
return false
}
elem = next;
}
true
}
NonTSPseudoClass::MozOnlyWhitespace => {
flags_setter(self, HAS_EMPTY_SELECTOR);
if self.as_node().dom_children().any(|c| c.contains_non_whitespace_content()) {
return false
}
true
}
NonTSPseudoClass::MozTableBorderNonzero |
NonTSPseudoClass::MozBrowserFrame |
NonTSPseudoClass::MozNativeAnonymous |
NonTSPseudoClass::MozUseShadowTreeRoot => unsafe {
Gecko_MatchesElement(pseudo_class.to_gecko_pseudoclasstype().unwrap(), self.0)
},
NonTSPseudoClass::MozIsHTML => {
self.is_html_element_in_html_document()
}
NonTSPseudoClass::MozLWTheme => {
self.get_document_theme() != DocumentTheme::Doc_Theme_None
}
NonTSPseudoClass::MozLWThemeBrightText => {
self.get_document_theme() == DocumentTheme::Doc_Theme_Bright
}
NonTSPseudoClass::MozLWThemeDarkText => {
self.get_document_theme() == DocumentTheme::Doc_Theme_Dark
}
NonTSPseudoClass::MozWindowInactive => {
self.document_state().contains(NS_DOCUMENT_STATE_WINDOW_INACTIVE)
}
NonTSPseudoClass::MozPlaceholder => false,
NonTSPseudoClass::MozAny(ref sels) => {
context.nesting_level += 1;
let result = sels.iter().any(|s| {
matches_complex_selector(s.iter(), self, context, flags_setter)
});
context.nesting_level -= 1;
result
}
NonTSPseudoClass::Lang(ref lang_arg) => {
self.match_element_lang(None, lang_arg)
}
NonTSPseudoClass::MozSystemMetric(ref s) |
NonTSPseudoClass::MozLocaleDir(ref s) |
NonTSPseudoClass::Dir(ref s) => {
unsafe {
Gecko_MatchStringArgPseudo(
self.0,
pseudo_class.to_gecko_pseudoclasstype().unwrap(),
s.as_ptr(),
)
}
}
}
}
fn match_pseudo_element(
&self,
pseudo_element: &PseudoElement,
_context: &mut MatchingContext
) -> bool {
// TODO(emilio): I believe we could assert we are a pseudo-element and
// match the proper pseudo-element, given how we rulehash the stuff
// based on the pseudo.
match self.implemented_pseudo_element() {
Some(ref pseudo) => *pseudo == pseudo_element.canonical(),
None => false,
}
}
    /// An element is a link iff its element state carries the bit that
    /// `:any-link` maps to (visited or unvisited link state).
    #[inline]
    fn is_link(&self) -> bool {
        self.get_state().intersects(NonTSPseudoClass::AnyLink.state_flag())
    }
fn has_id(&self, id: &Atom, case_sensitivity: CaseSensitivity) -> bool {
if !self.has_id() {
return false
}
unsafe {
let ptr = bindings::Gecko_AtomAttrValue(self.0, atom!("id").as_ptr());
if ptr.is_null() {
false
} else {
case_sensitivity.eq_atom(WeakAtom::new(ptr), id)
}
}
}
fn has_class(&self, name: &Atom, case_sensitivity: CaseSensitivity) -> bool {
if !self.may_have_class() {
return false;
}
snapshot_helpers::has_class(self.0,
name,
case_sensitivity,
Gecko_ClassOrClassList)
}
    /// True when this is an (X)HTML-namespace element whose owner document is
    /// an HTML (not XML/XHTML) document; used for case-insensitivity quirks.
    fn is_html_element_in_html_document(&self) -> bool {
        self.is_html_element() &&
        self.as_node().owner_doc().mType == structs::root::nsIDocument_Type::eHTML
    }
    /// Roots of anonymous subtrees are excluded from :nth-child-style indexing.
    fn ignores_nth_child_selectors(&self) -> bool {
        self.is_root_of_anonymous_subtree()
    }
fn blocks_ancestor_combinators(&self) -> bool {
if !self.is_root_of_anonymous_subtree() {
return false
}
match self.parent_element() {
Some(e) => {
// If this element is the shadow root of an use-element shadow
// tree, according to the spec, we should not match rules
// cross the shadow DOM boundary.
e.get_local_name() == &*local_name!("use") &&
e.get_namespace() == &*ns!("http://www.w3.org/2000/svg")
},
None => false,
}
}
}
/// A few helpers to help with attribute selectors and snapshotting.
pub trait NamespaceConstraintHelpers {
    /// Returns the namespace atom of the constraint, or a null pointer when
    /// the constraint matches any namespace.
    fn atom_or_null(&self) -> *mut nsIAtom;
}
impl<'a> NamespaceConstraintHelpers for NamespaceConstraint<&'a Namespace> {
    /// `Specific` yields the raw namespace atom; `Any` is represented on the
    /// Gecko side as a null atom pointer.
    fn atom_or_null(&self) -> *mut nsIAtom {
        if let NamespaceConstraint::Specific(ref ns) = *self {
            ns.0.as_ptr()
        } else {
            ptr::null_mut()
        }
    }
}
impl<'le> ElementExt for GeckoElement<'le> {
    /// Native-anonymous content only matches UA rules, so user/author rules
    /// apply exactly when the element is outside any such subtree.
    #[inline]
    fn matches_user_and_author_rules(&self) -> bool {
        !self.is_in_native_anonymous_subtree()
    }
}
// style: Simplify it even more.
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! Wrapper definitions on top of Gecko types in order to be used in the style
//! system.
//!
//! This really follows the Servo pattern in
//! `components/script/layout_wrapper.rs`.
//!
//! This theoretically should live in its own crate, but now it lives in the
//! style system it's kind of pointless in the Stylo case, and only Servo forces
//! the separation between the style system implementation and everything else.
use CaseSensitivityExt;
use app_units::Au;
use applicable_declarations::ApplicableDeclarationBlock;
use atomic_refcell::{AtomicRefCell, AtomicRefMut};
use context::{QuirksMode, SharedStyleContext, PostAnimationTasks, UpdateAnimationsTasks};
use data::ElementData;
use dom::{LayoutIterator, NodeInfo, TElement, TNode};
use dom::{OpaqueNode, PresentationalHintsSynthesizer};
use element_state::{ElementState, DocumentState, NS_DOCUMENT_STATE_WINDOW_INACTIVE};
use error_reporting::ParseErrorReporter;
use font_metrics::{FontMetrics, FontMetricsProvider, FontMetricsQueryResult};
use gecko::data::PerDocumentStyleData;
use gecko::global_style_data::GLOBAL_STYLE_DATA;
use gecko::selector_parser::{SelectorImpl, NonTSPseudoClass, PseudoElement};
use gecko::snapshot_helpers;
use gecko_bindings::bindings;
use gecko_bindings::bindings::{Gecko_ConstructStyleChildrenIterator, Gecko_DestroyStyleChildrenIterator};
use gecko_bindings::bindings::{Gecko_DocumentState, Gecko_ElementState, Gecko_GetDocumentLWTheme};
use gecko_bindings::bindings::{Gecko_GetLastChild, Gecko_GetNextStyleChild};
use gecko_bindings::bindings::{Gecko_IsRootElement, Gecko_MatchesElement, Gecko_Namespace};
use gecko_bindings::bindings::{Gecko_SetNodeFlags, Gecko_UnsetNodeFlags};
use gecko_bindings::bindings::Gecko_ClassOrClassList;
use gecko_bindings::bindings::Gecko_ElementHasAnimations;
use gecko_bindings::bindings::Gecko_ElementHasCSSAnimations;
use gecko_bindings::bindings::Gecko_ElementHasCSSTransitions;
use gecko_bindings::bindings::Gecko_GetActiveLinkAttrDeclarationBlock;
use gecko_bindings::bindings::Gecko_GetAnimationRule;
use gecko_bindings::bindings::Gecko_GetExtraContentStyleDeclarations;
use gecko_bindings::bindings::Gecko_GetHTMLPresentationAttrDeclarationBlock;
use gecko_bindings::bindings::Gecko_GetSMILOverrideDeclarationBlock;
use gecko_bindings::bindings::Gecko_GetStyleAttrDeclarationBlock;
use gecko_bindings::bindings::Gecko_GetUnvisitedLinkAttrDeclarationBlock;
use gecko_bindings::bindings::Gecko_GetVisitedLinkAttrDeclarationBlock;
use gecko_bindings::bindings::Gecko_IsSignificantChild;
use gecko_bindings::bindings::Gecko_MatchLang;
use gecko_bindings::bindings::Gecko_MatchStringArgPseudo;
use gecko_bindings::bindings::Gecko_UnsetDirtyStyleAttr;
use gecko_bindings::bindings::Gecko_UpdateAnimations;
use gecko_bindings::structs;
use gecko_bindings::structs::{RawGeckoElement, RawGeckoNode, RawGeckoXBLBinding};
use gecko_bindings::structs::{nsIAtom, nsIContent, nsINode_BooleanFlag};
use gecko_bindings::structs::ELEMENT_HANDLED_SNAPSHOT;
use gecko_bindings::structs::ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO;
use gecko_bindings::structs::ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO;
use gecko_bindings::structs::ELEMENT_HAS_SNAPSHOT;
use gecko_bindings::structs::EffectCompositor_CascadeLevel as CascadeLevel;
use gecko_bindings::structs::NODE_DESCENDANTS_NEED_FRAMES;
use gecko_bindings::structs::NODE_NEEDS_FRAME;
use gecko_bindings::structs::nsChangeHint;
use gecko_bindings::structs::nsIDocument_DocumentTheme as DocumentTheme;
use gecko_bindings::structs::nsRestyleHint;
use gecko_bindings::sugar::ownership::{HasArcFFI, HasSimpleFFI};
use hash::FnvHashMap;
use logical_geometry::WritingMode;
use media_queries::Device;
use properties::{ComputedValues, LonghandId, parse_style_attribute};
use properties::{Importance, PropertyDeclaration, PropertyDeclarationBlock};
use properties::animated_properties::{AnimationValue, AnimationValueMap};
use properties::animated_properties::TransitionProperty;
use properties::style_structs::Font;
use rule_tree::CascadeLevel as ServoCascadeLevel;
use selector_parser::{AttrValue, ElementExt, PseudoClassStringArg};
use selectors::{Element, OpaqueElement};
use selectors::attr::{AttrSelectorOperation, AttrSelectorOperator, CaseSensitivity, NamespaceConstraint};
use selectors::matching::{ElementSelectorFlags, LocalMatchingContext, MatchingContext};
use selectors::matching::{RelevantLinkStatus, VisitedHandlingMode};
use selectors::sink::Push;
use servo_arc::{Arc, ArcBorrow, RawOffsetArc};
use shared_lock::Locked;
use std::cell::RefCell;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::DerefMut;
use std::ptr;
use string_cache::{Atom, Namespace, WeakAtom, WeakNamespace};
use stylesheets::UrlExtraData;
use stylist::Stylist;
/// A simple wrapper over a non-null Gecko node (`nsINode`) pointer.
///
/// Important: We don't currently refcount the DOM, because the wrapper lifetime
/// magic guarantees that our LayoutFoo references won't outlive the root, and
/// we don't mutate any of the references on the Gecko side during restyle.
///
/// We could implement refcounting if need be (at a potentially non-trivial
/// performance cost) by implementing Drop and making LayoutFoo non-Copy.
// Copy is deliberate: these are plain borrowed pointers, freely duplicated.
#[derive(Clone, Copy)]
pub struct GeckoNode<'ln>(pub &'ln RawGeckoNode);
impl<'ln> fmt::Debug for GeckoNode<'ln> {
    /// Elements delegate to the element formatter; other nodes print their
    /// kind (text vs. non-text) plus the opaque pointer for identification.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(el) = self.as_element() {
            el.fmt(f)
        } else if self.is_text_node() {
            // Collapsed the former `else { if … }` nesting into `else if`.
            write!(f, "<text node> ({:#x})", self.opaque().0)
        } else {
            write!(f, "<non-text node> ({:#x})", self.opaque().0)
        }
    }
}
impl<'ln> GeckoNode<'ln> {
    /// Upcast an `nsIContent` reference to its `nsINode` base.
    #[inline]
    fn from_content(content: &'ln nsIContent) -> Self {
        GeckoNode(&content._base)
    }
    /// Raw nsINode flag bits (NODE_* constants) for this node.
    #[inline]
    fn flags(&self) -> u32 {
        (self.0)._base._base_1.mFlags
    }
    /// The node's NodeInfo; the pointer is asserted non-null in debug builds.
    #[inline]
    fn node_info(&self) -> &structs::NodeInfo {
        debug_assert!(!self.0.mNodeInfo.mRawPtr.is_null());
        unsafe { &*self.0.mNodeInfo.mRawPtr }
    }
    // These live in different locations depending on processor architecture.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    fn bool_flags(&self) -> u32 {
        (self.0)._base._base_1.mBoolFlags
    }
    #[cfg(target_pointer_width = "32")]
    #[inline]
    fn bool_flags(&self) -> u32 {
        (self.0).mBoolFlags
    }
    /// Test a single `nsINode` boolean flag by bit index.
    #[inline]
    fn get_bool_flag(&self, flag: nsINode_BooleanFlag) -> bool {
        self.bool_flags() & (1u32 << flag as u32) != 0
    }
    /// The owning document, from NodeInfo; asserted non-null in debug builds.
    fn owner_doc(&self) -> &structs::nsIDocument {
        debug_assert!(!self.node_info().mDocument.is_null());
        unsafe { &*self.node_info().mDocument }
    }
    #[inline]
    fn first_child(&self) -> Option<GeckoNode<'ln>> {
        unsafe { self.0.mFirstChild.as_ref().map(GeckoNode::from_content) }
    }
    // Last child goes through FFI (it's not a plain member on this struct).
    #[inline]
    fn last_child(&self) -> Option<GeckoNode<'ln>> {
        unsafe { Gecko_GetLastChild(self.0).map(GeckoNode) }
    }
    #[inline]
    fn prev_sibling(&self) -> Option<GeckoNode<'ln>> {
        unsafe { self.0.mPreviousSibling.as_ref().map(GeckoNode::from_content) }
    }
    #[inline]
    fn next_sibling(&self) -> Option<GeckoNode<'ln>> {
        unsafe { self.0.mNextSibling.as_ref().map(GeckoNode::from_content) }
    }
    /// Simple iterator over all this node's children. Unlike `.children()`, this iterator does
    /// not filter out nodes that don't need layout.
    fn dom_children(self) -> GeckoChildrenIterator<'ln> {
        GeckoChildrenIterator::Current(self.first_child())
    }
    /// WARNING: This logic is duplicated in Gecko's FlattenedTreeParentIsParent.
    /// Make sure to mirror any modifications in both places.
    fn flattened_tree_parent_is_parent(&self) -> bool {
        use gecko_bindings::structs::*;
        let flags = self.flags();
        // XBL or shadow-tree participation means the flat-tree parent may differ.
        if flags & (NODE_MAY_BE_IN_BINDING_MNGR as u32 |
                    NODE_IS_IN_SHADOW_TREE as u32) != 0 {
            return false;
        }
        let parent = unsafe { self.0.mParent.as_ref() }.map(GeckoNode);
        let parent_el = parent.and_then(|p| p.as_element());
        // NAC hanging off the root element is parented elsewhere in the flat tree.
        if flags & (NODE_IS_NATIVE_ANONYMOUS_ROOT as u32) != 0 &&
           parent_el.map_or(false, |el| el.is_root())
        {
            return false;
        }
        if parent_el.map_or(false, |el| el.has_shadow_root()) {
            return false;
        }
        true
    }
    /// Flat-tree parent: uses the cheap `mParent` read when the fast-path check
    /// allows it, otherwise asks Gecko. The debug assert keeps the Rust copy of
    /// the fast-path logic in sync with Gecko's.
    fn flattened_tree_parent(&self) -> Option<Self> {
        let fast_path = self.flattened_tree_parent_is_parent();
        debug_assert!(fast_path == unsafe { bindings::Gecko_FlattenedTreeParentIsParent(self.0) });
        if fast_path {
            unsafe { self.0.mParent.as_ref().map(GeckoNode) }
        } else {
            unsafe { bindings::Gecko_GetFlattenedTreeParentNode(self.0).map(GeckoNode) }
        }
    }
    // Used by the :-moz-first-node / :-moz-last-node / :-moz-only-whitespace
    // matching above; delegates the significance check to Gecko.
    fn contains_non_whitespace_content(&self) -> bool {
        unsafe { Gecko_IsSignificantChild(self.0, true, false) }
    }
    #[inline]
    fn may_have_anonymous_children(&self) -> bool {
        self.get_bool_flag(nsINode_BooleanFlag::ElementMayHaveAnonymousChildren)
    }
}
impl<'ln> NodeInfo for GeckoNode<'ln> {
    /// Cheap element check via the node's boolean flags.
    #[inline]
    fn is_element(&self) -> bool {
        self.get_bool_flag(nsINode_BooleanFlag::NodeIsElement)
    }
    fn is_text_node(&self) -> bool {
        // This is a DOM constant that isn't going to change.
        const TEXT_NODE: u16 = 3;
        self.node_info().mInner.mNodeType == TEXT_NODE
    }
}
impl<'ln> TNode for GeckoNode<'ln> {
    type ConcreteElement = GeckoElement<'ln>;
    type ConcreteChildrenIterator = GeckoChildrenIterator<'ln>;
    /// Light-tree parent (raw `mParent`), as opposed to `traversal_parent`.
    fn parent_node(&self) -> Option<Self> {
        unsafe { self.0.mParent.as_ref().map(GeckoNode) }
    }
    fn children(&self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
        LayoutIterator(self.dom_children())
    }
    /// Parent for style traversal: the flat-tree parent element, if any.
    fn traversal_parent(&self) -> Option<GeckoElement<'ln>> {
        self.flattened_tree_parent().and_then(|n| n.as_element())
    }
    fn traversal_children(&self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
        if let Some(element) = self.as_element() {
            // This condition is similar to the check that
            // StyleChildrenIterator::IsNeeded does, except that it might return
            // true if we used to (but no longer) have anonymous content from
            // ::before/::after, XBL bindings, or nsIAnonymousContentCreators.
            if element.is_in_anonymous_subtree() ||
               element.has_xbl_binding_with_content() ||
               self.may_have_anonymous_children() {
                unsafe {
                    // Heavier-weight Gecko iterator; the zeroed struct is
                    // initialized in place by the constructor call.
                    let mut iter: structs::StyleChildrenIterator = ::std::mem::zeroed();
                    Gecko_ConstructStyleChildrenIterator(element.0, &mut iter);
                    return LayoutIterator(GeckoChildrenIterator::GeckoIterator(iter));
                }
            }
        }
        LayoutIterator(self.dom_children())
    }
    /// Identify the node by its address; stable for the node's lifetime.
    fn opaque(&self) -> OpaqueNode {
        let ptr: usize = self.0 as *const _ as usize;
        OpaqueNode(ptr)
    }
    fn debug_id(self) -> usize {
        unimplemented!()
    }
    /// Downcast to an element wrapper when the element bool flag is set.
    #[inline]
    fn as_element(&self) -> Option<GeckoElement<'ln>> {
        if self.is_element() {
            unsafe { Some(GeckoElement(&*(self.0 as *const _ as *const RawGeckoElement))) }
        } else {
            None
        }
    }
    fn can_be_fragmented(&self) -> bool {
        // FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
        // Maybe this isn’t useful for Gecko?
        false
    }
    unsafe fn set_can_be_fragmented(&self, _value: bool) {
        // FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
        // Maybe this isn’t useful for Gecko?
    }
    fn is_in_doc(&self) -> bool {
        unsafe { bindings::Gecko_IsInDocument(self.0) }
    }
}
/// A wrapper on top of two kind of iterators, depending on the parent being
/// iterated.
///
/// We generally iterate children by traversing the light-tree siblings of the
/// first child like Servo does.
///
/// However, for nodes with anonymous children, we use a custom (heavier-weight)
/// Gecko-implemented iterator.
///
/// FIXME(emilio): If we take into account shadow DOM, we're going to need the
/// flat tree pretty much always. We can try to optimize the case where there's
/// no shadow root sibling, probably.
pub enum GeckoChildrenIterator<'a> {
    /// A simple iterator that tracks the current node being iterated and
    /// replaces it with the next sibling when requested.
    Current(Option<GeckoNode<'a>>),
    /// A Gecko-implemented iterator we need to drop appropriately.
    GeckoIterator(structs::StyleChildrenIterator),
}
impl<'a> Drop for GeckoChildrenIterator<'a> {
    /// Tear down the Gecko-side iterator when we own one; the plain
    /// sibling-walking variant needs no cleanup.
    fn drop(&mut self) {
        match *self {
            GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
                Gecko_DestroyStyleChildrenIterator(it);
            },
            GeckoChildrenIterator::Current(..) => {},
        }
    }
}
impl<'a> Iterator for GeckoChildrenIterator<'a> {
    type Item = GeckoNode<'a>;
    fn next(&mut self) -> Option<GeckoNode<'a>> {
        match *self {
            // Light-tree walk: yield the current node, advance to its sibling.
            GeckoChildrenIterator::Current(curr) => {
                let next = curr.and_then(|node| node.next_sibling());
                *self = GeckoChildrenIterator::Current(next);
                curr
            },
            GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
                // We do this unsafe lengthening of the lifetime here because
                // structs::StyleChildrenIterator is actually StyleChildrenIterator<'a>,
                // however we can't express this easily with bindgen, and it would
                // introduce functions with two input lifetimes into bindgen,
                // which would be out of scope for elision.
                Gecko_GetNextStyleChild(&mut * (it as *mut _)).map(GeckoNode)
            }
        }
    }
}
/// A Simple wrapper over a non-null Gecko `nsXBLBinding` pointer.
// Borrowed, non-owning pointer — Copy is intentional (see GeckoNode above).
#[derive(Clone, Copy)]
pub struct GeckoXBLBinding<'lb>(pub &'lb RawGeckoXBLBinding);
impl<'lb> GeckoXBLBinding<'lb> {
    /// The next binding in the inheritance chain (`mNextBinding`), if any.
    fn base_binding(&self) -> Option<Self> {
        unsafe { self.0.mNextBinding.mRawPtr.as_ref().map(GeckoXBLBinding) }
    }
    /// Raw pointer to this binding's anonymous content, possibly null.
    fn anon_content(&self) -> *const nsIContent {
        unsafe { self.0.mContent.raw::<nsIContent>() }
    }
    fn inherits_style(&self) -> bool {
        unsafe { bindings::Gecko_XBLBinding_InheritsStyle(self.0) }
    }
    // This duplicates the logic in Gecko's
    // nsBindingManager::GetBindingWithContent.
    fn get_binding_with_content(&self) -> Option<Self> {
        // Walk the base-binding chain until one carries anonymous content.
        let mut binding = *self;
        loop {
            if !binding.anon_content().is_null() {
                return Some(binding);
            }
            binding = match binding.base_binding() {
                Some(b) => b,
                None => return None,
            };
        }
    }
    /// Visit the stylist of every binding in the chain, base-most first
    /// (recursion happens before this binding's own style set is visited).
    fn each_xbl_stylist<F>(self, f: &mut F)
    where
        F: FnMut(&Stylist),
    {
        if let Some(base) = self.base_binding() {
            base.each_xbl_stylist(f);
        }
        let raw_data = unsafe {
            bindings::Gecko_XBLBinding_GetRawServoStyleSet(self.0)
        };
        if let Some(raw_data) = raw_data {
            let data = PerDocumentStyleData::from_ffi(&*raw_data).borrow();
            f(&data.stylist);
        }
    }
}
/// A simple wrapper over a non-null Gecko `Element` pointer.
// Borrowed, non-owning pointer — Copy is intentional (see GeckoNode above).
#[derive(Clone, Copy)]
pub struct GeckoElement<'le>(pub &'le RawGeckoElement);
impl<'le> fmt::Debug for GeckoElement<'le> {
    /// Formats like `<name id=foo class="a b"> (0xdeadbeef)`, emitting the
    /// id and class parts only when present.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "<{}", self.get_local_name())?;
        if let Some(id) = self.get_id() {
            write!(f, " id={}", id)?;
        }
        // One flag does double duty: it tells us both whether to open the
        // class attribute (on the first class) and whether to close it later.
        let mut seen_class = false;
        self.each_class(|c| {
            if seen_class {
                let _ = f.write_str(" ");
            } else {
                seen_class = true;
                let _ = f.write_str(" class=\"");
            }
            let _ = write!(f, "{}", c);
        });
        if seen_class {
            f.write_str("\"")?;
        }
        write!(f, "> ({:#x})", self.as_node().opaque().0)
    }
}
impl<'le> GeckoElement<'le> {
/// Parse the style attribute of an element.
pub fn parse_style_attribute<R>(
value: &str,
url_data: &UrlExtraData,
quirks_mode: QuirksMode,
reporter: &R,
) -> PropertyDeclarationBlock
where
R: ParseErrorReporter,
{
parse_style_attribute(value, url_data, reporter, quirks_mode)
}
fn flags(&self) -> u32 {
self.raw_node()._base._base_1.mFlags
}
fn raw_node(&self) -> &RawGeckoNode {
&(self.0)._base._base._base
}
// FIXME: We can implement this without OOL calls, but we can't easily given
// GeckoNode is a raw reference.
//
// We can use a Cell<T>, but that's a bit of a pain.
fn set_flags(&self, flags: u32) {
unsafe { Gecko_SetNodeFlags(self.as_node().0, flags) }
}
fn unset_flags(&self, flags: u32) {
unsafe { Gecko_UnsetNodeFlags(self.as_node().0, flags) }
}
/// Returns true if this element has descendants for lazy frame construction.
pub fn descendants_need_frames(&self) -> bool {
self.flags() & (NODE_DESCENDANTS_NEED_FRAMES as u32) != 0
}
/// Returns true if this element needs lazy frame construction.
pub fn needs_frame(&self) -> bool {
self.flags() & (NODE_NEEDS_FRAME as u32) != 0
}
/// Returns true if this element has a shadow root.
fn has_shadow_root(&self) -> bool {
self.get_extended_slots()
.map_or(false, |slots| !slots.mShadowRoot.mRawPtr.is_null())
}
/// Returns a reference to the DOM slots for this Element, if they exist.
fn get_dom_slots(&self) -> Option<&structs::FragmentOrElement_nsDOMSlots> {
let slots = self.as_node().0.mSlots as *const structs::FragmentOrElement_nsDOMSlots;
unsafe { slots.as_ref() }
}
/// Returns a reference to the extended DOM slots for this Element.
fn get_extended_slots(
&self,
) -> Option<&structs::FragmentOrElement_nsExtendedDOMSlots> {
self.get_dom_slots()
.and_then(|s| unsafe { s.mExtendedSlots.mPtr.as_ref() })
}
#[inline]
fn get_xbl_binding(&self) -> Option<GeckoXBLBinding> {
if self.flags() & (structs::NODE_MAY_BE_IN_BINDING_MNGR as u32) == 0 {
return None;
}
unsafe { bindings::Gecko_GetXBLBinding(self.0).map(GeckoXBLBinding) }
}
#[inline]
fn get_xbl_binding_with_content(&self) -> Option<GeckoXBLBinding> {
self.get_xbl_binding()
.and_then(|b| b.get_binding_with_content())
}
#[inline]
fn has_xbl_binding_with_content(&self) -> bool {
!self.get_xbl_binding_with_content().is_none()
}
/// This and has_xbl_binding_parent duplicate the logic in Gecko's virtual
/// nsINode::GetBindingParent function, which only has two implementations:
/// one for XUL elements, and one for other elements. We just hard code in
/// our knowledge of those two implementations here.
fn get_xbl_binding_parent(&self) -> Option<Self> {
if self.is_xul_element() {
// FIXME(heycam): Having trouble with bindgen on nsXULElement,
// where the binding parent is stored in a member variable
// rather than in slots. So just get it through FFI for now.
unsafe {
bindings::Gecko_GetBindingParent(self.0).map(GeckoElement)
}
} else {
let binding_parent = unsafe {
self.get_non_xul_xbl_binding_parent_raw_content().as_ref()
}.map(GeckoNode::from_content)
.and_then(|n| n.as_element());
debug_assert!(binding_parent == unsafe { bindings::Gecko_GetBindingParent(self.0).map(GeckoElement) });
binding_parent
}
}
fn get_non_xul_xbl_binding_parent_raw_content(&self) -> *mut nsIContent {
debug_assert!(!self.is_xul_element());
self.get_extended_slots()
.map_or(ptr::null_mut(), |slots| slots.mBindingParent)
}
fn has_xbl_binding_parent(&self) -> bool {
if self.is_xul_element() {
// FIXME(heycam): Having trouble with bindgen on nsXULElement,
// where the binding parent is stored in a member variable
// rather than in slots. So just get it through FFI for now.
unsafe { bindings::Gecko_GetBindingParent(self.0).is_some() }
} else {
!self.get_non_xul_xbl_binding_parent_raw_content().is_null()
}
}
fn namespace_id(&self) -> i32 {
self.as_node().node_info().mInner.mNamespaceID
}
fn is_html_element(&self) -> bool {
self.namespace_id() == (structs::root::kNameSpaceID_XHTML as i32)
}
fn is_xul_element(&self) -> bool {
self.namespace_id() == (structs::root::kNameSpaceID_XUL as i32)
}
/// Sets the specified element data, return any existing data.
///
/// Like `ensure_data`, only safe to call with exclusive access to the
/// element.
pub unsafe fn set_data(&self, replace_data: Option<ElementData>) -> Option<ElementData> {
match (self.get_data(), replace_data) {
(Some(old), Some(replace_data)) => {
Some(mem::replace(old.borrow_mut().deref_mut(), replace_data))
}
(Some(old), None) => {
let old_data = mem::replace(old.borrow_mut().deref_mut(), ElementData::default());
self.0.mServoData.set(ptr::null_mut());
Some(old_data)
}
(None, Some(replace_data)) => {
let ptr = Box::into_raw(Box::new(AtomicRefCell::new(replace_data)));
self.0.mServoData.set(ptr);
None
}
(None, None) => None,
}
}
#[inline]
fn has_id(&self) -> bool {
self.as_node().get_bool_flag(nsINode_BooleanFlag::ElementHasID)
}
#[inline]
fn get_state_internal(&self) -> u64 {
if !self.as_node().get_bool_flag(nsINode_BooleanFlag::ElementHasLockedStyleStates) {
return self.0.mState.mStates;
}
unsafe { Gecko_ElementState(self.0) }
}
fn document_state(&self) -> DocumentState {
let node = self.as_node();
unsafe {
let states = Gecko_DocumentState(node.owner_doc());
DocumentState::from_bits_truncate(states)
}
}
#[inline]
fn may_have_class(&self) -> bool {
self.as_node()
.get_bool_flag(nsINode_BooleanFlag::ElementMayHaveClass)
}
#[inline]
fn has_properties(&self) -> bool {
use gecko_bindings::structs::NODE_HAS_PROPERTIES;
(self.flags() & NODE_HAS_PROPERTIES as u32) != 0
}
#[inline]
fn get_before_or_after_pseudo(&self, is_before: bool) -> Option<Self> {
if !self.has_properties() {
return None;
}
unsafe { bindings::Gecko_GetBeforeOrAfterPseudo(self.0, is_before).map(GeckoElement) }
}
#[inline]
fn may_have_style_attribute(&self) -> bool {
self.as_node()
.get_bool_flag(nsINode_BooleanFlag::ElementMayHaveStyle)
}
#[inline]
fn get_document_theme(&self) -> DocumentTheme {
let node = self.as_node();
unsafe { Gecko_GetDocumentLWTheme(node.owner_doc()) }
}
/// Owner document quirks mode getter.
pub fn owner_document_quirks_mode(&self) -> QuirksMode {
self.as_node().owner_doc().mCompatMode.into()
}
/// Only safe to call on the main thread, with exclusive access to the element and
/// its ancestors.
/// This function is also called after display property changed for SMIL animation.
///
/// Also this function schedules style flush.
unsafe fn maybe_restyle<'a>(
&self,
data: &'a mut ElementData,
animation_only: bool,
) -> bool {
if !data.has_styles() {
return false;
}
// Propagate the bit up the chain.
if animation_only {
bindings::Gecko_NoteAnimationOnlyDirtyElement(self.0);
} else {
bindings::Gecko_NoteDirtyElement(self.0);
}
// Ensure and return the RestyleData.
true
}
/// Set restyle and change hints to the element data.
pub fn note_explicit_hints(
&self,
restyle_hint: nsRestyleHint,
change_hint: nsChangeHint,
) {
use gecko::restyle_damage::GeckoRestyleDamage;
use invalidation::element::restyle_hints::RestyleHint;
let damage = GeckoRestyleDamage::new(change_hint);
debug!("note_explicit_hints: {:?}, restyle_hint={:?}, change_hint={:?}",
self, restyle_hint, change_hint);
let restyle_hint: RestyleHint = restyle_hint.into();
debug_assert!(!(restyle_hint.has_animation_hint() &&
restyle_hint.has_non_animation_hint()),
"Animation restyle hints should not appear with non-animation restyle hints");
let mut maybe_data = self.mutate_data();
let should_restyle = maybe_data.as_mut().map_or(false, |d| unsafe {
self.maybe_restyle(d, restyle_hint.has_animation_hint())
});
if should_restyle {
maybe_data
.as_mut()
.unwrap()
.hint
.insert(restyle_hint.into());
maybe_data.as_mut().unwrap().damage |= damage;
} else {
debug!("(Element not styled, discarding hints)");
}
}
/// This logic is duplicated in Gecko's nsIContent::IsRootOfAnonymousSubtree.
#[inline]
fn is_root_of_anonymous_subtree(&self) -> bool {
use gecko_bindings::structs::NODE_IS_ANONYMOUS_ROOT;
self.flags() & (NODE_IS_ANONYMOUS_ROOT as u32) != 0
}
/// This logic is duplicated in Gecko's nsIContent::IsRootOfNativeAnonymousSubtree.
#[inline]
fn is_root_of_native_anonymous_subtree(&self) -> bool {
use gecko_bindings::structs::NODE_IS_NATIVE_ANONYMOUS_ROOT;
return self.flags() & (NODE_IS_NATIVE_ANONYMOUS_ROOT as u32) != 0
}
/// This logic is duplicated in Gecko's nsINode::IsInNativeAnonymousSubtree.
#[inline]
fn is_in_native_anonymous_subtree(&self) -> bool {
use gecko_bindings::structs::NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE;
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) != 0
}
/// This logic is duplicate in Gecko's nsIContent::IsInShadowTree().
#[inline]
fn is_in_shadow_tree(&self) -> bool {
use gecko_bindings::structs::NODE_IS_IN_SHADOW_TREE;
self.flags() & (NODE_IS_IN_SHADOW_TREE as u32) != 0
}
/// This logic is duplicated in Gecko's nsIContent::IsInAnonymousSubtree.
#[inline]
fn is_in_anonymous_subtree(&self) -> bool {
self.is_in_native_anonymous_subtree() ||
(!self.is_in_shadow_tree() && self.has_xbl_binding_parent())
}
}
/// Converts flags from the layout used by rust-selectors to the layout used
/// by Gecko. We could align these and then do this without conditionals, but
/// it's probably not worth the trouble.
fn selector_flags_to_node_flags(flags: ElementSelectorFlags) -> u32 {
    use gecko_bindings::structs::*;
    use selectors::matching::*;
    // Table-driven translation: each rust-selectors bit maps to one NODE_* bit.
    let mapping = [
        (HAS_SLOW_SELECTOR, NODE_HAS_SLOW_SELECTOR as u32),
        (HAS_SLOW_SELECTOR_LATER_SIBLINGS, NODE_HAS_SLOW_SELECTOR_LATER_SIBLINGS as u32),
        (HAS_EDGE_CHILD_SELECTOR, NODE_HAS_EDGE_CHILD_SELECTOR as u32),
        (HAS_EMPTY_SELECTOR, NODE_HAS_EMPTY_SELECTOR as u32),
    ];
    let mut gecko_flags = 0u32;
    for &(rust_flag, gecko_flag) in mapping.iter() {
        if flags.contains(rust_flag) {
            gecko_flags |= gecko_flag;
        }
    }
    gecko_flags
}
/// Builds a synthetic declaration block holding the animated property values
/// for `element` at the given cascade level, or `None` when Gecko reports no
/// animation values. The map is filled through an FFI out-parameter.
fn get_animation_rule(
    element: &GeckoElement,
    cascade_level: CascadeLevel,
) -> Option<Arc<Locked<PropertyDeclarationBlock>>> {
    use gecko_bindings::sugar::ownership::HasSimpleFFI;
    // Also, we should try to reuse the PDB, to avoid creating extra rule nodes.
    let mut animation_values = AnimationValueMap::default();
    if unsafe { Gecko_GetAnimationRule(element.0,
                                       cascade_level,
                                       AnimationValueMap::as_ffi_mut(&mut animation_values)) } {
        let shared_lock = &GLOBAL_STYLE_DATA.shared_lock;
        Some(Arc::new(shared_lock.wrap(
            PropertyDeclarationBlock::from_animation_value_map(&animation_values))))
    } else {
        None
    }
}
#[derive(Debug)]
/// Gecko font metrics provider
pub struct GeckoFontMetricsProvider {
    /// Cache of base font sizes for each language
    ///
    /// Usually will have 1 element.
    ///
    // This may be slow on pages using more languages, might be worth optimizing
    // by caching lang->group mapping separately and/or using a hashmap on larger
    // loads.
    pub font_size_cache: RefCell<Vec<(Atom, ::gecko_bindings::structs::FontSizePrefs)>>,
}
impl GeckoFontMetricsProvider {
    /// Construct a provider with an empty (cold) font-size cache.
    pub fn new() -> Self {
        GeckoFontMetricsProvider { font_size_cache: RefCell::default() }
    }
}
impl FontMetricsProvider for GeckoFontMetricsProvider {
    fn create_from(_: &SharedStyleContext) -> GeckoFontMetricsProvider {
        GeckoFontMetricsProvider::new()
    }
    /// Base size for the given generic `font_family` id in language
    /// `font_name`, with a linear-scan cache keyed on the language atom.
    fn get_size(&self, font_name: &Atom, font_family: u8) -> Au {
        use gecko_bindings::bindings::Gecko_GetBaseSize;
        let mut cache = self.font_size_cache.borrow_mut();
        if let Some(sizes) = cache.iter().find(|el| el.0 == *font_name) {
            return sizes.1.size_for_generic(font_family);
        }
        // Cache miss: fetch the whole prefs struct once and remember it.
        let sizes = unsafe { Gecko_GetBaseSize(font_name.as_ptr()) };
        cache.push((font_name.clone(), sizes));
        sizes.size_for_generic(font_family)
    }
    /// Query x-height and ch advance for `font` at `font_size` via Gecko.
    fn query(
        &self,
        font: &Font,
        font_size: Au,
        wm: WritingMode,
        in_media_query: bool,
        device: &Device,
    ) -> FontMetricsQueryResult {
        use gecko_bindings::bindings::Gecko_GetFontMetrics;
        let gecko_metrics = unsafe {
            Gecko_GetFontMetrics(
                device.pres_context(),
                // Vertical metrics apply for vertical-but-not-sideways modes.
                wm.is_vertical() && !wm.is_sideways(),
                font.gecko(),
                font_size.0,
                // we don't use the user font set in a media query
                !in_media_query,
            )
        };
        let metrics = FontMetrics {
            x_height: Au(gecko_metrics.mXSize),
            zero_advance_measure: Au(gecko_metrics.mChSize),
        };
        FontMetricsQueryResult::Available(metrics)
    }
}
impl structs::FontSizePrefs {
    /// Maps a generic font-family id to the corresponding default size field.
    /// Panics on an unknown id, which would indicate a Gecko/Servo mismatch.
    fn size_for_generic(&self, font_family: u8) -> Au {
        Au(match font_family {
            structs::kPresContext_DefaultVariableFont_ID => self.mDefaultVariableSize,
            structs::kPresContext_DefaultFixedFont_ID => self.mDefaultFixedSize,
            structs::kGenericFont_serif => self.mDefaultSerifSize,
            structs::kGenericFont_sans_serif => self.mDefaultSansSerifSize,
            structs::kGenericFont_monospace => self.mDefaultMonospaceSize,
            structs::kGenericFont_cursive => self.mDefaultCursiveSize,
            structs::kGenericFont_fantasy => self.mDefaultFantasySize,
            x => unreachable!("Unknown generic ID {}", x),
        })
    }
}
impl<'le> TElement for GeckoElement<'le> {
type ConcreteNode = GeckoNode<'le>;
type FontMetricsProvider = GeckoFontMetricsProvider;
fn inheritance_parent(&self) -> Option<Self> {
if self.is_native_anonymous() {
self.closest_non_native_anonymous_ancestor()
} else {
self.as_node()
.flattened_tree_parent()
.and_then(|n| n.as_element())
}
}
    /// The generated-content element for this element's ::before, if any.
    fn before_pseudo_element(&self) -> Option<Self> {
        self.get_before_or_after_pseudo(/* is_before = */ true)
    }
    /// The generated-content element for this element's ::after, if any.
    fn after_pseudo_element(&self) -> Option<Self> {
        self.get_before_or_after_pseudo(/* is_before = */ false)
    }
    /// Execute `f` for each anonymous content child element (apart from
    /// ::before and ::after) whose originating element is `self`.
    fn each_anonymous_content_child<F>(&self, mut f: F)
    where
        F: FnMut(Self),
    {
        // Gecko allocates the list; we must destroy it below on every path
        // after this point (there are no early returns past here).
        let array: *mut structs::nsTArray<*mut nsIContent> =
            unsafe { bindings::Gecko_GetAnonymousContentForElement(self.0) };
        if array.is_null() {
            return;
        }
        for content in unsafe { &**array } {
            let node = GeckoNode::from_content(unsafe { &**content });
            let element = match node.as_element() {
                Some(e) => e,
                None => continue,
            };
            f(element);
        }
        unsafe { bindings::Gecko_DestroyAnonymousContentList(array) };
    }
fn closest_non_native_anonymous_ancestor(&self) -> Option<Self> {
debug_assert!(self.is_native_anonymous());
let mut parent = match self.traversal_parent() {
Some(e) => e,
None => return None,
};
loop {
if !parent.is_native_anonymous() {
return Some(parent);
}
parent = match parent.traversal_parent() {
Some(p) => p,
None => return None,
};
}
}
    /// Upcast to the node wrapper; a raw pointer cast, valid because the
    /// element struct embeds the node as its first base.
    fn as_node(&self) -> Self::ConcreteNode {
        unsafe { GeckoNode(&*(self.0 as *const _ as *const RawGeckoNode)) }
    }
    /// Pointer-compares our owner document against the device's document;
    /// used by debug assertions only (per the trait method name).
    fn owner_doc_matches_for_testing(&self, device: &Device) -> bool {
        self.as_node().owner_doc() as *const structs::nsIDocument ==
            device.pres_context().mDocument.raw::<structs::nsIDocument>()
    }
    /// The parsed `style` attribute declaration block, if present; checks the
    /// cheap "may have style" bool flag before calling into Gecko.
    fn style_attribute(&self) -> Option<ArcBorrow<Locked<PropertyDeclarationBlock>>> {
        if !self.may_have_style_attribute() {
            return None;
        }
        let declarations = unsafe { Gecko_GetStyleAttrDeclarationBlock(self.0) };
        let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
            declarations.and_then(|s| s.as_arc_opt());
        declarations.map(|s| s.borrow_arc())
    }
fn unset_dirty_style_attribute(&self) {
if !self.may_have_style_attribute() {
return;
}
unsafe { Gecko_UnsetDirtyStyleAttr(self.0) };
}
fn get_smil_override(&self) -> Option<ArcBorrow<Locked<PropertyDeclarationBlock>>> {
let declarations = unsafe { Gecko_GetSMILOverrideDeclarationBlock(self.0) };
let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
declarations.and_then(|s| s.as_arc_opt());
declarations.map(|s| s.borrow_arc())
}
fn get_animation_rule_by_cascade(&self, cascade_level: ServoCascadeLevel)
-> Option<Arc<Locked<PropertyDeclarationBlock>>> {
match cascade_level {
ServoCascadeLevel::Animations => self.get_animation_rule(),
ServoCascadeLevel::Transitions => self.get_transition_rule(),
_ => panic!("Unsupported cascade level for getting the animation rule")
}
}
fn get_animation_rule(
&self,
) -> Option<Arc<Locked<PropertyDeclarationBlock>>> {
get_animation_rule(self, CascadeLevel::Animations)
}
fn get_transition_rule(
&self,
) -> Option<Arc<Locked<PropertyDeclarationBlock>>> {
get_animation_rule(self, CascadeLevel::Transitions)
}
fn get_state(&self) -> ElementState {
ElementState::from_bits_truncate(self.get_state_internal())
}
#[inline]
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0, namespace.0.as_ptr(), attr.as_ptr())
}
}
fn get_id(&self) -> Option<Atom> {
if !self.has_id() {
return None;
}
let ptr = unsafe {
bindings::Gecko_AtomAttrValue(self.0, atom!("id").as_ptr())
};
if ptr.is_null() {
None
} else {
Some(Atom::from(ptr))
}
}
fn each_class<F>(&self, callback: F)
where
F: FnMut(&Atom),
{
snapshot_helpers::each_class(self.0, callback, Gecko_ClassOrClassList)
}
#[inline]
fn has_snapshot(&self) -> bool {
self.flags() & (ELEMENT_HAS_SNAPSHOT as u32) != 0
}
#[inline]
fn handled_snapshot(&self) -> bool {
self.flags() & (ELEMENT_HANDLED_SNAPSHOT as u32) != 0
}
unsafe fn set_handled_snapshot(&self) {
debug_assert!(self.get_data().is_some());
self.set_flags(ELEMENT_HANDLED_SNAPSHOT as u32)
}
#[inline]
fn has_dirty_descendants(&self) -> bool {
self.flags() & (ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
}
unsafe fn set_dirty_descendants(&self) {
debug_assert!(self.get_data().is_some());
debug!("Setting dirty descendants: {:?}", self);
self.set_flags(ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
unsafe fn unset_dirty_descendants(&self) {
self.unset_flags(ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
#[inline]
fn has_animation_only_dirty_descendants(&self) -> bool {
self.flags() & (ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
}
unsafe fn set_animation_only_dirty_descendants(&self) {
self.set_flags(ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
unsafe fn unset_animation_only_dirty_descendants(&self) {
self.unset_flags(ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
unsafe fn clear_descendant_bits(&self) {
self.unset_flags(ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32 |
ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO as u32 |
NODE_DESCENDANTS_NEED_FRAMES as u32)
}
#[inline]
unsafe fn clear_dirty_bits(&self) {
self.unset_flags(ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32 |
ELEMENT_HAS_ANIMATION_ONLY_DIRTY_DESCENDANTS_FOR_SERVO as u32 |
NODE_DESCENDANTS_NEED_FRAMES as u32 |
NODE_NEEDS_FRAME as u32)
}
fn is_visited_link(&self) -> bool {
use element_state::IN_VISITED_STATE;
self.get_state().intersects(IN_VISITED_STATE)
}
#[inline]
fn is_native_anonymous(&self) -> bool {
use gecko_bindings::structs::NODE_IS_NATIVE_ANONYMOUS;
self.flags() & (NODE_IS_NATIVE_ANONYMOUS as u32) != 0
}
fn implemented_pseudo_element(&self) -> Option<PseudoElement> {
if !self.is_native_anonymous() {
return None;
}
if !self.has_properties() {
return None;
}
let pseudo_type =
unsafe { bindings::Gecko_GetImplementedPseudo(self.0) };
PseudoElement::from_pseudo_type(pseudo_type)
}
fn store_children_to_process(&self, _: isize) {
// This is only used for bottom-up traversal, and is thus a no-op for Gecko.
}
fn did_process_child(&self) -> isize {
panic!("Atomic child count not implemented in Gecko");
}
#[inline(always)]
fn get_data(&self) -> Option<&AtomicRefCell<ElementData>> {
unsafe { self.0.mServoData.get().as_ref() }
}
unsafe fn ensure_data(&self) -> AtomicRefMut<ElementData> {
if self.get_data().is_none() {
debug!("Creating ElementData for {:?}", self);
let ptr = Box::into_raw(Box::new(AtomicRefCell::new(ElementData::default())));
self.0.mServoData.set(ptr);
}
self.mutate_data().unwrap()
}
unsafe fn clear_data(&self) {
let ptr = self.0.mServoData.get();
unsafe {
self.unset_flags(ELEMENT_HAS_SNAPSHOT as u32 |
ELEMENT_HANDLED_SNAPSHOT as u32 |
structs::Element_kAllServoDescendantBits |
NODE_NEEDS_FRAME as u32);
}
if !ptr.is_null() {
debug!("Dropping ElementData for {:?}", self);
let data = unsafe { Box::from_raw(self.0.mServoData.get()) };
self.0.mServoData.set(ptr::null_mut());
// Perform a mutable borrow of the data in debug builds. This
// serves as an assertion that there are no outstanding borrows
// when we destroy the data.
debug_assert!({ let _ = data.borrow_mut(); true });
}
}
#[inline]
fn skip_root_and_item_based_display_fixup(&self) -> bool {
if !self.is_native_anonymous() {
return false;
}
if let Some(p) = self.implemented_pseudo_element() {
return p.skip_item_based_display_fixup();
}
self.is_root_of_native_anonymous_subtree()
}
unsafe fn set_selector_flags(&self, flags: ElementSelectorFlags) {
debug_assert!(!flags.is_empty());
self.set_flags(selector_flags_to_node_flags(flags));
}
fn has_selector_flags(&self, flags: ElementSelectorFlags) -> bool {
let node_flags = selector_flags_to_node_flags(flags);
(self.flags() & node_flags) == node_flags
}
#[inline]
fn may_have_animations(&self) -> bool {
if let Some(pseudo) = self.implemented_pseudo_element() {
if !pseudo.is_before_or_after() {
return false;
}
return self.parent_element()
.map_or(false, |p| {
p.as_node()
.get_bool_flag(nsINode_BooleanFlag::ElementHasAnimations)
});
}
self.as_node().get_bool_flag(nsINode_BooleanFlag::ElementHasAnimations)
}
/// Process various tasks that are a result of animation-only restyle.
fn process_post_animation(&self,
tasks: PostAnimationTasks) {
use context::DISPLAY_CHANGED_FROM_NONE_FOR_SMIL;
use gecko_bindings::structs::nsChangeHint_nsChangeHint_Empty;
use gecko_bindings::structs::nsRestyleHint_eRestyle_Subtree;
debug_assert!(!tasks.is_empty(), "Should be involved a task");
// If display style was changed from none to other, we need to resolve
// the descendants in the display:none subtree. Instead of resolving
// those styles in animation-only restyle, we defer it to a subsequent
// normal restyle.
if tasks.intersects(DISPLAY_CHANGED_FROM_NONE_FOR_SMIL) {
debug_assert!(self.implemented_pseudo_element()
.map_or(true, |p| !p.is_before_or_after()),
"display property animation shouldn't run on pseudo elements \
since it's only for SMIL");
self.note_explicit_hints(nsRestyleHint_eRestyle_Subtree,
nsChangeHint_nsChangeHint_Empty);
}
}
/// Update various animation-related state on a given (pseudo-)element as
/// results of normal restyle.
fn update_animations(&self,
before_change_style: Option<Arc<ComputedValues>>,
tasks: UpdateAnimationsTasks) {
// We have to update animations even if the element has no computed
// style since it means the element is in a display:none subtree, we
// should destroy all CSS animations in display:none subtree.
let computed_data = self.borrow_data();
let computed_values =
computed_data.as_ref().map(|d| d.styles.primary());
let before_change_values =
before_change_style.as_ref().map(|x| &**x);
let computed_values_opt = computed_values.as_ref().map(|x| &***x);
unsafe {
Gecko_UpdateAnimations(self.0,
before_change_values,
computed_values_opt,
tasks.bits());
}
}
fn has_animations(&self) -> bool {
self.may_have_animations() && unsafe { Gecko_ElementHasAnimations(self.0) }
}
fn has_css_animations(&self) -> bool {
self.may_have_animations() && unsafe { Gecko_ElementHasCSSAnimations(self.0) }
}
fn has_css_transitions(&self) -> bool {
self.may_have_animations() && unsafe { Gecko_ElementHasCSSTransitions(self.0) }
}
fn each_xbl_stylist<F>(&self, mut f: F) -> bool
where
F: FnMut(&Stylist),
{
// Walk the binding scope chain, starting with the binding attached to
// our content, up till we run out of scopes or we get cut off.
//
// If we are a NAC pseudo-element, we want to get rules from our
// rule_hash_target, that is, our originating element.
let mut current = Some(self.rule_hash_target());
while let Some(element) = current {
if let Some(binding) = element.get_xbl_binding() {
binding.each_xbl_stylist(&mut f);
// If we're not looking at our original element, allow the
// binding to cut off style inheritance.
if element != *self {
if !binding.inherits_style() {
// Go no further; we're not inheriting style from
// anything above here.
break;
}
}
}
if element.is_root_of_native_anonymous_subtree() {
// Deliberately cut off style inheritance here.
break;
}
current = element.get_xbl_binding_parent();
}
// If current has something, this means we cut off inheritance at some
// point in the loop.
current.is_some()
}
fn xbl_binding_anonymous_content(&self) -> Option<GeckoNode<'le>> {
self.get_xbl_binding_with_content()
.map(|b| unsafe { b.anon_content().as_ref() }.unwrap())
.map(GeckoNode::from_content)
}
fn get_css_transitions_info(
&self,
) -> FnvHashMap<LonghandId, Arc<AnimationValue>> {
use gecko_bindings::bindings::Gecko_ElementTransitions_EndValueAt;
use gecko_bindings::bindings::Gecko_ElementTransitions_Length;
let collection_length =
unsafe { Gecko_ElementTransitions_Length(self.0) } as usize;
let mut map = FnvHashMap::with_capacity_and_hasher(
collection_length,
Default::default()
);
for i in 0..collection_length {
let raw_end_value = unsafe {
Gecko_ElementTransitions_EndValueAt(self.0, i)
};
let end_value = AnimationValue::arc_from_borrowed(&raw_end_value)
.expect("AnimationValue not found in ElementTransitions");
let property = end_value.id();
map.insert(property, end_value.clone_arc());
}
map
}
fn might_need_transitions_update(
&self,
old_values: Option<&ComputedValues>,
new_values: &ComputedValues,
) -> bool {
use properties::longhands::display::computed_value as display;
let old_values = match old_values {
Some(v) => v,
None => return false,
};
let new_box_style = new_values.get_box();
let transition_not_running = !self.has_css_transitions() &&
new_box_style.transition_property_count() == 1 &&
new_box_style.transition_combined_duration_at(0) <= 0.0f32;
let new_display_style = new_box_style.clone_display();
let old_display_style = old_values.get_box().clone_display();
new_box_style.transition_property_count() > 0 &&
!transition_not_running &&
(new_display_style != display::T::none &&
old_display_style != display::T::none)
}
// Detect if there are any changes that require us to update transitions.
// This is used as a more thoroughgoing check than the, cheaper
// might_need_transitions_update check.
//
// The following logic shadows the logic used on the Gecko side
// (nsTransitionManager::DoUpdateTransitions) where we actually perform the
// update.
//
// https://drafts.csswg.org/css-transitions/#starting
fn needs_transitions_update(
&self,
before_change_style: &ComputedValues,
after_change_style: &ComputedValues
) -> bool {
use gecko_bindings::structs::nsCSSPropertyID;
use properties::LonghandIdSet;
debug_assert!(self.might_need_transitions_update(Some(before_change_style),
after_change_style),
"We should only call needs_transitions_update if \
might_need_transitions_update returns true");
let after_change_box_style = after_change_style.get_box();
let transitions_count = after_change_box_style.transition_property_count();
let existing_transitions = self.get_css_transitions_info();
// Check if this property is none, custom or unknown.
let is_none_or_custom_property = |property: nsCSSPropertyID| -> bool {
return property == nsCSSPropertyID::eCSSPropertyExtra_no_properties ||
property == nsCSSPropertyID::eCSSPropertyExtra_variable ||
property == nsCSSPropertyID::eCSSProperty_UNKNOWN;
};
let mut transitions_to_keep = LonghandIdSet::new();
for i in 0..transitions_count {
let property = after_change_box_style.transition_nscsspropertyid_at(i);
let combined_duration = after_change_box_style.transition_combined_duration_at(i);
// We don't need to update transition for none/custom properties.
if is_none_or_custom_property(property) {
continue;
}
let transition_property: TransitionProperty = property.into();
let mut property_check_helper = |property: &LonghandId| -> bool {
transitions_to_keep.insert(*property);
self.needs_transitions_update_per_property(
property,
combined_duration,
before_change_style,
after_change_style,
&existing_transitions
)
};
match transition_property {
TransitionProperty::All => {
if TransitionProperty::any(property_check_helper) {
return true;
}
},
TransitionProperty::Unsupported(..) => {},
TransitionProperty::Shorthand(ref shorthand) => {
if shorthand.longhands().iter().any(property_check_helper) {
return true;
}
},
TransitionProperty::Longhand(ref longhand_id) => {
if property_check_helper(longhand_id) {
return true;
}
},
}
}
// Check if we have to cancel the running transition because this is not
// a matching transition-property value.
existing_transitions.keys().any(|property| {
!transitions_to_keep.contains(*property)
})
}
fn needs_transitions_update_per_property(
&self,
longhand_id: &LonghandId,
combined_duration: f32,
before_change_style: &ComputedValues,
after_change_style: &ComputedValues,
existing_transitions: &FnvHashMap<LonghandId, Arc<AnimationValue>>,
) -> bool {
use values::animated::{Animate, Procedure};
// If there is an existing transition, update only if the end value
// differs.
//
// If the end value has not changed, we should leave the currently
// running transition as-is since we don't want to interrupt its timing
// function.
if let Some(ref existing) = existing_transitions.get(longhand_id) {
let after_value =
AnimationValue::from_computed_values(
longhand_id,
after_change_style
).unwrap();
return ***existing != after_value
}
let from = AnimationValue::from_computed_values(
&longhand_id,
before_change_style,
);
let to = AnimationValue::from_computed_values(
&longhand_id,
after_change_style,
);
debug_assert_eq!(to.is_some(), from.is_some());
combined_duration > 0.0f32 &&
from != to &&
from.unwrap().animate(
to.as_ref().unwrap(),
Procedure::Interpolate { progress: 0.5 }
).is_ok()
}
#[inline]
fn lang_attr(&self) -> Option<AttrValue> {
let ptr = unsafe { bindings::Gecko_LangValue(self.0) };
if ptr.is_null() {
None
} else {
Some(unsafe { Atom::from_addrefed(ptr) })
}
}
fn match_element_lang(
&self,
override_lang: Option<Option<AttrValue>>,
value: &PseudoClassStringArg
) -> bool {
// Gecko supports :lang() from CSS Selectors 3, which only accepts a
// single language tag, and which performs simple dash-prefix matching
// on it.
debug_assert!(value.len() > 0 && value[value.len() - 1] == 0,
"expected value to be null terminated");
let override_lang_ptr = match &override_lang {
&Some(Some(ref atom)) => atom.as_ptr(),
_ => ptr::null_mut(),
};
unsafe {
Gecko_MatchLang(self.0, override_lang_ptr, override_lang.is_some(), value.as_ptr())
}
}
fn is_html_document_body_element(&self) -> bool {
if self.get_local_name() != &*local_name!("body") {
return false;
}
if !self.is_html_element() {
return false;
}
unsafe { bindings::Gecko_IsDocumentBody(self.0) }
}
}
impl<'le> PartialEq for GeckoElement<'le> {
    /// Two `GeckoElement`s are equal exactly when they wrap the same
    /// underlying Gecko element, i.e. their addresses compare equal.
    fn eq(&self, other: &Self) -> bool {
        let lhs: *const _ = self.0;
        let rhs: *const _ = other.0;
        lhs == rhs
    }
}
// Pointer-identity equality is reflexive, so the full `Eq` contract holds.
impl<'le> Eq for GeckoElement<'le> {}
impl<'le> Hash for GeckoElement<'le> {
    /// Hashes the address of the wrapped Gecko element, keeping the hash
    /// consistent with the pointer-identity `PartialEq` implementation.
    fn hash<H: Hasher>(&self, state: &mut H) {
        let address: *const _ = self.0;
        address.hash(state);
    }
}
impl<'le> PresentationalHintsSynthesizer for GeckoElement<'le> {
    /// Pushes into `hints`, at the PresHints cascade level, the declaration
    /// blocks Gecko synthesizes for legacy presentational attributes
    /// (<th>/<table> defaults, SVG text zoom, link colors, lang/xml:lang,
    /// MathML lang). `visited_handling` selects which link-color block is
    /// used for link elements.
    fn synthesize_presentational_hints_for_legacy_attributes<V>(
        &self,
        visited_handling: VisitedHandlingMode,
        hints: &mut V
    )
    where
        V: Push<ApplicableDeclarationBlock>,
    {
        use properties::longhands::_x_lang::SpecifiedValue as SpecifiedLang;
        use properties::longhands::_x_text_zoom::SpecifiedValue as SpecifiedZoom;
        use properties::longhands::color::SpecifiedValue as SpecifiedColor;
        use properties::longhands::text_align::SpecifiedValue as SpecifiedTextAlign;
        use values::specified::color::Color;
        // Shared rules built once; these hints don't depend on per-element
        // attribute values, only on tag/namespace/quirks checks below.
        lazy_static! {
            // <th>: text-align: -moz-center-or-inherit.
            static ref TH_RULE: ApplicableDeclarationBlock = {
                let global_style_data = &*GLOBAL_STYLE_DATA;
                let pdb = PropertyDeclarationBlock::with_one(
                    PropertyDeclaration::TextAlign(SpecifiedTextAlign::MozCenterOrInherit),
                    Importance::Normal
                );
                let arc = Arc::new(global_style_data.shared_lock.wrap(pdb));
                ApplicableDeclarationBlock::from_declarations(arc, ServoCascadeLevel::PresHints)
            };
            // <table> in quirks mode: color inherited from <body>.
            static ref TABLE_COLOR_RULE: ApplicableDeclarationBlock = {
                let global_style_data = &*GLOBAL_STYLE_DATA;
                let pdb = PropertyDeclarationBlock::with_one(
                    PropertyDeclaration::Color(SpecifiedColor(Color::InheritFromBodyQuirk.into())),
                    Importance::Normal
                );
                let arc = Arc::new(global_style_data.shared_lock.wrap(pdb));
                ApplicableDeclarationBlock::from_declarations(arc, ServoCascadeLevel::PresHints)
            };
            // <math>: default -x-lang of x-math.
            static ref MATHML_LANG_RULE: ApplicableDeclarationBlock = {
                let global_style_data = &*GLOBAL_STYLE_DATA;
                let pdb = PropertyDeclarationBlock::with_one(
                    PropertyDeclaration::XLang(SpecifiedLang(atom!("x-math"))),
                    Importance::Normal
                );
                let arc = Arc::new(global_style_data.shared_lock.wrap(pdb));
                ApplicableDeclarationBlock::from_declarations(arc, ServoCascadeLevel::PresHints)
            };
            // SVG <text>: disable text zoom.
            static ref SVG_TEXT_DISABLE_ZOOM_RULE: ApplicableDeclarationBlock = {
                let global_style_data = &*GLOBAL_STYLE_DATA;
                let pdb = PropertyDeclarationBlock::with_one(
                    PropertyDeclaration::XTextZoom(SpecifiedZoom(false)),
                    Importance::Normal
                );
                let arc = Arc::new(global_style_data.shared_lock.wrap(pdb));
                ApplicableDeclarationBlock::from_declarations(arc, ServoCascadeLevel::PresHints)
            };
        };
        let ns = self.namespace_id();
        // <th> elements get a default MozCenterOrInherit which may get overridden
        if ns == structs::kNameSpaceID_XHTML as i32 {
            // Atom pointer comparison is identity comparison for interned
            // atoms, so as_ptr() equality is a tag-name check.
            if self.get_local_name().as_ptr() == atom!("th").as_ptr() {
                hints.push(TH_RULE.clone());
            } else if self.get_local_name().as_ptr() == atom!("table").as_ptr() &&
                      self.as_node().owner_doc().mCompatMode == structs::nsCompatibility::eCompatibility_NavQuirks {
                hints.push(TABLE_COLOR_RULE.clone());
            }
        }
        if ns == structs::kNameSpaceID_SVG as i32 {
            if self.get_local_name().as_ptr() == atom!("text").as_ptr() {
                hints.push(SVG_TEXT_DISABLE_ZOOM_RULE.clone());
            }
        }
        // Hints synthesized from HTML presentational attributes
        // (e.g. align, bgcolor).
        let declarations = unsafe { Gecko_GetHTMLPresentationAttrDeclarationBlock(self.0) };
        let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
            declarations.and_then(|s| s.as_arc_opt());
        if let Some(decl) = declarations {
            hints.push(
                ApplicableDeclarationBlock::from_declarations(decl.clone_arc(), ServoCascadeLevel::PresHints)
            );
        }
        // Additional per-content style declarations from Gecko.
        let declarations = unsafe { Gecko_GetExtraContentStyleDeclarations(self.0) };
        let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
            declarations.and_then(|s| s.as_arc_opt());
        if let Some(decl) = declarations {
            hints.push(
                ApplicableDeclarationBlock::from_declarations(decl.clone_arc(), ServoCascadeLevel::PresHints)
            );
        }
        // Support for link, vlink, and alink presentation hints on <body>
        if self.is_link() {
            // Unvisited vs. visited styles are computed up-front based on the
            // visited mode (not the element's actual state).
            let declarations = match visited_handling {
                VisitedHandlingMode::AllLinksVisitedAndUnvisited => {
                    unreachable!("We should never try to selector match with \
                                 AllLinksVisitedAndUnvisited");
                },
                VisitedHandlingMode::AllLinksUnvisited => unsafe {
                    Gecko_GetUnvisitedLinkAttrDeclarationBlock(self.0)
                },
                VisitedHandlingMode::RelevantLinkVisited => unsafe {
                    Gecko_GetVisitedLinkAttrDeclarationBlock(self.0)
                },
            };
            let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
                declarations.and_then(|s| s.as_arc_opt());
            if let Some(decl) = declarations {
                hints.push(
                    ApplicableDeclarationBlock::from_declarations(decl.clone_arc(), ServoCascadeLevel::PresHints)
                );
            }
            // The alink color only applies while the link is active.
            let active = self.get_state().intersects(NonTSPseudoClass::Active.state_flag());
            if active {
                let declarations = unsafe { Gecko_GetActiveLinkAttrDeclarationBlock(self.0) };
                let declarations: Option<&RawOffsetArc<Locked<PropertyDeclarationBlock>>> =
                    declarations.and_then(|s| s.as_arc_opt());
                if let Some(decl) = declarations {
                    hints.push(
                        ApplicableDeclarationBlock::from_declarations(decl.clone_arc(), ServoCascadeLevel::PresHints)
                    );
                }
            }
        }
        // xml:lang has precedence over lang, which can be
        // set by Gecko_GetHTMLPresentationAttrDeclarationBlock
        //
        // http://www.whatwg.org/specs/web-apps/current-work/multipage/elements.html#language
        let ptr = unsafe {
            bindings::Gecko_GetXMLLangValue(self.0)
        };
        if !ptr.is_null() {
            let global_style_data = &*GLOBAL_STYLE_DATA;
            // The atom is already addrefed by Gecko; take ownership here.
            let pdb = PropertyDeclarationBlock::with_one(
                PropertyDeclaration::XLang(SpecifiedLang(unsafe { Atom::from_addrefed(ptr) })),
                Importance::Normal
            );
            let arc = Arc::new(global_style_data.shared_lock.wrap(pdb));
            hints.push(ApplicableDeclarationBlock::from_declarations(arc, ServoCascadeLevel::PresHints))
        }
        // MathML's default lang has precedence over both `lang` and `xml:lang`
        if ns == structs::kNameSpaceID_MathML as i32 {
            if self.get_local_name().as_ptr() == atom!("math").as_ptr() {
                hints.push(MATHML_LANG_RULE.clone());
            }
        }
    }
}
impl<'le> ::selectors::Element for GeckoElement<'le> {
    type Impl = SelectorImpl;

    /// Opaque, stable identity for this element (its address).
    fn opaque(&self) -> OpaqueElement {
        OpaqueElement::new(self.0)
    }

    /// The DOM parent, if it is an element.
    fn parent_element(&self) -> Option<Self> {
        // FIXME(emilio): This will need to jump across if the parent node is a
        // shadow root to get the shadow host.
        let parent_node = self.as_node().parent_node();
        parent_node.and_then(|n| n.as_element())
    }

    /// For an element implementing a pseudo, the element the pseudo
    /// originates from.
    fn pseudo_element_originating_element(&self) -> Option<Self> {
        debug_assert!(self.implemented_pseudo_element().is_some());
        self.closest_non_native_anonymous_ancestor()
    }

    /// First child that is an element, skipping text and other nodes.
    fn first_child_element(&self) -> Option<Self> {
        let mut child = self.as_node().first_child();
        while let Some(child_node) = child {
            if let Some(el) = child_node.as_element() {
                return Some(el)
            }
            child = child_node.next_sibling();
        }
        None
    }

    /// Last child that is an element, scanning backwards.
    fn last_child_element(&self) -> Option<Self> {
        let mut child = self.as_node().last_child();
        while let Some(child_node) = child {
            if let Some(el) = child_node.as_element() {
                return Some(el)
            }
            child = child_node.prev_sibling();
        }
        None
    }

    /// Nearest preceding sibling that is an element.
    fn prev_sibling_element(&self) -> Option<Self> {
        let mut sibling = self.as_node().prev_sibling();
        while let Some(sibling_node) = sibling {
            if let Some(el) = sibling_node.as_element() {
                return Some(el)
            }
            sibling = sibling_node.prev_sibling();
        }
        None
    }

    /// Nearest following sibling that is an element.
    fn next_sibling_element(&self) -> Option<Self> {
        let mut sibling = self.as_node().next_sibling();
        while let Some(sibling_node) = sibling {
            if let Some(el) = sibling_node.as_element() {
                return Some(el)
            }
            sibling = sibling_node.next_sibling();
        }
        None
    }

    /// Attribute-selector matching, delegated to the matching Gecko binding
    /// for each operator.
    fn attr_matches(
        &self,
        ns: &NamespaceConstraint<&Namespace>,
        local_name: &Atom,
        operation: &AttrSelectorOperation<&Atom>
    ) -> bool {
        unsafe {
            match *operation {
                AttrSelectorOperation::Exists => {
                    bindings::Gecko_HasAttr(self.0,
                                            ns.atom_or_null(),
                                            local_name.as_ptr())
                }
                AttrSelectorOperation::WithValue { operator, case_sensitivity, expected_value } => {
                    let ignore_case = match case_sensitivity {
                        CaseSensitivity::CaseSensitive => false,
                        CaseSensitivity::AsciiCaseInsensitive => true,
                    };
                    // FIXME: case sensitivity for operators other than Equal
                    match operator {
                        AttrSelectorOperator::Equal => bindings::Gecko_AttrEquals(
                            self.0,
                            ns.atom_or_null(),
                            local_name.as_ptr(),
                            expected_value.as_ptr(),
                            ignore_case
                        ),
                        AttrSelectorOperator::Includes => bindings::Gecko_AttrIncludes(
                            self.0,
                            ns.atom_or_null(),
                            local_name.as_ptr(),
                            expected_value.as_ptr(),
                            ignore_case,
                        ),
                        AttrSelectorOperator::DashMatch => bindings::Gecko_AttrDashEquals(
                            self.0,
                            ns.atom_or_null(),
                            local_name.as_ptr(),
                            expected_value.as_ptr(),
                            ignore_case,
                        ),
                        AttrSelectorOperator::Prefix => bindings::Gecko_AttrHasPrefix(
                            self.0,
                            ns.atom_or_null(),
                            local_name.as_ptr(),
                            expected_value.as_ptr(),
                            ignore_case,
                        ),
                        AttrSelectorOperator::Suffix => bindings::Gecko_AttrHasSuffix(
                            self.0,
                            ns.atom_or_null(),
                            local_name.as_ptr(),
                            expected_value.as_ptr(),
                            ignore_case,
                        ),
                        AttrSelectorOperator::Substring => bindings::Gecko_AttrHasSubstring(
                            self.0,
                            ns.atom_or_null(),
                            local_name.as_ptr(),
                            expected_value.as_ptr(),
                            ignore_case,
                        ),
                    }
                }
            }
        }
    }

    /// Whether this is the document's root element (`:root`).
    fn is_root(&self) -> bool {
        unsafe {
            Gecko_IsRootElement(self.0)
        }
    }

    /// Whether the element matches `:empty`: no child counts as
    /// "significant" content per the Gecko check.
    fn is_empty(&self) -> bool {
        !self.as_node().dom_children().any(|child| unsafe {
            Gecko_IsSignificantChild(child.0, true, true)
        })
    }

    /// The element's local (tag) name as a weak atom.
    fn get_local_name(&self) -> &WeakAtom {
        unsafe {
            WeakAtom::new(self.as_node().node_info().mInner.mName)
        }
    }

    /// The element's namespace as a weak namespace atom.
    fn get_namespace(&self) -> &WeakNamespace {
        unsafe {
            WeakNamespace::new(Gecko_Namespace(self.0))
        }
    }

    /// Matches a non-tree-structural pseudo-class against this element.
    /// `flags_setter` records which selector kinds were encountered so the
    /// right invalidation flags can be set on the element.
    fn match_non_ts_pseudo_class<F>(
        &self,
        pseudo_class: &NonTSPseudoClass,
        context: &mut LocalMatchingContext<Self::Impl>,
        relevant_link: &RelevantLinkStatus,
        flags_setter: &mut F,
    ) -> bool
    where
        F: FnMut(&Self, ElementSelectorFlags),
    {
        use selectors::matching::*;
        match *pseudo_class {
            // This first large group maps one-to-one onto element state
            // bits, so a single intersects() check handles them all.
            NonTSPseudoClass::Focus |
            NonTSPseudoClass::Enabled |
            NonTSPseudoClass::Disabled |
            NonTSPseudoClass::Checked |
            NonTSPseudoClass::Fullscreen |
            NonTSPseudoClass::MozFullScreen |
            NonTSPseudoClass::Indeterminate |
            NonTSPseudoClass::PlaceholderShown |
            NonTSPseudoClass::Target |
            NonTSPseudoClass::Valid |
            NonTSPseudoClass::Invalid |
            NonTSPseudoClass::MozUIValid |
            NonTSPseudoClass::MozBroken |
            NonTSPseudoClass::MozUserDisabled |
            NonTSPseudoClass::MozSuppressed |
            NonTSPseudoClass::MozLoading |
            NonTSPseudoClass::MozHandlerBlocked |
            NonTSPseudoClass::MozHandlerDisabled |
            NonTSPseudoClass::MozHandlerCrashed |
            NonTSPseudoClass::Required |
            NonTSPseudoClass::Optional |
            NonTSPseudoClass::MozReadOnly |
            NonTSPseudoClass::MozReadWrite |
            NonTSPseudoClass::Unresolved |
            NonTSPseudoClass::FocusWithin |
            NonTSPseudoClass::MozDragOver |
            NonTSPseudoClass::MozDevtoolsHighlighted |
            NonTSPseudoClass::MozStyleeditorTransitioning |
            NonTSPseudoClass::MozFocusRing |
            NonTSPseudoClass::MozHandlerClickToPlay |
            NonTSPseudoClass::MozHandlerVulnerableUpdatable |
            NonTSPseudoClass::MozHandlerVulnerableNoUpdate |
            NonTSPseudoClass::MozMathIncrementScriptLevel |
            NonTSPseudoClass::InRange |
            NonTSPseudoClass::OutOfRange |
            NonTSPseudoClass::Default |
            NonTSPseudoClass::MozSubmitInvalid |
            NonTSPseudoClass::MozUIInvalid |
            NonTSPseudoClass::MozMeterOptimum |
            NonTSPseudoClass::MozMeterSubOptimum |
            NonTSPseudoClass::MozMeterSubSubOptimum |
            NonTSPseudoClass::MozHasDirAttr |
            NonTSPseudoClass::MozDirAttrLTR |
            NonTSPseudoClass::MozDirAttrRTL |
            NonTSPseudoClass::MozDirAttrLikeAuto |
            NonTSPseudoClass::MozAutofill |
            NonTSPseudoClass::MozAutofillPreview => {
                self.get_state().intersects(pseudo_class.state_flag())
            },
            NonTSPseudoClass::AnyLink => self.is_link(),
            // :link / :visited are resolved through the relevant-link
            // machinery so visitedness is handled consistently.
            NonTSPseudoClass::Link => relevant_link.is_unvisited(self, context.shared),
            NonTSPseudoClass::Visited => relevant_link.is_visited(self, context.shared),
            NonTSPseudoClass::Active |
            NonTSPseudoClass::Hover => {
                // Quirks mode: :active/:hover only match links.
                if context.active_hover_quirk_matches() && !self.is_link() {
                    false
                } else {
                    self.get_state().contains(pseudo_class.state_flag())
                }
            },
            NonTSPseudoClass::MozFirstNode => {
                flags_setter(self, HAS_EDGE_CHILD_SELECTOR);
                // Matches when everything before us is whitespace-only.
                let mut elem = self.as_node();
                while let Some(prev) = elem.prev_sibling() {
                    if prev.contains_non_whitespace_content() {
                        return false
                    }
                    elem = prev;
                }
                true
            }
            NonTSPseudoClass::MozLastNode => {
                flags_setter(self, HAS_EDGE_CHILD_SELECTOR);
                // Matches when everything after us is whitespace-only.
                let mut elem = self.as_node();
                while let Some(next) = elem.next_sibling() {
                    if next.contains_non_whitespace_content() {
                        return false
                    }
                    elem = next;
                }
                true
            }
            NonTSPseudoClass::MozOnlyWhitespace => {
                flags_setter(self, HAS_EMPTY_SELECTOR);
                if self.as_node().dom_children().any(|c| c.contains_non_whitespace_content()) {
                    return false
                }
                true
            }
            // These need layout/frame information only Gecko has.
            NonTSPseudoClass::MozTableBorderNonzero |
            NonTSPseudoClass::MozBrowserFrame |
            NonTSPseudoClass::MozNativeAnonymous |
            NonTSPseudoClass::MozUseShadowTreeRoot => unsafe {
                Gecko_MatchesElement(pseudo_class.to_gecko_pseudoclasstype().unwrap(), self.0)
            },
            NonTSPseudoClass::MozIsHTML => {
                self.is_html_element_in_html_document()
            }
            NonTSPseudoClass::MozLWTheme => {
                self.get_document_theme() != DocumentTheme::Doc_Theme_None
            }
            NonTSPseudoClass::MozLWThemeBrightText => {
                self.get_document_theme() == DocumentTheme::Doc_Theme_Bright
            }
            NonTSPseudoClass::MozLWThemeDarkText => {
                self.get_document_theme() == DocumentTheme::Doc_Theme_Dark
            }
            NonTSPseudoClass::MozWindowInactive => {
                self.document_state().contains(NS_DOCUMENT_STATE_WINDOW_INACTIVE)
            }
            NonTSPseudoClass::MozPlaceholder => false,
            NonTSPseudoClass::MozAny(ref sels) => {
                // Recurse into the inner selector list; the nesting level
                // guards :-moz-any()-specific matching behavior.
                context.nesting_level += 1;
                let result = sels.iter().any(|s| {
                    matches_complex_selector(s.iter(), self, context, flags_setter)
                });
                context.nesting_level -= 1;
                result
            }
            NonTSPseudoClass::Lang(ref lang_arg) => {
                self.match_element_lang(None, lang_arg)
            }
            // String-argument pseudos are forwarded to Gecko wholesale.
            NonTSPseudoClass::MozSystemMetric(ref s) |
            NonTSPseudoClass::MozLocaleDir(ref s) |
            NonTSPseudoClass::Dir(ref s) => {
                unsafe {
                    Gecko_MatchStringArgPseudo(
                        self.0,
                        pseudo_class.to_gecko_pseudoclasstype().unwrap(),
                        s.as_ptr(),
                    )
                }
            }
        }
    }

    /// Whether this element implements exactly `pseudo_element`
    /// (compared via the pseudo's canonical form).
    fn match_pseudo_element(
        &self,
        pseudo_element: &PseudoElement,
        _context: &mut MatchingContext
    ) -> bool {
        // TODO(emilio): I believe we could assert we are a pseudo-element and
        // match the proper pseudo-element, given how we rulehash the stuff
        // based on the pseudo.
        match self.implemented_pseudo_element() {
            Some(ref pseudo) => *pseudo == pseudo_element.canonical(),
            None => false,
        }
    }

    /// Whether this element is any kind of link (visited or not).
    #[inline]
    fn is_link(&self) -> bool {
        self.get_state().intersects(NonTSPseudoClass::AnyLink.state_flag())
    }

    /// Whether the element's id equals `id` under `case_sensitivity`.
    fn has_id(&self, id: &Atom, case_sensitivity: CaseSensitivity) -> bool {
        // Fast path keyed off the "has id" element bit.
        if !self.has_id() {
            return false
        }
        unsafe {
            let ptr = bindings::Gecko_AtomAttrValue(self.0, atom!("id").as_ptr());
            if ptr.is_null() {
                false
            } else {
                case_sensitivity.eq_atom(WeakAtom::new(ptr), id)
            }
        }
    }

    /// Whether `name` appears in the class list under `case_sensitivity`.
    fn has_class(&self, name: &Atom, case_sensitivity: CaseSensitivity) -> bool {
        // Fast path keyed off the "may have class" element bit.
        if !self.may_have_class() {
            return false;
        }
        snapshot_helpers::has_class(self.0,
                                    name,
                                    case_sensitivity,
                                    Gecko_ClassOrClassList)
    }

    /// Whether this is an HTML-namespace element in an HTML document.
    fn is_html_element_in_html_document(&self) -> bool {
        self.is_html_element() &&
            self.as_node().owner_doc().mType == structs::root::nsIDocument_Type::eHTML
    }

    /// Anonymous-subtree roots are excluded from :nth-child-style indexing.
    fn ignores_nth_child_selectors(&self) -> bool {
        self.is_root_of_anonymous_subtree()
    }

    /// Whether ancestor combinators should stop at this element; true only
    /// for the shadow root of an SVG <use> element's shadow tree.
    fn blocks_ancestor_combinators(&self) -> bool {
        if !self.is_root_of_anonymous_subtree() {
            return false
        }
        match self.parent_element() {
            Some(e) => {
                // If this element is the shadow root of an use-element shadow
                // tree, according to the spec, we should not match rules
                // cross the shadow DOM boundary.
                e.get_local_name() == &*local_name!("use") &&
                    e.get_namespace() == &*ns!("http://www.w3.org/2000/svg")
            },
            None => false,
        }
    }
}
/// A few helpers to help with attribute selectors and snapshotting.
pub trait NamespaceConstraintHelpers {
    /// Returns the raw atom pointer for a specific namespace constraint, or
    /// a null pointer when the constraint matches any namespace.
    fn atom_or_null(&self) -> *mut nsIAtom;
}
impl<'a> NamespaceConstraintHelpers for NamespaceConstraint<&'a Namespace> {
    /// Hands back the namespace's raw atom pointer for a specific
    /// constraint, and a null pointer for the "any namespace" case.
    fn atom_or_null(&self) -> *mut nsIAtom {
        if let NamespaceConstraint::Specific(ref ns) = *self {
            ns.0.as_ptr()
        } else {
            ptr::null_mut()
        }
    }
}
impl<'le> ElementExt for GeckoElement<'le> {
    /// User and author stylesheets do not apply inside native-anonymous
    /// subtrees, so we match their rules exactly when we're outside one.
    #[inline]
    fn matches_user_and_author_rules(&self) -> bool {
        let inside_nac_subtree = self.is_in_native_anonymous_subtree();
        !inside_nac_subtree
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Restyle hints: an optimization to avoid unnecessarily matching selectors.
use element_state::*;
use selector_impl::{ElementExt, TheSelectorImpl, NonTSPseudoClass, AttrValue};
use selectors::matching::StyleRelations;
use selectors::matching::matches_complex_selector;
use selectors::parser::{AttrSelector, Combinator, ComplexSelector, SimpleSelector, SelectorImpl};
use selectors::{Element, MatchAttr};
use std::clone::Clone;
use std::sync::Arc;
use string_cache::Atom;
/// When the ElementState of an element (like IN_HOVER_STATE) changes, certain
/// pseudo-classes (like :hover) may require us to restyle that element, its
/// siblings, and/or its descendants. Similarly, when various attributes of an
/// element change, we may also need to restyle things with id, class, and
/// attribute selectors. Doing this conservatively is expensive, and so we use
/// RestyleHints to short-circuit work we know is unnecessary.
bitflags! {
    pub flags RestyleHint: u8 {
        #[doc = "Rerun selector matching on the element."]
        const RESTYLE_SELF = 0x01,
        #[doc = "Rerun selector matching on all of the element's descendants."]
        // NB: In Gecko, we have RESTYLE_SUBTREE which is inclusive of self, but heycam isn't aware
        // of a good reason for that.
        const RESTYLE_DESCENDANTS = 0x02,
        #[doc = "Rerun selector matching on all later siblings of the element and all of their descendants."]
        // NOTE(review): 0x04 is intentionally skipped here; presumably it is
        // reserved to keep parity with Gecko's hint bits — confirm upstream.
        const RESTYLE_LATER_SIBLINGS = 0x08,
    }
}
/// In order to compute restyle hints, we perform a selector match against a
/// list of partial selectors whose rightmost simple selector may be sensitive
/// to the thing being changed. We do this matching twice, once for the element
/// as it exists now and once for the element as it existed at the time of the
/// last restyle. If the results of the selector match differ, that means that
/// the given partial selector is sensitive to the change, and we compute a
/// restyle hint based on its combinator.
///
/// In order to run selector matching against the old element state, we generate
/// a wrapper for the element which claims to have the old state. This is the
/// ElementWrapper logic below.
///
/// Gecko does this differently for element states, and passes a mask called
/// mStateMask, which indicates the states that need to be ignored during
/// selector matching. This saves an ElementWrapper allocation and an additional
/// selector match call at the expense of additional complexity inside the
/// selector matching logic. This only works for boolean states though, so we
/// still need to take the ElementWrapper approach for attribute-dependent
/// style. So we do it the same both ways for now to reduce complexity, but it's
/// worth measuring the performance impact (if any) of the mStateMask approach.
pub trait ElementSnapshot : Sized + MatchAttr<Impl=TheSelectorImpl> {
    /// The state of the snapshot, if any.
    fn state(&self) -> Option<ElementState>;
    /// If this snapshot contains attribute information.
    fn has_attrs(&self) -> bool;
    /// The ID attribute per this snapshot. Should only be called if
    /// `has_attrs()` returns true.
    fn id_attr(&self) -> Option<Atom>;
    /// Whether this snapshot contains the class `name`. Should only be called
    /// if `has_attrs()` returns true.
    fn has_class(&self, name: &Atom) -> bool;
    /// A callback that should be called for each class of the snapshot. Should
    /// only be called if `has_attrs()` returns true.
    // The callback parameter is intentionally unnamed (allowed in trait
    // declarations); implementors name it.
    fn each_class<F>(&self, F)
        where F: FnMut(&Atom);
}
/// An element paired with an optional snapshot of its pre-change state, so
/// that selector matching can be run "as of" the previous restyle.
struct ElementWrapper<'a, E>
    where E: ElementExt
{
    /// The live element.
    element: E,
    /// The old state/attribute data, when a snapshot was taken.
    snapshot: Option<&'a E::Snapshot>,
}
impl<'a, E> ElementWrapper<'a, E>
    where E: ElementExt
{
    /// Wraps `el` without a snapshot; every query falls through to the
    /// live element.
    pub fn new(el: E) -> ElementWrapper<'a, E> {
        ElementWrapper {
            element: el,
            snapshot: None,
        }
    }

    /// Wraps `el` together with a snapshot of its previous state, which
    /// takes priority for state/attribute queries.
    pub fn new_with_snapshot(el: E, snapshot: &'a E::Snapshot) -> ElementWrapper<'a, E> {
        ElementWrapper {
            element: el,
            snapshot: Some(snapshot),
        }
    }
}
/// `MatchAttr` for the wrapper: every attribute question is answered from
/// the snapshot when it carries attribute data, and from the live element
/// otherwise.
impl<'a, E> MatchAttr for ElementWrapper<'a, E>
    where E: ElementExt,
{
    type Impl = TheSelectorImpl;

    fn match_attr_has(&self, attr: &AttrSelector<TheSelectorImpl>) -> bool {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.match_attr_has(attr);
            }
        }
        self.element.match_attr_has(attr)
    }

    fn match_attr_equals(&self,
                         attr: &AttrSelector<TheSelectorImpl>,
                         value: &AttrValue) -> bool {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.match_attr_equals(attr, value);
            }
        }
        self.element.match_attr_equals(attr, value)
    }

    fn match_attr_equals_ignore_ascii_case(&self,
                                           attr: &AttrSelector<TheSelectorImpl>,
                                           value: &AttrValue) -> bool {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.match_attr_equals_ignore_ascii_case(attr, value);
            }
        }
        self.element.match_attr_equals_ignore_ascii_case(attr, value)
    }

    fn match_attr_includes(&self,
                           attr: &AttrSelector<TheSelectorImpl>,
                           value: &AttrValue) -> bool {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.match_attr_includes(attr, value);
            }
        }
        self.element.match_attr_includes(attr, value)
    }

    fn match_attr_dash(&self,
                       attr: &AttrSelector<TheSelectorImpl>,
                       value: &AttrValue) -> bool {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.match_attr_dash(attr, value);
            }
        }
        self.element.match_attr_dash(attr, value)
    }

    fn match_attr_prefix(&self,
                         attr: &AttrSelector<TheSelectorImpl>,
                         value: &AttrValue) -> bool {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.match_attr_prefix(attr, value);
            }
        }
        self.element.match_attr_prefix(attr, value)
    }

    fn match_attr_substring(&self,
                            attr: &AttrSelector<TheSelectorImpl>,
                            value: &AttrValue) -> bool {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.match_attr_substring(attr, value);
            }
        }
        self.element.match_attr_substring(attr, value)
    }

    fn match_attr_suffix(&self,
                         attr: &AttrSelector<TheSelectorImpl>,
                         value: &AttrValue) -> bool {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.match_attr_suffix(attr, value);
            }
        }
        self.element.match_attr_suffix(attr, value)
    }
}
/// `Element` for the wrapper: state and attribute queries prefer the
/// snapshot (the old element data) when one with the relevant information
/// is present; everything else delegates to the live element, re-wrapping
/// traversal results so the whole match runs against wrapped elements.
impl<'a, E> Element for ElementWrapper<'a, E>
    where E: ElementExt<Impl=TheSelectorImpl>
{
    fn match_non_ts_pseudo_class(&self, pseudo_class: NonTSPseudoClass) -> bool {
        let flag = TheSelectorImpl::pseudo_class_state_flag(&pseudo_class);
        if flag == ElementState::empty() {
            // Not a state-backed pseudo-class; the snapshot cannot answer it.
            self.element.match_non_ts_pseudo_class(pseudo_class)
        } else {
            // Use the snapshotted state bits if we have them, else the live state.
            match self.snapshot.and_then(|s| s.state()) {
                Some(snapshot_state) => snapshot_state.contains(flag),
                _ => self.element.match_non_ts_pseudo_class(pseudo_class)
            }
        }
    }
    fn parent_element(&self) -> Option<Self> {
        self.element.parent_element().map(ElementWrapper::new)
    }
    fn first_child_element(&self) -> Option<Self> {
        self.element.first_child_element().map(ElementWrapper::new)
    }
    fn last_child_element(&self) -> Option<Self> {
        self.element.last_child_element().map(ElementWrapper::new)
    }
    fn prev_sibling_element(&self) -> Option<Self> {
        self.element.prev_sibling_element().map(ElementWrapper::new)
    }
    fn next_sibling_element(&self) -> Option<Self> {
        self.element.next_sibling_element().map(ElementWrapper::new)
    }
    fn is_html_element_in_html_document(&self) -> bool {
        self.element.is_html_element_in_html_document()
    }
    fn get_local_name(&self) -> &<Self::Impl as SelectorImpl>::BorrowedLocalName {
        self.element.get_local_name()
    }
    fn get_namespace(&self) -> &<Self::Impl as SelectorImpl>::BorrowedNamespaceUrl {
        self.element.get_namespace()
    }
    fn get_id(&self) -> Option<Atom> {
        // Attribute data (including the id) comes from the snapshot if present.
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.id_attr(),
            _   => self.element.get_id()
        }
    }
    fn has_class(&self, name: &Atom) -> bool {
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.has_class(name),
            _   => self.element.has_class(name)
        }
    }
    fn is_empty(&self) -> bool {
        self.element.is_empty()
    }
    fn is_root(&self) -> bool {
        self.element.is_root()
    }
    fn each_class<F>(&self, callback: F)
                     where F: FnMut(&Atom) {
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.each_class(callback),
            _   => self.element.each_class(callback)
        }
    }
}
/// The element-state bits that the given simple selector is sensitive to;
/// empty for anything but a state-backed non-tree-structural pseudo-class.
fn selector_to_state(sel: &SimpleSelector<TheSelectorImpl>) -> ElementState {
    if let SimpleSelector::NonTSPseudoClass(ref pc) = *sel {
        TheSelectorImpl::pseudo_class_state_flag(pc)
    } else {
        ElementState::empty()
    }
}
/// Whether the simple selector depends on attribute data (ids and classes
/// are attribute-backed too).
fn is_attr_selector(sel: &SimpleSelector<TheSelectorImpl>) -> bool {
    match *sel {
        SimpleSelector::ID(..) |
        SimpleSelector::Class(..) |
        SimpleSelector::AttrExists(..) |
        SimpleSelector::AttrEqual(..) |
        SimpleSelector::AttrIncludes(..) |
        SimpleSelector::AttrDashMatch(..) |
        SimpleSelector::AttrPrefixMatch(..) |
        SimpleSelector::AttrSubstringMatch(..) |
        SimpleSelector::AttrSuffixMatch(..) => true,
        _ => false,
    }
}
/// Maps the combinator to the right of a sensitive compound selector onto
/// the restyle hint that change implies.
fn combinator_to_restyle_hint(combinator: Option<Combinator>) -> RestyleHint {
    match combinator {
        // The rightmost compound selector matches the element itself.
        None => RESTYLE_SELF,
        // Child combinators are approximated by restyling the whole subtree.
        Some(Combinator::Child) |
        Some(Combinator::Descendant) => RESTYLE_DESCENDANTS,
        Some(Combinator::NextSibling) |
        Some(Combinator::LaterSibling) => RESTYLE_LATER_SIBLINGS,
    }
}
/// What a (partial) selector is sensitive to: element-state bits and/or
/// attribute data.
#[derive(Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
struct Sensitivities {
    // The union of state bits the selector can depend on.
    pub states: ElementState,
    // Whether any simple selector in the compound depends on attributes.
    pub attrs: bool,
}
impl Sensitivities {
    /// A sensitivity set that depends on nothing at all.
    fn new() -> Sensitivities {
        Sensitivities {
            states: ElementState::empty(),
            attrs: false,
        }
    }

    /// True when the set depends on neither attributes nor state bits.
    fn is_empty(&self) -> bool {
        !self.attrs && self.states.is_empty()
    }
}
/// Mapping between (partial) CompoundSelectors (and the combinator to their
/// right) and the states and attributes they depend on.
///
/// In general, for all selectors in all applicable stylesheets of the form:
///
/// |a _ b _ c _ d _ e|
///
/// Where:
///   * |b| and |d| are simple selectors that depend on state (like :hover) or
///     attributes (like [attr...], .foo, or #foo).
///   * |a|, |c|, and |e| are arbitrary simple selectors that do not depend on
///     state or attributes.
///
/// We generate a Dependency for both |a _ b:X _| and |a _ b:X _ c _ d:Y _|,
/// even though those selectors may not appear on their own in any stylesheet.
/// This allows us to quickly scan through the dependency sites of all style
/// rules and determine the maximum effect that a given state or attribute
/// change may have on the style of elements in the document.
#[derive(Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
struct Dependency {
    /// The partial selector to re-match against old and new element data.
    selector: Arc<ComplexSelector<TheSelectorImpl>>,
    /// The combinator to the right of `selector`'s rightmost compound
    /// selector; `None` when the compound is the rightmost one.
    combinator: Option<Combinator>,
    /// What the rightmost compound selector depends on.
    sensitivities: Sensitivities,
}
/// The set of dependencies noted from all applicable selectors, used to
/// compute restyle hints for a mutated element.
#[derive(Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct DependencySet {
    // Every dependency, in the order they were noted.
    deps: Vec<Dependency>,
}
// The inherent methods were previously split across two adjacent `impl
// DependencySet` blocks for no apparent reason; they are merged into one
// (bodies unchanged) for consistency with the rest of the file.
impl DependencySet {
    /// Creates an empty dependency set.
    pub fn new() -> Self {
        DependencySet { deps: Vec::new() }
    }

    /// The number of dependencies currently tracked.
    pub fn len(&self) -> usize {
        self.deps.len()
    }

    /// Walks `selector` from its rightmost compound selector leftwards,
    /// recording a Dependency for every compound selector that is sensitive
    /// to element state or attributes, together with the combinator to its
    /// right.
    pub fn note_selector(&mut self, selector: &Arc<ComplexSelector<TheSelectorImpl>>) {
        let mut cur = selector;
        let mut combinator: Option<Combinator> = None;
        loop {
            let mut sensitivities = Sensitivities::new();
            for s in &cur.compound_selector {
                sensitivities.states.insert(selector_to_state(s));
                if !sensitivities.attrs {
                    sensitivities.attrs = is_attr_selector(s);
                }
            }
            if !sensitivities.is_empty() {
                self.deps.push(Dependency {
                    selector: cur.clone(),
                    combinator: combinator,
                    sensitivities: sensitivities,
                });
            }
            cur = match cur.next {
                Some((ref sel, comb)) => {
                    combinator = Some(comb);
                    sel
                }
                None => break,
            }
        }
    }

    /// Drops all recorded dependencies.
    pub fn clear(&mut self) {
        self.deps.clear();
    }

    /// Computes the restyle hint for `el` given its snapshot (old data) and
    /// its current element state: a dependency contributes its hint when
    /// matching the partial selector against the old and the new element
    /// yields different results.
    pub fn compute_hint<E>(&self, el: &E,
                           snapshot: &E::Snapshot,
                           current_state: ElementState)
                           -> RestyleHint
        where E: ElementExt + Clone
    {
        debug!("About to calculate restyle hint for element. Deps: {}",
               self.deps.len());
        // XOR of old and new state yields exactly the bits that changed.
        let state_changes = snapshot.state().map_or_else(ElementState::empty, |old_state| current_state ^ old_state);
        let attrs_changed = snapshot.has_attrs();
        let mut hint = RestyleHint::empty();
        for dep in &self.deps {
            if state_changes.intersects(dep.sensitivities.states) || (attrs_changed && dep.sensitivities.attrs) {
                let old_el: ElementWrapper<E> = ElementWrapper::new_with_snapshot(el.clone(), snapshot);
                let matched_then =
                    matches_complex_selector(&*dep.selector, &old_el, None, &mut StyleRelations::empty());
                let matches_now =
                    matches_complex_selector(&*dep.selector, el, None, &mut StyleRelations::empty());
                if matched_then != matches_now {
                    hint.insert(combinator_to_restyle_hint(dep.combinator));
                    // All hint bits set: no further dependency can add anything.
                    if hint.is_all() {
                        break
                    }
                }
            }
        }
        hint
    }
}
Auto merge of #13108 - emilio:stupidest-opt-ever, r=bholley
style: Don't loop over all the set of dependencies always.
<!-- Please describe your changes on the following line: -->
Instead, partition the dependencies by the kind of mutation that can match them. This cuts down incremental restyle time in BrowserHTML quite a bit.
---
<!-- Thank you for contributing to Servo! Please replace each `[ ]` by `[X]` when the step is complete, and replace `__` with appropriate data: -->
- [x] `./mach build -d` does not report any errors
- [x] `./mach test-tidy` does not report any errors
<!-- Either: -->
- [x] There are tests for these changes OR
<!-- Pull requests that do not address these steps are welcome, but they will require additional verification as part of the review process. -->
The dependency count is not at all small, and this way we avoid looping
through all of the dependencies in the common cases — namely, changes to
either element state or attributes alone.
<!-- Reviewable:start -->
---
This change is [<img src="https://reviewable.io/review_button.svg" height="34" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/servo/servo/13108)
<!-- Reviewable:end -->
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Restyle hints: an optimization to avoid unnecessarily matching selectors.
use element_state::*;
#[cfg(feature = "servo")]
use heapsize::HeapSizeOf;
use selector_impl::{ElementExt, TheSelectorImpl, NonTSPseudoClass, AttrValue};
use selectors::matching::StyleRelations;
use selectors::matching::matches_complex_selector;
use selectors::parser::{AttrSelector, Combinator, ComplexSelector, SimpleSelector, SelectorImpl};
use selectors::{Element, MatchAttr};
use std::clone::Clone;
use std::sync::Arc;
use string_cache::Atom;
/// When the ElementState of an element (like IN_HOVER_STATE) changes, certain
/// pseudo-classes (like :hover) may require us to restyle that element, its
/// siblings, and/or its descendants. Similarly, when various attributes of an
/// element change, we may also need to restyle things with id, class, and
/// attribute selectors. Doing this conservatively is expensive, and so we use
/// RestyleHints to short-circuit work we know is unnecessary.
bitflags! {
    pub flags RestyleHint: u8 {
        #[doc = "Rerun selector matching on the element."]
        const RESTYLE_SELF = 0x01,
        #[doc = "Rerun selector matching on all of the element's descendants."]
        // NB: In Gecko, we have RESTYLE_SUBTREE which is inclusive of self, but heycam isn't aware
        // of a good reason for that.
        const RESTYLE_DESCENDANTS = 0x02,
        #[doc = "Rerun selector matching on all later siblings of the element and all of their descendants."]
        // NOTE(review): 0x04 is intentionally skipped here; presumably it is
        // reserved to keep parity with Gecko's hint bits — confirm upstream.
        const RESTYLE_LATER_SIBLINGS = 0x08,
    }
}
#[cfg(feature = "servo")]
impl HeapSizeOf for RestyleHint {
    // RestyleHint is a plain bitfield; it owns no heap allocations.
    fn heap_size_of_children(&self) -> usize { 0 }
}
/// In order to compute restyle hints, we perform a selector match against a
/// list of partial selectors whose rightmost simple selector may be sensitive
/// to the thing being changed. We do this matching twice, once for the element
/// as it exists now and once for the element as it existed at the time of the
/// last restyle. If the results of the selector match differ, that means that
/// the given partial selector is sensitive to the change, and we compute a
/// restyle hint based on its combinator.
///
/// In order to run selector matching against the old element state, we generate
/// a wrapper for the element which claims to have the old state. This is the
/// ElementWrapper logic below.
///
/// Gecko does this differently for element states, and passes a mask called
/// mStateMask, which indicates the states that need to be ignored during
/// selector matching. This saves an ElementWrapper allocation and an additional
/// selector match call at the expense of additional complexity inside the
/// selector matching logic. This only works for boolean states though, so we
/// still need to take the ElementWrapper approach for attribute-dependent
/// style. So we do it the same both ways for now to reduce complexity, but it's
/// worth measuring the performance impact (if any) of the mStateMask approach.
pub trait ElementSnapshot : Sized + MatchAttr<Impl=TheSelectorImpl> {
    /// The state of the snapshot, if any.
    fn state(&self) -> Option<ElementState>;
    /// If this snapshot contains attribute information.
    fn has_attrs(&self) -> bool;
    /// The ID attribute per this snapshot. Should only be called if
    /// `has_attrs()` returns true.
    fn id_attr(&self) -> Option<Atom>;
    /// Whether this snapshot contains the class `name`. Should only be called
    /// if `has_attrs()` returns true.
    fn has_class(&self, name: &Atom) -> bool;
    /// A callback that should be called for each class of the snapshot. Should
    /// only be called if `has_attrs()` returns true.
    // The callback parameter is intentionally unnamed (allowed in trait
    // declarations); implementors name it.
    fn each_class<F>(&self, F)
        where F: FnMut(&Atom);
}
/// An element paired with an optional snapshot of its pre-change state, so
/// that selector matching can be run "as of" the previous restyle.
struct ElementWrapper<'a, E>
    where E: ElementExt
{
    /// The live element.
    element: E,
    /// The old state/attribute data, when a snapshot was taken.
    snapshot: Option<&'a E::Snapshot>,
}
impl<'a, E> ElementWrapper<'a, E>
    where E: ElementExt
{
    /// Wraps `el` without a snapshot; every query falls through to the
    /// live element.
    pub fn new(el: E) -> ElementWrapper<'a, E> {
        ElementWrapper {
            element: el,
            snapshot: None,
        }
    }

    /// Wraps `el` together with a snapshot of its previous state, which
    /// takes priority for state/attribute queries.
    pub fn new_with_snapshot(el: E, snapshot: &'a E::Snapshot) -> ElementWrapper<'a, E> {
        ElementWrapper {
            element: el,
            snapshot: Some(snapshot),
        }
    }
}
/// `MatchAttr` for the wrapper: every attribute question is answered from
/// the snapshot when it carries attribute data, and from the live element
/// otherwise.
impl<'a, E> MatchAttr for ElementWrapper<'a, E>
    where E: ElementExt,
{
    type Impl = TheSelectorImpl;
    fn match_attr_has(&self, attr: &AttrSelector<TheSelectorImpl>) -> bool {
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.match_attr_has(attr),
            _   => self.element.match_attr_has(attr)
        }
    }
    fn match_attr_equals(&self,
                         attr: &AttrSelector<TheSelectorImpl>,
                         value: &AttrValue) -> bool {
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.match_attr_equals(attr, value),
            _   => self.element.match_attr_equals(attr, value)
        }
    }
    fn match_attr_equals_ignore_ascii_case(&self,
                                           attr: &AttrSelector<TheSelectorImpl>,
                                           value: &AttrValue) -> bool {
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.match_attr_equals_ignore_ascii_case(attr, value),
            _   => self.element.match_attr_equals_ignore_ascii_case(attr, value)
        }
    }
    fn match_attr_includes(&self,
                           attr: &AttrSelector<TheSelectorImpl>,
                           value: &AttrValue) -> bool {
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.match_attr_includes(attr, value),
            _   => self.element.match_attr_includes(attr, value)
        }
    }
    fn match_attr_dash(&self,
                       attr: &AttrSelector<TheSelectorImpl>,
                       value: &AttrValue) -> bool {
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.match_attr_dash(attr, value),
            _   => self.element.match_attr_dash(attr, value)
        }
    }
    fn match_attr_prefix(&self,
                         attr: &AttrSelector<TheSelectorImpl>,
                         value: &AttrValue) -> bool {
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.match_attr_prefix(attr, value),
            _   => self.element.match_attr_prefix(attr, value)
        }
    }
    fn match_attr_substring(&self,
                            attr: &AttrSelector<TheSelectorImpl>,
                            value: &AttrValue) -> bool {
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.match_attr_substring(attr, value),
            _   => self.element.match_attr_substring(attr, value)
        }
    }
    fn match_attr_suffix(&self,
                         attr: &AttrSelector<TheSelectorImpl>,
                         value: &AttrValue) -> bool {
        match self.snapshot {
            Some(snapshot) if snapshot.has_attrs()
                => snapshot.match_attr_suffix(attr, value),
            _   => self.element.match_attr_suffix(attr, value)
        }
    }
}
/// `Element` for the wrapper: state and attribute queries consult the
/// snapshot when one with the relevant data exists, falling back to the
/// live element; tree traversal re-wraps its results.
impl<'a, E> Element for ElementWrapper<'a, E>
    where E: ElementExt<Impl=TheSelectorImpl>
{
    fn match_non_ts_pseudo_class(&self, pseudo_class: NonTSPseudoClass) -> bool {
        let flag = TheSelectorImpl::pseudo_class_state_flag(&pseudo_class);
        // Only state-backed pseudo-classes can be answered from the snapshot.
        if flag != ElementState::empty() {
            if let Some(snapshot_state) = self.snapshot.and_then(|s| s.state()) {
                return snapshot_state.contains(flag);
            }
        }
        self.element.match_non_ts_pseudo_class(pseudo_class)
    }

    fn parent_element(&self) -> Option<Self> {
        self.element.parent_element().map(|e| ElementWrapper::new(e))
    }

    fn first_child_element(&self) -> Option<Self> {
        self.element.first_child_element().map(|e| ElementWrapper::new(e))
    }

    fn last_child_element(&self) -> Option<Self> {
        self.element.last_child_element().map(|e| ElementWrapper::new(e))
    }

    fn prev_sibling_element(&self) -> Option<Self> {
        self.element.prev_sibling_element().map(|e| ElementWrapper::new(e))
    }

    fn next_sibling_element(&self) -> Option<Self> {
        self.element.next_sibling_element().map(|e| ElementWrapper::new(e))
    }

    fn is_html_element_in_html_document(&self) -> bool {
        self.element.is_html_element_in_html_document()
    }

    fn get_local_name(&self) -> &<Self::Impl as SelectorImpl>::BorrowedLocalName {
        self.element.get_local_name()
    }

    fn get_namespace(&self) -> &<Self::Impl as SelectorImpl>::BorrowedNamespaceUrl {
        self.element.get_namespace()
    }

    fn get_id(&self) -> Option<Atom> {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.id_attr();
            }
        }
        self.element.get_id()
    }

    fn has_class(&self, name: &Atom) -> bool {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.has_class(name);
            }
        }
        self.element.has_class(name)
    }

    fn is_empty(&self) -> bool {
        self.element.is_empty()
    }

    fn is_root(&self) -> bool {
        self.element.is_root()
    }

    fn each_class<F>(&self, callback: F)
        where F: FnMut(&Atom) {
        if let Some(snap) = self.snapshot {
            if snap.has_attrs() {
                return snap.each_class(callback);
            }
        }
        self.element.each_class(callback)
    }
}
/// The element-state bits the given simple selector is sensitive to; empty
/// for anything but a state-backed non-tree-structural pseudo-class.
fn selector_to_state(sel: &SimpleSelector<TheSelectorImpl>) -> ElementState {
    match *sel {
        SimpleSelector::NonTSPseudoClass(ref pc) => TheSelectorImpl::pseudo_class_state_flag(pc),
        _ => ElementState::empty(),
    }
}
/// Whether the simple selector depends on attribute data (ids and classes
/// are attribute-backed too).
fn is_attr_selector(sel: &SimpleSelector<TheSelectorImpl>) -> bool {
    match *sel {
        SimpleSelector::ID(_) |
        SimpleSelector::Class(_) |
        SimpleSelector::AttrExists(_) |
        SimpleSelector::AttrEqual(_, _, _) |
        SimpleSelector::AttrIncludes(_, _) |
        SimpleSelector::AttrDashMatch(_, _) |
        SimpleSelector::AttrPrefixMatch(_, _) |
        SimpleSelector::AttrSubstringMatch(_, _) |
        SimpleSelector::AttrSuffixMatch(_, _) => true,
        _ => false,
    }
}
/// Maps the combinator to the right of a sensitive compound selector onto
/// the restyle hint that change implies.
fn combinator_to_restyle_hint(combinator: Option<Combinator>) -> RestyleHint {
    match combinator {
        // The rightmost compound selector matches the element itself.
        None => RESTYLE_SELF,
        Some(c) => match c {
            // Child combinators are approximated by restyling the subtree.
            Combinator::Child => RESTYLE_DESCENDANTS,
            Combinator::Descendant => RESTYLE_DESCENDANTS,
            Combinator::NextSibling => RESTYLE_LATER_SIBLINGS,
            Combinator::LaterSibling => RESTYLE_LATER_SIBLINGS,
        }
    }
}
/// What a (partial) selector is sensitive to: element-state bits and/or
/// attribute data.
#[derive(Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
struct Sensitivities {
    // The union of state bits the selector can depend on.
    pub states: ElementState,
    // Whether any simple selector in the compound depends on attributes.
    pub attrs: bool,
}
impl Sensitivities {
    /// True when the set depends on neither state bits nor attributes.
    fn is_empty(&self) -> bool {
        self.states.is_empty() && !self.attrs
    }
    /// A sensitivity set that depends on nothing at all.
    fn new() -> Sensitivities {
        Sensitivities {
            states: ElementState::empty(),
            attrs: false,
        }
    }
}
/// Mapping between (partial) CompoundSelectors (and the combinator to their
/// right) and the states and attributes they depend on.
///
/// In general, for all selectors in all applicable stylesheets of the form:
///
/// |a _ b _ c _ d _ e|
///
/// Where:
///   * |b| and |d| are simple selectors that depend on state (like :hover) or
///     attributes (like [attr...], .foo, or #foo).
///   * |a|, |c|, and |e| are arbitrary simple selectors that do not depend on
///     state or attributes.
///
/// We generate a Dependency for both |a _ b:X _| and |a _ b:X _ c _ d:Y _|,
/// even though those selectors may not appear on their own in any stylesheet.
/// This allows us to quickly scan through the dependency sites of all style
/// rules and determine the maximum effect that a given state or attribute
/// change may have on the style of elements in the document.
#[derive(Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
struct Dependency {
    /// The partial selector to re-match against old and new element data.
    selector: Arc<ComplexSelector<TheSelectorImpl>>,
    /// The restyle hint to apply when old/new match results disagree,
    /// precomputed from the combinator at note_selector time.
    hint: RestyleHint,
    /// What the rightmost compound selector depends on.
    sensitivities: Sensitivities,
}
/// A set of dependencies for a given stylist.
///
/// Note that there are measurable perf wins from storing them separately
/// depending on what kind of change they affect, and its also not a big deal to
/// do it, since the dependencies are per-document.
#[derive(Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct DependencySet {
    /// Dependencies only affected by state.
    state_deps: Vec<Dependency>,
    /// Dependencies only affected by attributes.
    attr_deps: Vec<Dependency>,
    /// Dependencies affected by both.
    common_deps: Vec<Dependency>,
}
impl DependencySet {
    /// Routes a dependency into the bucket matching what it is sensitive
    /// to, so compute_hint can skip whole buckets for common mutations.
    fn add_dependency(&mut self, dep: Dependency) {
        let affects_attrs = dep.sensitivities.attrs;
        let affects_states = !dep.sensitivities.states.is_empty();
        if affects_attrs && affects_states {
            self.common_deps.push(dep)
        } else if affects_attrs {
            self.attr_deps.push(dep)
        } else {
            self.state_deps.push(dep)
        }
    }
    /// Creates an empty dependency set.
    pub fn new() -> Self {
        DependencySet {
            state_deps: vec![],
            attr_deps: vec![],
            common_deps: vec![],
        }
    }
    /// The total number of dependencies across all three buckets.
    pub fn len(&self) -> usize {
        self.common_deps.len() + self.attr_deps.len() + self.state_deps.len()
    }
    /// Walks `selector` from its rightmost compound selector leftwards,
    /// recording a Dependency for every compound selector that is sensitive
    /// to element state or attributes.
    pub fn note_selector(&mut self, selector: &Arc<ComplexSelector<TheSelectorImpl>>) {
        let mut cur = selector;
        let mut combinator: Option<Combinator> = None;
        loop {
            let mut sensitivities = Sensitivities::new();
            for s in &cur.compound_selector {
                sensitivities.states.insert(selector_to_state(s));
                if !sensitivities.attrs {
                    sensitivities.attrs = is_attr_selector(s);
                }
            }
            if !sensitivities.is_empty() {
                self.add_dependency(Dependency {
                    selector: cur.clone(),
                    // The hint is precomputed here so matching only needs
                    // to look it up.
                    hint: combinator_to_restyle_hint(combinator),
                    sensitivities: sensitivities,
                });
            }
            cur = match cur.next {
                Some((ref sel, comb)) => {
                    combinator = Some(comb);
                    sel
                }
                None => break,
            }
        }
    }
    /// Drops all recorded dependencies from every bucket.
    pub fn clear(&mut self) {
        self.common_deps.clear();
        self.attr_deps.clear();
        self.state_deps.clear();
    }
    /// Computes the restyle hint for `el` given its snapshot (old data) and
    /// its current element state, consulting only the dependency buckets
    /// that the mutation can possibly affect.
    pub fn compute_hint<E>(&self, el: &E,
                           snapshot: &E::Snapshot,
                           current_state: ElementState)
                           -> RestyleHint
        where E: ElementExt + Clone
    {
        debug!("About to calculate restyle hint for element. Deps: {}",
               self.len());
        // XOR of old and new state yields exactly the bits that changed.
        let state_changes = snapshot.state()
                                    .map_or_else(ElementState::empty, |old_state| current_state ^ old_state);
        let attrs_changed = snapshot.has_attrs();
        if state_changes.is_empty() && !attrs_changed {
            return RestyleHint::empty();
        }
        let mut hint = RestyleHint::empty();
        // NB: shadows the snapshot argument; from here on `snapshot` is the
        // wrapped "old" element used for the before-side selector match.
        let snapshot = ElementWrapper::new_with_snapshot(el.clone(), snapshot);
        Self::compute_partial_hint(&self.common_deps, el, &snapshot,
                                   &state_changes, attrs_changed, &mut hint);
        if !state_changes.is_empty() {
            Self::compute_partial_hint(&self.state_deps, el, &snapshot,
                                       &state_changes, attrs_changed, &mut hint);
        }
        if attrs_changed {
            Self::compute_partial_hint(&self.attr_deps, el, &snapshot,
                                       &state_changes, attrs_changed, &mut hint);
        }
        hint
    }
    /// Accumulates into `hint` the hints from one dependency bucket,
    /// bailing out early once every hint bit is already set.
    fn compute_partial_hint<E>(deps: &[Dependency],
                               element: &E,
                               snapshot: &ElementWrapper<E>,
                               state_changes: &ElementState,
                               attrs_changed: bool,
                               hint: &mut RestyleHint)
        where E: ElementExt
    {
        if hint.is_all() {
            return;
        }
        for dep in deps {
            // Bucketing should guarantee every dependency here can be
            // affected by this mutation (note `||` binds looser than `&&`).
            debug_assert!(state_changes.intersects(dep.sensitivities.states) ||
                          attrs_changed && dep.sensitivities.attrs,
                          "Testing a completely ineffective dependency?");
            // Skip the (expensive) double match when this dependency could
            // only contribute bits we already have.
            if !hint.intersects(dep.hint) {
                let matched_then =
                    matches_complex_selector(&dep.selector, snapshot, None,
                                             &mut StyleRelations::empty());
                let matches_now =
                    matches_complex_selector(&dep.selector, element, None,
                                             &mut StyleRelations::empty());
                if matched_then != matches_now {
                    hint.insert(dep.hint);
                }
                if hint.is_all() {
                    break;
                }
            }
        }
    }
}
|
use std::iter::{self, successors};
use either::Either;
use ide_db::{ty_filter::TryEnum, RootDatabase};
use syntax::{
ast::{
self,
edit::{AstNodeEdit, IndentLevel},
make,
},
AstNode,
};
use crate::{
utils::{does_pat_match_variant, unwrap_trivial_block},
AssistContext, AssistId, AssistKind, Assists,
};
// Assist: replace_if_let_with_match
//
// Replaces a `if let` expression with a `match` expression.
//
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
//     $0if let Action::Move { distance } = action {
//         foo(distance)
//     } else {
//         bar()
//     }
// }
// ```
// ->
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
//     match action {
//         Action::Move { distance } => foo(distance),
//         _ => bar(),
//     }
// }
// ```
pub(crate) fn replace_if_let_with_match(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
    let if_expr: ast::IfExpr = ctx.find_node_at_offset()?;
    let mut else_block = None;
    // Walk the `else if` chain; a trailing plain `else { .. }` (if any) is
    // captured into `else_block` as a side effect and ends the iteration.
    let if_exprs = successors(Some(if_expr.clone()), |expr| match expr.else_branch()? {
        ast::ElseBranch::IfExpr(expr) => Some(expr),
        ast::ElseBranch::Block(block) => {
            else_block = Some(block);
            None
        }
    });
    let scrutinee_to_be_expr = if_expr.condition()?.expr()?;
    let mut pat_seen = false;
    // Each entry is either a pattern (becomes a match arm pattern) or a
    // plain expression (becomes a guard on a wildcard arm), plus its body.
    let mut cond_bodies = Vec::new();
    for if_expr in if_exprs {
        let cond = if_expr.condition()?;
        let expr = cond.expr()?;
        let cond = match cond.pat() {
            Some(pat) => {
                if scrutinee_to_be_expr.syntax().text() != expr.syntax().text() {
                    // Only if all condition expressions are equal we can merge them into a match
                    return None;
                }
                pat_seen = true;
                Either::Left(pat)
            }
            None => Either::Right(expr),
        };
        let body = if_expr.then_branch()?;
        cond_bodies.push((cond, body));
    }
    if !pat_seen {
        // Don't offer turning an if (chain) without patterns into a match
        return None;
    }
    let target = if_expr.syntax().text_range();
    acc.add(
        AssistId("replace_if_let_with_match", AssistKind::RefactorRewrite),
        "Replace with match",
        target,
        move |edit| {
            let match_expr = {
                let else_arm = {
                    match else_block {
                        Some(else_block) => {
                            // For a single `if let` over an Option/Result
                            // ("TryEnum") scrutinee, label the else arm with
                            // the complementary variant pattern rather than
                            // a plain wildcard.
                            let pattern = match &*cond_bodies {
                                [(Either::Left(pat), _)] => ctx
                                    .sema
                                    .type_of_pat(&pat)
                                    .and_then(|ty| TryEnum::from_ty(&ctx.sema, &ty))
                                    .map(|it| {
                                        if does_pat_match_variant(&pat, &it.sad_pattern()) {
                                            it.happy_pattern()
                                        } else {
                                            it.sad_pattern()
                                        }
                                    }),
                                _ => None,
                            }
                            .unwrap_or_else(|| make::wildcard_pat().into());
                            make::match_arm(
                                iter::once(pattern),
                                None,
                                unwrap_trivial_block(else_block),
                            )
                        }
                        // No trailing else: synthesize `_ => ()`.
                        None => make::match_arm(
                            iter::once(make::wildcard_pat().into()),
                            None,
                            make::expr_unit().into(),
                        ),
                    }
                };
                let arms = cond_bodies
                    .into_iter()
                    .map(|(pat, body)| {
                        let body = body.reset_indent().indent(IndentLevel(1));
                        match pat {
                            Either::Left(pat) => {
                                make::match_arm(iter::once(pat), None, unwrap_trivial_block(body))
                            }
                            // Pattern-less condition: wildcard arm guarded
                            // by the original boolean expression.
                            Either::Right(expr) => make::match_arm(
                                iter::once(make::wildcard_pat().into()),
                                Some(expr),
                                unwrap_trivial_block(body),
                            ),
                        }
                    })
                    .chain(iter::once(else_arm));
                let match_expr = make::expr_match(scrutinee_to_be_expr, make::match_arm_list(arms));
                match_expr.indent(IndentLevel::from_node(if_expr.syntax()))
            };
            let has_preceding_if_expr =
                if_expr.syntax().parent().map_or(false, |it| ast::IfExpr::can_cast(it.kind()));
            let expr = if has_preceding_if_expr {
                // make sure we replace the `else if let ...` with a block so we don't end up with `else expr`
                make::block_expr(None, Some(match_expr)).into()
            } else {
                match_expr
            };
            edit.replace_ast::<ast::Expr>(if_expr.into(), expr);
        },
    )
}
// Assist: replace_match_with_if_let
//
// Replaces a binary `match` with a wildcard pattern and no guards with an `if let` expression.
//
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
//     $0match action {
//         Action::Move { distance } => foo(distance),
//         _ => bar(),
//     }
// }
// ```
// ->
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
//     if let Action::Move { distance } = action {
//         foo(distance)
//     } else {
//         bar()
//     }
// }
// ```
pub(crate) fn replace_match_with_if_let(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
    let match_expr: ast::MatchExpr = ctx.find_node_at_offset()?;
    // Applicable only to a guard-free match with exactly two arms.
    let mut arms = match_expr.match_arm_list()?.arms();
    let first_arm = arms.next()?;
    let second_arm = arms.next()?;
    if arms.next().is_some() || first_arm.guard().is_some() || second_arm.guard().is_some() {
        return None;
    }
    let condition_expr = match_expr.expr()?;
    // Whichever arm has the wildcard/"sad" pattern becomes the else branch;
    // the other arm supplies the `if let` pattern and then-branch.
    let (if_let_pat, then_expr, else_expr) = if is_pat_wildcard_or_sad(&ctx.sema, &first_arm.pat()?)
    {
        (second_arm.pat()?, second_arm.expr()?, first_arm.expr()?)
    } else if is_pat_wildcard_or_sad(&ctx.sema, &second_arm.pat()?) {
        (first_arm.pat()?, first_arm.expr()?, second_arm.expr()?)
    } else {
        return None;
    };
    let target = match_expr.syntax().text_range();
    acc.add(
        AssistId("replace_match_with_if_let", AssistKind::RefactorRewrite),
        "Replace with if let",
        target,
        move |edit| {
            let condition = make::condition(condition_expr, Some(if_let_pat));
            let then_block = match then_expr.reset_indent() {
                ast::Expr::BlockExpr(block) => block,
                expr => make::block_expr(iter::empty(), Some(expr)),
            };
            // Drop the else branch entirely when its body is an empty block
            // or the unit expression `()`.
            let else_expr = match else_expr {
                ast::Expr::BlockExpr(block)
                    if block.statements().count() == 0 && block.tail_expr().is_none() =>
                {
                    None
                }
                ast::Expr::TupleExpr(tuple) if tuple.fields().count() == 0 => None,
                expr => Some(expr),
            };
            let if_let_expr = make::expr_if(
                condition,
                then_block,
                else_expr.map(|else_expr| {
                    ast::ElseBranch::Block(make::block_expr(iter::empty(), Some(else_expr)))
                }),
            )
            .indent(IndentLevel::from_node(match_expr.syntax()));
            edit.replace_ast::<ast::Expr>(match_expr.into(), if_let_expr);
        },
    )
}
/// Returns `true` when `pat` is a wildcard (`_`) or, for a pattern whose type
/// is a known try-enum (`Option`/`Result`), textually equal to that enum's
/// "sad" pattern (`None` / `Err(_)`).
fn is_pat_wildcard_or_sad(sema: &hir::Semantics<RootDatabase>, pat: &ast::Pat) -> bool {
    match sema.type_of_pat(pat).and_then(|ty| TryEnum::from_ty(sema, &ty)) {
        Some(try_enum) => try_enum.sad_pattern().syntax().text() == pat.syntax().text(),
        None => matches!(pat, ast::Pat::WildcardPat(_)),
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::{check_assist, check_assist_not_applicable, check_assist_target};
#[test]
fn test_if_let_with_match_unapplicable_for_simple_ifs() {
check_assist_not_applicable(
replace_if_let_with_match,
r#"
fn main() {
if $0true {} else if false {} else {}
}
"#,
)
}
#[test]
fn test_if_let_with_match_no_else() {
check_assist(
replace_if_let_with_match,
r#"
impl VariantData {
pub fn foo(&self) {
if $0let VariantData::Struct(..) = *self {
self.foo();
}
}
}
"#,
r#"
impl VariantData {
pub fn foo(&self) {
match *self {
VariantData::Struct(..) => {
self.foo();
}
_ => (),
}
}
}
"#,
)
}
#[test]
fn test_if_let_with_match_basic() {
check_assist(
replace_if_let_with_match,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if $0let VariantData::Struct(..) = *self {
true
} else if let VariantData::Tuple(..) = *self {
false
} else if cond() {
true
} else {
bar(
123
)
}
}
}
"#,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
match *self {
VariantData::Struct(..) => true,
VariantData::Tuple(..) => false,
_ if cond() => true,
_ => {
bar(
123
)
}
}
}
}
"#,
)
}
#[test]
fn test_if_let_with_match_on_tail_if_let() {
check_assist(
replace_if_let_with_match,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if let VariantData::Struct(..) = *self {
true
} else if let$0 VariantData::Tuple(..) = *self {
false
} else {
false
}
}
}
"#,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if let VariantData::Struct(..) = *self {
true
} else {
match *self {
VariantData::Tuple(..) => false,
_ => false,
}
}
}
}
"#,
)
}
#[test]
fn special_case_option() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: option
fn foo(x: Option<i32>) {
$0if let Some(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
r#"
fn foo(x: Option<i32>) {
match x {
Some(x) => println!("{}", x),
None => println!("none"),
}
}
"#,
);
}
#[test]
fn special_case_inverted_option() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: option
fn foo(x: Option<i32>) {
$0if let None = x {
println!("none")
} else {
println!("some")
}
}
"#,
r#"
fn foo(x: Option<i32>) {
match x {
None => println!("none"),
Some(_) => println!("some"),
}
}
"#,
);
}
#[test]
fn special_case_result() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: result
fn foo(x: Result<i32, ()>) {
$0if let Ok(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
r#"
fn foo(x: Result<i32, ()>) {
match x {
Ok(x) => println!("{}", x),
Err(_) => println!("none"),
}
}
"#,
);
}
#[test]
fn special_case_inverted_result() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: result
fn foo(x: Result<i32, ()>) {
$0if let Err(x) = x {
println!("{}", x)
} else {
println!("ok")
}
}
"#,
r#"
fn foo(x: Result<i32, ()>) {
match x {
Err(x) => println!("{}", x),
Ok(_) => println!("ok"),
}
}
"#,
);
}
#[test]
fn nested_indent() {
check_assist(
replace_if_let_with_match,
r#"
fn main() {
if true {
$0if let Ok(rel_path) = path.strip_prefix(root_path) {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
} else {
None
}
}
}
"#,
r#"
fn main() {
if true {
match path.strip_prefix(root_path) {
Ok(rel_path) => {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
}
_ => None,
}
}
}
"#,
)
}
#[test]
fn test_replace_match_with_if_let_unwraps_simple_expressions() {
check_assist(
replace_match_with_if_let,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
$0match *self {
VariantData::Struct(..) => true,
_ => false,
}
}
} "#,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if let VariantData::Struct(..) = *self {
true
} else {
false
}
}
} "#,
)
}
#[test]
fn test_replace_match_with_if_let_doesnt_unwrap_multiline_expressions() {
check_assist(
replace_match_with_if_let,
r#"
fn foo() {
$0match a {
VariantData::Struct(..) => {
bar(
123
)
}
_ => false,
}
} "#,
r#"
fn foo() {
if let VariantData::Struct(..) = a {
bar(
123
)
} else {
false
}
} "#,
)
}
#[test]
fn replace_match_with_if_let_target() {
check_assist_target(
replace_match_with_if_let,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
$0match *self {
VariantData::Struct(..) => true,
_ => false,
}
}
} "#,
r#"match *self {
VariantData::Struct(..) => true,
_ => false,
}"#,
);
}
#[test]
fn special_case_option_match_to_if_let() {
check_assist(
replace_match_with_if_let,
r#"
//- minicore: option
fn foo(x: Option<i32>) {
$0match x {
Some(x) => println!("{}", x),
None => println!("none"),
}
}
"#,
r#"
fn foo(x: Option<i32>) {
if let Some(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
);
}
#[test]
fn special_case_result_match_to_if_let() {
check_assist(
replace_match_with_if_let,
r#"
//- minicore: result
fn foo(x: Result<i32, ()>) {
$0match x {
Ok(x) => println!("{}", x),
Err(_) => println!("none"),
}
}
"#,
r#"
fn foo(x: Result<i32, ()>) {
if let Ok(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
);
}
#[test]
fn nested_indent_match_to_if_let() {
check_assist(
replace_match_with_if_let,
r#"
fn main() {
if true {
$0match path.strip_prefix(root_path) {
Ok(rel_path) => {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
}
_ => None,
}
}
}
"#,
r#"
fn main() {
if true {
if let Ok(rel_path) = path.strip_prefix(root_path) {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
} else {
None
}
}
}
"#,
)
}
#[test]
fn replace_match_with_if_let_empty_wildcard_expr() {
check_assist(
replace_match_with_if_let,
r#"
fn main() {
$0match path.strip_prefix(root_path) {
Ok(rel_path) => println!("{}", rel_path),
_ => (),
}
}
"#,
r#"
fn main() {
if let Ok(rel_path) = path.strip_prefix(root_path) {
println!("{}", rel_path)
}
}
"#,
)
}
}
// Simplify
use std::iter::{self, successors};
use either::Either;
use ide_db::{ty_filter::TryEnum, RootDatabase};
use syntax::{
ast::{
self,
edit::{AstNodeEdit, IndentLevel},
make,
},
AstNode,
};
use crate::{
utils::{does_pat_match_variant, unwrap_trivial_block},
AssistContext, AssistId, AssistKind, Assists,
};
// Assist: replace_if_let_with_match
//
// Replaces a `if let` expression with a `match` expression.
//
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
// $0if let Action::Move { distance } = action {
// foo(distance)
// } else {
// bar()
// }
// }
// ```
// ->
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
// match action {
// Action::Move { distance } => foo(distance),
// _ => bar(),
// }
// }
// ```
pub(crate) fn replace_if_let_with_match(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
    let if_expr: ast::IfExpr = ctx.find_node_at_offset()?;
    let mut else_block = None;
    // Walk the whole `else if` chain starting from the cursor's `if`.
    // A trailing plain `else { .. }` terminates the walk and is captured
    // into `else_block` as a side effect of the closure.
    let if_exprs = successors(Some(if_expr.clone()), |expr| match expr.else_branch()? {
        ast::ElseBranch::IfExpr(expr) => Some(expr),
        ast::ElseBranch::Block(block) => {
            else_block = Some(block);
            None
        }
    });
    let scrutinee_to_be_expr = if_expr.condition()?.expr()?;
    let mut pat_seen = false;
    let mut cond_bodies = Vec::new();
    for if_expr in if_exprs {
        let cond = if_expr.condition()?;
        let expr = cond.expr()?;
        let cond = match cond.pat() {
            Some(pat) => {
                if scrutinee_to_be_expr.syntax().text() != expr.syntax().text() {
                    // Only if all condition expressions are equal we can merge them into a match
                    return None;
                }
                pat_seen = true;
                Either::Left(pat)
            }
            // Plain boolean condition: later rendered as a `_ if cond` guard arm.
            None => Either::Right(expr),
        };
        let body = if_expr.then_branch()?;
        cond_bodies.push((cond, body));
    }
    if !pat_seen {
        // Don't offer turning an if (chain) without patterns into a match
        return None;
    }
    let target = if_expr.syntax().text_range();
    acc.add(
        AssistId("replace_if_let_with_match", AssistKind::RefactorRewrite),
        "Replace with match",
        target,
        move |edit| {
            let match_expr = {
                let else_arm = make_else_arm(else_block, &cond_bodies, ctx);
                // `if let` conditions become plain pattern arms; boolean
                // conditions become wildcard arms guarded by the expression.
                let make_match_arm = |(pat, body): (_, ast::BlockExpr)| {
                    let body = body.reset_indent().indent(IndentLevel(1));
                    match pat {
                        Either::Left(pat) => {
                            make::match_arm(iter::once(pat), None, unwrap_trivial_block(body))
                        }
                        Either::Right(expr) => make::match_arm(
                            iter::once(make::wildcard_pat().into()),
                            Some(expr),
                            unwrap_trivial_block(body),
                        ),
                    }
                };
                let arms = cond_bodies.into_iter().map(make_match_arm).chain(iter::once(else_arm));
                let match_expr = make::expr_match(scrutinee_to_be_expr, make::match_arm_list(arms));
                match_expr.indent(IndentLevel::from_node(if_expr.syntax()))
            };
            let has_preceding_if_expr =
                if_expr.syntax().parent().map_or(false, |it| ast::IfExpr::can_cast(it.kind()));
            let expr = if has_preceding_if_expr {
                // make sure we replace the `else if let ...` with a block so we don't end up with `else expr`
                make::block_expr(None, Some(match_expr)).into()
            } else {
                match_expr
            };
            edit.replace_ast::<ast::Expr>(if_expr.into(), expr);
        },
    )
}
/// Builds the final `match` arm produced for the `else` branch of the chain.
///
/// With no `else` block at all the arm is `_ => ()`. With an `else` block the
/// arm pattern is normally `_`, but when the chain is a single `if let` whose
/// pattern's type is a known try-enum (`Option`/`Result`) the complementary
/// variant pattern (e.g. `None` / `Some(_)`) is used instead of a wildcard.
///
/// Fix: the `cond_bodies` parameter is now a slice (`&[..]`) instead of
/// `&Vec<..>` (clippy `ptr_arg`); the call site's `&cond_bodies` coerces, so
/// this is backward compatible.
fn make_else_arm(
    else_block: Option<ast::BlockExpr>,
    cond_bodies: &[(Either<ast::Pat, ast::Expr>, ast::BlockExpr)],
    ctx: &AssistContext,
) -> ast::MatchArm {
    // No trailing `else`: synthesize `_ => ()`.
    let else_block = match else_block {
        Some(block) => block,
        None => {
            return make::match_arm(
                iter::once(make::wildcard_pat().into()),
                None,
                make::expr_unit().into(),
            )
        }
    };
    // Only special-case a chain that consists of exactly one pattern condition.
    let pattern = if let [(Either::Left(pat), _)] = cond_bodies {
        ctx.sema
            .type_of_pat(pat)
            .and_then(|ty| TryEnum::from_ty(&ctx.sema, &ty))
            .zip(Some(pat))
    } else {
        None
    };
    let pattern = match pattern {
        Some((it, pat)) => {
            // Use whichever variant pattern the `if let` did NOT match.
            if does_pat_match_variant(pat, &it.sad_pattern()) {
                it.happy_pattern()
            } else {
                it.sad_pattern()
            }
        }
        None => make::wildcard_pat().into(),
    };
    make::match_arm(iter::once(pattern), None, unwrap_trivial_block(else_block))
}
// Assist: replace_match_with_if_let
//
// Replaces a binary `match` with a wildcard pattern and no guards with an `if let` expression.
//
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
// $0match action {
// Action::Move { distance } => foo(distance),
// _ => bar(),
// }
// }
// ```
// ->
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
// if let Action::Move { distance } = action {
// foo(distance)
// } else {
// bar()
// }
// }
// ```
pub(crate) fn replace_match_with_if_let(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
    let match_expr: ast::MatchExpr = ctx.find_node_at_offset()?;
    // Require exactly two arms (the third `next()` must fail) and no guards.
    let mut arms = match_expr.match_arm_list()?.arms();
    let first_arm = arms.next()?;
    let second_arm = arms.next()?;
    if arms.next().is_some() || first_arm.guard().is_some() || second_arm.guard().is_some() {
        return None;
    }
    let condition_expr = match_expr.expr()?;
    // The wildcard/"sad"-pattern arm maps to `else`; the remaining arm
    // supplies the `if let` pattern and `then` body.
    let (if_let_pat, then_expr, else_expr) = if is_pat_wildcard_or_sad(&ctx.sema, &first_arm.pat()?)
    {
        (second_arm.pat()?, second_arm.expr()?, first_arm.expr()?)
    } else if is_pat_wildcard_or_sad(&ctx.sema, &second_arm.pat()?) {
        (first_arm.pat()?, first_arm.expr()?, second_arm.expr()?)
    } else {
        // No wildcard/sad arm at all: assist is not applicable.
        return None;
    };
    let target = match_expr.syntax().text_range();
    acc.add(
        AssistId("replace_match_with_if_let", AssistKind::RefactorRewrite),
        "Replace with if let",
        target,
        move |edit| {
            let condition = make::condition(condition_expr, Some(if_let_pat));
            // Block bodies are kept as blocks; bare expressions get wrapped.
            let then_block = match then_expr.reset_indent() {
                ast::Expr::BlockExpr(block) => block,
                expr => make::block_expr(iter::empty(), Some(expr)),
            };
            // An empty block `{}` or unit `()` else-body means: emit no `else` at all.
            let else_expr = match else_expr {
                ast::Expr::BlockExpr(block)
                    if block.statements().count() == 0 && block.tail_expr().is_none() =>
                {
                    None
                }
                ast::Expr::TupleExpr(tuple) if tuple.fields().count() == 0 => None,
                expr => Some(expr),
            };
            let if_let_expr = make::expr_if(
                condition,
                then_block,
                else_expr.map(|else_expr| {
                    ast::ElseBranch::Block(make::block_expr(iter::empty(), Some(else_expr)))
                }),
            )
            // Match the indentation of the `match` being replaced.
            .indent(IndentLevel::from_node(match_expr.syntax()));
            edit.replace_ast::<ast::Expr>(match_expr.into(), if_let_expr);
        },
    )
}
/// True when `pat` is `_`, or when it spells out the "sad" variant
/// (`None`/`Err(_)`) of the try-enum that `pat`'s type resolves to.
fn is_pat_wildcard_or_sad(sema: &hir::Semantics<RootDatabase>, pat: &ast::Pat) -> bool {
    let sad_pat = sema
        .type_of_pat(pat)
        .and_then(|ty| TryEnum::from_ty(sema, &ty))
        .map(|try_enum| try_enum.sad_pattern());
    match sad_pat {
        Some(sad) => sad.syntax().text() == pat.syntax().text(),
        None => matches!(pat, ast::Pat::WildcardPat(_)),
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::{check_assist, check_assist_not_applicable, check_assist_target};
#[test]
fn test_if_let_with_match_unapplicable_for_simple_ifs() {
check_assist_not_applicable(
replace_if_let_with_match,
r#"
fn main() {
if $0true {} else if false {} else {}
}
"#,
)
}
#[test]
fn test_if_let_with_match_no_else() {
check_assist(
replace_if_let_with_match,
r#"
impl VariantData {
pub fn foo(&self) {
if $0let VariantData::Struct(..) = *self {
self.foo();
}
}
}
"#,
r#"
impl VariantData {
pub fn foo(&self) {
match *self {
VariantData::Struct(..) => {
self.foo();
}
_ => (),
}
}
}
"#,
)
}
#[test]
fn test_if_let_with_match_basic() {
check_assist(
replace_if_let_with_match,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if $0let VariantData::Struct(..) = *self {
true
} else if let VariantData::Tuple(..) = *self {
false
} else if cond() {
true
} else {
bar(
123
)
}
}
}
"#,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
match *self {
VariantData::Struct(..) => true,
VariantData::Tuple(..) => false,
_ if cond() => true,
_ => {
bar(
123
)
}
}
}
}
"#,
)
}
#[test]
fn test_if_let_with_match_on_tail_if_let() {
check_assist(
replace_if_let_with_match,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if let VariantData::Struct(..) = *self {
true
} else if let$0 VariantData::Tuple(..) = *self {
false
} else {
false
}
}
}
"#,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if let VariantData::Struct(..) = *self {
true
} else {
match *self {
VariantData::Tuple(..) => false,
_ => false,
}
}
}
}
"#,
)
}
#[test]
fn special_case_option() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: option
fn foo(x: Option<i32>) {
$0if let Some(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
r#"
fn foo(x: Option<i32>) {
match x {
Some(x) => println!("{}", x),
None => println!("none"),
}
}
"#,
);
}
#[test]
fn special_case_inverted_option() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: option
fn foo(x: Option<i32>) {
$0if let None = x {
println!("none")
} else {
println!("some")
}
}
"#,
r#"
fn foo(x: Option<i32>) {
match x {
None => println!("none"),
Some(_) => println!("some"),
}
}
"#,
);
}
#[test]
fn special_case_result() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: result
fn foo(x: Result<i32, ()>) {
$0if let Ok(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
r#"
fn foo(x: Result<i32, ()>) {
match x {
Ok(x) => println!("{}", x),
Err(_) => println!("none"),
}
}
"#,
);
}
#[test]
fn special_case_inverted_result() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: result
fn foo(x: Result<i32, ()>) {
$0if let Err(x) = x {
println!("{}", x)
} else {
println!("ok")
}
}
"#,
r#"
fn foo(x: Result<i32, ()>) {
match x {
Err(x) => println!("{}", x),
Ok(_) => println!("ok"),
}
}
"#,
);
}
#[test]
fn nested_indent() {
check_assist(
replace_if_let_with_match,
r#"
fn main() {
if true {
$0if let Ok(rel_path) = path.strip_prefix(root_path) {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
} else {
None
}
}
}
"#,
r#"
fn main() {
if true {
match path.strip_prefix(root_path) {
Ok(rel_path) => {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
}
_ => None,
}
}
}
"#,
)
}
#[test]
fn test_replace_match_with_if_let_unwraps_simple_expressions() {
check_assist(
replace_match_with_if_let,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
$0match *self {
VariantData::Struct(..) => true,
_ => false,
}
}
} "#,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if let VariantData::Struct(..) = *self {
true
} else {
false
}
}
} "#,
)
}
#[test]
fn test_replace_match_with_if_let_doesnt_unwrap_multiline_expressions() {
check_assist(
replace_match_with_if_let,
r#"
fn foo() {
$0match a {
VariantData::Struct(..) => {
bar(
123
)
}
_ => false,
}
} "#,
r#"
fn foo() {
if let VariantData::Struct(..) = a {
bar(
123
)
} else {
false
}
} "#,
)
}
#[test]
fn replace_match_with_if_let_target() {
check_assist_target(
replace_match_with_if_let,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
$0match *self {
VariantData::Struct(..) => true,
_ => false,
}
}
} "#,
r#"match *self {
VariantData::Struct(..) => true,
_ => false,
}"#,
);
}
#[test]
fn special_case_option_match_to_if_let() {
check_assist(
replace_match_with_if_let,
r#"
//- minicore: option
fn foo(x: Option<i32>) {
$0match x {
Some(x) => println!("{}", x),
None => println!("none"),
}
}
"#,
r#"
fn foo(x: Option<i32>) {
if let Some(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
);
}
#[test]
fn special_case_result_match_to_if_let() {
check_assist(
replace_match_with_if_let,
r#"
//- minicore: result
fn foo(x: Result<i32, ()>) {
$0match x {
Ok(x) => println!("{}", x),
Err(_) => println!("none"),
}
}
"#,
r#"
fn foo(x: Result<i32, ()>) {
if let Ok(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
);
}
#[test]
fn nested_indent_match_to_if_let() {
check_assist(
replace_match_with_if_let,
r#"
fn main() {
if true {
$0match path.strip_prefix(root_path) {
Ok(rel_path) => {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
}
_ => None,
}
}
}
"#,
r#"
fn main() {
if true {
if let Ok(rel_path) = path.strip_prefix(root_path) {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
} else {
None
}
}
}
"#,
)
}
#[test]
fn replace_match_with_if_let_empty_wildcard_expr() {
check_assist(
replace_match_with_if_let,
r#"
fn main() {
$0match path.strip_prefix(root_path) {
Ok(rel_path) => println!("{}", rel_path),
_ => (),
}
}
"#,
r#"
fn main() {
if let Ok(rel_path) = path.strip_prefix(root_path) {
println!("{}", rel_path)
}
}
"#,
)
}
}
// --------------------------------------------------------------------------
use std::collections::BTreeMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::io::{self, Write};
use std::path::PathBuf;
use std::process::Command;
use std::{env, fs};
use rustc_version::VersionMeta;
use tempdir::TempDir;
use toml::{Table, Value};
use CompilationMode;
use cargo::{Root, Rustflags};
use errors::*;
use extensions::CommandExt;
use rustc::{Src, Sysroot, Target};
use util;
use xargo::Home;
use {cargo, xargo};
// Cargo profile directory the sysroot artifacts land in. With the "dev"
// feature the sysroot crates are built in debug mode; otherwise in release
// mode (this must stay in sync with the `--release` flag added in `build`).
#[cfg(feature = "dev")]
fn profile() -> &'static str {
    "debug"
}

#[cfg(not(feature = "dev"))]
fn profile() -> &'static str {
    "release"
}
/// Builds the target sysroot: for each stage in `blueprint` (in ascending
/// stage order), generates a throw-away Cargo project that depends on that
/// stage's crates, builds it, and copies the artifacts into the Xargo
/// sysroot. Finally records `hash` so `update` can skip unchanged rebuilds.
fn build(
    cmode: &CompilationMode,
    blueprint: Blueprint,
    ctoml: &cargo::Toml,
    home: &Home,
    rustflags: &Rustflags,
    sysroot: &Sysroot,
    hash: u64,
    verbose: bool,
) -> Result<()> {
    // Minimal manifest for the generated "sysroot" package; each stage's
    // `[dependencies]` table is appended below.
    const TOML: &'static str = r#"
[package]
authors = ["The Rust Project Developers"]
name = "sysroot"
version = "0.0.0"
"#;

    // Take the per-target lock and start from a clean slate.
    let rustlib = home.lock_rw(cmode.triple())?;
    rustlib
        .remove_siblings()
        .chain_err(|| format!("couldn't clear {}", rustlib.path().display()))?;
    let dst = rustlib.parent().join("lib");
    util::mkdir(&dst)?;

    if cmode.triple().contains("pc-windows-gnu") {
        let src = &sysroot
            .path()
            .join("lib")
            .join("rustlib")
            .join(cmode.triple())
            .join("lib");

        // These are required for linking executables/dlls
        for file in ["rsbegin.o", "rsend.o", "crt2.o", "dllcrt2.o"].iter() {
            let file_src = src.join(file);
            let file_dst = dst.join(file);

            fs::copy(&file_src, &file_dst).chain_err(|| {
                format!(
                    "couldn't copy {} to {}",
                    file_src.display(),
                    file_dst.display()
                )
            })?;
        }
    }

    // BTreeMap iteration guarantees stages are built in ascending order.
    for (_, stage) in blueprint.stages {
        let td = TempDir::new("xargo").chain_err(|| "couldn't create a temporary directory")?;
        let td = td.path();

        // Assemble the stage's Cargo.toml: base manifest + [dependencies]
        // + the user's [profile] section, if any.
        let mut stoml = TOML.to_owned();

        let mut map = Table::new();
        map.insert("dependencies".to_owned(), Value::Table(stage.toml));
        stoml.push_str(&Value::Table(map).to_string());

        if let Some(profile) = ctoml.profile() {
            stoml.push_str(&profile.to_string())
        }

        util::write(&td.join("Cargo.toml"), &stoml)?;
        util::mkdir(&td.join("src"))?;
        // Empty lib.rs: the package exists only to pull in its dependencies.
        util::write(&td.join("src/lib.rs"), "")?;

        // Factory closure: one fresh `cargo` command per crate built.
        let cargo = || {
            let mut cmd = Command::new("cargo");
            let mut flags = rustflags.for_xargo(home);
            // Sysroot crates must be treated as unstable even when unmarked.
            flags.push_str(" -Z force-unstable-if-unmarked");
            if verbose {
                writeln!(io::stderr(), "+ RUSTFLAGS={:?}", flags).ok();
            }
            cmd.env("RUSTFLAGS", flags);
            cmd.env_remove("CARGO_TARGET_DIR");

            // As of rust-lang/cargo#4788 Cargo invokes rustc with a changed "current directory" so
            // we can't assume that such directory will be the same as the directory from which
            // Xargo was invoked. This is specially true when compiling the sysroot as the std
            // source is provided as a workspace and Cargo will change the current directory to the
            // root of the workspace when building one. To ensure rustc finds a target specification
            // file stored in the current directory we'll set `RUST_TARGET_PATH` to the current
            // directory.
            if env::var_os("RUST_TARGET_PATH").is_none() {
                if let CompilationMode::Cross(ref target) = *cmode {
                    if let Target::Custom { ref json, .. } = *target {
                        cmd.env("RUST_TARGET_PATH", json.parent().unwrap());
                    }
                }
            }

            cmd.arg("build");

            // `--release` unless built with the "dev" feature; keep in sync
            // with `profile()`.
            match () {
                #[cfg(feature = "dev")]
                () => {}
                #[cfg(not(feature = "dev"))]
                () => {
                    cmd.arg("--release");
                }
            }
            cmd.arg("--manifest-path");
            cmd.arg(td.join("Cargo.toml"));
            cmd.args(&["--target", cmode.triple()]);

            if verbose {
                cmd.arg("-v");
            }

            cmd
        };

        for krate in stage.crates {
            cargo().arg("-p").arg(krate).run(verbose)?;
        }

        // Copy artifacts to Xargo sysroot
        util::cp_r(
            &td.join("target")
                .join(cmode.triple())
                .join(profile())
                .join("deps"),
            &dst,
        )?;
    }

    // Create hash file
    util::write(&rustlib.parent().join(".hash"), &hash.to_string())?;

    Ok(())
}
/// Reads the previously recorded sysroot hash from the `.hash` file next to
/// the target's lock, returning `None` when the file is missing or unparsable.
fn old_hash(cmode: &CompilationMode, home: &Home) -> Result<Option<u64>> {
    // FIXME this should be `lock_ro`
    let lock = home.lock_rw(cmode.triple())?;
    let hfile = lock.parent().join(".hash");
    if !hfile.exists() {
        return Ok(None);
    }
    // A malformed hash file is treated the same as a missing one.
    Ok(util::read(&hfile)?.parse().ok())
}
/// Computes the hash of the would-be target sysroot
///
/// This information is used to compute the hash
///
/// - Dependencies in `Xargo.toml` for a specific target
/// - RUSTFLAGS / build.rustflags / target.*.rustflags
/// - The target specification file, if any
/// - `[profile.release]` in `Cargo.toml`
/// - `rustc` commit hash
fn hash(
    cmode: &CompilationMode,
    blueprint: &Blueprint,
    rustflags: &Rustflags,
    ctoml: &cargo::Toml,
    meta: &VersionMeta,
) -> Result<u64> {
    // Fold every input that influences the sysroot build into one 64-bit
    // fingerprint; any change triggers a rebuild (see `update`).
    let mut hasher = DefaultHasher::new();

    blueprint.hash(&mut hasher);
    rustflags.hash(&mut hasher);
    // Fallible — per the doc comment above, the target specification file
    // participates in the hash, so hashing the mode can hit I/O errors.
    cmode.hash(&mut hasher)?;
    if let Some(profile) = ctoml.profile() {
        profile.hash(&mut hasher);
    }
    // Include the rustc commit hash (when rustc reports one) so a toolchain
    // update invalidates the cached sysroot.
    if let Some(ref hash) = meta.commit_hash {
        hash.hash(&mut hasher);
    }

    Ok(hasher.finish())
}
/// Entry point: (re)builds the target sysroot when its fingerprint changed,
/// then — for cross compilation only — mirrors the host's rustlib `lib/` and
/// `bin/` directories into the Xargo sysroot.
pub fn update(
    cmode: &CompilationMode,
    home: &Home,
    root: &Root,
    rustflags: &Rustflags,
    meta: &VersionMeta,
    src: &Src,
    sysroot: &Sysroot,
    verbose: bool,
) -> Result<()> {
    let ctoml = cargo::toml(root)?;
    let xtoml = xargo::toml(root)?;

    let blueprint = Blueprint::from(xtoml.as_ref(), cmode.triple(), root, &src)?;

    // Rebuild only when the recorded fingerprint differs from the current one.
    let hash = hash(cmode, &blueprint, rustflags, &ctoml, meta)?;

    if old_hash(cmode, home)? != Some(hash) {
        build(
            cmode,
            blueprint,
            &ctoml,
            home,
            rustflags,
            sysroot,
            hash,
            verbose,
        )?;
    }

    // copy host artifacts into the sysroot, if necessary
    if cmode.is_native() {
        return Ok(());
    }

    let lock = home.lock_rw(&meta.host)?;
    let hfile = lock.parent().join(".hash");

    // The host side is keyed on the rustc commit hash (empty string if rustc
    // reports none), not on the blueprint fingerprint.
    let hash = meta.commit_hash.as_ref().map(|s| &**s).unwrap_or("");

    if hfile.exists() {
        if util::read(&hfile)? == hash {
            // Host artifacts already up to date for this toolchain.
            return Ok(());
        }
    }

    lock.remove_siblings()
        .chain_err(|| format!("couldn't clear {}", lock.path().display()))?;

    let dst = lock.parent().join("lib");
    util::mkdir(&dst)?;
    util::cp_r(
        &sysroot
            .path()
            .join("lib/rustlib")
            .join(&meta.host)
            .join("lib"),
        &dst,
    )?;

    let bin_dst = lock.parent().join("bin");
    util::mkdir(&bin_dst)?;
    util::cp_r(
        &sysroot
            .path()
            .join("lib/rustlib")
            .join(&meta.host)
            .join("bin"),
        &bin_dst,
    )?;

    util::write(&hfile, hash)?;

    Ok(())
}
/// Per stage dependencies
#[derive(Debug)]
pub struct Stage {
    // Crates to pass to `cargo build -p` for this stage.
    crates: Vec<String>,
    // `[dependencies]` table fragment for the generated sysroot Cargo.toml.
    toml: Table,
}
/// A sysroot that will be built in "stages"
#[derive(Debug)]
pub struct Blueprint {
    // Stages keyed by stage number; BTreeMap keeps them in build order.
    stages: BTreeMap<i64, Stage>,
}
impl Blueprint {
    // Empty blueprint; stages are added via `push`.
    fn new() -> Self {
        Blueprint {
            stages: BTreeMap::new(),
        }
    }

    /// Builds a blueprint from `Xargo.toml` (if present).
    ///
    /// Merges the global `[dependencies]` table with the per-target
    /// `[target.<triple>.dependencies]` table; a crate named in both is an
    /// error. When `Xargo.toml` declares no dependencies at all, defaults to
    /// `core` (stage 0) and `compiler_builtins` with feature "mem" (stage 1).
    fn from(toml: Option<&xargo::Toml>, target: &str, root: &Root, src: &Src) -> Result<Self> {
        let deps = match (
            toml.and_then(|t| t.dependencies()),
            toml.and_then(|t| t.target_dependencies(target)),
        ) {
            (Some(value), Some(tvalue)) => {
                let mut deps = value
                    .as_table()
                    .cloned()
                    .ok_or_else(|| format!("Xargo.toml: `dependencies` must be a table"))?;

                let more_deps = tvalue.as_table().ok_or_else(|| {
                    format!(
                        "Xargo.toml: `target.{}.dependencies` must be \
                         a table",
                        target
                    )
                })?;
                // Target-specific entries are merged in; duplicates rejected.
                for (k, v) in more_deps {
                    if deps.insert(k.to_owned(), v.clone()).is_some() {
                        Err(format!(
                            "found duplicate dependency name {}, \
                             but all dependencies must have a \
                             unique name",
                            k
                        ))?
                    }
                }

                deps
            }
            // NOTE(review): this branch also covers a plain `[dependencies]`
            // table, yet the error message only mentions the target-specific
            // one — the message may be misleading for that case.
            (Some(value), None) | (None, Some(value)) => if let Some(table) = value.as_table() {
                table.clone()
            } else {
                Err(format!(
                    "Xargo.toml: target.{}.dependencies must be \
                     a table",
                    target
                ))?
            },
            (None, None) => {
                // If no dependencies were listed, we assume `core` and `compiler_builtins` as the
                // dependencies
                let mut t = BTreeMap::new();
                let mut core = BTreeMap::new();
                core.insert("stage".to_owned(), Value::Integer(0));
                t.insert("core".to_owned(), Value::Table(core));
                let mut cb = BTreeMap::new();
                cb.insert(
                    "features".to_owned(),
                    Value::Array(vec![Value::String("mem".to_owned())]),
                );
                cb.insert("stage".to_owned(), Value::Integer(1));
                t.insert(
                    "compiler_builtins".to_owned(),
                    Value::Table(cb),
                );
                t
            }
        };

        let mut blueprint = Blueprint::new();
        for (k, v) in deps {
            if let Value::Table(mut map) = v {
                // Optional explicit stage number; defaults to stage 0. The
                // key is removed so it doesn't leak into the Cargo.toml.
                let stage = if let Some(value) = map.remove("stage") {
                    value
                        .as_integer()
                        .ok_or_else(|| format!("dependencies.{}.stage must be an integer", k))?
                } else {
                    0
                };

                // Resolve a relative `path` against the project root so the
                // generated manifest (written to a temp dir) still finds it.
                if let Some(path) = map.get_mut("path") {
                    let p = PathBuf::from(path.as_str()
                        .ok_or_else(|| format!("dependencies.{}.path must be a string", k))?);
                    if !p.is_absolute() {
                        *path = Value::String(
                            root.path()
                                .join(&p)
                                .canonicalize()
                                .chain_err(|| format!("couldn't canonicalize {}", p.display()))?
                                .display()
                                .to_string(),
                        );
                    }
                }

                // Neither `path` nor `git` given: assume the crate lives in
                // the rust source tree as `lib<name>`.
                if !map.contains_key("path") && !map.contains_key("git") {
                    let path = src.path().join(format!("lib{}", k)).display().to_string();
                    map.insert("path".to_owned(), Value::String(path));
                }
                blueprint.push(stage, k, map);
            } else {
                Err(format!(
                    "Xargo.toml: target.{}.dependencies.{} must be \
                     a table",
                    target, k
                ))?
            }
        }

        Ok(blueprint)
    }

    /// Records `krate` (with its dependency table `toml`) under `stage`,
    /// creating the stage on first use.
    fn push(&mut self, stage: i64, krate: String, toml: Table) {
        let stage = self.stages.entry(stage).or_insert_with(|| Stage {
            crates: vec![],
            toml: Table::new(),
        });

        stage.toml.insert(krate.clone(), Value::Table(toml));

        stage.crates.push(krate);
    }

    /// Feeds every (crate name, dependency table) pair into `hasher`, in
    /// deterministic BTreeMap order, for the sysroot fingerprint.
    fn hash<H>(&self, hasher: &mut H)
    where
        H: Hasher,
    {
        for stage in self.stages.values() {
            for (k, v) in stage.toml.iter() {
                k.hash(hasher);
                v.to_string().hash(hasher);
            }
        }
    }
}
// Fix for latest nightly: need to add a `[patch]` section for rustc-std-workspace-core.
use std::collections::BTreeMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::io::{self, Write};
use std::path::PathBuf;
use std::process::Command;
use std::{env, fs};
use rustc_version::VersionMeta;
use tempdir::TempDir;
use toml::{Table, Value};
use CompilationMode;
use cargo::{Root, Rustflags};
use errors::*;
use extensions::CommandExt;
use rustc::{Src, Sysroot, Target};
use util;
use xargo::Home;
use {cargo, xargo};
// Cargo profile directory the sysroot artifacts land in: debug under the
// "dev" feature, release otherwise (matches the `--release` flag in `build`).
#[cfg(feature = "dev")]
fn profile() -> &'static str {
    "debug"
}

#[cfg(not(feature = "dev"))]
fn profile() -> &'static str {
    "release"
}
/// Builds the target sysroot stage by stage: each stage gets a throw-away
/// Cargo project whose manifest carries the stage's `[dependencies]` and
/// `[patch]` tables, is built, and has its artifacts copied into the Xargo
/// sysroot. Finally records `hash` so `update` can skip unchanged rebuilds.
fn build(
    cmode: &CompilationMode,
    blueprint: Blueprint,
    ctoml: &cargo::Toml,
    home: &Home,
    rustflags: &Rustflags,
    sysroot: &Sysroot,
    hash: u64,
    verbose: bool,
) -> Result<()> {
    // Minimal manifest for the generated "sysroot" package; the per-stage
    // tables are appended below.
    const TOML: &'static str = r#"
[package]
authors = ["The Rust Project Developers"]
name = "sysroot"
version = "0.0.0"
"#;

    // Take the per-target lock and start from a clean slate.
    let rustlib = home.lock_rw(cmode.triple())?;
    rustlib
        .remove_siblings()
        .chain_err(|| format!("couldn't clear {}", rustlib.path().display()))?;
    let dst = rustlib.parent().join("lib");
    util::mkdir(&dst)?;

    if cmode.triple().contains("pc-windows-gnu") {
        let src = &sysroot
            .path()
            .join("lib")
            .join("rustlib")
            .join(cmode.triple())
            .join("lib");

        // These are required for linking executables/dlls
        for file in ["rsbegin.o", "rsend.o", "crt2.o", "dllcrt2.o"].iter() {
            let file_src = src.join(file);
            let file_dst = dst.join(file);

            fs::copy(&file_src, &file_dst).chain_err(|| {
                format!(
                    "couldn't copy {} to {}",
                    file_src.display(),
                    file_dst.display()
                )
            })?;
        }
    }

    // BTreeMap iteration guarantees stages are built in ascending order.
    for (_, stage) in blueprint.stages {
        let td = TempDir::new("xargo").chain_err(|| "couldn't create a temporary directory")?;
        let td = td.path();

        let mut stoml = TOML.to_owned();
        {
            // Emit both the stage's [dependencies] and its [patch] table.
            // The [patch] entries are what the note above refers to
            // (presumably mapping rustc-std-workspace-core — confirm against
            // the Blueprint construction, which is outside this chunk).
            let mut map = Table::new();
            map.insert("dependencies".to_owned(), Value::Table(stage.dependencies));
            map.insert("patch".to_owned(), Value::Table(stage.patch));
            stoml.push_str(&Value::Table(map).to_string());
        }

        // Propagate the user's [profile] settings, if present.
        if let Some(profile) = ctoml.profile() {
            stoml.push_str(&profile.to_string())
        }

        util::write(&td.join("Cargo.toml"), &stoml)?;
        util::mkdir(&td.join("src"))?;
        // Empty lib.rs: the package exists only to pull in its dependencies.
        util::write(&td.join("src/lib.rs"), "")?;

        // Factory closure: one fresh `cargo` command per crate built.
        let cargo = || {
            let mut cmd = Command::new("cargo");
            let mut flags = rustflags.for_xargo(home);
            // Sysroot crates must be treated as unstable even when unmarked.
            flags.push_str(" -Z force-unstable-if-unmarked");
            if verbose {
                writeln!(io::stderr(), "+ RUSTFLAGS={:?}", flags).ok();
            }
            cmd.env("RUSTFLAGS", flags);
            cmd.env_remove("CARGO_TARGET_DIR");

            // As of rust-lang/cargo#4788 Cargo invokes rustc with a changed "current directory" so
            // we can't assume that such directory will be the same as the directory from which
            // Xargo was invoked. This is specially true when compiling the sysroot as the std
            // source is provided as a workspace and Cargo will change the current directory to the
            // root of the workspace when building one. To ensure rustc finds a target specification
            // file stored in the current directory we'll set `RUST_TARGET_PATH` to the current
            // directory.
            if env::var_os("RUST_TARGET_PATH").is_none() {
                if let CompilationMode::Cross(ref target) = *cmode {
                    if let Target::Custom { ref json, .. } = *target {
                        cmd.env("RUST_TARGET_PATH", json.parent().unwrap());
                    }
                }
            }

            cmd.arg("build");

            // `--release` unless built with the "dev" feature; keep in sync
            // with `profile()`.
            match () {
                #[cfg(feature = "dev")]
                () => {}
                #[cfg(not(feature = "dev"))]
                () => {
                    cmd.arg("--release");
                }
            }
            cmd.arg("--manifest-path");
            cmd.arg(td.join("Cargo.toml"));
            cmd.args(&["--target", cmode.triple()]);

            if verbose {
                cmd.arg("-v");
            }

            cmd
        };

        for krate in stage.crates {
            cargo().arg("-p").arg(krate).run(verbose)?;
        }

        // Copy artifacts to Xargo sysroot
        util::cp_r(
            &td.join("target")
                .join(cmode.triple())
                .join(profile())
                .join("deps"),
            &dst,
        )?;
    }

    // Create hash file
    util::write(&rustlib.parent().join(".hash"), &hash.to_string())?;

    Ok(())
}
/// Loads the cached sysroot hash for `cmode`'s target from its `.hash` file;
/// `None` when the file is absent or does not parse as `u64`.
fn old_hash(cmode: &CompilationMode, home: &Home) -> Result<Option<u64>> {
    // FIXME this should be `lock_ro`
    let lock = home.lock_rw(cmode.triple())?;
    let hfile = lock.parent().join(".hash");
    let cached = if hfile.exists() {
        // Unparsable contents are treated as "no previous hash".
        util::read(&hfile)?.parse().ok()
    } else {
        None
    };
    Ok(cached)
}
/// Computes the hash of the would-be target sysroot
///
/// The following information is used to compute the hash:
///
/// - Dependencies in `Xargo.toml` for a specific target
/// - RUSTFLAGS / build.rustflags / target.*.rustflags
/// - The target specification file, if any
/// - `[profile.release]` in `Cargo.toml`
/// - `rustc` commit hash
fn hash(
    cmode: &CompilationMode,
    blueprint: &Blueprint,
    rustflags: &Rustflags,
    ctoml: &cargo::Toml,
    meta: &VersionMeta,
) -> Result<u64> {
    let mut hasher = DefaultHasher::new();
    // The feed order below must stay stable across runs, since the
    // result is compared against the hash stored by a previous build.
    blueprint.hash(&mut hasher);
    rustflags.hash(&mut hasher);
    cmode.hash(&mut hasher)?;
    if let Some(profile) = ctoml.profile() {
        profile.hash(&mut hasher);
    }
    if let Some(ref hash) = meta.commit_hash {
        hash.hash(&mut hasher);
    }
    Ok(hasher.finish())
}
/// Rebuilds the target sysroot if its inputs changed, then (for cross
/// compilation) syncs the host's `lib`/`bin` artifacts into the Xargo
/// sysroot.
pub fn update(
    cmode: &CompilationMode,
    home: &Home,
    root: &Root,
    rustflags: &Rustflags,
    meta: &VersionMeta,
    src: &Src,
    sysroot: &Sysroot,
    verbose: bool,
) -> Result<()> {
    let ctoml = cargo::toml(root)?;
    let xtoml = xargo::toml(root)?;
    let blueprint = Blueprint::from(xtoml.as_ref(), cmode.triple(), root, &src)?;
    // Only rebuild when something that feeds into the sysroot changed
    // (see `hash` for the list of inputs).
    let hash = hash(cmode, &blueprint, rustflags, &ctoml, meta)?;
    if old_hash(cmode, home)? != Some(hash) {
        build(
            cmode,
            blueprint,
            &ctoml,
            home,
            rustflags,
            sysroot,
            hash,
            verbose,
        )?;
    }
    // copy host artifacts into the sysroot, if necessary
    if cmode.is_native() {
        return Ok(());
    }
    let lock = home.lock_rw(&meta.host)?;
    let hfile = lock.parent().join(".hash");
    // For the host half, the rustc commit hash alone decides staleness
    // (note: this shadows the sysroot hash computed above).
    let hash = meta.commit_hash.as_ref().map(|s| &**s).unwrap_or("");
    if hfile.exists() {
        if util::read(&hfile)? == hash {
            return Ok(());
        }
    }
    // Stale: wipe the previous host artifacts before re-copying.
    lock.remove_siblings()
        .chain_err(|| format!("couldn't clear {}", lock.path().display()))?;
    let dst = lock.parent().join("lib");
    util::mkdir(&dst)?;
    util::cp_r(
        &sysroot
            .path()
            .join("lib/rustlib")
            .join(&meta.host)
            .join("lib"),
        &dst,
    )?;
    let bin_dst = lock.parent().join("bin");
    util::mkdir(&bin_dst)?;
    util::cp_r(
        &sysroot
            .path()
            .join("lib/rustlib")
            .join(&meta.host)
            .join("bin"),
        &bin_dst,
    )?;
    // Record the commit hash last, so a partial copy is retried next run.
    util::write(&hfile, hash)?;
    Ok(())
}
/// Per stage dependencies
#[derive(Debug)]
pub struct Stage {
    // Names of the crates built during this stage.
    crates: Vec<String>,
    // `[dependencies]` table used to build this stage.
    dependencies: Table,
    // `[patch]` table for this stage (see `Blueprint::push`).
    patch: Table,
}
/// A sysroot that will be built in "stages"
#[derive(Debug)]
pub struct Blueprint {
    // Stages keyed by stage number; `BTreeMap` iteration gives a
    // deterministic, ascending build order.
    stages: BTreeMap<i64, Stage>,
}
impl Blueprint {
    /// An empty blueprint with no stages.
    fn new() -> Self {
        Blueprint {
            stages: BTreeMap::new(),
        }
    }
    /// Builds a `Blueprint` from the `[dependencies]` and
    /// `[target.<target>.dependencies]` sections of `Xargo.toml`.
    ///
    /// When neither section is present, defaults to `core` (stage 0)
    /// and `compiler_builtins` with the `mem` feature (stage 1).
    fn from(toml: Option<&xargo::Toml>, target: &str, root: &Root, src: &Src) -> Result<Self> {
        let deps = match (
            toml.and_then(|t| t.dependencies()),
            toml.and_then(|t| t.target_dependencies(target)),
        ) {
            // Both sections present: merge them, rejecting duplicates.
            (Some(value), Some(tvalue)) => {
                let mut deps = value
                    .as_table()
                    .cloned()
                    .ok_or_else(|| format!("Xargo.toml: `dependencies` must be a table"))?;
                let more_deps = tvalue.as_table().ok_or_else(|| {
                    format!(
                        "Xargo.toml: `target.{}.dependencies` must be \
                         a table",
                        target
                    )
                })?;
                for (k, v) in more_deps {
                    if deps.insert(k.to_owned(), v.clone()).is_some() {
                        Err(format!(
                            "found duplicate dependency name {}, \
                             but all dependencies must have a \
                             unique name",
                            k
                        ))?
                    }
                }
                deps
            }
            // Exactly one section present: use it as-is.
            (Some(value), None) | (None, Some(value)) => if let Some(table) = value.as_table() {
                table.clone()
            } else {
                Err(format!(
                    "Xargo.toml: target.{}.dependencies must be \
                     a table",
                    target
                ))?
            },
            (None, None) => {
                // If no dependencies were listed, we assume `core` and `compiler_builtins` as the
                // dependencies
                let mut t = BTreeMap::new();
                let mut core = BTreeMap::new();
                core.insert("stage".to_owned(), Value::Integer(0));
                t.insert("core".to_owned(), Value::Table(core));
                let mut cb = BTreeMap::new();
                cb.insert(
                    "features".to_owned(),
                    Value::Array(vec![Value::String("mem".to_owned())]),
                );
                cb.insert("stage".to_owned(), Value::Integer(1));
                t.insert(
                    "compiler_builtins".to_owned(),
                    Value::Table(cb),
                );
                t
            }
        };
        let mut blueprint = Blueprint::new();
        for (k, v) in deps {
            if let Value::Table(mut map) = v {
                // `stage` defaults to 0 when unspecified; it is removed
                // from the table so only real Cargo keys remain.
                let stage = if let Some(value) = map.remove("stage") {
                    value
                        .as_integer()
                        .ok_or_else(|| format!("dependencies.{}.stage must be an integer", k))?
                } else {
                    0
                };
                // Resolve a relative `path` against the project root so the
                // ephemeral build works from any working directory.
                if let Some(path) = map.get_mut("path") {
                    let p = PathBuf::from(path.as_str()
                        .ok_or_else(|| format!("dependencies.{}.path must be a string", k))?);
                    if !p.is_absolute() {
                        *path = Value::String(
                            root.path()
                                .join(&p)
                                .canonicalize()
                                .chain_err(|| format!("couldn't canonicalize {}", p.display()))?
                                .display()
                                .to_string(),
                        );
                    }
                }
                // With neither `path` nor `git`, assume the crate lives in
                // the rustc source tree as `lib<name>`.
                if !map.contains_key("path") && !map.contains_key("git") {
                    let path = src.path().join(format!("lib{}", k)).display().to_string();
                    map.insert("path".to_owned(), Value::String(path));
                }
                blueprint.push(stage, k, map, src);
            } else {
                Err(format!(
                    "Xargo.toml: target.{}.dependencies.{} must be \
                     a table",
                    target, k
                ))?
            }
        }
        Ok(blueprint)
    }
    /// Adds `krate` (with its dependency table `toml`) to stage `stage`,
    /// creating the stage on first use.
    fn push(&mut self, stage: i64, krate: String, toml: Table, src: &Src) {
        let stage = self.stages.entry(stage).or_insert_with(|| Stage {
            crates: vec![],
            dependencies: Table::new(),
            patch: {
                // For a new stage, we also need to compute the patch section of the toml
                fn make_singleton_map(key: &str, val: Value) -> Table {
                    let mut map = Table::new();
                    map.insert(key.to_owned(), val);
                    map
                }
                // Builds `[patch.crates-io.rustc-std-workspace-core]`
                // pointing at the copy shipped in the rustc source tree.
                make_singleton_map("crates-io", Value::Table(
                    make_singleton_map("rustc-std-workspace-core", Value::Table(
                        make_singleton_map("path", Value::String(
                            src.path().join("tools/rustc-std-workspace-core")
                                .display().to_string()
                        ))
                    ))
                ))
            }
        });
        stage.dependencies.insert(krate.clone(), Value::Table(toml));
        stage.crates.push(krate);
    }
    /// Feeds every stage's dependency table into `hasher`, in ascending
    /// stage order (guaranteed by `BTreeMap` iteration).
    fn hash<H>(&self, hasher: &mut H)
    where
        H: Hasher,
    {
        for stage in self.stages.values() {
            for (k, v) in stage.dependencies.iter() {
                k.hash(hasher);
                v.to_string().hash(hasher);
            }
        }
    }
}
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{raw, ptr, mem, intrinsics, hash, str, u32, io, slice, cmp};
use std::borrow::Cow;
use std::marker::PhantomData;
use std::cell::Cell;
use std::ops::{Deref, DerefMut};
use std::iter::IntoIterator;
use std::default::Default;
use std::cmp::Ordering;
use std::fmt as strfmt;
use core::nonzero::NonZero;
use encoding::{self, EncodingRef, DecoderTrap, EncoderTrap};
use buf32::{self, Buf32};
use fmt::{self, Slice};
use fmt::imp::Fixup;
use util::{unsafe_slice, copy_and_advance};
use OFLOW;
/// Maximum number of bytes a tendril can store inline (in the `len` and
/// `aux` fields) without a heap allocation.
const MAX_INLINE_LEN: usize = 8;
/// Largest value of the pointer word that denotes an inline/empty
/// tendril; anything larger is a real heap pointer.
const MAX_INLINE_TAG: usize = 0xF;
/// Tag stored in the pointer word for a zero-length tendril (nonzero so
/// the `NonZero` layout optimization still applies).
const EMPTY_TAG: usize = 0xF;
/// Computes the pointer-word tag for an inline tendril of `len` bytes.
///
/// Zero maps to `EMPTY_TAG` so that the tag is never zero, keeping the
/// `NonZero` niche available for `Option<Tendril<_>>`.
#[inline(always)]
fn inline_tag(len: u32) -> NonZero<usize> {
    debug_assert!(len <= MAX_INLINE_LEN as u32);
    let tag = match len {
        0 => EMPTY_TAG,
        n => n as usize,
    };
    unsafe { NonZero::new(tag) }
}
/// Header stored at the start of every heap-allocated tendril buffer.
#[repr(packed)]
struct Header {
    // Non-atomic (thread-local) reference count.
    refcount: Cell<usize>,
    // Buffer capacity; written when the buffer becomes shared (see
    // `make_buf_shared`) — before that the capacity lives in `aux`.
    cap: u32,
}
impl Header {
    /// A fresh header with refcount 1; `cap` is deliberately left
    /// uninitialized because it is only meaningful once the buffer is
    /// shared (`make_buf_shared` writes it then).
    #[inline(always)]
    unsafe fn new() -> Header {
        Header {
            refcount: Cell::new(1),
            cap: mem::uninitialized(),
        }
    }
}
/// Errors that can occur when slicing a `Tendril`.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq)]
pub enum SubtendrilError {
    /// The requested offset/length lie outside the tendril.
    OutOfBounds,
    /// The resulting slice is not valid for the tendril's format.
    ValidationFailed,
}
/// Compact string type for zero-copy parsing.
///
/// `Tendril`s have the semantics of owned strings, but are sometimes views
/// into shared buffers. When you mutate a `Tendril`, an owned copy is made
/// if necessary. Further mutations occur in-place until the string becomes
/// shared, e.g. with `clone()` or `subtendril()`.
///
/// Buffer sharing is accomplished through thread-local (non-atomic) reference
/// counting, which has very low overhead. The Rust type system will prevent
/// you at compile time from sending a `Tendril` between threads. We plan to
/// relax this restriction in the future; see `README.md`.
///
/// Whereas `String` allocates in the heap for any non-empty string, `Tendril`
/// can store small strings (up to 8 bytes) in-line, without a heap allocation.
/// `Tendril` is also smaller than `String` on 64-bit platforms — 16 bytes
/// versus 24.
///
/// The type parameter `F` specifies the format of the tendril, for example
/// UTF-8 text or uninterpreted bytes. The parameter will be instantiated
/// with one of the marker types from `tendril::fmt`. See the `StrTendril`
/// and `ByteTendril` type aliases for two examples.
///
/// The maximum length of a `Tendril` is 4 GB. The library will panic if
/// you attempt to go over the limit.
#[unsafe_no_drop_flag]
#[repr(packed)]
pub struct Tendril<F>
    where F: fmt::Format,
{
    // Either an inline tag (<= MAX_INLINE_TAG) or a heap buffer pointer;
    // bit 0 set on a pointer marks the buffer as shared.
    ptr: Cell<NonZero<usize>>,
    // Byte length for heap-backed tendrils; together with `aux`, also
    // serves as the inline byte storage (the struct is packed).
    len: u32,
    // Capacity (owned buffer) or start offset (shared buffer).
    aux: Cell<u32>,
    // Ties the format parameter to the type without storing a value.
    marker: PhantomData<*mut F>,
}
/// `Tendril` for storing native Rust strings (guaranteed UTF-8).
pub type StrTendril = Tendril<fmt::UTF8>;
/// `Tendril` for storing binary data (uninterpreted bytes).
pub type ByteTendril = Tendril<fmt::Bytes>;
impl<F> Clone for Tendril<F>
    where F: fmt::Format,
{
    /// Cloning marks a heap-backed buffer as shared and bumps its
    /// refcount; the tendril itself is then a bitwise copy. Inline
    /// tendrils are copied directly.
    #[inline]
    fn clone(&self) -> Tendril<F> {
        unsafe {
            if *self.ptr.get() > MAX_INLINE_TAG {
                self.make_buf_shared();
                self.incref();
            }
            ptr::read(self)
        }
    }
}
#[unsafe_destructor]
impl<F> Drop for Tendril<F>
    where F: fmt::Format,
{
    #[inline]
    fn drop(&mut self) {
        unsafe {
            let p = *self.ptr.get();
            // Inline tendrils own no heap memory; POST_DROP_USIZE means
            // this value was already dropped (pre-1.0 drop-flag scheme
            // used with `#[unsafe_no_drop_flag]`).
            if p <= MAX_INLINE_TAG || p == mem::POST_DROP_USIZE {
                return;
            }
            let (buf, shared, _) = self.assume_buf();
            if shared {
                // Free the buffer only when we hold the last reference;
                // otherwise just decrement the (non-atomic) refcount.
                let header = self.header();
                let refcount = (*header).refcount.get() - 1;
                if refcount == 0 {
                    buf.destroy();
                } else {
                    (*header).refcount.set(refcount);
                }
            } else {
                buf.destroy();
            }
        }
    }
}
// impl FromIterator<char> for Tendril<fmt::UTF8> { }
// impl FromIterator<u8> for Tendril<fmt::Bytes> { }
impl<F> Deref for Tendril<F>
    where F: fmt::SliceFormat,
{
    type Target = F::Slice;
    #[inline]
    fn deref(&self) -> &F::Slice {
        unsafe {
            // The safe constructors validate the bytes for `F`, so
            // reinterpreting them as the format's slice type here is
            // expected to be sound.
            F::Slice::from_bytes(self.as_byte_slice())
        }
    }
}
impl<'a, F> Extend<&'a Tendril<F>> for Tendril<F>
    where F: fmt::Format + 'a,
{
    /// Appends every tendril yielded by `iterable` to `self`, in order.
    #[inline]
    fn extend<I>(&mut self, iterable: I)
        where I: IntoIterator<Item = &'a Tendril<F>>,
    {
        // `for` desugars through `IntoIterator`, so no explicit
        // `into_iter()` call is needed here.
        for piece in iterable {
            self.push_tendril(piece);
        }
    }
}
impl<F> PartialEq for Tendril<F>
    where F: fmt::Format,
{
    /// Byte-wise comparison of the underlying buffers.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        let (lhs, rhs) = (self.as_byte_slice(), other.as_byte_slice());
        lhs == rhs
    }
    #[inline]
    fn ne(&self, other: &Self) -> bool {
        !self.eq(other)
    }
}
// Byte-wise equality is reflexive, so `Eq` holds as a marker impl.
impl<F> Eq for Tendril<F>
    where F: fmt::Format,
{ }
impl<F> PartialOrd for Tendril<F>
    where F: fmt::SliceFormat,
          <F as fmt::SliceFormat>::Slice: PartialOrd,
{
    /// Delegates ordering to the format's slice type (via `Deref`).
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        PartialOrd::partial_cmp(&**self, &**other)
    }
}
impl<F> Ord for Tendril<F>
    where F: fmt::SliceFormat,
          <F as fmt::SliceFormat>::Slice: Ord,
{
    /// Delegates total ordering to the format's slice type (via `Deref`).
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        Ord::cmp(&**self, &**other)
    }
}
impl<F> Default for Tendril<F>
    where F: fmt::Format,
{
    /// The default tendril is empty (and inline — no allocation).
    #[inline(always)]
    fn default() -> Tendril<F> {
        Tendril::new()
    }
}
impl<F> strfmt::Debug for Tendril<F>
    where F: fmt::SliceFormat + Default + strfmt::Debug,
          <F as fmt::SliceFormat>::Slice: strfmt::Debug,
{
    #[inline]
    fn fmt(&self, f: &mut strfmt::Formatter) -> strfmt::Result {
        // Classify the representation from the pointer word: small
        // values are inline tags, an odd pointer is a shared buffer,
        // an even pointer an owned one.
        let kind = match *self.ptr.get() {
            p if p <= MAX_INLINE_TAG => "inline",
            p if p & 1 == 1 => "shared",
            _ => "owned",
        };
        try!(write!(f, "Tendril<{:?}>({}: ", <F as Default>::default(), kind));
        try!(<<F as fmt::SliceFormat>::Slice as strfmt::Debug>::fmt(&**self, f));
        write!(f, ")")
    }
}
impl<F> hash::Hash for Tendril<F>
    where F: fmt::Format,
{
    /// Hashes the byte content only, consistent with `PartialEq`.
    #[inline]
    fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
        self.as_byte_slice().hash(hasher)
    }
}
impl<F> Tendril<F>
    where F: fmt::Format,
{
    /// Create a new, empty `Tendril` in any format.
    #[inline(always)]
    pub fn new() -> Tendril<F> {
        unsafe {
            Tendril::inline(&[])
        }
    }
    /// Create a new, empty `Tendril` with a specified capacity.
    #[inline]
    pub fn with_capacity(capacity: u32) -> Tendril<F> {
        let mut t: Tendril<F> = Tendril::new();
        // Small capacities fit inline, so only allocate past the limit.
        if capacity > MAX_INLINE_LEN as u32 {
            unsafe {
                t.make_owned_with_capacity(capacity);
            }
        }
        t
    }
    /// Reserve space for additional bytes.
    ///
    /// This is only a suggestion. There are cases where `Tendril` will
    /// decline to allocate until the buffer is actually modified.
    #[inline]
    pub fn reserve(&mut self, additional: u32) {
        if self.is_shared() {
            // Don't grow a shared tendril because we'd have to copy
            // right away.
            return;
        }
        let new_len = self.len32().checked_add(additional).expect(OFLOW);
        if new_len > MAX_INLINE_LEN as u32 {
            unsafe {
                self.make_owned_with_capacity(new_len);
            }
        }
    }
    /// Get the length of the `Tendril`.
    ///
    /// This is named not to conflict with `len()` on the underlying
    /// slice, if any.
    #[inline(always)]
    pub fn len32(&self) -> u32 {
        // Inline tendrils encode their length in the pointer tag.
        match *self.ptr.get() {
            EMPTY_TAG => 0,
            n if n <= MAX_INLINE_LEN => n as u32,
            _ => self.len,
        }
    }
    /// Is the backing buffer shared?
    #[inline]
    pub fn is_shared(&self) -> bool {
        let n = *self.ptr.get();
        // Heap pointer with the low "shared" bit set.
        (n > MAX_INLINE_TAG) && ((n & 1) == 1)
    }
    /// Is the backing buffer shared with this other `Tendril`?
    #[inline]
    pub fn is_shared_with(&self, other: &Tendril<F>) -> bool {
        let n = *self.ptr.get();
        (n > MAX_INLINE_TAG) && (n == *other.ptr.get())
    }
    /// Truncate to length 0 without discarding any owned storage.
    #[inline]
    pub fn clear(&mut self) {
        if *self.ptr.get() <= MAX_INLINE_TAG {
            self.ptr.set(unsafe { NonZero::new(EMPTY_TAG) });
        } else {
            let (_, shared, _) = unsafe { self.assume_buf() };
            if shared {
                // No need to keep a reference alive for a 0-size slice.
                *self = Tendril::new();
            } else {
                self.len = 0;
            }
        }
    }
    /// Build a `Tendril` by copying a byte slice, if it conforms to the format.
    #[inline]
    pub fn try_from_byte_slice(x: &[u8]) -> Result<Tendril<F>, ()> {
        match F::validate(x) {
            true => Ok(unsafe { Tendril::from_byte_slice_without_validating(x) }),
            false => Err(()),
        }
    }
    /// View as uninterpreted bytes.
    #[inline(always)]
    pub fn as_bytes(&self) -> &Tendril<fmt::Bytes> {
        unsafe { mem::transmute(self) }
    }
    /// Convert into uninterpreted bytes.
    #[inline(always)]
    pub fn into_bytes(self) -> Tendril<fmt::Bytes> {
        unsafe { mem::transmute(self) }
    }
    /// View as a superset format, for free.
    #[inline(always)]
    pub fn as_superset<Super>(&self) -> &Tendril<Super>
        where F: fmt::SubsetOf<Super>,
              Super: fmt::Format,
    {
        unsafe { mem::transmute(self) }
    }
    /// Convert into a superset format, for free.
    #[inline(always)]
    pub fn into_superset<Super>(self) -> Tendril<Super>
        where F: fmt::SubsetOf<Super>,
              Super: fmt::Format,
    {
        unsafe { mem::transmute(self) }
    }
    /// View as a subset format, if the `Tendril` conforms to that subset.
    #[inline]
    pub fn try_as_subset<Sub>(&self) -> Result<&Tendril<Sub>, ()>
        where Sub: fmt::SubsetOf<F>,
    {
        match Sub::revalidate_subset(self.as_byte_slice()) {
            true => Ok(unsafe { mem::transmute(self) }),
            false => Err(()),
        }
    }
    /// Convert into a subset format, if the `Tendril` conforms to that subset.
    #[inline]
    pub fn try_into_subset<Sub>(self) -> Result<Tendril<Sub>, Self>
        where Sub: fmt::SubsetOf<F>,
    {
        match Sub::revalidate_subset(self.as_byte_slice()) {
            true => Ok(unsafe { mem::transmute(self) }),
            false => Err(self),
        }
    }
    /// View as another format, if the bytes of the `Tendril` are valid for
    /// that format.
    #[inline]
    pub fn try_reinterpret_view<Other>(&self) -> Result<&Tendril<Other>, ()>
        where Other: fmt::Format,
    {
        match Other::validate(self.as_byte_slice()) {
            true => Ok(unsafe { mem::transmute(self) }),
            false => Err(()),
        }
    }
    /// Convert into another format, if the `Tendril` conforms to that format.
    ///
    /// This only re-validates the existing bytes under the new format. It
    /// will *not* change the byte content of the tendril!
    ///
    /// See the `encode` and `decode` methods for character encoding conversion.
    #[inline]
    pub fn try_reinterpret<Other>(self) -> Result<Tendril<Other>, Self>
        where Other: fmt::Format,
    {
        match Other::validate(self.as_byte_slice()) {
            true => Ok(unsafe { mem::transmute(self) }),
            false => Err(self),
        }
    }
    /// Push some bytes onto the end of the `Tendril`, if they conform to the
    /// format.
    #[inline]
    pub fn try_push_bytes(&mut self, buf: &[u8]) -> Result<(), ()> {
        match F::validate(buf) {
            true => unsafe {
                self.push_bytes_without_validating(buf);
                Ok(())
            },
            false => Err(()),
        }
    }
    /// Push another `Tendril` onto the end of this one.
    #[inline]
    pub fn push_tendril(&mut self, other: &Tendril<F>) {
        let new_len = self.len32().checked_add(other.len32()).expect(OFLOW);
        unsafe {
            if (*self.ptr.get() > MAX_INLINE_TAG) && (*other.ptr.get() > MAX_INLINE_TAG) {
                let (self_buf, self_shared, _) = self.assume_buf();
                let (other_buf, other_shared, _) = other.assume_buf();
                // Fast path: both are shared views of the same buffer and
                // `other` starts exactly where `self` ends, so appending
                // is just extending the length — no copying needed.
                if self_shared && other_shared
                    && (self_buf.data_ptr() == other_buf.data_ptr())
                    && (other.aux.get() == self.aux.get() + self.len)
                {
                    self.len = new_len;
                    return;
                }
            }
            self.push_bytes_without_validating(other.as_byte_slice())
        }
    }
    /// Attempt to slice this `Tendril` as a new `Tendril`.
    ///
    /// This will share the buffer when possible. Mutating a shared buffer
    /// will copy the contents.
    ///
    /// The offset and length are in bytes. The function will return
    /// `Err` if these are out of bounds, or if the resulting slice
    /// does not conform to the format.
    #[inline]
    pub fn try_subtendril(&self, offset: u32, length: u32)
        -> Result<Tendril<F>, SubtendrilError>
    {
        let self_len = self.len32();
        if offset > self_len || length > (self_len - offset) {
            return Err(SubtendrilError::OutOfBounds);
        }
        unsafe {
            let byte_slice = unsafe_slice(self.as_byte_slice(),
                offset as usize, length as usize);
            if !F::validate_subseq(byte_slice) {
                return Err(SubtendrilError::ValidationFailed);
            }
            Ok(self.unsafe_subtendril(offset, length))
        }
    }
    /// Slice this `Tendril` as a new `Tendril`.
    ///
    /// Panics on bounds or validity check failure.
    #[inline]
    pub fn subtendril(&self, offset: u32, length: u32) -> Tendril<F> {
        self.try_subtendril(offset, length).unwrap()
    }
    /// Try to drop `n` bytes from the front.
    ///
    /// Returns `Err` if the bytes are not available, or the suffix fails
    /// validation.
    #[inline]
    pub fn try_pop_front(&mut self, n: u32) -> Result<(), SubtendrilError> {
        let old_len = self.len32();
        if n > old_len {
            return Err(SubtendrilError::OutOfBounds);
        }
        let new_len = old_len - n;
        unsafe {
            if !F::validate_suffix(unsafe_slice(self.as_byte_slice(),
                n as usize, new_len as usize)) {
                return Err(SubtendrilError::ValidationFailed);
            }
            self.unsafe_pop_front(n);
            Ok(())
        }
    }
    /// Drop `n` bytes from the front.
    ///
    /// Panics if the bytes are not available, or the suffix fails
    /// validation.
    #[inline]
    pub fn pop_front(&mut self, n: u32) {
        self.try_pop_front(n).unwrap()
    }
    /// Drop `n` bytes from the back.
    ///
    /// Returns `Err` if the bytes are not available, or the prefix fails
    /// validation.
    #[inline]
    pub fn try_pop_back(&mut self, n: u32) -> Result<(), SubtendrilError> {
        let old_len = self.len32();
        if n > old_len {
            return Err(SubtendrilError::OutOfBounds);
        }
        let new_len = old_len - n;
        unsafe {
            if !F::validate_prefix(unsafe_slice(self.as_byte_slice(),
                0, new_len as usize)) {
                return Err(SubtendrilError::ValidationFailed);
            }
            self.unsafe_pop_back(n);
            Ok(())
        }
    }
    /// Drop `n` bytes from the back.
    ///
    /// Panics if the bytes are not available, or the prefix fails
    /// validation.
    #[inline]
    pub fn pop_back(&mut self, n: u32) {
        self.try_pop_back(n).unwrap()
    }
    /// View as another format, without validating.
    #[inline(always)]
    pub unsafe fn reinterpret_view_without_validating<Other>(&self) -> &Tendril<Other>
        where Other: fmt::Format,
    {
        mem::transmute(self)
    }
    /// Convert into another format, without validating.
    #[inline(always)]
    pub unsafe fn reinterpret_without_validating<Other>(self) -> Tendril<Other>
        where Other: fmt::Format,
    {
        mem::transmute(self)
    }
    /// Build a `Tendril` by copying a byte slice, without validating.
    #[inline]
    pub unsafe fn from_byte_slice_without_validating(x: &[u8]) -> Tendril<F> {
        assert!(x.len() <= buf32::MAX_LEN);
        if x.len() <= MAX_INLINE_LEN {
            Tendril::inline(x)
        } else {
            Tendril::owned_copy(x)
        }
    }
    /// Push some bytes onto the end of the `Tendril`, without validating.
    #[inline]
    pub unsafe fn push_bytes_without_validating(&mut self, buf: &[u8]) {
        assert!(buf.len() <= buf32::MAX_LEN);
        // The format may splice bytes at the join point: drop `drop_left`
        // bytes from the end of self, `drop_right` from the start of
        // `buf`, and insert `insert_bytes` in between.
        let Fixup { drop_left, drop_right, insert_len, insert_bytes }
            = F::fixup(self.as_byte_slice(), buf);
        // FIXME: think more about overflow
        let adj_len = self.len32() + insert_len - drop_left;
        let new_len = adj_len.checked_add(buf.len() as u32).expect(OFLOW)
            - drop_right;
        let drop_left = drop_left as usize;
        let drop_right = drop_right as usize;
        if new_len <= MAX_INLINE_LEN as u32 {
            // Result fits inline: assemble it in a stack buffer first.
            let mut tmp: [u8; MAX_INLINE_LEN] = mem::uninitialized();
            {
                let old = self.as_byte_slice();
                let mut dest = tmp.as_mut_ptr();
                copy_and_advance(&mut dest, unsafe_slice(old, 0, old.len() - drop_left));
                copy_and_advance(&mut dest, unsafe_slice(&insert_bytes, 0, insert_len as usize));
                copy_and_advance(&mut dest, unsafe_slice(buf, drop_right, buf.len() - drop_right));
            }
            *self = Tendril::inline(&tmp[..new_len as usize]);
        } else {
            // Grow (or unshare) the heap buffer, then append in place.
            self.make_owned_with_capacity(new_len);
            let (owned, _, _) = self.assume_buf();
            let mut dest = owned.data_ptr().offset((owned.len as usize - drop_left) as isize);
            copy_and_advance(&mut dest, unsafe_slice(&insert_bytes, 0, insert_len as usize));
            copy_and_advance(&mut dest, unsafe_slice(buf, drop_right, buf.len() - drop_right));
            self.len = new_len;
        }
    }
    /// Slice this `Tendril` as a new `Tendril`.
    ///
    /// Does not check validity or bounds!
    #[inline]
    pub unsafe fn unsafe_subtendril(&self, offset: u32, length: u32) -> Tendril<F> {
        if length <= MAX_INLINE_LEN as u32 {
            // Small result: copy inline rather than sharing the buffer.
            Tendril::inline(unsafe_slice(self.as_byte_slice(),
                offset as usize, length as usize))
        } else {
            self.make_buf_shared();
            self.incref();
            let (buf, _, _) = self.assume_buf();
            Tendril::shared(buf, self.aux.get() + offset, length)
        }
    }
    /// Drop `n` bytes from the front.
    ///
    /// Does not check validity or bounds!
    #[inline]
    pub unsafe fn unsafe_pop_front(&mut self, n: u32) {
        let new_len = self.len32() - n;
        if new_len <= MAX_INLINE_LEN as u32 {
            *self = Tendril::inline(unsafe_slice(self.as_byte_slice(),
                n as usize, new_len as usize));
        } else {
            // Advance the shared-view offset instead of copying.
            self.make_buf_shared();
            self.aux.set(self.aux.get() + n);
            self.len -= n;
        }
    }
    /// Drop `n` bytes from the back.
    ///
    /// Does not check validity or bounds!
    #[inline]
    pub unsafe fn unsafe_pop_back(&mut self, n: u32) {
        let new_len = self.len32() - n;
        if new_len <= MAX_INLINE_LEN as u32 {
            *self = Tendril::inline(unsafe_slice(self.as_byte_slice(),
                0, new_len as usize));
        } else {
            self.make_buf_shared();
            self.len -= n;
        }
    }
    // Raw view of the tendril's bytes, regardless of representation.
    #[inline]
    fn as_byte_slice<'a>(&'a self) -> &'a [u8] {
        unsafe {
            match *self.ptr.get() {
                // Empty: a null, zero-length slice.
                EMPTY_TAG => mem::transmute(raw::Slice {
                    data: ptr::null::<u8>(),
                    len: 0,
                }),
                // Inline: the bytes live in the `len`/`aux` fields.
                n if n <= MAX_INLINE_LEN => mem::transmute(raw::Slice {
                    data: &self.len as *const u32 as *const u8,
                    len: n,
                }),
                // Heap: slice into the buffer at the view's offset.
                _ => {
                    let (buf, _, offset) = self.assume_buf();
                    mem::copy_lifetime(self, unsafe_slice(buf.data(),
                        offset as usize, self.len32() as usize))
                }
            }
        }
    }
    // Increment the buffer's refcount; caller ensures a heap-backed
    // representation.
    #[inline]
    unsafe fn incref(&self) {
        let header = self.header();
        let refcount = (*header).refcount.get().checked_add(1).expect(OFLOW);
        (*header).refcount.set(refcount);
    }
    // Transition an owned buffer to shared: move the capacity from `aux`
    // into the header, set the shared bit, and repurpose `aux` as the
    // view offset (0). No-op if already shared.
    #[inline]
    unsafe fn make_buf_shared(&self) {
        let p = *self.ptr.get();
        if p & 1 == 0 {
            let header = p as *mut Header;
            (*header).cap = self.aux.get();
            self.ptr.set(NonZero::new(p | 1));
            self.aux.set(0);
        }
    }
    // Ensure the tendril owns a unique heap buffer of at least `cap`
    // bytes, copying out of inline or shared storage as needed.
    #[inline]
    unsafe fn make_owned_with_capacity(&mut self, cap: u32) {
        let ptr = *self.ptr.get();
        if ptr <= MAX_INLINE_TAG || (ptr & 1) == 1 {
            *self = Tendril::owned_copy(self.as_byte_slice());
        }
        let mut buf = self.assume_buf().0;
        buf.grow(cap);
        self.ptr.set(NonZero::new(buf.ptr as usize));
        self.aux.set(buf.cap);
    }
    // Recover the header pointer by masking off the shared bit.
    #[inline(always)]
    unsafe fn header(&self) -> *mut Header {
        (*self.ptr.get() & !1) as *mut Header
    }
    // Reconstruct the backing `Buf32` plus (shared?, view offset);
    // caller ensures a heap-backed representation.
    #[inline]
    unsafe fn assume_buf(&self) -> (Buf32<Header>, bool, u32) {
        let ptr = self.ptr.get();
        let header = self.header();
        let shared = (*ptr & 1) == 1;
        // Shared buffers keep capacity in the header and offset in `aux`;
        // owned buffers keep capacity in `aux` and have no offset.
        let (cap, offset) = match shared {
            true => ((*header).cap, self.aux.get()),
            false => (self.aux.get(), 0),
        };
        (Buf32 {
            ptr: header,
            len: offset + self.len32(),
            cap: cap,
        }, shared, offset)
    }
    // Build an inline tendril: the bytes are copied over the packed
    // `len`/`aux` fields, and the length goes in the pointer tag.
    #[inline]
    unsafe fn inline(x: &[u8]) -> Tendril<F> {
        let len = x.len();
        let mut t = Tendril {
            ptr: Cell::new(inline_tag(len as u32)),
            len: mem::uninitialized(),
            aux: mem::uninitialized(),
            marker: PhantomData,
        };
        intrinsics::copy_nonoverlapping(x.as_ptr(), &mut t.len as *mut u32 as *mut u8, len);
        t
    }
    // Wrap an existing buffer as a uniquely-owned tendril.
    #[inline]
    unsafe fn owned(x: Buf32<Header>) -> Tendril<F> {
        Tendril {
            ptr: Cell::new(NonZero::new(x.ptr as usize)),
            len: x.len,
            aux: Cell::new(x.cap),
            marker: PhantomData,
        }
    }
    // Allocate a fresh buffer and copy `x` into it.
    #[inline]
    unsafe fn owned_copy(x: &[u8]) -> Tendril<F> {
        let len32 = x.len() as u32;
        let mut b = Buf32::with_capacity(len32, Header::new());
        intrinsics::copy_nonoverlapping(x.as_ptr(), b.data_ptr(), x.len());
        b.len = len32;
        Tendril::owned(b)
    }
    // Build a shared view (offset `off`, length `len`) into `buf`;
    // the low pointer bit marks it shared.
    #[inline]
    unsafe fn shared(buf: Buf32<Header>, off: u32, len: u32) -> Tendril<F> {
        Tendril {
            ptr: Cell::new(NonZero::new((buf.ptr as usize) | 1)),
            len: len,
            aux: Cell::new(off),
            marker: PhantomData,
        }
    }
}
impl<F> Tendril<F>
    where F: fmt::SliceFormat,
{
    /// Build a `Tendril` by copying a slice.
    #[inline]
    pub fn from_slice(x: &F::Slice) -> Tendril<F> {
        unsafe {
            // A well-formed `F::Slice` is presumably valid for format
            // `F`, so re-validation is skipped — TODO confirm against
            // the `fmt::SliceFormat` contract.
            Tendril::from_byte_slice_without_validating(x.as_bytes())
        }
    }
    /// Push a slice onto the end of the `Tendril`.
    #[inline]
    pub fn push_slice(&mut self, x: &F::Slice) {
        unsafe {
            self.push_bytes_without_validating(x.as_bytes())
        }
    }
}
/// `Tendril`-related methods for Rust slices.
pub trait SliceExt: fmt::Slice {
    /// Make a `Tendril` from this slice (copies the data).
    #[inline]
    fn to_tendril(&self) -> Tendril<Self::Format> {
        Tendril::from_slice(self)
    }
}
// The two slice types with `fmt::Slice` impls: UTF-8 text and raw bytes.
impl SliceExt for str { }
impl SliceExt for [u8] { }
impl<F> Tendril<F>
    where F: for<'a> fmt::CharFormat<'a>,
{
    /// Remove and return the first character, if any.
    #[inline]
    pub fn pop_front_char<'a>(&'a mut self) -> Option<char> {
        unsafe {
            let mut it = F::char_indices(self.as_byte_slice());
            it.next().map(|(_, c)| {
                // The second index, if any, is the byte length of the
                // first character; otherwise the tendril held exactly
                // one character.
                if let Some((n, _)) = it.next() {
                    self.unsafe_pop_front(n as u32);
                } else {
                    self.clear();
                }
                c
            })
        }
    }
    /// Remove and return a run of characters at the front of the `Tendril`
    /// which are classified the same according to the function `classify`.
    ///
    /// Returns `None` on an empty string.
    #[inline]
    pub fn pop_front_char_run<'a, C, R>(&'a mut self, mut classify: C)
        -> Option<(Tendril<F>, R)>
        where C: FnMut(char) -> R,
              R: PartialEq,
    {
        let (class, first_mismatch);
        {
            let mut chars = unsafe {
                F::char_indices(self.as_byte_slice())
            };
            let (_, first) = unwrap_or_return!(chars.next(), None);
            class = classify(first);
            // Find the first character whose class differs from the
            // first character's.
            first_mismatch = chars.find(|&(_, ch)| &classify(ch) != &class);
        }
        match first_mismatch {
            Some((idx, _)) => unsafe {
                // Split at the mismatch: the uniform prefix is returned,
                // the remainder stays in `self`.
                let t = self.unsafe_subtendril(0, idx as u32);
                self.unsafe_pop_front(idx as u32);
                Some((t, class))
            },
            None => {
                // The whole tendril is one uniform run.
                let t = self.clone();
                self.clear();
                Some((t, class))
            }
        }
    }
    /// Push a character, if it can be represented in this format.
    #[inline]
    pub fn try_push_char(&mut self, c: char) -> Result<(), ()> {
        F::encode_char(c, |b| unsafe {
            self.push_bytes_without_validating(b);
        })
    }
}
impl DerefMut for Tendril<fmt::Bytes> {
    #[inline]
    fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
        unsafe {
            // NOTE(review): this transmutes the shared byte view to
            // `&mut` without forcing unique ownership first — confirm a
            // shared buffer can never be reached through here, otherwise
            // this aliases shared data mutably.
            mem::transmute(self.as_byte_slice())
        }
    }
}
impl io::Write for Tendril<fmt::Bytes> {
    /// Appends `buf`; always accepts the whole slice.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.push_slice(buf);
        Ok(buf.len())
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        self.push_slice(buf);
        Ok(())
    }
    /// Nothing is buffered, so flushing is a no-op.
    #[inline(always)]
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
impl encoding::ByteWriter for Tendril<fmt::Bytes> {
    #[inline]
    fn write_byte(&mut self, b: u8) {
        // `ref_slice` views the single byte as a one-element slice.
        self.push_slice(slice::ref_slice(&b));
    }
    #[inline]
    fn write_bytes(&mut self, v: &[u8]) {
        self.push_slice(v);
    }
    #[inline]
    fn writer_hint(&mut self, additional: usize) {
        // Clamp to u32 since a tendril's length is at most 4 GB.
        self.reserve(cmp::min(u32::MAX as usize, additional) as u32);
    }
}
impl Tendril<fmt::Bytes> {
    /// Decode from some character encoding into UTF-8.
    ///
    /// See the [rust-encoding docs](https://lifthrasiir.github.io/rust-encoding/encoding/)
    /// for more information.
    #[inline]
    pub fn decode(&self, encoding: EncodingRef, trap: DecoderTrap)
        -> Result<Tendril<fmt::UTF8>, Cow<'static, str>>
    {
        let mut ret = Tendril::new();
        encoding.decode_to(&*self, trap, &mut ret).map(|_| ret)
    }
    /// Push "uninitialized bytes" onto the end.
    ///
    /// Really, this grows the tendril without writing anything to the new area.
    /// It's only defined for byte tendrils because it's only useful if you
    /// plan to then mutate the buffer.
    #[inline]
    pub unsafe fn push_uninitialized(&mut self, n: u32) {
        let new_len = self.len32().checked_add(n).expect(OFLOW);
        if new_len <= MAX_INLINE_LEN as u32
            && *self.ptr.get() <= MAX_INLINE_TAG
        {
            // Still inline: just bump the length tag; the inline storage
            // already spans the full MAX_INLINE_LEN bytes.
            self.ptr.set(inline_tag(new_len))
        } else {
            self.make_owned_with_capacity(new_len);
            self.len = new_len;
        }
    }
}
impl strfmt::Display for Tendril<fmt::UTF8> {
    /// Formats exactly like the underlying `str`.
    #[inline]
    fn fmt(&self, f: &mut strfmt::Formatter) -> strfmt::Result {
        <str as strfmt::Display>::fmt(&**self, f)
    }
}
impl str::FromStr for Tendril<fmt::UTF8> {
    type Err = ();
    /// Infallible: any `&str` is valid UTF-8, so this always copies.
    #[inline]
    fn from_str(s: &str) -> Result<Self, ()> {
        Ok(Tendril::from_slice(s))
    }
}
impl strfmt::Write for Tendril<fmt::UTF8> {
    /// Appends `s`; never fails, enabling `write!` into a tendril.
    #[inline]
    fn write_str(&mut self, s: &str) -> strfmt::Result {
        self.push_slice(s);
        Ok(())
    }
}
impl encoding::StringWriter for Tendril<fmt::UTF8> {
    #[inline]
    fn write_char(&mut self, c: char) {
        self.push_char(c);
    }
    #[inline]
    fn write_str(&mut self, s: &str) {
        self.push_slice(s);
    }
    #[inline]
    fn writer_hint(&mut self, additional: usize) {
        // Clamp to u32 since a tendril's length is at most 4 GB.
        self.reserve(cmp::min(u32::MAX as usize, additional) as u32);
    }
}
impl Tendril<fmt::UTF8> {
    /// Encode from UTF-8 into some other character encoding.
    ///
    /// See the [rust-encoding docs](https://lifthrasiir.github.io/rust-encoding/encoding/)
    /// for more information.
    #[inline]
    pub fn encode(&self, encoding: EncodingRef, trap: EncoderTrap)
        -> Result<Tendril<fmt::Bytes>, Cow<'static, str>>
    {
        let mut ret = Tendril::new();
        encoding.encode_to(&*self, trap, &mut ret).map(|_| ret)
    }
    /// Push a character onto the end.
    #[inline]
    pub fn push_char(&mut self, c: char) {
        unsafe {
            // A UTF-8 sequence is at most 4 bytes; only the first `n`
            // bytes of the scratch buffer are written and pushed.
            let mut buf: [u8; 4] = mem::uninitialized();
            let n = c.encode_utf8(&mut buf).expect("Tendril::push_char: internal error");
            self.push_bytes_without_validating(unsafe_slice(&buf, 0, n));
        }
    }
    /// Helper for the `format_tendril!` macro.
    #[inline]
    pub fn format(args: strfmt::Arguments) -> Tendril<fmt::UTF8> {
        use std::fmt::Write;
        // Writing into a UTF-8 tendril cannot fail (see `write_str`),
        // so the result is deliberately discarded.
        let mut output: Tendril<fmt::UTF8> = Tendril::new();
        let _ = write!(&mut output, "{}", args);
        output
    }
}
/// Create a `StrTendril` through string formatting.
///
/// Works just like the standard `format!` macro, but builds the result
/// via `Tendril::format` (so short results stay inline).
#[macro_export]
macro_rules! format_tendril {
    ($($arg:tt)*) => ($crate::Tendril::format(format_args!($($arg)*)))
}
// Benchmarks live in a separate file but compile as a child module of
// this one during test builds.
#[cfg(test)]
#[path="bench.rs"]
mod bench;
#[cfg(test)]
mod test {
use super::{Tendril, ByteTendril, StrTendril, SliceExt, Header};
use fmt;
#[test]
fn smoke_test() {
assert_eq!("", &*"".to_tendril());
assert_eq!("abc", &*"abc".to_tendril());
assert_eq!("Hello, world!", &*"Hello, world!".to_tendril());
assert_eq!(b"", &*b"".to_tendril());
assert_eq!(b"abc", &*b"abc".to_tendril());
assert_eq!(b"Hello, world!", &*b"Hello, world!".to_tendril());
}
#[test]
fn assert_sizes() {
use std::mem;
let correct = mem::size_of::<*const ()>() + 8;
assert_eq!(correct, mem::size_of::<ByteTendril>());
assert_eq!(correct, mem::size_of::<StrTendril>());
// Check that the NonZero<T> optimization is working.
assert_eq!(correct, mem::size_of::<Option<ByteTendril>>());
assert_eq!(correct, mem::size_of::<Option<StrTendril>>());
let correct_header = mem::size_of::<*const ()>() + 4;
assert_eq!(correct_header, mem::size_of::<Header>());
}
#[test]
fn validate_utf8() {
assert!(ByteTendril::try_from_byte_slice(b"\xFF").is_ok());
assert!(StrTendril::try_from_byte_slice(b"\xFF").is_err());
assert!(StrTendril::try_from_byte_slice(b"\xEA\x99\xFF").is_err());
assert!(StrTendril::try_from_byte_slice(b"\xEA\x99").is_err());
assert!(StrTendril::try_from_byte_slice(b"\xEA\x99\xAE\xEA").is_err());
assert_eq!("\u{a66e}", &*StrTendril::try_from_byte_slice(b"\xEA\x99\xAE").unwrap());
let mut t = StrTendril::new();
assert!(t.try_push_bytes(b"\xEA\x99").is_err());
assert!(t.try_push_bytes(b"\xAE").is_err());
assert!(t.try_push_bytes(b"\xEA\x99\xAE").is_ok());
assert_eq!("\u{a66e}", &*t);
}
// clone() shares the heap buffer (same data pointer); mutating one handle
// forces an owned copy (copy-on-write) and leaves the other intact.
#[test]
fn share_and_unshare() {
    let s = b"foobarbaz".to_tendril();
    assert_eq!(b"foobarbaz", &*s);
    assert!(!s.is_shared());
    let mut t = s.clone();
    assert_eq!(s.as_ptr(), t.as_ptr());
    assert!(s.is_shared());
    assert!(t.is_shared());
    t.push_slice(b"quux");
    assert_eq!(b"foobarbaz", &*s);
    assert_eq!(b"foobarbazquux", &*t);
    assert!(s.as_ptr() != t.as_ptr());
    assert!(!t.is_shared());
}

// Display formatting is unaffected by sharing or by later mutation of
// another handle.
#[test]
fn format_display() {
    assert_eq!("foobar", &*format!("{}", "foobar".to_tendril()));
    let mut s = "foo".to_tendril();
    assert_eq!("foo", &*format!("{}", s));
    let t = s.clone();
    assert_eq!("foo", &*format!("{}", s));
    assert_eq!("foo", &*format!("{}", t));
    s.push_slice("barbaz!");
    assert_eq!("foobarbaz!", &*format!("{}", s));
    assert_eq!("foo", &*format!("{}", t));
}

// Debug output reports the storage class: inline, owned, or shared.
#[test]
fn format_debug() {
    assert_eq!(r#"Tendril<UTF8>(inline: "foobar")"#,
               &*format!("{:?}", "foobar".to_tendril()));
    assert_eq!(r#"Tendril<Bytes>(inline: [102, 111, 111, 98, 97, 114])"#,
               &*format!("{:?}", b"foobar".to_tendril()));
    let t = "anextralongstring".to_tendril();
    assert_eq!(r#"Tendril<UTF8>(owned: "anextralongstring")"#,
               &*format!("{:?}", t));
    // The temporary clone is dropped immediately, but cloning permanently
    // marks the buffer as shared — hence "shared" below.
    t.clone();
    assert_eq!(r#"Tendril<UTF8>(shared: "anextralongstring")"#,
               &*format!("{:?}", t));
}

// Byte-offset slicing and popping, for both inline and heap tendrils.
#[test]
fn subtendril() {
    assert_eq!("foo".to_tendril(), "foo-bar".to_tendril().subtendril(0, 3));
    assert_eq!("bar".to_tendril(), "foo-bar".to_tendril().subtendril(4, 3));
    let mut t = "foo-bar".to_tendril();
    t.pop_front(2);
    assert_eq!("o-bar".to_tendril(), t);
    t.pop_back(1);
    assert_eq!("o-ba".to_tendril(), t);
    assert_eq!("foo".to_tendril(),
               "foo-a-longer-string-bar-baz".to_tendril().subtendril(0, 3));
    assert_eq!("oo-a-".to_tendril(),
               "foo-a-longer-string-bar-baz".to_tendril().subtendril(1, 5));
    assert_eq!("bar".to_tendril(),
               "foo-a-longer-string-bar-baz".to_tendril().subtendril(20, 3));
    let mut t = "another rather long string".to_tendril();
    t.pop_front(2);
    assert!(t.starts_with("other rather"));
    t.pop_back(1);
    assert_eq!("other rather long strin".to_tendril(), t);
    // Popping a heap tendril keeps sharing the original buffer.
    assert!(t.is_shared());
}

// Slices and pops may not split a multi-byte UTF-8 sequence.
#[test]
fn subtendril_invalid() {
    assert!("\u{a66e}".to_tendril().try_subtendril(0, 2).is_err());
    assert!("\u{a66e}".to_tendril().try_subtendril(1, 2).is_err());
    assert!("\u{1f4a9}".to_tendril().try_subtendril(0, 3).is_err());
    assert!("\u{1f4a9}".to_tendril().try_subtendril(0, 2).is_err());
    assert!("\u{1f4a9}".to_tendril().try_subtendril(0, 1).is_err());
    assert!("\u{1f4a9}".to_tendril().try_subtendril(1, 3).is_err());
    assert!("\u{1f4a9}".to_tendril().try_subtendril(1, 2).is_err());
    assert!("\u{1f4a9}".to_tendril().try_subtendril(1, 1).is_err());
    assert!("\u{1f4a9}".to_tendril().try_subtendril(2, 2).is_err());
    assert!("\u{1f4a9}".to_tendril().try_subtendril(2, 1).is_err());
    assert!("\u{1f4a9}".to_tendril().try_subtendril(3, 1).is_err());
    let mut t = "\u{1f4a9}zzzzzz".to_tendril();
    assert!(t.try_pop_front(1).is_err());
    assert!(t.try_pop_front(2).is_err());
    assert!(t.try_pop_front(3).is_err());
    assert!(t.try_pop_front(4).is_ok());
    assert_eq!("zzzzzz", &*t);
    let mut t = "zzzzzz\u{1f4a9}".to_tendril();
    assert!(t.try_pop_back(1).is_err());
    assert!(t.try_pop_back(2).is_err());
    assert!(t.try_pop_back(3).is_err());
    assert!(t.try_pop_back(4).is_ok());
    assert_eq!("zzzzzz", &*t);
}
// Free reinterpretation between formats: bytes <-> str, and ASCII as a
// validated subset of UTF-8 (both by-reference and by-value variants).
#[test]
fn conversion() {
    assert_eq!(&[0x66, 0x6F, 0x6F].to_tendril(), "foo".to_tendril().as_bytes());
    assert_eq!([0x66, 0x6F, 0x6F].to_tendril(), "foo".to_tendril().into_bytes());
    let ascii: Tendril<fmt::ASCII> = b"hello".to_tendril().try_reinterpret().unwrap();
    assert_eq!(&"hello".to_tendril(), ascii.as_superset());
    assert_eq!("hello".to_tendril(), ascii.clone().into_superset());
    assert!(b"\xFF".to_tendril().try_reinterpret::<fmt::ASCII>().is_err());
    let t = "hello".to_tendril();
    let ascii: &Tendril<fmt::ASCII> = t.try_as_subset().unwrap();
    assert_eq!(b"hello", &**ascii.as_bytes());
    assert!("ő".to_tendril().try_reinterpret_view::<fmt::ASCII>().is_err());
    assert!("ő".to_tendril().try_as_subset::<fmt::ASCII>().is_err());
    let ascii: Tendril<fmt::ASCII> = "hello".to_tendril().try_into_subset().unwrap();
    assert_eq!(b"hello", &**ascii.as_bytes());
    assert!("ő".to_tendril().try_reinterpret::<fmt::ASCII>().is_err());
    assert!("ő".to_tendril().try_into_subset::<fmt::ASCII>().is_err());
}

// clear() empties this handle without disturbing other handles that
// share the same buffer.
#[test]
fn clear() {
    let mut t = "foo-".to_tendril();
    t.clear();
    assert_eq!(t.len(), 0);
    assert_eq!(t.len32(), 0);
    assert_eq!(&*t, "");
    let mut t = "much longer".to_tendril();
    let s = t.clone();
    t.clear();
    assert_eq!(t.len(), 0);
    assert_eq!(t.len32(), 0);
    assert_eq!(&*t, "");
    assert_eq!(&*s, "much longer");
}

// Appending one tendril onto another.
#[test]
fn push_tendril() {
    let mut t = "abc".to_tendril();
    t.push_tendril(&"xyz".to_tendril());
    assert_eq!("abcxyz", &*t);
}

// WTF-8: lone surrogates are representable, but a trailing surrogate
// pushed after a leading one is fused into the real code point.
#[test]
fn wtf8() {
    assert!(Tendril::<fmt::WTF8>::try_from_byte_slice(b"\xED\xA0\xBD").is_ok());
    assert!(Tendril::<fmt::WTF8>::try_from_byte_slice(b"\xED\xB2\xA9").is_ok());
    // A surrogate pair spelled as two 3-byte sequences is not valid WTF-8.
    assert!(Tendril::<fmt::WTF8>::try_from_byte_slice(b"\xED\xA0\xBD\xED\xB2\xA9").is_err());
    let t: Tendril<fmt::WTF8>
        = Tendril::try_from_byte_slice(b"\xED\xA0\xBD\xEA\x99\xAE").unwrap();
    assert!(b"\xED\xA0\xBD".to_tendril().try_reinterpret().unwrap()
            == t.subtendril(0, 3));
    assert!(b"\xEA\x99\xAE".to_tendril().try_reinterpret().unwrap()
            == t.subtendril(3, 3));
    assert!(t.try_reinterpret_view::<fmt::UTF8>().is_err());
    assert!(t.try_subtendril(0, 1).is_err());
    assert!(t.try_subtendril(0, 2).is_err());
    assert!(t.try_subtendril(1, 1).is_err());
    assert!(t.try_subtendril(3, 1).is_err());
    assert!(t.try_subtendril(3, 2).is_err());
    assert!(t.try_subtendril(4, 1).is_err());
    // paired surrogates
    let mut t: Tendril<fmt::WTF8> = Tendril::try_from_byte_slice(b"\xED\xA0\xBD").unwrap();
    assert!(t.try_push_bytes(b"\xED\xB2\xA9").is_ok());
    // The pair collapses to the 4-byte UTF-8 encoding of U+1F4A9.
    assert_eq!(b"\xF0\x9F\x92\xA9", t.as_byte_slice());
    assert!(t.try_reinterpret_view::<fmt::UTF8>().is_ok());
    // unpaired surrogates
    let mut t: Tendril<fmt::WTF8> = Tendril::try_from_byte_slice(b"\xED\xA0\xBB").unwrap();
    assert!(t.try_push_bytes(b"\xED\xA0").is_err());
    assert!(t.try_push_bytes(b"\xED").is_err());
    assert!(t.try_push_bytes(b"\xA0").is_err());
    assert!(t.try_push_bytes(b"\xED\xA0\xBD").is_ok());
    assert_eq!(b"\xED\xA0\xBB\xED\xA0\xBD", t.as_byte_slice());
    assert!(t.try_push_bytes(b"\xED\xB2\xA9").is_ok());
    assert_eq!(b"\xED\xA0\xBB\xF0\x9F\x92\xA9", t.as_byte_slice());
    assert!(t.try_reinterpret_view::<fmt::UTF8>().is_err());
}
// pop_front_char across 1- and 2-byte characters, for inline and heap
// storage.
#[test]
fn front_char() {
    let mut t = "".to_tendril();
    assert_eq!(None, t.pop_front_char());
    assert_eq!(None, t.pop_front_char());
    let mut t = "abc".to_tendril();
    assert_eq!(Some('a'), t.pop_front_char());
    assert_eq!(Some('b'), t.pop_front_char());
    assert_eq!(Some('c'), t.pop_front_char());
    assert_eq!(None, t.pop_front_char());
    assert_eq!(None, t.pop_front_char());
    let mut t = "főo-a-longer-string-bar-baz".to_tendril();
    assert_eq!(28, t.len());
    assert_eq!(Some('f'), t.pop_front_char());
    assert_eq!(Some('ő'), t.pop_front_char());
    assert_eq!(Some('o'), t.pop_front_char());
    assert_eq!(Some('-'), t.pop_front_char());
    assert_eq!(23, t.len());
}

// pop_front_char_run splits off the leading run of characters that all
// classify the same (here: whitespace vs. non-whitespace).
#[test]
fn char_run() {
    for &(s, exp) in &[
        ("", None),
        (" ", Some((" ", true))),
        ("x", Some(("x", false))),
        (" \t \n", Some((" \t \n", true))),
        ("xyzzy", Some(("xyzzy", false))),
        (" xyzzy", Some((" ", true))),
        ("xyzzy ", Some(("xyzzy", false))),
        (" xyzzy ", Some((" ", true))),
        ("xyzzy hi", Some(("xyzzy", false))),
        ("中 ", Some(("中", false))),
        (" 中 ", Some((" ", true))),
        ("  中 ", Some(("  ", true))),
        ("   中 ", Some(("   ", true))),
    ] {
        let mut t = s.to_tendril();
        let res = t.pop_front_char_run(char::is_whitespace);
        match exp {
            None => assert!(res.is_none()),
            Some((es, ec)) => {
                let (rt, rc) = res.unwrap();
                assert_eq!(es, &*rt);
                assert_eq!(ec, rc);
            }
        }
    }
}

// DerefMut on a byte tendril allows in-place edits, including of bytes
// appended via push_uninitialized.
#[test]
fn deref_mut() {
    let mut t = "xyő".to_tendril().into_bytes();
    t[3] = 0xff;
    assert_eq!(b"xy\xC5\xFF", &*t);
    assert!(t.try_reinterpret_view::<fmt::UTF8>().is_err());
    t[3] = 0x8b;
    assert_eq!("xyŋ", &**t.try_reinterpret_view::<fmt::UTF8>().unwrap());
    unsafe {
        t.push_uninitialized(3);
        t[4] = 0xEA;
        t[5] = 0x99;
        t[6] = 0xAE;
        assert_eq!("xyŋ\u{a66e}", &**t.try_reinterpret_view::<fmt::UTF8>().unwrap());
        // Growing then shrinking by the same amount leaves the contents
        // untouched.
        t.push_uninitialized(20);
        t.pop_back(20);
        assert_eq!("xyŋ\u{a66e}", &**t.try_reinterpret_view::<fmt::UTF8>().unwrap());
    }
}

// push_char covers every UTF-8 encoded length: 1, 2, 3 and 4 bytes.
#[test]
fn push_char() {
    let mut t = "xyz".to_tendril();
    t.push_char('o');
    assert_eq!("xyzo", &*t);
    t.push_char('ő');
    assert_eq!("xyzoő", &*t);
    t.push_char('\u{a66e}');
    assert_eq!("xyzoő\u{a66e}", &*t);
    t.push_char('\u{1f4a9}');
    assert_eq!("xyzoő\u{a66e}\u{1f4a9}", &*t);
    assert_eq!(t.len(), 13);
}

// Encoding text out through rust-encoding, with strict and replacement
// trap behavior.
#[test]
fn encode() {
    use encoding::{all, EncoderTrap};
    let t = "안녕하세요 러스트".to_tendril();
    assert_eq!(b"\xbe\xc8\xb3\xe7\xc7\xcf\xbc\xbc\xbf\xe4\x20\xb7\xaf\xbd\xba\xc6\xae",
               &*t.encode(all::WINDOWS_949, EncoderTrap::Strict).unwrap());
    let t = "Энергия пробуждения ия-я-я! \u{a66e}".to_tendril();
    assert_eq!(b"\xfc\xce\xc5\xd2\xc7\xc9\xd1 \xd0\xd2\xcf\xc2\xd5\xd6\xc4\xc5\xce\
                \xc9\xd1 \xc9\xd1\x2d\xd1\x2d\xd1\x21 ?",
               &*t.encode(all::KOI8_U, EncoderTrap::Replace).unwrap());
    let t = "\u{1f4a9}".to_tendril();
    assert!(t.encode(all::WINDOWS_1252, EncoderTrap::Strict).is_err());
}

// Decoding bytes in through rust-encoding, with strict and replacement
// trap behavior.
#[test]
fn decode() {
    use encoding::{all, DecoderTrap};
    let t = b"\xbe\xc8\xb3\xe7\xc7\xcf\xbc\xbc\
              \xbf\xe4\x20\xb7\xaf\xbd\xba\xc6\xae".to_tendril();
    assert_eq!("안녕하세요 러스트",
               &*t.decode(all::WINDOWS_949, DecoderTrap::Strict).unwrap());
    let t = b"\xfc\xce\xc5\xd2\xc7\xc9\xd1 \xd0\xd2\xcf\xc2\xd5\xd6\xc4\xc5\xce\
              \xc9\xd1 \xc9\xd1\x2d\xd1\x2d\xd1\x21".to_tendril();
    assert_eq!("Энергия пробуждения ия-я-я!",
               &*t.decode(all::KOI8_U, DecoderTrap::Replace).unwrap());
    let t = b"x \xff y".to_tendril();
    assert!(t.decode(all::UTF_8, DecoderTrap::Strict).is_err());
    let t = b"x \xff y".to_tendril();
    assert_eq!("x \u{fffd} y",
               &*t.decode(all::UTF_8, DecoderTrap::Replace).unwrap());
}
// ASCII format: one byte per character; pushing non-ASCII chars fails.
#[test]
fn ascii() {
    fn mk(x: &[u8]) -> Tendril<fmt::ASCII> {
        x.to_tendril().try_reinterpret().unwrap()
    }
    let mut t = mk(b"xyz");
    assert_eq!(Some('x'), t.pop_front_char());
    assert_eq!(Some('y'), t.pop_front_char());
    assert_eq!(Some('z'), t.pop_front_char());
    assert_eq!(None, t.pop_front_char());
    let mut t = mk(b" \t xyz");
    assert!(Some((mk(b" \t "), true))
            == t.pop_front_char_run(char::is_whitespace));
    assert!(Some((mk(b"xyz"), false))
            == t.pop_front_char_run(char::is_whitespace));
    assert!(t.pop_front_char_run(char::is_whitespace).is_none());
    let mut t = Tendril::<fmt::ASCII>::new();
    assert!(t.try_push_char('x').is_ok());
    assert!(t.try_push_char('\0').is_ok());
    // U+00A0 is outside ASCII.
    assert!(t.try_push_char('\u{a0}').is_err());
    assert_eq!(b"x\0", t.as_byte_slice());
}

// Latin-1 format: bytes map one-to-one onto U+0000..U+00FF.
#[test]
fn latin1() {
    fn mk(x: &[u8]) -> Tendril<fmt::Latin1> {
        x.to_tendril().try_reinterpret().unwrap()
    }
    let mut t = mk(b"\xd8_\xd8");
    assert_eq!(Some('Ø'), t.pop_front_char());
    assert_eq!(Some('_'), t.pop_front_char());
    assert_eq!(Some('Ø'), t.pop_front_char());
    assert_eq!(None, t.pop_front_char());
    let mut t = mk(b" \t \xfe\xa7z");
    assert!(Some((mk(b" \t "), true))
            == t.pop_front_char_run(char::is_whitespace));
    assert!(Some((mk(b"\xfe\xa7z"), false))
            == t.pop_front_char_run(char::is_whitespace));
    assert!(t.pop_front_char_run(char::is_whitespace).is_none());
    let mut t = Tendril::<fmt::Latin1>::new();
    assert!(t.try_push_char('x').is_ok());
    assert!(t.try_push_char('\0').is_ok());
    // U+00A0 fits in Latin-1; anything above U+00FF does not.
    assert!(t.try_push_char('\u{a0}').is_ok());
    assert!(t.try_push_char('ő').is_err());
    assert!(t.try_push_char('я').is_err());
    assert!(t.try_push_char('\u{a66e}').is_err());
    assert!(t.try_push_char('\u{1f4a9}').is_err());
    assert_eq!(b"x\0\xa0", t.as_byte_slice());
}

// format_tendril! builds a StrTendril with format!-style arguments.
#[test]
fn format() {
    assert_eq!("", &*format_tendril!(""));
    assert_eq!("two and two make 4", &*format_tendril!("two and two make {}", 2+2));
}

// Appending an adjacent slice of the same shared buffer merges in place:
// all handles keep sharing one buffer and no bytes are copied.
#[test]
fn merge_shared() {
    let t = "012345678901234567890123456789".to_tendril();
    let a = t.subtendril(10, 20);
    assert!(a.is_shared());
    assert_eq!("01234567890123456789", &*a);
    let mut b = t.subtendril(0, 10);
    assert!(b.is_shared());
    assert_eq!("0123456789", &*b);
    b.push_tendril(&a);
    assert!(b.is_shared());
    assert!(a.is_shared());
    assert!(a.is_shared_with(&b));
    assert!(b.is_shared_with(&a));
    assert_eq!("012345678901234567890123456789", &*b);
    assert!(t.is_shared());
    assert!(t.is_shared_with(&a));
    assert!(t.is_shared_with(&b));
}
// Appending unrelated data to a shared slice must fall back to an owned
// copy.
#[test]
fn merge_cant_share() {
    let t = "012345678901234567890123456789".to_tendril();
    let mut b = t.subtendril(0, 10);
    assert!(b.is_shared());
    assert_eq!("0123456789", &*b);
    b.push_tendril(&"abcd".to_tendril());
    assert!(!b.is_shared());
    assert_eq!("0123456789abcd", &*b);
}

// reserve() declines to reallocate while the buffer is shared (growing
// would force an immediate copy).
#[test]
fn shared_doesnt_reserve() {
    let mut t = "012345678901234567890123456789".to_tendril();
    let a = t.subtendril(1, 10);
    assert!(t.is_shared());
    t.reserve(10);
    assert!(t.is_shared());
    let _ = a;
}

// All slicing/popping APIs reject out-of-range offsets and lengths.
#[test]
fn out_of_bounds() {
    assert!("".to_tendril().try_subtendril(0, 1).is_err());
    assert!("abc".to_tendril().try_subtendril(0, 4).is_err());
    assert!("abc".to_tendril().try_subtendril(3, 1).is_err());
    assert!("abc".to_tendril().try_subtendril(7, 1).is_err());
    let mut t = "".to_tendril();
    assert!(t.try_pop_front(1).is_err());
    assert!(t.try_pop_front(5).is_err());
    assert!(t.try_pop_front(500).is_err());
    assert!(t.try_pop_back(1).is_err());
    assert!(t.try_pop_back(5).is_err());
    assert!(t.try_pop_back(500).is_err());
    let mut t = "abcd".to_tendril();
    assert!(t.try_pop_front(1).is_ok());
    assert!(t.try_pop_front(4).is_err());
    assert!(t.try_pop_front(500).is_err());
    assert!(t.try_pop_back(1).is_ok());
    assert!(t.try_pop_back(3).is_err());
    assert!(t.try_pop_back(500).is_err());
}

// Every comparison operator on tendrils must agree with the same
// operator on the underlying &str values.
#[test]
fn compare() {
    for &a in &["indiscretions", "validity", "hallucinogenics", "timelessness",
                "original", "microcosms", "boilers", "mammoth"] {
        for &b in &["intrepidly", "frigid", "spa", "cardigans",
                    "guileful", "evaporated", "unenthusiastic", "legitimate"] {
            let ta = a.to_tendril();
            let tb = b.to_tendril();
            assert_eq!(a.eq(b), ta.eq(&tb));
            assert_eq!(a.ne(b), ta.ne(&tb));
            assert_eq!(a.lt(b), ta.lt(&tb));
            assert_eq!(a.le(b), ta.le(&tb));
            assert_eq!(a.gt(b), ta.gt(&tb));
            assert_eq!(a.ge(b), ta.ge(&tb));
            assert_eq!(a.partial_cmp(b), ta.partial_cmp(&tb));
            assert_eq!(a.cmp(b), ta.cmp(&tb));
        }
    }
}

// Extend appends each tendril yielded by the iterator, including from an
// empty iterator and from a slice of tendrils.
#[test]
fn extend() {
    let mut t = "Hello".to_tendril();
    t.extend(None.into_iter());
    assert_eq!("Hello", &*t);
    t.extend(&[", ".to_tendril(), "world".to_tendril(), "!".to_tendril()]);
    assert_eq!("Hello, world!", &*t);
}

// FromStr builds a tendril by copying the input.
#[test]
fn from_str() {
    use std::str::FromStr;
    let t: Tendril<_> = FromStr::from_str("foo bar baz").unwrap();
    assert_eq!("foo bar baz", &*t);
}
}
// Add from_char
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{raw, ptr, mem, intrinsics, hash, str, u32, io, slice, cmp};
use std::borrow::Cow;
use std::marker::PhantomData;
use std::cell::Cell;
use std::ops::{Deref, DerefMut};
use std::iter::IntoIterator;
use std::default::Default;
use std::cmp::Ordering;
use std::fmt as strfmt;
use core::nonzero::NonZero;
use encoding::{self, EncodingRef, DecoderTrap, EncoderTrap};
use buf32::{self, Buf32};
use fmt::{self, Slice};
use fmt::imp::Fixup;
use util::{unsafe_slice, copy_and_advance};
use OFLOW;
/// Maximum number of bytes a tendril can store inline (in the `len` and
/// `aux` words of the handle itself, without a heap allocation).
const MAX_INLINE_LEN: usize = 8;
/// Largest pointer-word value that denotes inline storage; any larger
/// value is a heap pointer.
const MAX_INLINE_TAG: usize = 0xF;
/// Tag stored in the pointer word for a zero-length tendril. It must be
/// non-zero so the `NonZero` layout optimization still applies.
const EMPTY_TAG: usize = 0xF;
/// Encode an inline length as the tagged pointer word.
///
/// Lengths 1..=8 are stored directly; zero maps to `EMPTY_TAG` so the
/// word is never zero.
#[inline(always)]
fn inline_tag(len: u32) -> NonZero<usize> {
    debug_assert!(len <= MAX_INLINE_LEN as u32);
    // SAFETY: the stored value is always in 1..=0xF, hence non-zero.
    unsafe {
        NonZero::new(if len == 0 {
            EMPTY_TAG
        } else {
            len as usize
        })
    }
}
/// Header placed at the start of every heap buffer, ahead of the data.
#[repr(packed)]
struct Header {
    // Non-atomic (thread-local) reference count for shared buffers.
    refcount: Cell<usize>,
    // Buffer capacity; only written when the buffer becomes shared (see
    // `make_buf_shared`) — for owned buffers the capacity lives in the
    // tendril's `aux` field instead.
    cap: u32,
}
impl Header {
    /// Create a header for a freshly allocated, unshared buffer.
    ///
    /// `cap` is deliberately left uninitialized: it is only meaningful
    /// once the buffer is shared, at which point `make_buf_shared`
    /// writes it.
    #[inline(always)]
    unsafe fn new() -> Header {
        Header {
            refcount: Cell::new(1),
            cap: mem::uninitialized(),
        }
    }
}
/// Errors that can occur when slicing a `Tendril`.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq)]
pub enum SubtendrilError {
    /// The requested offset/length lies outside the tendril.
    OutOfBounds,
    /// The resulting slice would not conform to the format.
    ValidationFailed,
}
/// Compact string type for zero-copy parsing.
///
/// `Tendril`s have the semantics of owned strings, but are sometimes views
/// into shared buffers. When you mutate a `Tendril`, an owned copy is made
/// if necessary. Further mutations occur in-place until the string becomes
/// shared, e.g. with `clone()` or `subtendril()`.
///
/// Buffer sharing is accomplished through thread-local (non-atomic) reference
/// counting, which has very low overhead. The Rust type system will prevent
/// you at compile time from sending a `Tendril` between threads. We plan to
/// relax this restriction in the future; see `README.md`.
///
/// Whereas `String` allocates in the heap for any non-empty string, `Tendril`
/// can store small strings (up to 8 bytes) in-line, without a heap allocation.
/// `Tendril` is also smaller than `String` on 64-bit platforms — 16 bytes
/// versus 24.
///
/// The type parameter `F` specifies the format of the tendril, for example
/// UTF-8 text or uninterpreted bytes. The parameter will be instantiated
/// with one of the marker types from `tendril::fmt`. See the `StrTendril`
/// and `ByteTendril` type aliases for two examples.
///
/// The maximum length of a `Tendril` is 4 GB. The library will panic if
/// you attempt to go over the limit.
#[unsafe_no_drop_flag]
#[repr(packed)]
pub struct Tendril<F>
    where F: fmt::Format,
{
    // Tagged word: values <= MAX_INLINE_TAG mean the bytes are stored
    // inline (in `len`/`aux`); otherwise it is a pointer to a heap
    // `Header`, with the low bit set when the buffer is shared.
    ptr: Cell<NonZero<usize>>,
    // Inline: first half of the 8-byte inline storage (the bytes are
    // written starting at this field's address). Heap: length in bytes.
    len: u32,
    // Inline: second half of the inline storage. Owned buffer: capacity.
    // Shared buffer: byte offset of this view into the buffer.
    aux: Cell<u32>,
    // *mut F makes Tendril invariant over F and not Send/Sync.
    marker: PhantomData<*mut F>,
}
/// `Tendril` for storing native Rust strings (always valid UTF-8).
pub type StrTendril = Tendril<fmt::UTF8>;
/// `Tendril` for storing binary data (no validity constraints).
pub type ByteTendril = Tendril<fmt::Bytes>;
impl<F> Clone for Tendril<F>
    where F: fmt::Format,
{
    /// Cloning is O(1): a heap-backed tendril marks its buffer shared and
    /// bumps the refcount, then the 16-byte handle is copied bitwise.
    /// Inline tendrils are simply copied.
    #[inline]
    fn clone(&self) -> Tendril<F> {
        unsafe {
            if *self.ptr.get() > MAX_INLINE_TAG {
                self.make_buf_shared();
                self.incref();
            }
            ptr::read(self)
        }
    }
}
#[unsafe_destructor]
impl<F> Drop for Tendril<F>
    where F: fmt::Format,
{
    #[inline]
    fn drop(&mut self) {
        unsafe {
            let p = *self.ptr.get();
            // Inline storage needs no cleanup. POST_DROP_USIZE means the
            // value was already dropped (pre-1.0 drop-flag scheme under
            // #[unsafe_no_drop_flag]).
            if p <= MAX_INLINE_TAG || p == mem::POST_DROP_USIZE {
                return;
            }
            let (buf, shared, _) = self.assume_buf();
            if shared {
                // Last reference frees the buffer; otherwise decrement.
                let header = self.header();
                let refcount = (*header).refcount.get() - 1;
                if refcount == 0 {
                    buf.destroy();
                } else {
                    (*header).refcount.set(refcount);
                }
            } else {
                // Unshared owned buffer: free unconditionally.
                buf.destroy();
            }
        }
    }
}
// impl FromIterator<char> for Tendril<fmt::UTF8> { }
// impl FromIterator<u8> for Tendril<fmt::Bytes> { }
impl<F> Deref for Tendril<F>
    where F: fmt::SliceFormat,
{
    type Target = F::Slice;

    /// View the tendril as its format's slice type (`&str`, `&[u8]`, …).
    #[inline]
    fn deref(&self) -> &F::Slice {
        unsafe {
            // Sound because the bytes were validated for F on every
            // construction/mutation path.
            F::Slice::from_bytes(self.as_byte_slice())
        }
    }
}
/// Append every tendril yielded by an iterator onto this one.
impl<'a, F> Extend<&'a Tendril<F>> for Tendril<F>
    where F: fmt::Format + 'a,
{
    #[inline]
    fn extend<I>(&mut self, iterable: I)
        where I: IntoIterator<Item = &'a Tendril<F>>,
    {
        // The `for` loop consumes the IntoIterator directly; each piece is
        // appended in turn (sharing buffers when push_tendril allows it).
        for piece in iterable {
            self.push_tendril(piece);
        }
    }
}
impl<F> PartialEq for Tendril<F>
    where F: fmt::Format,
{
    /// Equality compares the underlying bytes, regardless of whether the
    /// storage is inline, owned, or shared.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.as_byte_slice().eq(other.as_byte_slice())
    }

    #[inline]
    fn ne(&self, other: &Self) -> bool {
        !self.eq(other)
    }
}
// Byte equality is a total equivalence relation, so `Eq` holds.
impl<F> Eq for Tendril<F>
    where F: fmt::Format,
{ }
impl<F> PartialOrd for Tendril<F>
    where F: fmt::SliceFormat,
          <F as fmt::SliceFormat>::Slice: PartialOrd,
{
    /// Ordering delegates (via `Deref`) to the format's slice type.
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        PartialOrd::partial_cmp(&**self, &**other)
    }
}
impl<F> Ord for Tendril<F>
    where F: fmt::SliceFormat,
          <F as fmt::SliceFormat>::Slice: Ord,
{
    /// Total ordering delegates (via `Deref`) to the format's slice type.
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        Ord::cmp(&**self, &**other)
    }
}
impl<F> Default for Tendril<F>
    where F: fmt::Format,
{
    /// The default tendril is empty and allocation-free.
    #[inline(always)]
    fn default() -> Tendril<F> {
        Tendril::new()
    }
}
impl<F> strfmt::Debug for Tendril<F>
    where F: fmt::SliceFormat + Default + strfmt::Debug,
          <F as fmt::SliceFormat>::Slice: strfmt::Debug,
{
    /// Formats as `Tendril<FORMAT>(KIND: CONTENT)` where KIND is the
    /// storage class: inline (tag word), shared (low pointer bit set),
    /// or owned.
    #[inline]
    fn fmt(&self, f: &mut strfmt::Formatter) -> strfmt::Result {
        let kind = match *self.ptr.get() {
            p if p <= MAX_INLINE_TAG => "inline",
            p if p & 1 == 1 => "shared",
            _ => "owned",
        };
        try!(write!(f, "Tendril<{:?}>({}: ", <F as Default>::default(), kind));
        try!(<<F as fmt::SliceFormat>::Slice as strfmt::Debug>::fmt(&**self, f));
        write!(f, ")")
    }
}
impl<F> hash::Hash for Tendril<F>
    where F: fmt::Format,
{
    /// Hashes the raw bytes, consistent with the byte-based `PartialEq`.
    #[inline]
    fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
        self.as_byte_slice().hash(hasher)
    }
}
impl<F> Tendril<F>
where F: fmt::Format,
{
/// Create a new, empty `Tendril` in any format.
#[inline(always)]
pub fn new() -> Tendril<F> {
    unsafe {
        // An empty byte slice is trivially valid for every format.
        Tendril::inline(&[])
    }
}

/// Create a new, empty `Tendril` with a specified capacity.
#[inline]
pub fn with_capacity(capacity: u32) -> Tendril<F> {
    let mut t: Tendril<F> = Tendril::new();
    // Capacities up to MAX_INLINE_LEN need no heap buffer at all.
    if capacity > MAX_INLINE_LEN as u32 {
        unsafe {
            t.make_owned_with_capacity(capacity);
        }
    }
    t
}

/// Reserve space for additional bytes.
///
/// This is only a suggestion. There are cases where `Tendril` will
/// decline to allocate until the buffer is actually modified.
#[inline]
pub fn reserve(&mut self, additional: u32) {
    if self.is_shared() {
        // Don't grow a shared tendril because we'd have to copy
        // right away.
        return;
    }
    let new_len = self.len32().checked_add(additional).expect(OFLOW);
    if new_len > MAX_INLINE_LEN as u32 {
        unsafe {
            self.make_owned_with_capacity(new_len);
        }
    }
}

/// Get the length of the `Tendril`.
///
/// This is named not to conflict with `len()` on the underlying
/// slice, if any.
#[inline(always)]
pub fn len32(&self) -> u32 {
    match *self.ptr.get() {
        // Inline storage encodes the length in the tag word itself.
        EMPTY_TAG => 0,
        n if n <= MAX_INLINE_LEN => n as u32,
        _ => self.len,
    }
}
/// Is the backing buffer shared?
#[inline]
pub fn is_shared(&self) -> bool {
    let n = *self.ptr.get();
    // Heap-allocated (above the inline-tag range) with the low pointer
    // bit set as the shared flag.
    (n > MAX_INLINE_TAG) && ((n & 1) == 1)
}

/// Is the backing buffer shared with this other `Tendril`?
#[inline]
pub fn is_shared_with(&self, other: &Tendril<F>) -> bool {
    let n = *self.ptr.get();
    // An identical tagged word implies the same buffer, already in the
    // shared state.
    (n > MAX_INLINE_TAG) && (n == *other.ptr.get())
}

/// Truncate to length 0 without discarding any owned storage.
#[inline]
pub fn clear(&mut self) {
    if *self.ptr.get() <= MAX_INLINE_TAG {
        // Inline: just reset the tag to the empty marker.
        self.ptr.set(unsafe { NonZero::new(EMPTY_TAG) });
    } else {
        let (_, shared, _) = unsafe { self.assume_buf() };
        if shared {
            // No need to keep a reference alive for a 0-size slice.
            *self = Tendril::new();
        } else {
            // Owned: keep the buffer, drop the length.
            self.len = 0;
        }
    }
}
/// Build a `Tendril` by copying a byte slice, if it conforms to the format.
#[inline]
pub fn try_from_byte_slice(x: &[u8]) -> Result<Tendril<F>, ()> {
    if F::validate(x) {
        // Validation passed, so the unchecked constructor is sound.
        Ok(unsafe { Tendril::from_byte_slice_without_validating(x) })
    } else {
        Err(())
    }
}
/// View as uninterpreted bytes.
#[inline(always)]
pub fn as_bytes(&self) -> &Tendril<fmt::Bytes> {
    // The format is a phantom parameter, so the layouts are identical
    // and every byte sequence is valid fmt::Bytes.
    unsafe { mem::transmute(self) }
}

/// Convert into uninterpreted bytes.
#[inline(always)]
pub fn into_bytes(self) -> Tendril<fmt::Bytes> {
    unsafe { mem::transmute(self) }
}

/// View as a superset format, for free.
#[inline(always)]
pub fn as_superset<Super>(&self) -> &Tendril<Super>
    where F: fmt::SubsetOf<Super>,
          Super: fmt::Format,
{
    // Sound without revalidation: every valid F is a valid Super.
    unsafe { mem::transmute(self) }
}

/// Convert into a superset format, for free.
#[inline(always)]
pub fn into_superset<Super>(self) -> Tendril<Super>
    where F: fmt::SubsetOf<Super>,
          Super: fmt::Format,
{
    unsafe { mem::transmute(self) }
}

/// View as a subset format, if the `Tendril` conforms to that subset.
#[inline]
pub fn try_as_subset<Sub>(&self) -> Result<&Tendril<Sub>, ()>
    where Sub: fmt::SubsetOf<F>,
{
    match Sub::revalidate_subset(self.as_byte_slice()) {
        true => Ok(unsafe { mem::transmute(self) }),
        false => Err(()),
    }
}

/// Convert into a subset format, if the `Tendril` conforms to that subset.
#[inline]
pub fn try_into_subset<Sub>(self) -> Result<Tendril<Sub>, Self>
    where Sub: fmt::SubsetOf<F>,
{
    match Sub::revalidate_subset(self.as_byte_slice()) {
        true => Ok(unsafe { mem::transmute(self) }),
        // Hand the tendril back unchanged on failure.
        false => Err(self),
    }
}
/// View as another format, if the bytes of the `Tendril` are valid for
/// that format.
#[inline]
pub fn try_reinterpret_view<Other>(&self) -> Result<&Tendril<Other>, ()>
    where Other: fmt::Format,
{
    match Other::validate(self.as_byte_slice()) {
        true => Ok(unsafe { mem::transmute(self) }),
        false => Err(()),
    }
}

/// Convert into another format, if the `Tendril` conforms to that format.
///
/// This only re-validates the existing bytes under the new format. It
/// will *not* change the byte content of the tendril!
///
/// See the `encode` and `decode` methods for character encoding conversion.
#[inline]
pub fn try_reinterpret<Other>(self) -> Result<Tendril<Other>, Self>
    where Other: fmt::Format,
{
    match Other::validate(self.as_byte_slice()) {
        true => Ok(unsafe { mem::transmute(self) }),
        // Hand the tendril back unchanged on failure.
        false => Err(self),
    }
}

/// Push some bytes onto the end of the `Tendril`, if they conform to the
/// format.
#[inline]
pub fn try_push_bytes(&mut self, buf: &[u8]) -> Result<(), ()> {
    match F::validate(buf) {
        true => unsafe {
            self.push_bytes_without_validating(buf);
            Ok(())
        },
        false => Err(()),
    }
}

/// Push another `Tendril` onto the end of this one.
#[inline]
pub fn push_tendril(&mut self, other: &Tendril<F>) {
    let new_len = self.len32().checked_add(other.len32()).expect(OFLOW);
    unsafe {
        if (*self.ptr.get() > MAX_INLINE_TAG) && (*other.ptr.get() > MAX_INLINE_TAG) {
            let (self_buf, self_shared, _) = self.assume_buf();
            let (other_buf, other_shared, _) = other.assume_buf();
            // Fast path: both are shared views of the same buffer and
            // `other` starts exactly where `self` ends — just extend
            // the length, copying nothing.
            if self_shared && other_shared
                && (self_buf.data_ptr() == other_buf.data_ptr())
                && (other.aux.get() == self.aux.get() + self.len)
            {
                self.len = new_len;
                return;
            }
        }
        self.push_bytes_without_validating(other.as_byte_slice())
    }
}
/// Attempt to slice this `Tendril` as a new `Tendril`.
///
/// This will share the buffer when possible. Mutating a shared buffer
/// will copy the contents.
///
/// The offset and length are in bytes. The function will return
/// `Err` if these are out of bounds, or if the resulting slice
/// does not conform to the format.
#[inline]
pub fn try_subtendril(&self, offset: u32, length: u32)
    -> Result<Tendril<F>, SubtendrilError>
{
    let self_len = self.len32();
    if offset > self_len || length > (self_len - offset) {
        return Err(SubtendrilError::OutOfBounds);
    }
    unsafe {
        // Bounds were checked above, so the raw slice is in range.
        let byte_slice = unsafe_slice(self.as_byte_slice(),
                                      offset as usize, length as usize);
        if !F::validate_subseq(byte_slice) {
            return Err(SubtendrilError::ValidationFailed);
        }
        Ok(self.unsafe_subtendril(offset, length))
    }
}

/// Slice this `Tendril` as a new `Tendril`.
///
/// Panics on bounds or validity check failure.
#[inline]
pub fn subtendril(&self, offset: u32, length: u32) -> Tendril<F> {
    self.try_subtendril(offset, length).unwrap()
}

/// Try to drop `n` bytes from the front.
///
/// Returns `Err` if the bytes are not available, or the suffix fails
/// validation.
#[inline]
pub fn try_pop_front(&mut self, n: u32) -> Result<(), SubtendrilError> {
    let old_len = self.len32();
    if n > old_len {
        return Err(SubtendrilError::OutOfBounds);
    }
    let new_len = old_len - n;
    unsafe {
        // The remaining suffix must still be valid on its own (e.g. it
        // may not start in the middle of a character).
        if !F::validate_suffix(unsafe_slice(self.as_byte_slice(),
                                            n as usize, new_len as usize)) {
            return Err(SubtendrilError::ValidationFailed);
        }
        self.unsafe_pop_front(n);
        Ok(())
    }
}

/// Drop `n` bytes from the front.
///
/// Panics if the bytes are not available, or the suffix fails
/// validation.
#[inline]
pub fn pop_front(&mut self, n: u32) {
    self.try_pop_front(n).unwrap()
}

/// Try to drop `n` bytes from the back.
///
/// Returns `Err` if the bytes are not available, or the prefix fails
/// validation.
#[inline]
pub fn try_pop_back(&mut self, n: u32) -> Result<(), SubtendrilError> {
    let old_len = self.len32();
    if n > old_len {
        return Err(SubtendrilError::OutOfBounds);
    }
    let new_len = old_len - n;
    unsafe {
        // The remaining prefix must still be valid on its own.
        if !F::validate_prefix(unsafe_slice(self.as_byte_slice(),
                                            0, new_len as usize)) {
            return Err(SubtendrilError::ValidationFailed);
        }
        self.unsafe_pop_back(n);
        Ok(())
    }
}

/// Drop `n` bytes from the back.
///
/// Panics if the bytes are not available, or the prefix fails
/// validation.
#[inline]
pub fn pop_back(&mut self, n: u32) {
    self.try_pop_back(n).unwrap()
}
/// View as another format, without validating.
#[inline(always)]
pub unsafe fn reinterpret_view_without_validating<Other>(&self) -> &Tendril<Other>
    where Other: fmt::Format,
{
    mem::transmute(self)
}

/// Convert into another format, without validating.
#[inline(always)]
pub unsafe fn reinterpret_without_validating<Other>(self) -> Tendril<Other>
    where Other: fmt::Format,
{
    mem::transmute(self)
}

/// Build a `Tendril` by copying a byte slice, without validating.
#[inline]
pub unsafe fn from_byte_slice_without_validating(x: &[u8]) -> Tendril<F> {
    assert!(x.len() <= buf32::MAX_LEN);
    // Short inputs are stored inline; longer ones get a heap buffer.
    if x.len() <= MAX_INLINE_LEN {
        Tendril::inline(x)
    } else {
        Tendril::owned_copy(x)
    }
}

/// Push some bytes onto the end of the `Tendril`, without validating.
#[inline]
pub unsafe fn push_bytes_without_validating(&mut self, buf: &[u8]) {
    assert!(buf.len() <= buf32::MAX_LEN);
    // The format may rewrite bytes at the join point: drop `drop_left`
    // bytes from the end of self and `drop_right` from the start of
    // `buf`, splicing in `insert_bytes` instead (e.g. WTF-8 fuses a
    // trailing surrogate onto a leading one — see the wtf8 test).
    let Fixup { drop_left, drop_right, insert_len, insert_bytes }
        = F::fixup(self.as_byte_slice(), buf);
    // FIXME: think more about overflow
    let adj_len = self.len32() + insert_len - drop_left;
    let new_len = adj_len.checked_add(buf.len() as u32).expect(OFLOW)
        - drop_right;
    let drop_left = drop_left as usize;
    let drop_right = drop_right as usize;
    if new_len <= MAX_INLINE_LEN as u32 {
        // Result fits inline: assemble it in a stack buffer first, since
        // the pieces alias self's current storage.
        let mut tmp: [u8; MAX_INLINE_LEN] = mem::uninitialized();
        {
            let old = self.as_byte_slice();
            let mut dest = tmp.as_mut_ptr();
            copy_and_advance(&mut dest, unsafe_slice(old, 0, old.len() - drop_left));
            copy_and_advance(&mut dest, unsafe_slice(&insert_bytes, 0, insert_len as usize));
            copy_and_advance(&mut dest, unsafe_slice(buf, drop_right, buf.len() - drop_right));
        }
        *self = Tendril::inline(&tmp[..new_len as usize]);
    } else {
        // Result needs heap storage: ensure an exclusively owned buffer
        // of sufficient capacity, then append in place.
        self.make_owned_with_capacity(new_len);
        let (owned, _, _) = self.assume_buf();
        let mut dest = owned.data_ptr().offset((owned.len as usize - drop_left) as isize);
        copy_and_advance(&mut dest, unsafe_slice(&insert_bytes, 0, insert_len as usize));
        copy_and_advance(&mut dest, unsafe_slice(buf, drop_right, buf.len() - drop_right));
        self.len = new_len;
    }
}
/// Slice this `Tendril` as a new `Tendril`.
///
/// Does not check validity or bounds!
#[inline]
pub unsafe fn unsafe_subtendril(&self, offset: u32, length: u32) -> Tendril<F> {
    if length <= MAX_INLINE_LEN as u32 {
        // Small result: copy the bytes inline, no refcount traffic.
        Tendril::inline(unsafe_slice(self.as_byte_slice(),
                                     offset as usize, length as usize))
    } else {
        // Large result: mark the buffer shared and take a new reference.
        self.make_buf_shared();
        self.incref();
        let (buf, _, _) = self.assume_buf();
        Tendril::shared(buf, self.aux.get() + offset, length)
    }
}

/// Drop `n` bytes from the front.
///
/// Does not check validity or bounds!
#[inline]
pub unsafe fn unsafe_pop_front(&mut self, n: u32) {
    let new_len = self.len32() - n;
    if new_len <= MAX_INLINE_LEN as u32 {
        // Shrunk into the inline range: copy the remainder inline.
        *self = Tendril::inline(unsafe_slice(self.as_byte_slice(),
                                             n as usize, new_len as usize));
    } else {
        // Advance the view: once shared, `aux` is the buffer offset.
        self.make_buf_shared();
        self.aux.set(self.aux.get() + n);
        self.len -= n;
    }
}

/// Drop `n` bytes from the back.
///
/// Does not check validity or bounds!
#[inline]
pub unsafe fn unsafe_pop_back(&mut self, n: u32) {
    let new_len = self.len32() - n;
    if new_len <= MAX_INLINE_LEN as u32 {
        *self = Tendril::inline(unsafe_slice(self.as_byte_slice(),
                                             0, new_len as usize));
    } else {
        self.make_buf_shared();
        self.len -= n;
    }
}
/// Borrow the contents as raw bytes, whatever the storage class.
#[inline]
fn as_byte_slice<'a>(&'a self) -> &'a [u8] {
    unsafe {
        match *self.ptr.get() {
            EMPTY_TAG => mem::transmute(raw::Slice {
                data: ptr::null::<u8>(),
                len: 0,
            }),
            // Inline: the bytes live in the `len`/`aux` words themselves,
            // starting at `len`'s address.
            n if n <= MAX_INLINE_LEN => mem::transmute(raw::Slice {
                data: &self.len as *const u32 as *const u8,
                len: n,
            }),
            // Heap: slice the buffer at this view's offset and length.
            _ => {
                let (buf, _, offset) = self.assume_buf();
                mem::copy_lifetime(self, unsafe_slice(buf.data(),
                                                      offset as usize, self.len32() as usize))
            }
        }
    }
}

/// Bump the shared buffer's reference count (overflow-checked).
#[inline]
unsafe fn incref(&self) {
    let header = self.header();
    let refcount = (*header).refcount.get().checked_add(1).expect(OFLOW);
    (*header).refcount.set(refcount);
}

/// Flip an owned heap buffer into the shared state.
///
/// The capacity moves from `aux` into the header, the low pointer bit
/// is set as the shared flag, and `aux` becomes the view offset (0).
/// Idempotent: does nothing if already shared.
#[inline]
unsafe fn make_buf_shared(&self) {
    let p = *self.ptr.get();
    if p & 1 == 0 {
        let header = p as *mut Header;
        (*header).cap = self.aux.get();
        self.ptr.set(NonZero::new(p | 1));
        self.aux.set(0);
    }
}

/// Ensure this tendril exclusively owns a buffer with capacity for at
/// least `cap` bytes, copying out of inline or shared storage if needed.
#[inline]
unsafe fn make_owned_with_capacity(&mut self, cap: u32) {
    let ptr = *self.ptr.get();
    // Inline or shared: first copy into a fresh owned buffer.
    if ptr <= MAX_INLINE_TAG || (ptr & 1) == 1 {
        *self = Tendril::owned_copy(self.as_byte_slice());
    }
    let mut buf = self.assume_buf().0;
    buf.grow(cap);
    self.ptr.set(NonZero::new(buf.ptr as usize));
    self.aux.set(buf.cap);
}

/// Pointer to the heap buffer's header, with the shared flag bit masked
/// off. Only meaningful for heap-backed tendrils.
#[inline(always)]
unsafe fn header(&self) -> *mut Header {
    (*self.ptr.get() & !1) as *mut Header
}

/// Reconstruct the `Buf32` for a heap-backed tendril.
///
/// Returns the buffer, whether it is shared, and this view's byte
/// offset into it. Must not be called on an inline tendril.
#[inline]
unsafe fn assume_buf(&self) -> (Buf32<Header>, bool, u32) {
    let ptr = self.ptr.get();
    let header = self.header();
    let shared = (*ptr & 1) == 1;
    // Shared: capacity lives in the header and `aux` is the offset.
    // Owned: capacity lives in `aux` and the offset is 0.
    let (cap, offset) = match shared {
        true => ((*header).cap, self.aux.get()),
        false => (self.aux.get(), 0),
    };
    (Buf32 {
        ptr: header,
        len: offset + self.len32(),
        cap: cap,
    }, shared, offset)
}
/// Build an inline tendril from at most `MAX_INLINE_LEN` bytes.
///
/// The payload is stored directly in the `len`/`aux` fields (which are
/// therefore left uninitialized and then overwritten by the copy);
/// the pointer word's tag encodes the length.
#[inline]
unsafe fn inline(x: &[u8]) -> Tendril<F> {
let len = x.len();
let mut t = Tendril {
ptr: Cell::new(inline_tag(len as u32)),
len: mem::uninitialized(),
aux: mem::uninitialized(),
marker: PhantomData,
};
intrinsics::copy_nonoverlapping(x.as_ptr(), &mut t.len as *mut u32 as *mut u8, len);
t
}
/// Wrap an owned heap buffer: untagged pointer, capacity kept in `aux`.
#[inline]
unsafe fn owned(x: Buf32<Header>) -> Tendril<F> {
Tendril {
ptr: Cell::new(NonZero::new(x.ptr as usize)),
len: x.len,
aux: Cell::new(x.cap),
marker: PhantomData,
}
}
/// Allocate a fresh owned buffer and copy `x` into it.
#[inline]
unsafe fn owned_copy(x: &[u8]) -> Tendril<F> {
let len32 = x.len() as u32;
let mut b = Buf32::with_capacity(len32, Header::new());
intrinsics::copy_nonoverlapping(x.as_ptr(), b.data_ptr(), x.len());
b.len = len32;
Tendril::owned(b)
}
/// Wrap a view of a shared buffer: pointer tagged with the low bit, view
/// offset kept in `aux`. Does not touch the reference count — the caller
/// is responsible for `incref`.
#[inline]
unsafe fn shared(buf: Buf32<Header>, off: u32, len: u32) -> Tendril<F> {
Tendril {
ptr: Cell::new(NonZero::new((buf.ptr as usize) | 1)),
len: len,
aux: Cell::new(off),
marker: PhantomData,
}
}
}
impl<F> Tendril<F>
where F: fmt::SliceFormat,
{
/// Build a `Tendril` by copying a slice.
#[inline]
pub fn from_slice(x: &F::Slice) -> Tendril<F> {
unsafe {
// `x` is already a valid slice in format `F`, so no
// re-validation is needed.
Tendril::from_byte_slice_without_validating(x.as_bytes())
}
}
/// Push a slice onto the end of the `Tendril`.
#[inline]
pub fn push_slice(&mut self, x: &F::Slice) {
unsafe {
// NOTE(review): assumes formats implementing `SliceFormat` are
// closed under concatenation, so no validation at the seam.
self.push_bytes_without_validating(x.as_bytes())
}
}
}
/// `Tendril`-related methods for Rust slices.
pub trait SliceExt: fmt::Slice {
/// Make a `Tendril` from this slice.
#[inline]
fn to_tendril(&self) -> Tendril<Self::Format> {
// Copies the bytes; see `Tendril::from_slice`.
Tendril::from_slice(self)
}
}
impl SliceExt for str { }
impl SliceExt for [u8] { }
impl<F> Tendril<F>
where F: for<'a> fmt::CharFormat<'a>,
{
/// Remove and return the first character, if any.
#[inline]
pub fn pop_front_char<'a>(&'a mut self) -> Option<char> {
unsafe {
let mut it = F::char_indices(self.as_byte_slice());
it.next().map(|(_, c)| {
// The second char's byte index is the encoded width of the
// first char; with no second char, the tendril held exactly
// one character and can simply be cleared.
if let Some((n, _)) = it.next() {
self.unsafe_pop_front(n as u32);
} else {
self.clear();
}
c
})
}
}
/// Remove and return a run of characters at the front of the `Tendril`
/// which are classified the same according to the function `classify`.
///
/// Returns `None` on an empty string.
#[inline]
pub fn pop_front_char_run<'a, C, R>(&'a mut self, mut classify: C)
-> Option<(Tendril<F>, R)>
where C: FnMut(char) -> R,
R: PartialEq,
{
let (class, first_mismatch);
{
let mut chars = unsafe {
F::char_indices(self.as_byte_slice())
};
let (_, first) = unwrap_or_return!(chars.next(), None);
class = classify(first);
// Byte index of the first differently-classified character.
first_mismatch = chars.find(|&(_, ch)| &classify(ch) != &class);
}
match first_mismatch {
Some((idx, _)) => unsafe {
// Split: bytes [0, idx) form the run; the rest stays here.
let t = self.unsafe_subtendril(0, idx as u32);
self.unsafe_pop_front(idx as u32);
Some((t, class))
},
None => {
// The whole tendril is a single run; hand back a clone
// (cheap: shares the buffer) and empty ourselves.
let t = self.clone();
self.clear();
Some((t, class))
}
}
}
/// Push a character, if it can be represented in this format.
#[inline]
pub fn try_push_char(&mut self, c: char) -> Result<(), ()> {
// `encode_char` only invokes the callback when `c` is encodable,
// so invalid characters leave the tendril untouched.
F::encode_char(c, |b| unsafe {
self.push_bytes_without_validating(b);
})
}
}
// Mutable access is only offered for the `Bytes` format, where arbitrary
// writes cannot break a validity invariant.
impl DerefMut for Tendril<fmt::Bytes> {
#[inline]
fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
unsafe {
// Transmutes the shared view to a mutable one, relying on
// `&mut self` for uniqueness. NOTE(review): the heap buffer may
// still be shared with clones — confirm writes cannot be
// observed through another handle.
mem::transmute(self.as_byte_slice())
}
}
}
/// Infallible `io::Write` sink: all bytes are appended to the tendril.
impl io::Write for Tendril<fmt::Bytes> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Appending never fails, so the whole buffer is always consumed.
        let written = buf.len();
        self.push_slice(buf);
        Ok(written)
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        // A single `write` always takes everything.
        self.write(buf).map(|_| ())
    }

    #[inline(always)]
    fn flush(&mut self) -> io::Result<()> {
        // Nothing is buffered beyond the tendril itself.
        Ok(())
    }
}
impl encoding::ByteWriter for Tendril<fmt::Bytes> {
/// Append a single byte.
#[inline]
fn write_byte(&mut self, b: u8) {
// `ref_slice` views the byte as a one-element slice.
self.push_slice(slice::ref_slice(&b));
}
/// Append a byte slice.
#[inline]
fn write_bytes(&mut self, v: &[u8]) {
self.push_slice(v);
}
/// Pre-reserve space; the hint is clamped to the `u32` length domain.
#[inline]
fn writer_hint(&mut self, additional: usize) {
self.reserve(cmp::min(u32::MAX as usize, additional) as u32);
}
}
impl Tendril<fmt::Bytes> {
/// Decode from some character encoding into UTF-8.
///
/// See the [rust-encoding docs](https://lifthrasiir.github.io/rust-encoding/encoding/)
/// for more information.
#[inline]
pub fn decode(&self, encoding: EncodingRef, trap: DecoderTrap)
-> Result<Tendril<fmt::UTF8>, Cow<'static, str>>
{
let mut ret = Tendril::new();
encoding.decode_to(&*self, trap, &mut ret).map(|_| ret)
}
/// Push "uninitialized bytes" onto the end.
///
/// Really, this grows the tendril without writing anything to the new area.
/// It's only defined for byte tendrils because it's only useful if you
/// plan to then mutate the buffer.
#[inline]
pub unsafe fn push_uninitialized(&mut self, n: u32) {
let new_len = self.len32().checked_add(n).expect(OFLOW);
// Still-inline case: the inline tag itself encodes the length, so
// retagging "extends" into the existing (uninitialized) field bytes.
if new_len <= MAX_INLINE_LEN as u32
&& *self.ptr.get() <= MAX_INLINE_TAG
{
self.ptr.set(inline_tag(new_len))
} else {
// Heap case: ensure unique ownership and capacity, then just
// bump the length without touching the new bytes.
self.make_owned_with_capacity(new_len);
self.len = new_len;
}
}
}
impl strfmt::Display for Tendril<fmt::UTF8> {
#[inline]
fn fmt(&self, f: &mut strfmt::Formatter) -> strfmt::Result {
// Defer to `str`'s Display through the Deref'd view.
<str as strfmt::Display>::fmt(&**self, f)
}
}
impl str::FromStr for Tendril<fmt::UTF8> {
type Err = ();
/// Infallible in practice: copies the already-valid UTF-8 slice.
#[inline]
fn from_str(s: &str) -> Result<Self, ()> {
Ok(Tendril::from_slice(s))
}
}
// Lets `write!`/`format_args!` target a string tendril directly.
impl strfmt::Write for Tendril<fmt::UTF8> {
#[inline]
fn write_str(&mut self, s: &str) -> strfmt::Result {
self.push_slice(s);
Ok(())
}
}
impl encoding::StringWriter for Tendril<fmt::UTF8> {
/// Append a single character.
#[inline]
fn write_char(&mut self, c: char) {
self.push_char(c);
}
/// Append a string slice.
#[inline]
fn write_str(&mut self, s: &str) {
self.push_slice(s);
}
/// Pre-reserve space; the hint is clamped to the `u32` length domain.
#[inline]
fn writer_hint(&mut self, additional: usize) {
self.reserve(cmp::min(u32::MAX as usize, additional) as u32);
}
}
impl Tendril<fmt::UTF8> {
/// Encode from UTF-8 into some other character encoding.
///
/// See the [rust-encoding docs](https://lifthrasiir.github.io/rust-encoding/encoding/)
/// for more information.
#[inline]
pub fn encode(&self, encoding: EncodingRef, trap: EncoderTrap)
-> Result<Tendril<fmt::Bytes>, Cow<'static, str>>
{
let mut ret = Tendril::new();
encoding.encode_to(&*self, trap, &mut ret).map(|_| ret)
}
/// Push a character onto the end.
#[inline]
pub fn push_char(&mut self, c: char) {
unsafe {
// 4 bytes is the maximum UTF-8 width of a char; only the first
// `n` bytes are read after encoding.
let mut buf: [u8; 4] = mem::uninitialized();
let n = c.encode_utf8(&mut buf).expect("Tendril::push_char: internal error");
self.push_bytes_without_validating(unsafe_slice(&buf, 0, n));
}
}
/// Create a `Tendril` from a single character.
#[inline]
pub fn from_char(c: char) -> Tendril<fmt::UTF8> {
let mut t: Tendril<fmt::UTF8> = Tendril::new();
t.push_char(c);
t
}
/// Helper for the `format_tendril!` macro.
#[inline]
pub fn format(args: strfmt::Arguments) -> Tendril<fmt::UTF8> {
use std::fmt::Write;
let mut output: Tendril<fmt::UTF8> = Tendril::new();
// Writing into a tendril cannot fail, so the result is discarded.
let _ = write!(&mut output, "{}", args);
output
}
}
/// Create a `StrTendril` through string formatting.
///
/// Works just like the standard `format!` macro.
///
/// # Examples
///
/// ```ignore
/// let t = format_tendril!("{} + {} = {}", 1, 2, 1 + 2);
/// ```
#[macro_export]
macro_rules! format_tendril {
($($arg:tt)*) => ($crate::Tendril::format(format_args!($($arg)*)))
}
#[cfg(test)]
#[path="bench.rs"]
mod bench;
#[cfg(test)]
mod test {
use super::{Tendril, ByteTendril, StrTendril, SliceExt, Header};
use fmt;
#[test]
fn smoke_test() {
assert_eq!("", &*"".to_tendril());
assert_eq!("abc", &*"abc".to_tendril());
assert_eq!("Hello, world!", &*"Hello, world!".to_tendril());
assert_eq!(b"", &*b"".to_tendril());
assert_eq!(b"abc", &*b"abc".to_tendril());
assert_eq!(b"Hello, world!", &*b"Hello, world!".to_tendril());
}
#[test]
fn assert_sizes() {
use std::mem;
let correct = mem::size_of::<*const ()>() + 8;
assert_eq!(correct, mem::size_of::<ByteTendril>());
assert_eq!(correct, mem::size_of::<StrTendril>());
// Check that the NonZero<T> optimization is working.
assert_eq!(correct, mem::size_of::<Option<ByteTendril>>());
assert_eq!(correct, mem::size_of::<Option<StrTendril>>());
let correct_header = mem::size_of::<*const ()>() + 4;
assert_eq!(correct_header, mem::size_of::<Header>());
}
#[test]
fn validate_utf8() {
assert!(ByteTendril::try_from_byte_slice(b"\xFF").is_ok());
assert!(StrTendril::try_from_byte_slice(b"\xFF").is_err());
assert!(StrTendril::try_from_byte_slice(b"\xEA\x99\xFF").is_err());
assert!(StrTendril::try_from_byte_slice(b"\xEA\x99").is_err());
assert!(StrTendril::try_from_byte_slice(b"\xEA\x99\xAE\xEA").is_err());
assert_eq!("\u{a66e}", &*StrTendril::try_from_byte_slice(b"\xEA\x99\xAE").unwrap());
let mut t = StrTendril::new();
assert!(t.try_push_bytes(b"\xEA\x99").is_err());
assert!(t.try_push_bytes(b"\xAE").is_err());
assert!(t.try_push_bytes(b"\xEA\x99\xAE").is_ok());
assert_eq!("\u{a66e}", &*t);
}
#[test]
fn share_and_unshare() {
let s = b"foobarbaz".to_tendril();
assert_eq!(b"foobarbaz", &*s);
assert!(!s.is_shared());
let mut t = s.clone();
assert_eq!(s.as_ptr(), t.as_ptr());
assert!(s.is_shared());
assert!(t.is_shared());
t.push_slice(b"quux");
assert_eq!(b"foobarbaz", &*s);
assert_eq!(b"foobarbazquux", &*t);
assert!(s.as_ptr() != t.as_ptr());
assert!(!t.is_shared());
}
#[test]
fn format_display() {
assert_eq!("foobar", &*format!("{}", "foobar".to_tendril()));
let mut s = "foo".to_tendril();
assert_eq!("foo", &*format!("{}", s));
let t = s.clone();
assert_eq!("foo", &*format!("{}", s));
assert_eq!("foo", &*format!("{}", t));
s.push_slice("barbaz!");
assert_eq!("foobarbaz!", &*format!("{}", s));
assert_eq!("foo", &*format!("{}", t));
}
#[test]
fn format_debug() {
assert_eq!(r#"Tendril<UTF8>(inline: "foobar")"#,
&*format!("{:?}", "foobar".to_tendril()));
assert_eq!(r#"Tendril<Bytes>(inline: [102, 111, 111, 98, 97, 114])"#,
&*format!("{:?}", b"foobar".to_tendril()));
let t = "anextralongstring".to_tendril();
assert_eq!(r#"Tendril<UTF8>(owned: "anextralongstring")"#,
&*format!("{:?}", t));
t.clone();
assert_eq!(r#"Tendril<UTF8>(shared: "anextralongstring")"#,
&*format!("{:?}", t));
}
#[test]
fn subtendril() {
assert_eq!("foo".to_tendril(), "foo-bar".to_tendril().subtendril(0, 3));
assert_eq!("bar".to_tendril(), "foo-bar".to_tendril().subtendril(4, 3));
let mut t = "foo-bar".to_tendril();
t.pop_front(2);
assert_eq!("o-bar".to_tendril(), t);
t.pop_back(1);
assert_eq!("o-ba".to_tendril(), t);
assert_eq!("foo".to_tendril(),
"foo-a-longer-string-bar-baz".to_tendril().subtendril(0, 3));
assert_eq!("oo-a-".to_tendril(),
"foo-a-longer-string-bar-baz".to_tendril().subtendril(1, 5));
assert_eq!("bar".to_tendril(),
"foo-a-longer-string-bar-baz".to_tendril().subtendril(20, 3));
let mut t = "another rather long string".to_tendril();
t.pop_front(2);
assert!(t.starts_with("other rather"));
t.pop_back(1);
assert_eq!("other rather long strin".to_tendril(), t);
assert!(t.is_shared());
}
#[test]
fn subtendril_invalid() {
assert!("\u{a66e}".to_tendril().try_subtendril(0, 2).is_err());
assert!("\u{a66e}".to_tendril().try_subtendril(1, 2).is_err());
assert!("\u{1f4a9}".to_tendril().try_subtendril(0, 3).is_err());
assert!("\u{1f4a9}".to_tendril().try_subtendril(0, 2).is_err());
assert!("\u{1f4a9}".to_tendril().try_subtendril(0, 1).is_err());
assert!("\u{1f4a9}".to_tendril().try_subtendril(1, 3).is_err());
assert!("\u{1f4a9}".to_tendril().try_subtendril(1, 2).is_err());
assert!("\u{1f4a9}".to_tendril().try_subtendril(1, 1).is_err());
assert!("\u{1f4a9}".to_tendril().try_subtendril(2, 2).is_err());
assert!("\u{1f4a9}".to_tendril().try_subtendril(2, 1).is_err());
assert!("\u{1f4a9}".to_tendril().try_subtendril(3, 1).is_err());
let mut t = "\u{1f4a9}zzzzzz".to_tendril();
assert!(t.try_pop_front(1).is_err());
assert!(t.try_pop_front(2).is_err());
assert!(t.try_pop_front(3).is_err());
assert!(t.try_pop_front(4).is_ok());
assert_eq!("zzzzzz", &*t);
let mut t = "zzzzzz\u{1f4a9}".to_tendril();
assert!(t.try_pop_back(1).is_err());
assert!(t.try_pop_back(2).is_err());
assert!(t.try_pop_back(3).is_err());
assert!(t.try_pop_back(4).is_ok());
assert_eq!("zzzzzz", &*t);
}
#[test]
fn conversion() {
assert_eq!(&[0x66, 0x6F, 0x6F].to_tendril(), "foo".to_tendril().as_bytes());
assert_eq!([0x66, 0x6F, 0x6F].to_tendril(), "foo".to_tendril().into_bytes());
let ascii: Tendril<fmt::ASCII> = b"hello".to_tendril().try_reinterpret().unwrap();
assert_eq!(&"hello".to_tendril(), ascii.as_superset());
assert_eq!("hello".to_tendril(), ascii.clone().into_superset());
assert!(b"\xFF".to_tendril().try_reinterpret::<fmt::ASCII>().is_err());
let t = "hello".to_tendril();
let ascii: &Tendril<fmt::ASCII> = t.try_as_subset().unwrap();
assert_eq!(b"hello", &**ascii.as_bytes());
assert!("ő".to_tendril().try_reinterpret_view::<fmt::ASCII>().is_err());
assert!("ő".to_tendril().try_as_subset::<fmt::ASCII>().is_err());
let ascii: Tendril<fmt::ASCII> = "hello".to_tendril().try_into_subset().unwrap();
assert_eq!(b"hello", &**ascii.as_bytes());
assert!("ő".to_tendril().try_reinterpret::<fmt::ASCII>().is_err());
assert!("ő".to_tendril().try_into_subset::<fmt::ASCII>().is_err());
}
#[test]
fn clear() {
let mut t = "foo-".to_tendril();
t.clear();
assert_eq!(t.len(), 0);
assert_eq!(t.len32(), 0);
assert_eq!(&*t, "");
let mut t = "much longer".to_tendril();
let s = t.clone();
t.clear();
assert_eq!(t.len(), 0);
assert_eq!(t.len32(), 0);
assert_eq!(&*t, "");
assert_eq!(&*s, "much longer");
}
#[test]
fn push_tendril() {
let mut t = "abc".to_tendril();
t.push_tendril(&"xyz".to_tendril());
assert_eq!("abcxyz", &*t);
}
#[test]
fn wtf8() {
assert!(Tendril::<fmt::WTF8>::try_from_byte_slice(b"\xED\xA0\xBD").is_ok());
assert!(Tendril::<fmt::WTF8>::try_from_byte_slice(b"\xED\xB2\xA9").is_ok());
assert!(Tendril::<fmt::WTF8>::try_from_byte_slice(b"\xED\xA0\xBD\xED\xB2\xA9").is_err());
let t: Tendril<fmt::WTF8>
= Tendril::try_from_byte_slice(b"\xED\xA0\xBD\xEA\x99\xAE").unwrap();
assert!(b"\xED\xA0\xBD".to_tendril().try_reinterpret().unwrap()
== t.subtendril(0, 3));
assert!(b"\xEA\x99\xAE".to_tendril().try_reinterpret().unwrap()
== t.subtendril(3, 3));
assert!(t.try_reinterpret_view::<fmt::UTF8>().is_err());
assert!(t.try_subtendril(0, 1).is_err());
assert!(t.try_subtendril(0, 2).is_err());
assert!(t.try_subtendril(1, 1).is_err());
assert!(t.try_subtendril(3, 1).is_err());
assert!(t.try_subtendril(3, 2).is_err());
assert!(t.try_subtendril(4, 1).is_err());
// paired surrogates
let mut t: Tendril<fmt::WTF8> = Tendril::try_from_byte_slice(b"\xED\xA0\xBD").unwrap();
assert!(t.try_push_bytes(b"\xED\xB2\xA9").is_ok());
assert_eq!(b"\xF0\x9F\x92\xA9", t.as_byte_slice());
assert!(t.try_reinterpret_view::<fmt::UTF8>().is_ok());
// unpaired surrogates
let mut t: Tendril<fmt::WTF8> = Tendril::try_from_byte_slice(b"\xED\xA0\xBB").unwrap();
assert!(t.try_push_bytes(b"\xED\xA0").is_err());
assert!(t.try_push_bytes(b"\xED").is_err());
assert!(t.try_push_bytes(b"\xA0").is_err());
assert!(t.try_push_bytes(b"\xED\xA0\xBD").is_ok());
assert_eq!(b"\xED\xA0\xBB\xED\xA0\xBD", t.as_byte_slice());
assert!(t.try_push_bytes(b"\xED\xB2\xA9").is_ok());
assert_eq!(b"\xED\xA0\xBB\xF0\x9F\x92\xA9", t.as_byte_slice());
assert!(t.try_reinterpret_view::<fmt::UTF8>().is_err());
}
#[test]
fn front_char() {
let mut t = "".to_tendril();
assert_eq!(None, t.pop_front_char());
assert_eq!(None, t.pop_front_char());
let mut t = "abc".to_tendril();
assert_eq!(Some('a'), t.pop_front_char());
assert_eq!(Some('b'), t.pop_front_char());
assert_eq!(Some('c'), t.pop_front_char());
assert_eq!(None, t.pop_front_char());
assert_eq!(None, t.pop_front_char());
let mut t = "főo-a-longer-string-bar-baz".to_tendril();
assert_eq!(28, t.len());
assert_eq!(Some('f'), t.pop_front_char());
assert_eq!(Some('ő'), t.pop_front_char());
assert_eq!(Some('o'), t.pop_front_char());
assert_eq!(Some('-'), t.pop_front_char());
assert_eq!(23, t.len());
}
#[test]
fn char_run() {
for &(s, exp) in &[
("", None),
(" ", Some((" ", true))),
("x", Some(("x", false))),
(" \t \n", Some((" \t \n", true))),
("xyzzy", Some(("xyzzy", false))),
(" xyzzy", Some((" ", true))),
("xyzzy ", Some(("xyzzy", false))),
(" xyzzy ", Some((" ", true))),
("xyzzy hi", Some(("xyzzy", false))),
("中 ", Some(("中", false))),
(" 中 ", Some((" ", true))),
(" 中 ", Some((" ", true))),
(" 中 ", Some((" ", true))),
] {
let mut t = s.to_tendril();
let res = t.pop_front_char_run(char::is_whitespace);
match exp {
None => assert!(res.is_none()),
Some((es, ec)) => {
let (rt, rc) = res.unwrap();
assert_eq!(es, &*rt);
assert_eq!(ec, rc);
}
}
}
}
#[test]
fn deref_mut() {
let mut t = "xyő".to_tendril().into_bytes();
t[3] = 0xff;
assert_eq!(b"xy\xC5\xFF", &*t);
assert!(t.try_reinterpret_view::<fmt::UTF8>().is_err());
t[3] = 0x8b;
assert_eq!("xyŋ", &**t.try_reinterpret_view::<fmt::UTF8>().unwrap());
unsafe {
t.push_uninitialized(3);
t[4] = 0xEA;
t[5] = 0x99;
t[6] = 0xAE;
assert_eq!("xyŋ\u{a66e}", &**t.try_reinterpret_view::<fmt::UTF8>().unwrap());
t.push_uninitialized(20);
t.pop_back(20);
assert_eq!("xyŋ\u{a66e}", &**t.try_reinterpret_view::<fmt::UTF8>().unwrap());
}
}
#[test]
fn push_char() {
let mut t = "xyz".to_tendril();
t.push_char('o');
assert_eq!("xyzo", &*t);
t.push_char('ő');
assert_eq!("xyzoő", &*t);
t.push_char('\u{a66e}');
assert_eq!("xyzoő\u{a66e}", &*t);
t.push_char('\u{1f4a9}');
assert_eq!("xyzoő\u{a66e}\u{1f4a9}", &*t);
assert_eq!(t.len(), 13);
}
#[test]
fn encode() {
use encoding::{all, EncoderTrap};
let t = "안녕하세요 러스트".to_tendril();
assert_eq!(b"\xbe\xc8\xb3\xe7\xc7\xcf\xbc\xbc\xbf\xe4\x20\xb7\xaf\xbd\xba\xc6\xae",
&*t.encode(all::WINDOWS_949, EncoderTrap::Strict).unwrap());
let t = "Энергия пробуждения ия-я-я! \u{a66e}".to_tendril();
assert_eq!(b"\xfc\xce\xc5\xd2\xc7\xc9\xd1 \xd0\xd2\xcf\xc2\xd5\xd6\xc4\xc5\xce\
\xc9\xd1 \xc9\xd1\x2d\xd1\x2d\xd1\x21 ?",
&*t.encode(all::KOI8_U, EncoderTrap::Replace).unwrap());
let t = "\u{1f4a9}".to_tendril();
assert!(t.encode(all::WINDOWS_1252, EncoderTrap::Strict).is_err());
}
#[test]
fn decode() {
use encoding::{all, DecoderTrap};
let t = b"\xbe\xc8\xb3\xe7\xc7\xcf\xbc\xbc\
\xbf\xe4\x20\xb7\xaf\xbd\xba\xc6\xae".to_tendril();
assert_eq!("안녕하세요 러스트",
&*t.decode(all::WINDOWS_949, DecoderTrap::Strict).unwrap());
let t = b"\xfc\xce\xc5\xd2\xc7\xc9\xd1 \xd0\xd2\xcf\xc2\xd5\xd6\xc4\xc5\xce\
\xc9\xd1 \xc9\xd1\x2d\xd1\x2d\xd1\x21".to_tendril();
assert_eq!("Энергия пробуждения ия-я-я!",
&*t.decode(all::KOI8_U, DecoderTrap::Replace).unwrap());
let t = b"x \xff y".to_tendril();
assert!(t.decode(all::UTF_8, DecoderTrap::Strict).is_err());
let t = b"x \xff y".to_tendril();
assert_eq!("x \u{fffd} y",
&*t.decode(all::UTF_8, DecoderTrap::Replace).unwrap());
}
#[test]
fn ascii() {
fn mk(x: &[u8]) -> Tendril<fmt::ASCII> {
x.to_tendril().try_reinterpret().unwrap()
}
let mut t = mk(b"xyz");
assert_eq!(Some('x'), t.pop_front_char());
assert_eq!(Some('y'), t.pop_front_char());
assert_eq!(Some('z'), t.pop_front_char());
assert_eq!(None, t.pop_front_char());
let mut t = mk(b" \t xyz");
assert!(Some((mk(b" \t "), true))
== t.pop_front_char_run(char::is_whitespace));
assert!(Some((mk(b"xyz"), false))
== t.pop_front_char_run(char::is_whitespace));
assert!(t.pop_front_char_run(char::is_whitespace).is_none());
let mut t = Tendril::<fmt::ASCII>::new();
assert!(t.try_push_char('x').is_ok());
assert!(t.try_push_char('\0').is_ok());
assert!(t.try_push_char('\u{a0}').is_err());
assert_eq!(b"x\0", t.as_byte_slice());
}
#[test]
fn latin1() {
fn mk(x: &[u8]) -> Tendril<fmt::Latin1> {
x.to_tendril().try_reinterpret().unwrap()
}
let mut t = mk(b"\xd8_\xd8");
assert_eq!(Some('Ø'), t.pop_front_char());
assert_eq!(Some('_'), t.pop_front_char());
assert_eq!(Some('Ø'), t.pop_front_char());
assert_eq!(None, t.pop_front_char());
let mut t = mk(b" \t \xfe\xa7z");
assert!(Some((mk(b" \t "), true))
== t.pop_front_char_run(char::is_whitespace));
assert!(Some((mk(b"\xfe\xa7z"), false))
== t.pop_front_char_run(char::is_whitespace));
assert!(t.pop_front_char_run(char::is_whitespace).is_none());
let mut t = Tendril::<fmt::Latin1>::new();
assert!(t.try_push_char('x').is_ok());
assert!(t.try_push_char('\0').is_ok());
assert!(t.try_push_char('\u{a0}').is_ok());
assert!(t.try_push_char('ő').is_err());
assert!(t.try_push_char('я').is_err());
assert!(t.try_push_char('\u{a66e}').is_err());
assert!(t.try_push_char('\u{1f4a9}').is_err());
assert_eq!(b"x\0\xa0", t.as_byte_slice());
}
#[test]
fn format() {
assert_eq!("", &*format_tendril!(""));
assert_eq!("two and two make 4", &*format_tendril!("two and two make {}", 2+2));
}
#[test]
fn merge_shared() {
let t = "012345678901234567890123456789".to_tendril();
let a = t.subtendril(10, 20);
assert!(a.is_shared());
assert_eq!("01234567890123456789", &*a);
let mut b = t.subtendril(0, 10);
assert!(b.is_shared());
assert_eq!("0123456789", &*b);
b.push_tendril(&a);
assert!(b.is_shared());
assert!(a.is_shared());
assert!(a.is_shared_with(&b));
assert!(b.is_shared_with(&a));
assert_eq!("012345678901234567890123456789", &*b);
assert!(t.is_shared());
assert!(t.is_shared_with(&a));
assert!(t.is_shared_with(&b));
}
#[test]
fn merge_cant_share() {
let t = "012345678901234567890123456789".to_tendril();
let mut b = t.subtendril(0, 10);
assert!(b.is_shared());
assert_eq!("0123456789", &*b);
b.push_tendril(&"abcd".to_tendril());
assert!(!b.is_shared());
assert_eq!("0123456789abcd", &*b);
}
#[test]
fn shared_doesnt_reserve() {
let mut t = "012345678901234567890123456789".to_tendril();
let a = t.subtendril(1, 10);
assert!(t.is_shared());
t.reserve(10);
assert!(t.is_shared());
let _ = a;
}
#[test]
fn out_of_bounds() {
assert!("".to_tendril().try_subtendril(0, 1).is_err());
assert!("abc".to_tendril().try_subtendril(0, 4).is_err());
assert!("abc".to_tendril().try_subtendril(3, 1).is_err());
assert!("abc".to_tendril().try_subtendril(7, 1).is_err());
let mut t = "".to_tendril();
assert!(t.try_pop_front(1).is_err());
assert!(t.try_pop_front(5).is_err());
assert!(t.try_pop_front(500).is_err());
assert!(t.try_pop_back(1).is_err());
assert!(t.try_pop_back(5).is_err());
assert!(t.try_pop_back(500).is_err());
let mut t = "abcd".to_tendril();
assert!(t.try_pop_front(1).is_ok());
assert!(t.try_pop_front(4).is_err());
assert!(t.try_pop_front(500).is_err());
assert!(t.try_pop_back(1).is_ok());
assert!(t.try_pop_back(3).is_err());
assert!(t.try_pop_back(500).is_err());
}
#[test]
fn compare() {
for &a in &["indiscretions", "validity", "hallucinogenics", "timelessness",
"original", "microcosms", "boilers", "mammoth"] {
for &b in &["intrepidly", "frigid", "spa", "cardigans",
"guileful", "evaporated", "unenthusiastic", "legitimate"] {
let ta = a.to_tendril();
let tb = b.to_tendril();
assert_eq!(a.eq(b), ta.eq(&tb));
assert_eq!(a.ne(b), ta.ne(&tb));
assert_eq!(a.lt(b), ta.lt(&tb));
assert_eq!(a.le(b), ta.le(&tb));
assert_eq!(a.gt(b), ta.gt(&tb));
assert_eq!(a.ge(b), ta.ge(&tb));
assert_eq!(a.partial_cmp(b), ta.partial_cmp(&tb));
assert_eq!(a.cmp(b), ta.cmp(&tb));
}
}
}
#[test]
fn extend() {
let mut t = "Hello".to_tendril();
t.extend(None.into_iter());
assert_eq!("Hello", &*t);
t.extend(&[", ".to_tendril(), "world".to_tendril(), "!".to_tendril()]);
assert_eq!("Hello, world!", &*t);
}
#[test]
fn from_str() {
use std::str::FromStr;
let t: Tendril<_> = FromStr::from_str("foo bar baz").unwrap();
assert_eq!("foo bar baz", &*t);
}
#[test]
fn from_char() {
assert_eq!("o", &*StrTendril::from_char('o'));
assert_eq!("ő", &*StrTendril::from_char('ő'));
assert_eq!("\u{a66e}", &*StrTendril::from_char('\u{a66e}'));
assert_eq!("\u{1f4a9}", &*StrTendril::from_char('\u{1f4a9}'));
}
}
|
use std::mem;
use std::marker::PhantomData;
use gl;
use gl::types::*;
use context::Context;
use image_data::Image2d;
use types::GLError;
/// An owned handle to an OpenGL texture object.
///
/// `PhantomData<*mut T>` records the texture-kind parameter without storing
/// it and, being a raw pointer, keeps the handle `!Send`/`!Sync`.
pub struct Texture<T: TextureType> {
// Object name returned by `glGenTextures`.
gl_id: GLuint,
phantom: PhantomData<*mut T>
}
/// Handle to a `GL_TEXTURE_2D` texture.
pub type Texture2d = Texture<Tx2d>;
/// Handle to a `GL_TEXTURE_CUBE_MAP` texture.
pub type TextureCubeMap = Texture<TxCubeMap>;
impl<T: TextureType> Texture<T> {
/// The raw GL object name for this texture.
pub fn gl_id(&self) -> GLuint {
self.gl_id
}
}
impl<T: TextureType> Drop for Texture<T> {
fn drop(&mut self) {
// Release the GL object with the handle. NOTE(review): assumes a
// current GL context on this thread at drop time.
unsafe {
gl::DeleteTextures(1, &self.gl_id as *const GLuint);
}
}
}
impl Context {
/// Create a new, empty texture object via `glGenTextures`.
pub fn gen_texture<T: TextureType>(&self) -> Texture<T> {
unsafe {
let mut id : GLuint = 0;
gl::GenTextures(1, &mut id as *mut GLuint);
// Debug-build GL error check; `n` is the count argument (1 here).
dbg_gl_sanity_check! {
GLError::InvalidValue => "`n` is negative",
_ => "Unknown error"
}
Texture {
gl_id: id,
phantom: PhantomData
}
}
}
}
/// A value naming a GL image-upload target (e.g. a cube-map face).
pub trait ImageTargetType {
/// The raw GL enum for this image target.
fn gl_enum(&self) -> GLenum;
}
/// Compile-time description of a texture kind (2D, cube map, ...).
pub trait TextureType {
/// Image targets accepted when uploading to this texture kind.
type ImageTargetType: ImageTargetType;
/// The `glBindTexture` target for this kind.
fn target() -> TextureBindingTarget;
}
/// Marker type for 2D textures.
pub struct Tx2d;
gl_enum! {
// The single image target for a 2D texture.
pub gl_enum Tx2dImageTarget {
Texture2d as TEXTURE_2D_TARGET = gl::TEXTURE_2D
}
}
impl ImageTargetType for Tx2dImageTarget {
fn gl_enum(&self) -> GLenum {
// Delegates to the inherent `gl_enum` generated by `gl_enum!`;
// inherent methods win over trait methods in resolution, so this
// is not self-recursion. NOTE(review): confirm the macro does
// generate an inherent `gl_enum()`.
self.gl_enum()
}
}
impl TextureType for Tx2d {
type ImageTargetType = Tx2dImageTarget;
// 2D textures bind to `GL_TEXTURE_2D`.
fn target() -> TextureBindingTarget {
TextureBindingTarget::Texture2d
}
}
/// Marker type for cube-map textures.
pub struct TxCubeMap;
gl_enum! {
// One image target per cube-map face.
pub gl_enum TxCubeMapImageTarget {
CubeMapPositiveX as TEXTURE_CUBE_MAP_POSITIVE_X =
gl::TEXTURE_CUBE_MAP_POSITIVE_X,
CubeMapNegativeX as TEXTURE_CUBE_MAP_NEGATIVE_X =
gl::TEXTURE_CUBE_MAP_NEGATIVE_X,
CubeMapPositiveY as TEXTURE_CUBE_MAP_POSITIVE_Y =
gl::TEXTURE_CUBE_MAP_POSITIVE_Y,
CubeMapNegativeY as TEXTURE_CUBE_MAP_NEGATIVE_Y =
gl::TEXTURE_CUBE_MAP_NEGATIVE_Y,
CubeMapPositiveZ as TEXTURE_CUBE_MAP_POSITIVE_Z =
gl::TEXTURE_CUBE_MAP_POSITIVE_Z,
CubeMapNegativeZ as TEXTURE_CUBE_MAP_NEGATIVE_Z =
gl::TEXTURE_CUBE_MAP_NEGATIVE_Z
}
}
impl ImageTargetType for TxCubeMapImageTarget {
fn gl_enum(&self) -> GLenum {
// Delegates to the inherent `gl_enum` generated by `gl_enum!`
// (inherent methods shadow the trait method; not recursion).
self.gl_enum()
}
}
impl TextureType for TxCubeMap {
type ImageTargetType = TxCubeMapImageTarget;
// Cube maps bind to `GL_TEXTURE_CUBE_MAP`.
fn target() -> TextureBindingTarget {
TextureBindingTarget::TextureCubeMap
}
}
gl_enum! {
// Targets accepted by `glBindTexture`.
pub gl_enum TextureBindingTarget {
Texture2d as TEXTURE_2D = gl::TEXTURE_2D,
TextureCubeMap as TEXTURE_CUBE_MAP = gl::TEXTURE_CUBE_MAP
}
}
// TODO: Use type refinements someday...
/// A plain sampling filter (no mipmap-level selection).
#[derive(Debug, Clone, Copy)]
pub enum TextureFilter {
Nearest,
Linear
}
/// A minification filter: either a plain filter, or a pair where
/// `criterion` filters within a mipmap level and `mipmap` selects/blends
/// between levels.
#[derive(Debug, Clone, Copy)]
pub enum TextureMipmapFilter {
Filter(TextureFilter),
MipmapFilter { criterion: TextureFilter, mipmap: TextureFilter }
}
/// `GL_NEAREST`.
pub const NEAREST : TextureFilter = TextureFilter::Nearest;
/// `GL_LINEAR`.
pub const LINEAR : TextureFilter = TextureFilter::Linear;
/// `GL_NEAREST_MIPMAP_NEAREST`.
pub const NEAREST_MIPMAP_NEAREST : TextureMipmapFilter =
TextureMipmapFilter::MipmapFilter {
criterion: TextureFilter::Nearest,
mipmap: TextureFilter::Nearest
};
/// `GL_LINEAR_MIPMAP_NEAREST`.
pub const LINEAR_MIPMAP_NEAREST : TextureMipmapFilter =
TextureMipmapFilter::MipmapFilter {
criterion: TextureFilter::Linear,
mipmap: TextureFilter::Nearest
};
/// `GL_NEAREST_MIPMAP_LINEAR`.
pub const NEAREST_MIPMAP_LINEAR : TextureMipmapFilter =
TextureMipmapFilter::MipmapFilter {
criterion: TextureFilter::Nearest,
mipmap: TextureFilter::Linear
};
/// `GL_LINEAR_MIPMAP_LINEAR`.
pub const LINEAR_MIPMAP_LINEAR : TextureMipmapFilter =
TextureMipmapFilter::MipmapFilter {
criterion: TextureFilter::Linear,
mipmap: TextureFilter::Linear
};
#[allow(dead_code)]
impl TextureFilter {
fn from_gl(gl_enum: GLenum) -> Result<Self, ()> {
match gl_enum {
gl::NEAREST => { Ok(self::NEAREST) },
gl::LINEAR => { Ok(self::LINEAR) },
_ => { Err(()) }
}
}
fn gl_enum(&self) -> GLenum {
match *self {
self::NEAREST => gl::NEAREST,
self::LINEAR => gl::LINEAR
}
}
}
#[allow(dead_code)]
impl TextureMipmapFilter {
fn from_gl(gl_enum: GLenum) -> Result<Self, ()> {
match gl_enum {
gl::NEAREST => { Ok(TextureMipmapFilter::Filter(self::NEAREST)) },
gl::LINEAR => { Ok(TextureMipmapFilter::Filter(self::LINEAR)) },
gl::NEAREST_MIPMAP_NEAREST => { Ok(self::NEAREST_MIPMAP_NEAREST) },
gl::LINEAR_MIPMAP_NEAREST => { Ok(self::LINEAR_MIPMAP_NEAREST) },
gl::NEAREST_MIPMAP_LINEAR => { Ok(self::NEAREST_MIPMAP_LINEAR) },
gl::LINEAR_MIPMAP_LINEAR => { Ok(self::LINEAR_MIPMAP_LINEAR) },
_ => { Err(()) }
}
}
fn gl_enum(&self) -> GLenum {
match *self {
TextureMipmapFilter::Filter(self::LINEAR) => { gl::LINEAR },
TextureMipmapFilter::Filter(self::NEAREST) => { gl::NEAREST },
self::NEAREST_MIPMAP_NEAREST => { gl::NEAREST_MIPMAP_NEAREST },
self::LINEAR_MIPMAP_NEAREST => { gl::LINEAR_MIPMAP_NEAREST },
self::NEAREST_MIPMAP_LINEAR => { gl::NEAREST_MIPMAP_LINEAR },
self::LINEAR_MIPMAP_LINEAR => { gl::LINEAR_MIPMAP_LINEAR }
}
}
}
impl From<TextureFilter> for TextureMipmapFilter {
/// A plain filter is a valid mipmap filter (no inter-level blending).
fn from(filter: TextureFilter) -> TextureMipmapFilter {
TextureMipmapFilter::Filter(filter)
}
}
gl_enum! {
// Wrap modes accepted for `GL_TEXTURE_WRAP_S`/`GL_TEXTURE_WRAP_T`.
pub gl_enum TextureWrapMode {
ClampToEdge as CLAMP_TO_EDGE = gl::CLAMP_TO_EDGE,
MirroredRepeat as MIRRORED_REPEAT = gl::MIRRORED_REPEAT,
Repeat as REPEAT = gl::REPEAT
}
}
/// Thin wrapper over `glTexParameteriv` with debug-build error checking.
///
/// # Safety
/// `pname` and `params` must form a valid parameter set for `target`,
/// and a GL context must be current.
unsafe fn _tex_parameter_iv(target: TextureBindingTarget,
pname: GLenum,
params: *const GLint)
{
gl::TexParameteriv(target.gl_enum(), pname, params);
dbg_gl_sanity_check! {
GLError::InvalidEnum => "`target` or `pname` is not an accepted defined value, or `params` should have defined a symbolic constant and does not",
_ => "Unknown error"
}
}
/// Operations available while a texture is bound to the current unit.
///
/// Implemented by binding proof objects; every method acts on whatever
/// texture is *currently bound* to `Self::target()`, not on a stored id.
pub trait TextureBinding {
type TextureType: TextureType;
/// The GL binding target these calls apply to.
fn target() -> TextureBindingTarget {
Self::TextureType::target()
}
/// Set `GL_TEXTURE_MIN_FILTER`; accepts plain or mipmap filters.
fn set_min_filter<F: Into<TextureMipmapFilter>>(&mut self, filter: F) {
let gl_int = filter.into().gl_enum() as GLint;
unsafe {
_tex_parameter_iv(Self::target(),
gl::TEXTURE_MIN_FILTER,
&gl_int as *const GLint);
}
}
/// Set `GL_TEXTURE_MAG_FILTER`; mipmap filters do not apply here.
fn set_mag_filter(&mut self, filter: TextureFilter) {
let gl_int = filter.gl_enum() as GLint;
unsafe {
_tex_parameter_iv(Self::target(),
gl::TEXTURE_MAG_FILTER,
&gl_int as *const GLint);
}
}
/// Set `GL_TEXTURE_WRAP_S` (s/horizontal wrap mode).
fn set_wrap_s(&mut self, wrap_mode: TextureWrapMode) {
let gl_int = wrap_mode.gl_enum() as GLint;
unsafe {
_tex_parameter_iv(Self::target(),
gl::TEXTURE_WRAP_S,
&gl_int as *const GLint);
}
}
/// Set `GL_TEXTURE_WRAP_T` (t/vertical wrap mode).
fn set_wrap_t(&mut self, wrap_mode: TextureWrapMode) {
let gl_int = wrap_mode.gl_enum() as GLint;
unsafe {
_tex_parameter_iv(Self::target(),
gl::TEXTURE_WRAP_T,
&gl_int as *const GLint);
}
}
/// Upload `img` to mipmap `level` of `target` via `glTexImage2D`.
///
/// NOTE(review): the parameter order (`level`, `target`, `img`) differs
/// from `glTexImage2D`'s (`target`, `level`, ...).
fn image_2d<I>(&mut self,
level: i32,
target: <Self::TextureType as TextureType>::ImageTargetType,
img: &I)
where I: Image2d
{
unsafe {
// Raw pointer to the texel bytes; GL only reads it during this call.
let ptr = mem::transmute(img.textel_bytes().as_ptr());
gl::TexImage2D(target.gl_enum(),
level as GLint,
img.format().textel_format.gl_enum() as GLint,
img.width() as i32,
img.height() as i32,
0,
img.format().textel_format.gl_enum(),
img.format().textel_type.gl_enum(),
ptr);
dbg_gl_error! {
GLError::InvalidEnum => "`target`, `format`, or `type` is not an accepted value",
GLError::InvalidValue => "`target`, `level`, `internalformat`, `width`, `height`, or `border` is an invalid value",
GLError::InvalidOperation => "`format` conflicts with either `internalformat` or `type`",
_ => "Unknown error"
}
}
}
}
/// Proof of an active `GL_TEXTURE_2D` binding; borrows the bound texture
/// mutably for its lifetime.
pub struct Texture2dBinding<'a> {
phantom: PhantomData<&'a mut Texture2d>
}
impl<'a> TextureBinding for Texture2dBinding<'a> {
type TextureType = Tx2d;
}
/// Proof of an active `GL_TEXTURE_CUBE_MAP` binding; borrows the bound
/// texture mutably for its lifetime.
pub struct TextureCubeMapBinding<'a> {
phantom: PhantomData<&'a mut TextureCubeMap>
}
impl<'a> TextureBinding for TextureCubeMapBinding<'a> {
type TextureType = TxCubeMap;
}
/// Bind `texture` to its kind's target (`glBindTexture`) with debug-build
/// error checking.
unsafe fn _bind_texture<T: TextureType>(texture: &mut Texture<T>) {
gl::BindTexture(T::target().gl_enum(), texture.gl_id());
dbg_gl_error! {
GLError::InvalidEnum => "`target` is not one of the allowed values",
GLError::InvalidOperation => "`texture` was created with a target that doesn't match `target`",
_ => "Unknown error"
}
}
/// Capability object that can bind 2D textures; returns a binding proof
/// tied to the borrowed texture.
pub struct Texture2dBinder;
impl Texture2dBinder {
pub fn bind<'a>(&'a mut self, texture: &mut Texture2d)
-> Texture2dBinding<'a>
{
unsafe {
_bind_texture(texture);
}
Texture2dBinding { phantom: PhantomData }
}
}
/// Capability object that can bind cube-map textures; returns a binding
/// proof tied to the borrowed texture.
pub struct TextureCubeMapBinder;
impl TextureCubeMapBinder {
pub fn bind<'a>(&'a mut self, texture: &mut TextureCubeMap)
-> TextureCubeMapBinding<'a>
{
unsafe {
_bind_texture(texture);
}
TextureCubeMapBinding { phantom: PhantomData }
}
}
Tweak params for `TextureBinding.image_2d()`
The param order now lines up with the equivalent OpenGL function (`glTexImage2D`); `level` is now unsigned to better match what's expected from OpenGL (non-negative values)
use std::mem;
use std::marker::PhantomData;
use gl;
use gl::types::*;
use context::Context;
use image_data::Image2d;
use types::GLError;
/// An owned handle to an OpenGL texture object (name from `glGenTextures`).
pub struct Texture<T: TextureType> {
gl_id: GLuint,
// Raw-pointer PhantomData: records `T` and keeps the handle !Send/!Sync.
phantom: PhantomData<*mut T>
}
/// Handle to a `GL_TEXTURE_2D` texture.
pub type Texture2d = Texture<Tx2d>;
/// Handle to a `GL_TEXTURE_CUBE_MAP` texture.
pub type TextureCubeMap = Texture<TxCubeMap>;
impl<T: TextureType> Texture<T> {
/// The raw GL object name for this texture.
pub fn gl_id(&self) -> GLuint {
self.gl_id
}
}
impl<T: TextureType> Drop for Texture<T> {
fn drop(&mut self) {
// NOTE(review): assumes a current GL context at drop time.
unsafe {
gl::DeleteTextures(1, &self.gl_id as *const GLuint);
}
}
}
impl Context {
/// Create a new, empty texture object via `glGenTextures`.
pub fn gen_texture<T: TextureType>(&self) -> Texture<T> {
unsafe {
let mut id : GLuint = 0;
gl::GenTextures(1, &mut id as *mut GLuint);
// Debug-build GL error check; `n` is the count argument (1).
dbg_gl_sanity_check! {
GLError::InvalidValue => "`n` is negative",
_ => "Unknown error"
}
Texture {
gl_id: id,
phantom: PhantomData
}
}
}
}
/// A value naming a GL image-upload target (e.g. a cube-map face).
pub trait ImageTargetType {
fn gl_enum(&self) -> GLenum;
}
/// Compile-time description of a texture kind (2D, cube map, ...).
pub trait TextureType {
type ImageTargetType: ImageTargetType;
/// The `glBindTexture` target for this kind.
fn target() -> TextureBindingTarget;
}
/// Marker type for the `TEXTURE_2D` texture kind.
pub struct Tx2d;

gl_enum! {
    // A 2D texture has exactly one image target: TEXTURE_2D itself.
    pub gl_enum Tx2dImageTarget {
        Texture2d as TEXTURE_2D_TARGET = gl::TEXTURE_2D
    }
}

impl ImageTargetType for Tx2dImageTarget {
    fn gl_enum(&self) -> GLenum {
        // Not self-recursive: inherent methods take precedence over trait
        // methods, so this resolves to the accessor generated by
        // `gl_enum!` — assumes the macro emits an inherent `gl_enum`;
        // TODO(review) confirm against the macro definition.
        self.gl_enum()
    }
}
impl TextureType for Tx2d {
    type ImageTargetType = Tx2dImageTarget;

    /// 2D textures bind through the `TEXTURE_2D` target.
    fn target() -> TextureBindingTarget {
        TextureBindingTarget::Texture2d
    }
}
/// Marker type for the `TEXTURE_CUBE_MAP` texture kind.
pub struct TxCubeMap;

gl_enum! {
    // One image target per cube face (+/-X, +/-Y, +/-Z).
    pub gl_enum TxCubeMapImageTarget {
        CubeMapPositiveX as TEXTURE_CUBE_MAP_POSITIVE_X =
            gl::TEXTURE_CUBE_MAP_POSITIVE_X,
        CubeMapNegativeX as TEXTURE_CUBE_MAP_NEGATIVE_X =
            gl::TEXTURE_CUBE_MAP_NEGATIVE_X,
        CubeMapPositiveY as TEXTURE_CUBE_MAP_POSITIVE_Y =
            gl::TEXTURE_CUBE_MAP_POSITIVE_Y,
        CubeMapNegativeY as TEXTURE_CUBE_MAP_NEGATIVE_Y =
            gl::TEXTURE_CUBE_MAP_NEGATIVE_Y,
        CubeMapPositiveZ as TEXTURE_CUBE_MAP_POSITIVE_Z =
            gl::TEXTURE_CUBE_MAP_POSITIVE_Z,
        CubeMapNegativeZ as TEXTURE_CUBE_MAP_NEGATIVE_Z =
            gl::TEXTURE_CUBE_MAP_NEGATIVE_Z
    }
}

impl ImageTargetType for TxCubeMapImageTarget {
    fn gl_enum(&self) -> GLenum {
        // Not self-recursive: resolves to the inherent accessor generated
        // by `gl_enum!` (inherent methods win over trait methods).
        self.gl_enum()
    }
}
impl TextureType for TxCubeMap {
    type ImageTargetType = TxCubeMapImageTarget;

    /// Cube maps bind through the `TEXTURE_CUBE_MAP` target.
    fn target() -> TextureBindingTarget {
        TextureBindingTarget::TextureCubeMap
    }
}
gl_enum! {
    // Targets accepted by glBindTexture.
    pub gl_enum TextureBindingTarget {
        Texture2d as TEXTURE_2D = gl::TEXTURE_2D,
        TextureCubeMap as TEXTURE_CUBE_MAP = gl::TEXTURE_CUBE_MAP
    }
}
// TODO: Use type refinements someday...
/// Texel filter used when sampling within a single mip level.
#[derive(Debug, Clone, Copy)]
pub enum TextureFilter {
    Nearest,
    Linear
}

/// Minification filter: either a plain texel filter, or a texel filter
/// (`criterion`) combined with a filter for choosing between mip levels
/// (`mipmap`) — mirrors GL's `*_MIPMAP_*` constants.
#[derive(Debug, Clone, Copy)]
pub enum TextureMipmapFilter {
    Filter(TextureFilter),
    MipmapFilter { criterion: TextureFilter, mipmap: TextureFilter }
}
// GL-style shorthands. `X_MIPMAP_Y` means: filter texels with X, select
// between mip levels with Y (same naming as GL_*_MIPMAP_*).
pub const NEAREST : TextureFilter = TextureFilter::Nearest;
pub const LINEAR : TextureFilter = TextureFilter::Linear;
pub const NEAREST_MIPMAP_NEAREST : TextureMipmapFilter =
    TextureMipmapFilter::MipmapFilter {
        criterion: TextureFilter::Nearest,
        mipmap: TextureFilter::Nearest
    };
pub const LINEAR_MIPMAP_NEAREST : TextureMipmapFilter =
    TextureMipmapFilter::MipmapFilter {
        criterion: TextureFilter::Linear,
        mipmap: TextureFilter::Nearest
    };
pub const NEAREST_MIPMAP_LINEAR : TextureMipmapFilter =
    TextureMipmapFilter::MipmapFilter {
        criterion: TextureFilter::Nearest,
        mipmap: TextureFilter::Linear
    };
pub const LINEAR_MIPMAP_LINEAR : TextureMipmapFilter =
    TextureMipmapFilter::MipmapFilter {
        criterion: TextureFilter::Linear,
        mipmap: TextureFilter::Linear
    };
#[allow(dead_code)]
impl TextureFilter {
    /// Map a raw GL filter enum to a `TextureFilter`; `Err(())` when it
    /// is neither `NEAREST` nor `LINEAR`.
    fn from_gl(gl_enum: GLenum) -> Result<Self, ()> {
        match gl_enum {
            gl::NEAREST => { Ok(self::NEAREST) },
            gl::LINEAR => { Ok(self::LINEAR) },
            _ => { Err(()) }
        }
    }

    /// Raw GL enum value for this filter.
    fn gl_enum(&self) -> GLenum {
        // Matching against the constants above is exhaustive: they cover
        // both enum variants.
        match *self {
            self::NEAREST => gl::NEAREST,
            self::LINEAR => gl::LINEAR
        }
    }
}
#[allow(dead_code)]
impl TextureMipmapFilter {
    /// Map a raw GL minification-filter enum (plain or `*_MIPMAP_*`) to a
    /// `TextureMipmapFilter`; `Err(())` for any other value.
    fn from_gl(gl_enum: GLenum) -> Result<Self, ()> {
        match gl_enum {
            gl::NEAREST => { Ok(TextureMipmapFilter::Filter(self::NEAREST)) },
            gl::LINEAR => { Ok(TextureMipmapFilter::Filter(self::LINEAR)) },
            gl::NEAREST_MIPMAP_NEAREST => { Ok(self::NEAREST_MIPMAP_NEAREST) },
            gl::LINEAR_MIPMAP_NEAREST => { Ok(self::LINEAR_MIPMAP_NEAREST) },
            gl::NEAREST_MIPMAP_LINEAR => { Ok(self::NEAREST_MIPMAP_LINEAR) },
            gl::LINEAR_MIPMAP_LINEAR => { Ok(self::LINEAR_MIPMAP_LINEAR) },
            _ => { Err(()) }
        }
    }

    /// Raw GL enum value for this minification filter.
    fn gl_enum(&self) -> GLenum {
        // The const patterns cover all four MipmapFilter combinations plus
        // both plain Filter variants, so the match is exhaustive.
        match *self {
            TextureMipmapFilter::Filter(self::LINEAR) => { gl::LINEAR },
            TextureMipmapFilter::Filter(self::NEAREST) => { gl::NEAREST },
            self::NEAREST_MIPMAP_NEAREST => { gl::NEAREST_MIPMAP_NEAREST },
            self::LINEAR_MIPMAP_NEAREST => { gl::LINEAR_MIPMAP_NEAREST },
            self::NEAREST_MIPMAP_LINEAR => { gl::NEAREST_MIPMAP_LINEAR },
            self::LINEAR_MIPMAP_LINEAR => { gl::LINEAR_MIPMAP_LINEAR }
        }
    }
}
impl From<TextureFilter> for TextureMipmapFilter {
    /// Lift a plain texel filter into a mipmap-less minification filter.
    fn from(f: TextureFilter) -> TextureMipmapFilter {
        TextureMipmapFilter::Filter(f)
    }
}
gl_enum! {
    // Texture coordinate wrap modes (glTexParameter TEXTURE_WRAP_S/T).
    pub gl_enum TextureWrapMode {
        ClampToEdge as CLAMP_TO_EDGE = gl::CLAMP_TO_EDGE,
        MirroredRepeat as MIRRORED_REPEAT = gl::MIRRORED_REPEAT,
        Repeat as REPEAT = gl::REPEAT
    }
}
/// Thin wrapper over `glTexParameteriv`, applied to whatever texture is
/// currently bound to `target`.
///
/// Unsafe: `params` must point to a value valid for `pname`, and a GL
/// context must be current.
unsafe fn _tex_parameter_iv(target: TextureBindingTarget,
                            pname: GLenum,
                            params: *const GLint)
{
    gl::TexParameteriv(target.gl_enum(), pname, params);
    dbg_gl_sanity_check! {
        GLError::InvalidEnum => "`target` or `pname` is not an accepted defined value, or `params` should have defined a symbolic constant and does not",
        _ => "Unknown error"
    }
}
/// Operations available while a texture is bound.
///
/// Implemented by the `*Binding` guard types; every method acts on the
/// texture currently bound to `Self::target()`.
pub trait TextureBinding {
    type TextureType: TextureType;

    /// Binding target shared by all bindings of this texture kind.
    fn target() -> TextureBindingTarget {
        Self::TextureType::target()
    }

    /// Set the minification filter (`TEXTURE_MIN_FILTER`). Accepts either
    /// a plain `TextureFilter` or a mipmap-aware filter via `Into`.
    fn set_min_filter<F: Into<TextureMipmapFilter>>(&mut self, filter: F) {
        let gl_int = filter.into().gl_enum() as GLint;
        unsafe {
            _tex_parameter_iv(Self::target(),
                              gl::TEXTURE_MIN_FILTER,
                              &gl_int as *const GLint);
        }
    }

    /// Set the magnification filter (`TEXTURE_MAG_FILTER`); mipmap
    /// filters do not apply to magnification.
    fn set_mag_filter(&mut self, filter: TextureFilter) {
        let gl_int = filter.gl_enum() as GLint;
        unsafe {
            _tex_parameter_iv(Self::target(),
                              gl::TEXTURE_MAG_FILTER,
                              &gl_int as *const GLint);
        }
    }

    /// Set the wrap mode for the `s` (horizontal) texture coordinate.
    fn set_wrap_s(&mut self, wrap_mode: TextureWrapMode) {
        let gl_int = wrap_mode.gl_enum() as GLint;
        unsafe {
            _tex_parameter_iv(Self::target(),
                              gl::TEXTURE_WRAP_S,
                              &gl_int as *const GLint);
        }
    }

    /// Set the wrap mode for the `t` (vertical) texture coordinate.
    fn set_wrap_t(&mut self, wrap_mode: TextureWrapMode) {
        let gl_int = wrap_mode.gl_enum() as GLint;
        unsafe {
            _tex_parameter_iv(Self::target(),
                              gl::TEXTURE_WRAP_T,
                              &gl_int as *const GLint);
        }
    }

    /// Upload `img` to mip level `level` of the given image target
    /// (`glTexImage2D`). Parameter order intentionally mirrors the GL
    /// function; `level` is unsigned because GL requires it non-negative.
    fn image_2d<I>(&mut self,
                   target: <Self::TextureType as TextureType>::ImageTargetType,
                   level: u32,
                   img: &I)
        where I: Image2d
    {
        unsafe {
            // Reinterpret the texel byte pointer as GL's untyped pointer.
            let ptr = mem::transmute(img.textel_bytes().as_ptr());
            gl::TexImage2D(target.gl_enum(),
                           level as GLint,
                           img.format().textel_format.gl_enum() as GLint,
                           img.width() as i32,
                           img.height() as i32,
                           0,
                           img.format().textel_format.gl_enum(),
                           img.format().textel_type.gl_enum(),
                           ptr);
            dbg_gl_error! {
                GLError::InvalidEnum => "`target`, `format`, or `type` is not an accepted value",
                GLError::InvalidValue => "`target`, `level`, `internalformat`, `width`, `height`, or `border` is an invalid value",
                GLError::InvalidOperation => "`format` conflicts with either `internalformat` or `type`",
                _ => "Unknown error"
            }
        }
    }
}
/// Guard representing an active `TEXTURE_2D` binding; mutably borrows the
/// bound texture for its whole lifetime.
pub struct Texture2dBinding<'a> {
    phantom: PhantomData<&'a mut Texture2d>
}

impl<'a> TextureBinding for Texture2dBinding<'a> {
    type TextureType = Tx2d;
}
/// Guard representing an active `TEXTURE_CUBE_MAP` binding; mutably
/// borrows the bound texture for its whole lifetime.
pub struct TextureCubeMapBinding<'a> {
    phantom: PhantomData<&'a mut TextureCubeMap>
}

impl<'a> TextureBinding for TextureCubeMapBinding<'a> {
    type TextureType = TxCubeMap;
}
/// Bind `texture` to its type's binding target via `glBindTexture`.
///
/// Unsafe: requires a current GL context; callers are expected to go
/// through a `*Binder` so the active binding is tracked by the type system.
unsafe fn _bind_texture<T: TextureType>(texture: &mut Texture<T>) {
    gl::BindTexture(T::target().gl_enum(), texture.gl_id());
    dbg_gl_error! {
        GLError::InvalidEnum => "`target` is not one of the allowed values",
        GLError::InvalidOperation => "`texture` was created with a target that doesn't match `target`",
        _ => "Unknown error"
    }
}
/// Zero-sized token guarding exclusive access to the `TEXTURE_2D`
/// binding point.
pub struct Texture2dBinder;

impl Texture2dBinder {
    /// Bind `texture` to `TEXTURE_2D` and return a guard value that
    /// represents the binding while it is alive.
    pub fn bind<'a>(&'a mut self, texture: &mut Texture2d) -> Texture2dBinding<'a> {
        unsafe { _bind_texture(texture); }
        Texture2dBinding { phantom: PhantomData }
    }
}
/// Zero-sized token guarding exclusive access to the `TEXTURE_CUBE_MAP`
/// binding point.
pub struct TextureCubeMapBinder;

impl TextureCubeMapBinder {
    /// Bind `texture` to `TEXTURE_CUBE_MAP` and return a guard value that
    /// represents the binding while it is alive.
    pub fn bind<'a>(&'a mut self, texture: &mut TextureCubeMap) -> TextureCubeMapBinding<'a> {
        unsafe { _bind_texture(texture); }
        TextureCubeMapBinding { phantom: PhantomData }
    }
}
|
//! Implementation of a Micro Transport Protocol library.
//!
//! http://www.bittorrent.org/beps/bep_0029.html
//!
//! TODO
//! ----
//!
//! - congestion control
//! - proper connection closing
//! - automatically send FIN (or should it be RST?) on `drop` if not already closed
//! - setters and getters that hide header field endianness conversion
//! - SACK extension
//! - handle packet loss
#![crate_name = "utp"]
#![license = "MIT/ASL2"]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![feature(macro_rules, phase)]
#![deny(missing_doc)]
extern crate time;
#[phase(plugin, link)] extern crate log;
use std::io::net::udp::UdpSocket;
use std::io::net::ip::SocketAddr;
use std::io::IoResult;
use std::mem::transmute;
use std::rand::random;
use std::fmt;
// Fixed size of a uTP packet header (BEP 29), in bytes.
static HEADER_SIZE: uint = 20;

// For simplicity's sake, let us assume no packet will ever exceed the
// Ethernet maximum transfer unit of 1500 bytes.
static BUF_SIZE: uint = 1500;
// Fold the inclusive byte range `$src[$start..$end]` into an unsigned
// integer of type `$t`: the byte at offset `i` from `$start` is shifted
// left by `i*8`, so `$src[$start]` lands in the low-order byte. On a
// little-endian host this reproduces the wire bytes in memory unchanged,
// which is why decoded fields still go through `Int::from_be` on read.
macro_rules! u8_to_unsigned_be(
    ($src:ident[$start:expr..$end:expr] -> $t:ty) => ({
        let mut result: $t = 0;
        for i in range(0u, $end-$start+1).rev() {
            // `as` binds tighter than `<<`: each byte is widened to `$t`
            // before being shifted into place.
            result = result | $src[$start+i] as $t << i*8;
        }
        result
    })
)
/// Return current time in microseconds since the UNIX epoch, truncated
/// to 32 bits (timestamps are allowed to wrap in uTP).
fn now_microseconds() -> u32 {
    let now = time::get_time();
    let secs_part = (now.sec * 1_000_000) as u32;
    let micros_part = (now.nsec / 1000) as u32;
    secs_part + micros_part
}
// Packet types from the `type` field of a uTP header (BEP 29).
#[allow(dead_code,non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
enum UtpPacketType {
    ST_DATA = 0,  // regular data packet
    ST_FIN = 1,   // finalize the connection
    ST_STATE = 2, // state packet (ACK), carries no payload
    ST_RESET = 3, // forcibly terminate the connection
    ST_SYN = 4,   // initiate a connection
}
// Raw uTP packet header. All multi-byte fields are kept in network
// (big-endian) byte order; convert with `Int::from_be` when reading.
// `#[packed]` guarantees the struct is exactly HEADER_SIZE bytes so it
// can be reinterpreted as the wire representation (see `bytes`).
#[allow(dead_code)]
#[deriving(Clone)]
#[packed]
struct UtpPacketHeader {
    type_ver: u8, // type: u4, ver: u4
    extension: u8,
    connection_id: u16,
    timestamp_microseconds: u32,
    timestamp_difference_microseconds: u32,
    wnd_size: u32,
    seq_nr: u16,
    ack_nr: u16,
}
impl UtpPacketHeader {
    /// Set type of packet to the specified type.
    fn set_type(&mut self, t: UtpPacketType) {
        // Preserve the low (version) nibble, replace the high (type) nibble.
        let version = 0x0F & self.type_ver;
        self.type_ver = t as u8 << 4 | version;
    }

    /// Packet type, taken from the high nibble of `type_ver`.
    fn get_type(&self) -> UtpPacketType {
        // NOTE(review): the transmute assumes the nibble is a valid
        // discriminant (0..4); a corrupt packet would yield an invalid
        // enum value.
        let t: UtpPacketType = unsafe { transmute(self.type_ver >> 4) };
        t
    }

    /// Protocol version, taken from the low nibble of `type_ver`.
    fn get_version(&self) -> u8 {
        self.type_ver & 0x0F
    }

    /// Return a copy of this header with `wnd_size` replaced (stored in
    /// big-endian like every other field).
    fn wnd_size(&self, new_wnd_size: u32) -> UtpPacketHeader {
        UtpPacketHeader {
            wnd_size: new_wnd_size.to_be(),
            .. self.clone()
        }
    }

    /// Return packet header as a slice of bytes.
    fn bytes(&self) -> &[u8] {
        // Sound only because the struct is #[packed] and exactly
        // HEADER_SIZE bytes, already in wire order.
        let buf: &[u8, ..HEADER_SIZE] = unsafe { transmute(self) };
        return buf.as_slice();
    }

    /// Length of the header in bytes — always HEADER_SIZE.
    fn len(&self) -> uint {
        return HEADER_SIZE;
    }

    /// Read byte buffer and return corresponding packet header.
    /// It assumes the fields are in network (big-endian) byte order,
    /// preserving it.
    fn decode(buf: &[u8]) -> UtpPacketHeader {
        UtpPacketHeader {
            type_ver: buf[0],
            extension: buf[1],
            connection_id: u8_to_unsigned_be!(buf[2..3] -> u16),
            timestamp_microseconds: u8_to_unsigned_be!(buf[4..7] -> u32),
            timestamp_difference_microseconds: u8_to_unsigned_be!(buf[8..11] -> u32),
            wnd_size: u8_to_unsigned_be!(buf[12..15] -> u32),
            seq_nr: u8_to_unsigned_be!(buf[16..17] -> u16),
            ack_nr: u8_to_unsigned_be!(buf[18..19] -> u16),
        }
    }
}
impl fmt::Show for UtpPacketHeader {
    /// Human-readable dump of every header field, converting the
    /// big-endian (network order) multi-byte fields to host order.
    ///
    /// `get_type()` and `get_version()` already return decoded values,
    /// and `extension` is a single byte, so none of them need a byte-order
    /// conversion — the previous `Int::from_be` on those u8 values was a
    /// no-op that wrongly suggested a multi-byte swap.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(type: {}, version: {}, extension: {}, \
                connection_id: {}, timestamp_microseconds: {}, \
                timestamp_difference_microseconds: {}, wnd_size: {}, \
                seq_nr: {}, ack_nr: {})",
                self.get_type(),
                self.get_version(),
                self.extension,
                Int::from_be(self.connection_id),
                Int::from_be(self.timestamp_microseconds),
                Int::from_be(self.timestamp_difference_microseconds),
                Int::from_be(self.wnd_size),
                Int::from_be(self.seq_nr),
                Int::from_be(self.ack_nr),
        )
    }
}
// A complete uTP packet: fixed-size header plus (possibly empty) payload.
#[allow(dead_code)]
struct UtpPacket {
    header: UtpPacketHeader,
    payload: Vec<u8>,
}
impl UtpPacket {
    /// Construct a new, empty packet.
    fn new() -> UtpPacket {
        UtpPacket {
            header: UtpPacketHeader {
                // High nibble: ST_DATA (0); low nibble: protocol version 1.
                type_ver: ST_DATA as u8 << 4 | 1,
                extension: 0,
                connection_id: 0,
                timestamp_microseconds: 0,
                timestamp_difference_microseconds: 0,
                wnd_size: 0,
                seq_nr: 0,
                ack_nr: 0,
            },
            payload: Vec::new(),
        }
    }

    /// Set the packet's type nibble.
    fn set_type(&mut self, t: UtpPacketType) {
        self.header.set_type(t);
    }

    // TODO: Read up on pointers and ownership
    /// Packet type decoded from the header.
    fn get_type(&self) -> UtpPacketType {
        self.header.get_type()
    }

    /// Return a copy of this packet with the advertised window replaced.
    fn wnd_size(&self, new_wnd_size: u32) -> UtpPacket {
        UtpPacket {
            header: self.header.wnd_size(new_wnd_size),
            payload: self.payload.clone(),
        }
    }

    /// Serialize to wire format: header bytes followed by the payload.
    /// TODO: return slice
    fn bytes(&self) -> Vec<u8> {
        let mut buf = Vec::with_capacity(self.len());
        buf.push_all(self.header.bytes());
        buf.push_all(self.payload.as_slice());
        return buf;
    }

    /// Total length on the wire: header plus payload.
    fn len(&self) -> uint {
        self.header.len() + self.payload.len()
    }

    /// Decode a byte slice and construct the equivalent UtpPacket.
    ///
    /// Note that this method makes no attempt to guess the payload size, saving
    /// all except the initial 20 bytes corresponding to the header as payload.
    /// It's the caller's responsibility to use an appropriately sized buffer.
    fn decode(buf: &[u8]) -> UtpPacket {
        UtpPacket {
            header: UtpPacketHeader::decode(buf),
            payload: Vec::from_slice(buf.slice(HEADER_SIZE, buf.len()))
        }
    }
}
impl Clone for UtpPacket {
    // Manual impl: the header is copied by value (plain-old-data struct);
    // the payload needs an explicit Vec clone.
    fn clone(&self) -> UtpPacket {
        UtpPacket {
            header: self.header,
            payload: self.payload.clone(),
        }
    }
}
impl fmt::Show for UtpPacket {
    /// Display only the header; the payload bytes are omitted.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.header.fmt(f)
    }
}
// Connection state machine for a UtpSocket.
#[allow(non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
enum UtpSocketState {
    CS_NEW,          // socket created, no handshake yet
    CS_CONNECTED,    // handshake completed
    CS_SYN_SENT,     // SYN sent, waiting for the peer's STATE reply
    CS_FIN_RECEIVED, // peer sent FIN; packets may still be in flight
    CS_FIN_SENT,     // we sent FIN, waiting for acknowledgement
    CS_RST_RECEIVED, // peer aborted the connection
    CS_CLOSED,       // connection fully closed
    CS_EOF,          // FIN processed and buffered data drained
}
/// A uTP (Micro Transport Protocol) socket.
pub struct UtpSocket {
    socket: UdpSocket,           // underlying UDP transport
    connected_to: SocketAddr,    // current peer address
    sender_connection_id: u16,   // id stamped on outgoing packets
    receiver_connection_id: u16, // id expected on incoming packets
    seq_nr: u16,                 // next sequence number to send (host order)
    ack_nr: u16,                 // last in-order seq_nr received (host order)
    state: UtpSocketState,
    // Received but not acknowledged packets
    incoming_buffer: Vec<UtpPacket>,
    // Sent but not yet acknowledged packets
    send_buffer: Vec<UtpPacket>,
    duplicate_ack_count: uint,   // consecutive duplicate ACKs (fast resend at 3)
    last_acked: u16,             // highest ack_nr seen from the peer
    rtt: int,                    // smoothed round-trip time estimate (ms)
    rtt_variance: int,           // RTT variance estimate (ms)
    timeout: int,                // current receive timeout (ms)
}
// Send an ST_STATE (ACK) reply for `$header` back to `$src`.
// NOTE(review): the expansion references `self` and uses `try!`, so this
// macro can only be invoked inside `UtpSocket` methods that return
// `IoResult` — confirm it is still used that way.
macro_rules! reply_with_ack(
    ($header:expr, $src:expr) => ({
        let resp = self.prepare_reply($header, ST_STATE).wnd_size(BUF_SIZE as u32);
        try!(self.socket.send_to(resp.bytes().as_slice(), $src));
        debug!("sent {}", resp.header);
    })
)
impl UtpSocket {
    /// Create a UTP socket from the given address.
    ///
    /// Picks a random receiver connection id; the sender id is always
    /// receiver id + 1 (uTP convention).
    #[unstable]
    pub fn bind(addr: SocketAddr) -> IoResult<UtpSocket> {
        let skt = UdpSocket::bind(addr);
        // NOTE(review): `connection_id + 1` wraps at u16::MAX (pre-1.0
        // integers wrap silently), making sender id 0 — harmless but odd.
        let connection_id = random::<u16>();
        match skt {
            Ok(x) => Ok(UtpSocket {
                socket: x,
                connected_to: addr,
                receiver_connection_id: connection_id,
                sender_connection_id: connection_id + 1,
                seq_nr: 1,
                ack_nr: 0,
                state: CS_NEW,
                incoming_buffer: Vec::new(),
                send_buffer: Vec::new(),
                duplicate_ack_count: 0,
                last_acked: 0,
                rtt: 0,
                rtt_variance: 0,
                timeout: 1000,
            }),
            Err(e) => Err(e)
        }
    }

    /// Open a uTP connection to a remote host by hostname or IP address.
    ///
    /// Performs the SYN -> STATE handshake, consuming `self` and
    /// returning the connected socket on success.
    #[unstable]
    pub fn connect(mut self, other: SocketAddr) -> IoResult<UtpSocket> {
        use std::io::{IoError, ConnectionFailed};
        self.connected_to = other;
        assert_eq!(self.receiver_connection_id + 1, self.sender_connection_id);

        // Build the SYN packet; a SYN carries the receiver connection id.
        let mut packet = UtpPacket::new();
        packet.set_type(ST_SYN);
        packet.header.connection_id = self.receiver_connection_id.to_be();
        packet.header.seq_nr = self.seq_nr.to_be();
        packet.header.timestamp_microseconds = now_microseconds().to_be();

        // Send packet
        let dst = self.connected_to;
        let _result = self.socket.send_to(packet.bytes().as_slice(), dst);
        debug!("sent {}", packet.header);
        self.state = CS_SYN_SENT;

        // Wait for the peer's STATE reply.
        // NOTE(review): `fail!` on any receive error (including timeouts)
        // aborts the task instead of returning an IoResult error.
        let mut buf = [0, ..BUF_SIZE];
        let (_len, addr) = match self.socket.recv_from(buf) {
            Ok(v) => v,
            Err(e) => fail!("{}", e),
        };
        assert!(_len == HEADER_SIZE);
        assert!(addr == self.connected_to);

        let packet = UtpPacket::decode(buf.slice_to(_len));
        if packet.get_type() != ST_STATE {
            return Err(IoError {
                kind: ConnectionFailed,
                desc: "The remote peer sent an incorrect reply",
                detail: None,
            });
        }

        // Adopt the peer's sequence number as our acknowledgement base.
        self.ack_nr = Int::from_be(packet.header.seq_nr);
        debug!("connected to: {} {}", addr, self.connected_to);
        self.state = CS_CONNECTED;
        self.seq_nr += 1;
        Ok(self)
    }

    /// Gracefully close connection to peer.
    ///
    /// This method allows both peers to receive all packets still in
    /// flight.
    #[unstable]
    pub fn close(&mut self) -> IoResult<()> {
        let mut packet = UtpPacket::new();
        packet.header.connection_id = self.sender_connection_id.to_be();
        packet.header.seq_nr = self.seq_nr.to_be();
        packet.header.ack_nr = self.ack_nr.to_be();
        packet.header.timestamp_microseconds = now_microseconds().to_be();
        packet.set_type(ST_FIN);

        // Send FIN
        let dst = self.connected_to;
        try!(self.socket.send_to(packet.bytes().as_slice(), dst));
        debug!("sent {}", packet);
        self.state = CS_FIN_SENT;

        // Receive the peer's acknowledgement of the FIN.
        // NOTE(review): decodes the whole buffer regardless of how many
        // bytes were actually received — only the header/type is used.
        let mut buf = [0u8, ..BUF_SIZE];
        try!(self.socket.recv_from(buf));
        let resp = UtpPacket::decode(buf);
        debug!("received {}", resp);
        assert!(resp.get_type() == ST_STATE);

        // Set socket state
        self.state = CS_CLOSED;
        Ok(())
    }

    /// Receive data from socket.
    ///
    /// On success, returns the number of bytes read and the sender's address.
    /// Returns CS_EOF after receiving a FIN packet when the remaining
    /// inflight packets are consumed. Subsequent calls return CS_CLOSED.
    #[unstable]
    pub fn recv_from(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
        use std::cmp::min;
        use std::io::{IoError, EndOfFile, Closed, TimedOut};

        // EOF is reported once; afterwards the socket reads as closed.
        if self.state == CS_EOF {
            self.state = CS_CLOSED;
            return Err(IoError {
                kind: EndOfFile,
                desc: "End of file reached",
                detail: None,
            });
        }
        if self.state == CS_CLOSED {
            return Err(IoError {
                kind: Closed,
                desc: "Connection closed",
                detail: None,
            });
        }

        let mut b = [0, ..BUF_SIZE + HEADER_SIZE];
        debug!("setting read timeout of {} ms", self.timeout);
        self.socket.set_read_timeout(Some(self.timeout as u64));
        let (read, src) = match self.socket.recv_from(b) {
            Err(ref e) if e.kind == TimedOut => {
                // Exponential backoff plus a fast-resend request; report
                // zero bytes so the caller retries.
                debug!("recv_from timed out");
                self.timeout = self.timeout * 2;
                self.send_fast_resend_request();
                return Ok((0, self.connected_to));
            },
            Ok(x) => x,
            Err(e) => return Err(e),
        };
        let packet = UtpPacket::decode(b.slice_to(read));
        debug!("received {}", packet.header);

        if packet.get_type() == ST_RESET {
            use std::io::{IoError, ConnectionReset};
            return Err(IoError {
                kind: ConnectionReset,
                desc: "Remote host aborted connection (incorrect connection id)",
                detail: None,
            });
        }

        // TODO: move this to handle_packet?
        if packet.get_type() == ST_SYN {
            self.connected_to = src;
        }

        // Check if the packet is out of order (that is, it's sequence number
        // does not immediately follow the ACK number)
        // NOTE(review): plain `<` comparison — sequence-number wraparound
        // at u16::MAX is not handled.
        if packet.get_type() != ST_STATE && packet.get_type() != ST_SYN
            && self.ack_nr + 1 < Int::from_be(packet.header.seq_nr) {
            debug!("current ack_nr ({}) is behind received packet seq_nr ({})",
                   self.ack_nr, Int::from_be(packet.header.seq_nr));

            // Add to buffer but do not acknowledge until all packets between
            // ack_nr + 1 and curr_packet.seq_nr - 1 are received
            self.insert_into_buffer(packet);
            return Ok((0, self.connected_to));
        }

        // Let the state machine decide whether a reply is needed.
        match self.handle_packet(packet.clone()) {
            Some(pkt) => {
                let pkt = pkt.wnd_size(BUF_SIZE as u32);
                try!(self.socket.send_to(pkt.bytes().as_slice(), src));
                debug!("sent {}", pkt.header);
            },
            None => {}
        };

        // Copy the payload into the caller's buffer.
        // NOTE(review): `read - HEADER_SIZE` underflows (uint) if a
        // datagram shorter than a header ever arrives.
        for i in range(0u, min(buf.len(), read - HEADER_SIZE)) {
            buf[i] = b[i + HEADER_SIZE];
        }

        // Empty buffer if possible
        // NOTE(review): `buf[read]` below can index past the caller's
        // buffer if it is too small for the buffered payloads.
        let mut read = read - HEADER_SIZE;
        while !self.incoming_buffer.is_empty() &&
            self.ack_nr + 1 == Int::from_be(self.incoming_buffer[0].header.seq_nr) {
            let packet = self.incoming_buffer.shift().unwrap();
            debug!("Removing packet from buffer: {}", packet);

            for i in range(0u, packet.payload.len()) {
                buf[read] = packet.payload[i];
                read += 1;
            }
            self.ack_nr = Int::from_be(packet.header.seq_nr);
        }

        Ok((read, src))
    }

    #[allow(missing_doc)]
    #[deprecated = "renamed to `recv_from`"]
    pub fn recvfrom(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
        self.recv_from(buf)
    }

    /// Build a reply of type `t` to `original`, filling in our connection
    /// id, sequence/ack numbers and the timestamp difference used by the
    /// peer for congestion control. All fields are stored big-endian.
    fn prepare_reply(&self, original: &UtpPacketHeader, t: UtpPacketType) -> UtpPacket {
        let mut resp = UtpPacket::new();
        resp.set_type(t);
        let self_t_micro: u32 = now_microseconds();
        let other_t_micro: u32 = Int::from_be(original.timestamp_microseconds);
        resp.header.timestamp_microseconds = self_t_micro.to_be();
        // Wrapping subtraction is fine: uTP timestamps are modular.
        resp.header.timestamp_difference_microseconds = (self_t_micro - other_t_micro).to_be();
        resp.header.connection_id = self.sender_connection_id.to_be();
        resp.header.seq_nr = self.seq_nr.to_be();
        resp.header.ack_nr = self.ack_nr.to_be();

        resp
    }

    /// Send data on socket to the given address. Returns nothing on success.
    //
    // # Implementation details
    //
    // This method inserts packets into the send buffer and keeps trying to
    // advance the send window until an ACK corresponding to the last packet is
    // received.
    //
    // Note that the buffer passed to `send_to` might exceed the maximum packet
    // size, which will result in the data being split over several packets.
    #[unstable]
    pub fn send_to(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
        use std::io::{IoError, Closed};

        if self.state == CS_CLOSED {
            return Err(IoError {
                kind: Closed,
                desc: "Connection closed",
                detail: None,
            });
        }

        // Split the data into MTU-sized chunks, one packet each.
        for chunk in buf.chunks(BUF_SIZE) {
            let mut packet = UtpPacket::new();
            packet.set_type(ST_DATA);
            packet.payload = Vec::from_slice(chunk);
            packet.header.timestamp_microseconds = now_microseconds().to_be();
            packet.header.seq_nr = self.seq_nr.to_be();
            packet.header.ack_nr = self.ack_nr.to_be();
            packet.header.connection_id = self.sender_connection_id.to_be();

            // Buffer a copy for possible retransmission before sending.
            debug!("Pushing packet into send buffer: {}", packet);
            self.send_buffer.push(packet.clone());
            try!(self.socket.send_to(packet.bytes().as_slice(), dst));
            self.seq_nr += 1;
        }

        // Consume acknowledgements until latest packet
        let mut buf = [0, ..BUF_SIZE];
        while self.last_acked < self.seq_nr - 1 {
            try!(self.recv_from(buf));
        }

        Ok(())
    }

    #[allow(missing_doc)]
    #[deprecated = "renamed to `send_to`"]
    pub fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
        self.send_to(buf, dst)
    }

    /// Send fast resend request.
    ///
    /// Sends three identical ACK/STATE packets to the remote host, signalling a
    /// fast resend request.
    fn send_fast_resend_request(&mut self) {
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_STATE);
        packet.header.ack_nr = self.ack_nr.to_be();
        packet.header.seq_nr = self.seq_nr.to_be();
        packet.header.connection_id = self.sender_connection_id.to_be();

        // NOTE(review): send errors are silently ignored here —
        // acceptable for a best-effort resend hint.
        for _ in range(0u, 3) {
            self.socket.send_to(packet.bytes().as_slice(), self.connected_to);
            debug!("sent {}", packet.header);
        }
    }

    /// Handle incoming packet, updating socket state accordingly.
    ///
    /// Returns appropriate reply packet, if needed.
    fn handle_packet(&mut self, packet: UtpPacket) -> Option<UtpPacket> {
        // Reset connection if connection id doesn't match and this isn't a SYN
        if packet.get_type() != ST_SYN &&
           !(Int::from_be(packet.header.connection_id) == self.sender_connection_id ||
           Int::from_be(packet.header.connection_id) == self.receiver_connection_id) {
            return Some(self.prepare_reply(&packet.header, ST_RESET));
        }

        // Acknowledge only if the packet strictly follows the previous one
        if self.ack_nr + 1 == Int::from_be(packet.header.seq_nr) {
            self.ack_nr = Int::from_be(packet.header.seq_nr);
        }

        match packet.header.get_type() {
            ST_SYN => { // Respond with an ACK and populate own fields
                // Update socket information for new connections
                self.ack_nr = Int::from_be(packet.header.seq_nr);
                self.seq_nr = random();
                self.receiver_connection_id = Int::from_be(packet.header.connection_id) + 1;
                self.sender_connection_id = Int::from_be(packet.header.connection_id);
                self.state = CS_CONNECTED;

                Some(self.prepare_reply(&packet.header, ST_STATE))
            }
            ST_DATA => Some(self.prepare_reply(&packet.header, ST_STATE)),
            ST_FIN => {
                // NOTE(review): CS_FIN_RECEIVED is immediately overwritten
                // by CS_EOF — the missing-packet check below is still TODO.
                self.state = CS_FIN_RECEIVED;

                // TODO: check if no packets are missing
                // If all packets are received
                self.state = CS_EOF;

                Some(self.prepare_reply(&packet.header, ST_STATE))
            }
            ST_STATE => {
                // Update the RTT/variance estimators (RFC 6298-style
                // smoothing) and derive the receive timeout from them.
                let packet_rtt = Int::from_be(packet.header.timestamp_difference_microseconds) as int;
                let delta = self.rtt - packet_rtt;
                self.rtt_variance += (std::num::abs(delta) - self.rtt_variance) / 4;
                self.rtt += (packet_rtt - self.rtt) / 8;
                self.timeout = std::cmp::max(self.rtt + self.rtt_variance * 4, 500);
                debug!("packet_rtt: {}", packet_rtt);
                debug!("delta: {}", delta);
                debug!("self.rtt_variance: {}", self.rtt_variance);
                debug!("self.rtt: {}", self.rtt);
                debug!("self.timeout: {}", self.timeout);

                // NOTE(review): `from_be` is used here to convert the
                // host-order `last_acked` *to* network order for comparison
                // (same byte swap either direction); `to_be` would state
                // the intent more clearly.
                if packet.header.ack_nr == Int::from_be(self.last_acked) {
                    self.duplicate_ack_count += 1;
                } else {
                    self.last_acked = Int::from_be(packet.header.ack_nr);
                    self.duplicate_ack_count = 1;
                }

                // Three duplicate ACKs, must resend packets since `ack_nr + 1`
                if self.duplicate_ack_count == 3 {
                    assert!(!self.send_buffer.is_empty());
                    match self.send_buffer.iter().position(|pkt| Int::from_be(pkt.header.seq_nr) == Int::from_be(packet.header.ack_nr) + 1) {
                        None => fail!("Received request to resend packets since {} but none was found in send buffer!", Int::from_be(packet.header.ack_nr) + 1),
                        Some(position) => {
                            // Resend every buffered packet up to and
                            // including the requested one.
                            for _ in range(0u, position + 1) {
                                let to_send = self.send_buffer.shift().unwrap();
                                debug!("resending: {}", to_send);
                                self.socket.send_to(to_send.bytes().as_slice(), self.connected_to);
                            }
                        },
                    }
                }

                // Success, advance send window
                while !self.send_buffer.is_empty() &&
                    Int::from_be(self.send_buffer[0].header.seq_nr) <= self.last_acked {
                    self.send_buffer.shift();
                }

                None
            },
            ST_RESET => { // TODO
                self.state = CS_RST_RECEIVED;
                None
            },
        }
    }

    /// Insert a packet into the socket's buffer.
    ///
    /// The packet is inserted in such a way that the buffer is
    /// ordered ascendingly by their sequence number. This allows
    /// storing packets that were received out of order.
    ///
    /// NOTE(review): duplicates of an already-buffered sequence number are
    /// inserted rather than discarded.
    fn insert_into_buffer(&mut self, packet: UtpPacket) {
        let mut i = 0;
        for pkt in self.incoming_buffer.iter() {
            if Int::from_be(pkt.header.seq_nr) >= Int::from_be(packet.header.seq_nr) {
                break;
            }
            i += 1;
        }
        self.incoming_buffer.insert(i, packet);
    }
}
impl Clone for UtpSocket {
    /// Clone the socket handle and connection identity.
    ///
    /// NOTE(review): bookkeeping is deliberately NOT copied — the clone
    /// starts with empty send/receive buffers, zeroed ACK counters and
    /// fresh RTT/timeout estimates, even though it shares the same
    /// underlying UDP socket.
    fn clone(&self) -> UtpSocket {
        UtpSocket {
            socket: self.socket.clone(),
            connected_to: self.connected_to,
            receiver_connection_id: self.receiver_connection_id,
            sender_connection_id: self.sender_connection_id,
            seq_nr: self.seq_nr,
            ack_nr: self.ack_nr,
            state: self.state,
            incoming_buffer: Vec::new(),
            send_buffer: Vec::new(),
            duplicate_ack_count: 0,
            last_acked: 0,
            rtt: 0,
            rtt_variance: 0,
            timeout: 500,
        }
    }
}
/// Stream interface for UtpSocket.
pub struct UtpStream {
    socket: UtpSocket, // wrapped datagram-style socket
}

impl UtpStream {
    /// Create a uTP stream listening on the given address.
    #[unstable]
    pub fn bind(addr: SocketAddr) -> IoResult<UtpStream> {
        let socket = UtpSocket::bind(addr);
        match socket {
            Ok(s) => Ok(UtpStream { socket: s }),
            Err(e) => Err(e),
        }
    }

    /// Open a uTP connection to a remote host by hostname or IP address.
    #[unstable]
    pub fn connect(dst: SocketAddr) -> IoResult<UtpStream> {
        use std::io::net::ip::Ipv4Addr;

        // Port 0 means the operating system gets to choose it
        // NOTE(review): binds to the loopback address, so this can only
        // connect to peers reachable via 127.0.0.1.
        let my_addr = SocketAddr { ip: Ipv4Addr(127,0,0,1), port: 0 };
        let socket = match UtpSocket::bind(my_addr) {
            Ok(s) => s,
            Err(e) => return Err(e),
        };

        match socket.connect(dst) {
            Ok(socket) => Ok(UtpStream { socket: socket }),
            Err(e) => Err(e),
        }
    }

    /// Gracefully close connection to peer.
    ///
    /// This method allows both peers to receive all packets still in
    /// flight.
    #[unstable]
    pub fn close(&mut self) -> IoResult<()> {
        self.socket.close()
    }
}
impl Reader for UtpStream {
    /// Read from the underlying socket, discarding the sender's address.
    fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
        match self.socket.recv_from(buf) {
            Err(e) => Err(e),
            Ok((nread, _addr)) => Ok(nread),
        }
    }
}
impl Writer for UtpStream {
    /// Send the whole buffer to the peer this stream is connected to.
    fn write(&mut self, buf: &[u8]) -> IoResult<()> {
        let peer = self.socket.connected_to;
        self.socket.send_to(buf, peer)
    }
}
#[cfg(test)]
mod test {
use super::{UtpSocket, UtpPacket};
use super::{ST_STATE, ST_FIN, ST_DATA, ST_RESET, ST_SYN};
use super::{BUF_SIZE, HEADER_SIZE};
use super::{CS_CONNECTED, CS_NEW, CS_CLOSED, CS_EOF};
use std::rand::random;
macro_rules! expect_eq(
($left:expr, $right:expr) => (
if !($left == $right) {
fail!("expected {}, got {}", $right, $left);
}
);
)
macro_rules! iotry(
($e:expr) => (match $e { Ok(e) => e, Err(e) => fail!("{}", e) })
)
#[test]
fn test_packet_decode() {
let buf = [0x21, 0x00, 0x41, 0xa8, 0x99, 0x2f, 0xd0, 0x2a, 0x9f, 0x4a,
0x26, 0x21, 0x00, 0x10, 0x00, 0x00, 0x3a, 0xf2, 0x6c, 0x79];
let pkt = UtpPacket::decode(buf);
assert_eq!(pkt.header.get_version(), 1);
assert_eq!(pkt.header.get_type(), ST_STATE);
assert_eq!(pkt.header.extension, 0);
assert_eq!(Int::from_be(pkt.header.connection_id), 16808);
assert_eq!(Int::from_be(pkt.header.timestamp_microseconds), 2570047530);
assert_eq!(Int::from_be(pkt.header.timestamp_difference_microseconds), 2672436769);
assert_eq!(Int::from_be(pkt.header.wnd_size), ::std::num::pow(2u32, 20));
assert_eq!(Int::from_be(pkt.header.seq_nr), 15090);
assert_eq!(Int::from_be(pkt.header.ack_nr), 27769);
assert_eq!(pkt.len(), buf.len());
assert!(pkt.payload.is_empty());
}
#[test]
fn test_packet_encode() {
let payload = Vec::from_slice("Hello\n".as_bytes());
let (timestamp, timestamp_diff): (u32, u32) = (15270793, 1707040186);
let (connection_id, seq_nr, ack_nr): (u16, u16, u16) = (16808, 15090, 17096);
let window_size: u32 = 1048576;
let mut pkt = UtpPacket::new();
pkt.set_type(ST_DATA);
pkt.header.timestamp_microseconds = timestamp.to_be();
pkt.header.timestamp_difference_microseconds = timestamp_diff.to_be();
pkt.header.connection_id = connection_id.to_be();
pkt.header.seq_nr = seq_nr.to_be();
pkt.header.ack_nr = ack_nr.to_be();
pkt.header.wnd_size = window_size.to_be();
pkt.payload = payload.clone();
let header = pkt.header;
let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89,
0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00,
0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c,
0x6f, 0x0a];
assert_eq!(pkt.len(), buf.len());
assert_eq!(pkt.len(), HEADER_SIZE + payload.len());
assert_eq!(pkt.payload, payload);
assert_eq!(header.get_version(), 1);
assert_eq!(header.get_type(), ST_DATA);
assert_eq!(header.extension, 0);
assert_eq!(Int::from_be(header.connection_id), connection_id);
assert_eq!(Int::from_be(header.seq_nr), seq_nr);
assert_eq!(Int::from_be(header.ack_nr), ack_nr);
assert_eq!(Int::from_be(header.wnd_size), window_size);
assert_eq!(Int::from_be(header.timestamp_microseconds), timestamp);
assert_eq!(Int::from_be(header.timestamp_difference_microseconds), timestamp_diff);
assert_eq!(pkt.bytes(), Vec::from_slice(buf));
}
#[test]
fn test_reversible() {
let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89,
0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00,
0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c,
0x6f, 0x0a];
assert_eq!(UtpPacket::decode(buf).bytes().as_slice(), buf);
}
#[test]
fn test_socket_ipv4() {
use std::io::test::next_test_ip4;
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let client = iotry!(UtpSocket::bind(clientAddr));
let mut server = iotry!(UtpSocket::bind(serverAddr));
assert!(server.state == CS_NEW);
assert!(client.state == CS_NEW);
// Check proper difference in client's send connection id and receive connection id
assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);
spawn(proc() {
let client = iotry!(client.connect(serverAddr));
assert!(client.state == CS_CONNECTED);
assert_eq!(client.connected_to, serverAddr);
drop(client);
});
let mut buf = [0u8, ..BUF_SIZE];
match server.recv_from(buf) {
e => println!("{}", e),
}
// After establishing a new connection, the server's ids are a mirror of the client's.
assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);
assert_eq!(server.connected_to, clientAddr);
assert!(server.state == CS_CONNECTED);
drop(server);
}
#[test]
fn test_recvfrom_on_closed_socket() {
use std::io::test::next_test_ip4;
use std::io::{Closed, EndOfFile};
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let client = iotry!(UtpSocket::bind(clientAddr));
let mut server = iotry!(UtpSocket::bind(serverAddr));
assert!(server.state == CS_NEW);
assert!(client.state == CS_NEW);
spawn(proc() {
let mut client = iotry!(client.connect(serverAddr));
assert!(client.state == CS_CONNECTED);
assert_eq!(client.close(), Ok(()));
drop(client);
});
// Make the server listen for incoming connections
let mut buf = [0u8, ..BUF_SIZE];
let _resp = server.recv_from(buf);
assert!(server.state == CS_CONNECTED);
// Closing the connection is fine
match server.recv_from(buf) {
Err(e) => fail!("{}", e),
_ => {},
}
expect_eq!(server.state, CS_EOF);
// Trying to listen on the socket after closing it raises an
// EOF error
match server.recv_from(buf) {
Err(e) => expect_eq!(e.kind, EndOfFile),
v => fail!("expected {}, got {}", EndOfFile, v),
}
expect_eq!(server.state, CS_CLOSED);
// Trying again raises a Closed error
match server.recv_from(buf) {
Err(e) => expect_eq!(e.kind, Closed),
v => fail!("expected {}, got {}", Closed, v),
}
drop(server);
}
#[test]
// Sending on a socket that was closed locally must fail with a Closed error.
fn test_sendto_on_closed_socket() {
    use std::io::test::next_test_ip4;
    use std::io::Closed;
    let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
    let client = iotry!(UtpSocket::bind(clientAddr));
    let mut server = iotry!(UtpSocket::bind(serverAddr));
    assert!(server.state == CS_NEW);
    assert!(client.state == CS_NEW);
    // Client task: connect and block on a read so the server can close first.
    spawn(proc() {
        let client = iotry!(client.connect(serverAddr));
        assert!(client.state == CS_CONNECTED);
        let mut buf = [0u8, ..BUF_SIZE];
        let mut client = client;
        iotry!(client.recv_from(buf));
    });
    // Make the server listen for incoming connections
    let mut buf = [0u8, ..BUF_SIZE];
    let (_read, _src) = iotry!(server.recv_from(buf));
    assert!(server.state == CS_CONNECTED);
    iotry!(server.close());
    expect_eq!(server.state, CS_CLOSED);
    // Trying to send to the socket after closing it raises an
    // error
    match server.send_to(buf, clientAddr) {
        Err(e) => expect_eq!(e.kind, Closed),
        v => fail!("expected {}, got {}", Closed, v),
    }
    drop(server);
}
#[test]
// The client's ack_nr must mirror the server's seq_nr after the handshake,
// and STATE-only replies (to SYN and FIN) must not advance it.
fn test_acks_on_socket() {
    use std::io::test::next_test_ip4;
    let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
    // Channel used to hand the server's sequence number back to this task.
    let (tx, rx) = channel();
    let client = iotry!(UtpSocket::bind(clientAddr));
    let server = iotry!(UtpSocket::bind(serverAddr));
    spawn(proc() {
        // Make the server listen for incoming connections
        let mut server = server;
        let mut buf = [0u8, ..BUF_SIZE];
        let _resp = server.recv_from(buf);
        tx.send(server.seq_nr);
        // Close the connection
        iotry!(server.recv_from(buf));
        drop(server);
    });
    let mut client = iotry!(client.connect(serverAddr));
    assert!(client.state == CS_CONNECTED);
    let sender_seq_nr = rx.recv();
    let ack_nr = client.ack_nr;
    assert!(ack_nr != 0);
    assert!(ack_nr == sender_seq_nr);
    assert_eq!(client.close(), Ok(()));
    // The reply to both connect (SYN) and close (FIN) should be
    // STATE packets, which don't increase the sequence number
    // and, hence, the receiver's acknowledgement number.
    assert!(client.ack_nr == ack_nr);
    drop(client);
}
#[test]
// Drives handle_packet() directly through a full connection lifecycle:
// setup (SYN -> STATE), usage (DATA -> STATE), and teardown (FIN -> STATE),
// checking connection ids, ack numbers, and sequence-number invariants.
fn test_handle_packet() {
    use std::io::test::next_test_ip4;
    //fn test_connection_setup() {
    let initial_connection_id: u16 = random();
    let sender_connection_id = initial_connection_id + 1;
    let serverAddr = next_test_ip4();
    let mut socket = iotry!(UtpSocket::bind(serverAddr));
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_SYN);
    packet.header.connection_id = initial_connection_id.to_be();
    let sent = packet.header;
    // Do we have a response?
    let response = socket.handle_packet(packet.clone());
    assert!(response.is_some());
    // Is is of the correct type?
    let response = response.unwrap();
    assert!(response.get_type() == ST_STATE);
    // Same connection id on both ends during connection establishment
    assert!(response.header.connection_id == sent.connection_id);
    // Response acknowledges SYN
    assert!(response.header.ack_nr == sent.seq_nr);
    // No payload?
    assert!(response.payload.is_empty());
    //}
    // ---------------------------------
    // fn test_connection_usage() {
    let old_packet = packet;
    let old_response = response;
    let mut packet = UtpPacket::new();
    packet.set_type(ST_DATA);
    packet.header.connection_id = sender_connection_id.to_be();
    packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
    packet.header.ack_nr = old_response.header.seq_nr;
    let sent = packet.header;
    let response = socket.handle_packet(packet.clone());
    assert!(response.is_some());
    let response = response.unwrap();
    assert!(response.get_type() == ST_STATE);
    // Sender (i.e., who initated connection and sent SYN) has connection id
    // equal to initial connection id + 1
    // Receiver (i.e., who accepted connection) has connection id equal to
    // initial connection id
    assert!(Int::from_be(response.header.connection_id) == initial_connection_id);
    assert!(Int::from_be(response.header.connection_id) == Int::from_be(sent.connection_id) - 1);
    // Previous packets should be ack'ed
    assert!(Int::from_be(response.header.ack_nr) == Int::from_be(sent.seq_nr));
    // Responses with no payload should not increase the sequence number
    assert!(response.payload.is_empty());
    assert!(Int::from_be(response.header.seq_nr) == Int::from_be(old_response.header.seq_nr));
    // }
    //fn test_connection_teardown() {
    let old_packet = packet;
    let old_response = response;
    let mut packet = UtpPacket::new();
    packet.set_type(ST_FIN);
    packet.header.connection_id = sender_connection_id.to_be();
    packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
    packet.header.ack_nr = old_response.header.seq_nr;
    let sent = packet.header;
    let response = socket.handle_packet(packet);
    assert!(response.is_some());
    let response = response.unwrap();
    assert!(response.get_type() == ST_STATE);
    // FIN packets have no payload but the sequence number shouldn't increase
    assert!(Int::from_be(sent.seq_nr) == Int::from_be(old_packet.header.seq_nr) + 1);
    // Nor should the ACK packet's sequence number
    assert!(response.header.seq_nr == old_response.header.seq_nr);
    // FIN should be acknowledged
    assert!(response.header.ack_nr == sent.seq_nr);
    //}
}
#[test]
// A keepalive (STATE packet carrying no new data) must elicit no reply,
// even when the same keepalive is received more than once.
fn test_response_to_keepalive_ack() {
    use std::io::test::next_test_ip4;
    // Boilerplate test setup
    let initial_connection_id: u16 = random();
    let serverAddr = next_test_ip4();
    let mut socket = iotry!(UtpSocket::bind(serverAddr));
    // Establish connection
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_SYN);
    packet.header.connection_id = initial_connection_id.to_be();
    let response = socket.handle_packet(packet.clone());
    assert!(response.is_some());
    let response = response.unwrap();
    assert!(response.get_type() == ST_STATE);
    let old_packet = packet;
    let old_response = response;
    // Now, send a keepalive packet
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_STATE);
    packet.header.connection_id = initial_connection_id.to_be();
    packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
    packet.header.ack_nr = old_response.header.seq_nr;
    let response = socket.handle_packet(packet.clone());
    assert!(response.is_none());
    // Send a second keepalive packet, identical to the previous one
    let response = socket.handle_packet(packet.clone());
    assert!(response.is_none());
}
#[test]
// A packet bearing an unknown connection id on an established connection
// must be answered with a RESET packet.
fn test_response_to_wrong_connection_id() {
    use std::io::test::next_test_ip4;
    // Boilerplate test setup
    let initial_connection_id: u16 = random();
    let serverAddr = next_test_ip4();
    let mut socket = iotry!(UtpSocket::bind(serverAddr));
    // Establish connection
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_SYN);
    packet.header.connection_id = initial_connection_id.to_be();
    let response = socket.handle_packet(packet.clone());
    assert!(response.is_some());
    assert!(response.unwrap().get_type() == ST_STATE);
    // Now, disrupt connection with a packet with an incorrect connection id
    // NOTE(review): `.to_le()` is used to produce a "different" id, but for a
    // byte-palindromic random id (e.g. 0xABAB) it equals the original and the
    // test would not actually exercise the mismatch path — confirm intended.
    let new_connection_id = initial_connection_id.to_le();
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_STATE);
    packet.header.connection_id = new_connection_id;
    let response = socket.handle_packet(packet.clone());
    assert!(response.is_some());
    let response = response.unwrap();
    assert!(response.get_type() == ST_RESET);
    assert!(response.header.ack_nr == packet.header.seq_nr);
}
#[test]
// Smoke test for the UtpStream wrapper: connect, close, and make sure a
// read_to_end on the listening side completes without error.
fn test_utp_stream() {
    use super::UtpStream;
    use std::io::test::next_test_ip4;
    let serverAddr = next_test_ip4();
    let mut server = iotry!(UtpStream::bind(serverAddr));
    spawn(proc() {
        let mut client = iotry!(UtpStream::connect(serverAddr));
        iotry!(client.close());
    });
    iotry!(server.read_to_end());
}
#[test]
// Round-trips a payload that fits in a single packet through a UtpStream
// and checks the received bytes match what was sent.
fn test_utp_stream_small_data() {
    use super::UtpStream;
    use std::io::test::next_test_ip4;
    // Fits in a packet
    static len: uint = 1024;
    let data = Vec::from_fn(len, |idx| idx as u8);
    expect_eq!(len, data.len());
    let d = data.clone();
    let serverAddr = next_test_ip4();
    let mut server = UtpStream::bind(serverAddr);
    spawn(proc() {
        let mut client = iotry!(UtpStream::connect(serverAddr));
        iotry!(client.write(d.as_slice()));
        iotry!(client.close());
    });
    let read = iotry!(server.read_to_end());
    assert!(!read.is_empty());
    expect_eq!(read.len(), data.len());
    expect_eq!(read, data);
}
#[test]
// Same as test_utp_stream_small_data, but the 1 MiB payload must be split
// over several packets, exercising segmentation and reassembly.
fn test_utp_stream_large_data() {
    use super::UtpStream;
    use std::io::test::next_test_ip4;
    // Has to be sent over several packets
    static len: uint = 1024 * 1024;
    let data = Vec::from_fn(len, |idx| idx as u8);
    expect_eq!(len, data.len());
    let d = data.clone();
    let serverAddr = next_test_ip4();
    let mut server = UtpStream::bind(serverAddr);
    spawn(proc() {
        let mut client = iotry!(UtpStream::connect(serverAddr));
        iotry!(client.write(d.as_slice()));
        iotry!(client.close());
    });
    let read = iotry!(server.read_to_end());
    assert!(!read.is_empty());
    expect_eq!(read.len(), data.len());
    expect_eq!(read, data);
}
#[test]
// Reading from a UtpStream after read_to_end exhausted it must fail with
// a Closed error rather than blocking or returning data.
fn test_utp_stream_successive_reads() {
    use super::UtpStream;
    use std::io::test::next_test_ip4;
    use std::io::Closed;
    static len: uint = 1024;
    let data: Vec<u8> = Vec::from_fn(len, |idx| idx as u8);
    expect_eq!(len, data.len());
    let d = data.clone();
    let serverAddr = next_test_ip4();
    let mut server = UtpStream::bind(serverAddr);
    spawn(proc() {
        let mut client = iotry!(UtpStream::connect(serverAddr));
        iotry!(client.write(d.as_slice()));
        iotry!(client.close());
    });
    iotry!(server.read_to_end());
    let mut buf = [0u8, ..4096];
    match server.read(buf) {
        Err(ref e) if e.kind == Closed => {},
        _ => fail!("should have failed with Closed"),
    };
}
#[test]
// Feeds two DATA packets to handle_packet() in reverse order: the
// out-of-order one must not be acknowledged, while the in-order one must
// produce a reply.
fn test_unordered_packets() {
    use std::io::test::next_test_ip4;
    // Boilerplate test setup
    let initial_connection_id: u16 = random();
    let serverAddr = next_test_ip4();
    let mut socket = iotry!(UtpSocket::bind(serverAddr));
    // Establish connection
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_SYN);
    packet.header.connection_id = initial_connection_id.to_be();
    let response = socket.handle_packet(packet.clone());
    assert!(response.is_some());
    let response = response.unwrap();
    assert!(response.get_type() == ST_STATE);
    let old_packet = packet;
    let old_response = response;
    let mut window: Vec<UtpPacket> = Vec::new();
    // Build two consecutive DATA packets (seq_nr + 1 and seq_nr + 2).
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_DATA);
    packet.header.connection_id = initial_connection_id.to_be();
    packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
    packet.header.ack_nr = old_response.header.seq_nr;
    packet.payload = vec!(1,2,3);
    window.push(packet);
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_DATA);
    packet.header.connection_id = initial_connection_id.to_be();
    packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 2).to_be();
    packet.header.ack_nr = old_response.header.seq_nr;
    packet.payload = vec!(4,5,6);
    window.push(packet);
    // Send packets in reverse order
    let response = socket.handle_packet(window[1].clone());
    assert!(response.is_some());
    let response = response.unwrap();
    // The out-of-order packet must not be acknowledged yet.
    assert!(response.header.ack_nr != window[1].header.seq_nr);
    let response = socket.handle_packet(window[0].clone());
    assert!(response.is_some());
}
#[test]
// Sends DATA packets over the raw UDP socket in scrambled order and checks
// the receiving UtpStream still reassembles the original byte sequence.
fn test_socket_unordered_packets() {
    use std::io::test::next_test_ip4;
    use super::UtpStream;
    let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
    let client = iotry!(UtpSocket::bind(clientAddr));
    let mut server = iotry!(UtpSocket::bind(serverAddr));
    assert!(server.state == CS_NEW);
    assert!(client.state == CS_NEW);
    // Check proper difference in client's send connection id and receive connection id
    assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);
    spawn(proc() {
        let client = iotry!(client.connect(serverAddr));
        assert!(client.state == CS_CONNECTED);
        // Use the raw UDP socket so packets can be sent out of order,
        // bypassing UtpSocket's send path.
        let mut s = client.socket;
        let mut window: Vec<UtpPacket> = Vec::new();
        // Four DATA packets of 3 bytes each, seq_nr + 0 .. seq_nr + 3.
        let mut i = 0;
        for data in Vec::from_fn(12, |idx| idx as u8 + 1).as_slice().chunks(3) {
            let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
            packet.set_type(ST_DATA);
            packet.header.connection_id = client.sender_connection_id.to_be();
            packet.header.seq_nr = (client.seq_nr + i).to_be();
            packet.header.ack_nr = client.ack_nr.to_be();
            packet.payload = Vec::from_slice(data);
            window.push(packet);
            i += 1;
        }
        // NOTE(review): the FIN uses seq_nr + 2, which collides with the
        // third DATA packet above (the last DATA packet uses seq_nr + 3).
        // Looks like it should be seq_nr + 4 (or + i) — confirm intended.
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_FIN);
        packet.header.connection_id = client.sender_connection_id.to_be();
        packet.header.seq_nr = (client.seq_nr + 2).to_be();
        packet.header.ack_nr = client.ack_nr.to_be();
        window.push(packet);
        // Deliver the window out of order, FIN last.
        iotry!(s.send_to(window[3].bytes().as_slice(), serverAddr));
        iotry!(s.send_to(window[2].bytes().as_slice(), serverAddr));
        iotry!(s.send_to(window[1].bytes().as_slice(), serverAddr));
        iotry!(s.send_to(window[0].bytes().as_slice(), serverAddr));
        iotry!(s.send_to(window[4].bytes().as_slice(), serverAddr));
        for _ in range(0u, 2) {
            let mut buf = [0, ..BUF_SIZE];
            iotry!(s.recv_from(buf));
        }
    });
    let mut buf = [0u8, ..BUF_SIZE];
    match server.recv_from(buf) {
        e => println!("{}", e),
    }
    // After establishing a new connection, the server's ids are a mirror of the client's.
    assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);
    assert!(server.state == CS_CONNECTED);
    let mut stream = UtpStream { socket: server };
    let expected: Vec<u8> = Vec::from_fn(12, |idx| idx as u8 + 1);
    match stream.read_to_end() {
        Ok(data) => {
            expect_eq!(data.len(), expected.len());
            expect_eq!(data, expected);
        },
        Err(e) => fail!("{}", e),
    }
}
#[test]
// A raw SYN received on a fresh socket must be acknowledged directly and
// must never end up in the incoming (out-of-order) buffer.
fn test_socket_should_not_buffer_syn_packets() {
    use std::io::test::next_test_ip4;
    use std::io::net::udp::UdpSocket;
    use super::UtpSocket;
    let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
    let server = iotry!(UtpSocket::bind(serverAddr));
    let client = iotry!(UdpSocket::bind(clientAddr));
    // A pre-encoded 20-byte SYN header captured on the wire.
    let test_syn_raw = [0x41, 0x00, 0x41, 0xa7, 0x00, 0x00, 0x00,
        0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x3a,
        0xf1, 0x00, 0x00];
    let test_syn_pkt = UtpPacket::decode(test_syn_raw);
    let seq_nr = Int::from_be(test_syn_pkt.header.seq_nr);
    spawn(proc() {
        let mut client = client;
        iotry!(client.send_to(test_syn_raw, serverAddr));
        client.set_timeout(Some(10));
        let mut buf = [0, ..BUF_SIZE];
        let packet = match client.recv_from(buf) {
            Ok((nread, _src)) => UtpPacket::decode(buf.slice_to(nread)),
            Err(e) => fail!("{}", e),
        };
        // The reply must acknowledge exactly the SYN's sequence number.
        expect_eq!(packet.header.ack_nr, seq_nr.to_be());
        drop(client);
    });
    let mut server = server;
    let mut buf = [0, ..20];
    iotry!(server.recv_from(buf));
    assert!(server.ack_nr != 0);
    expect_eq!(server.ack_nr, seq_nr);
    assert!(server.incoming_buffer.is_empty());
}
#[test]
// Three duplicate ACKs (a "triple ACK") must trigger a fast resend of the
// packet following the acknowledged sequence number.
fn test_response_to_triple_ack() {
    use std::io::test::next_test_ip4;
    let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
    let mut server = iotry!(UtpSocket::bind(serverAddr));
    let client = iotry!(UtpSocket::bind(clientAddr));
    // Fits in a packet
    static len: uint = 1024;
    let data = Vec::from_fn(len, |idx| idx as u8);
    let d = data.clone();
    expect_eq!(len, data.len());
    spawn(proc() {
        let mut client = iotry!(client.connect(serverAddr));
        iotry!(client.send_to(d.as_slice(), serverAddr));
        iotry!(client.close());
    });
    let mut buf = [0, ..BUF_SIZE];
    // Expect SYN
    iotry!(server.recv_from(buf));
    // Receive data
    let mut data_packet;
    match server.socket.recv_from(buf) {
        Ok((read, _src)) => {
            data_packet = UtpPacket::decode(buf.slice_to(read));
            assert!(data_packet.get_type() == ST_DATA);
            expect_eq!(data_packet.payload, data);
            assert_eq!(data_packet.payload.len(), data.len());
        },
        Err(e) => fail!("{}", e),
    }
    let data_packet = data_packet;
    // Send triple ACK
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_STATE);
    packet.header.seq_nr = server.seq_nr.to_be();
    packet.header.ack_nr = (Int::from_be(data_packet.header.seq_nr) - 1).to_be();
    packet.header.connection_id = server.sender_connection_id.to_be();
    for _ in range(0u, 3) {
        iotry!(server.socket.send_to(packet.bytes().as_slice(), clientAddr));
    }
    // Receive data again and check that it's the same we reported as missing
    match server.socket.recv_from(buf) {
        Ok((0, _)) => fail!("Received 0 bytes from socket"),
        Ok((read, _src)) => {
            let packet = UtpPacket::decode(buf.slice_to(read));
            assert_eq!(packet.get_type(), ST_DATA);
            assert_eq!(Int::from_be(packet.header.seq_nr), Int::from_be(data_packet.header.seq_nr));
            assert!(packet.payload == data_packet.payload);
            let response = server.handle_packet(packet).unwrap();
            iotry!(server.socket.send_to(response.bytes().as_slice(), server.connected_to));
        },
        Err(e) => fail!("{}", e),
    }
    // Receive close
    iotry!(server.recv_from(buf));
}
}
Add test for timeout request.
//! Implementation of a Micro Transport Protocol library.
//!
//! http://www.bittorrent.org/beps/bep_0029.html
//!
//! TODO
//! ----
//!
//! - congestion control
//! - proper connection closing
//! - automatically send FIN (or should it be RST?) on `drop` if not already closed
//! - setters and getters that hide header field endianness conversion
//! - SACK extension
//! - handle packet loss
#![crate_name = "utp"]
#![license = "MIT/ASL2"]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![feature(macro_rules, phase)]
#![deny(missing_doc)]
extern crate time;
#[phase(plugin, link)] extern crate log;
use std::io::net::udp::UdpSocket;
use std::io::net::ip::SocketAddr;
use std::io::IoResult;
use std::mem::transmute;
use std::rand::random;
use std::fmt;
static HEADER_SIZE: uint = 20;
// For simplicity's sake, let us assume no packet will ever exceed the
// Ethernet maximum transfer unit of 1500 bytes.
static BUF_SIZE: uint = 1500;
// Assemble the bytes `$src[$start..$end]` (inclusive) into an unsigned
// integer of type `$t`.
//
// NOTE(review): byte `$start + i` is shifted left by `i * 8`, i.e. the
// FIRST buffer byte lands in the least significant position. Combined with
// the header fields being kept in network order in memory, this appears to
// depend on a little-endian host — confirm before porting.
macro_rules! u8_to_unsigned_be(
    ($src:ident[$start:expr..$end:expr] -> $t:ty) => ({
        let mut result: $t = 0;
        for i in range(0u, $end-$start+1).rev() {
            result = result | $src[$start+i] as $t << i*8;
        }
        result
    })
)
/// Return current time in microseconds since the UNIX epoch.
///
/// The result is truncated to 32 bits, matching the width of the uTP
/// header timestamp fields.
fn now_microseconds() -> u32 {
    let timestamp = time::get_time();
    let whole_seconds_as_micros = (timestamp.sec * 1_000_000) as u32;
    let sub_second_micros = (timestamp.nsec / 1000) as u32;
    whole_seconds_as_micros + sub_second_micros
}
#[allow(dead_code,non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
// Packet types defined by BEP 29; the discriminants are the on-the-wire
// values stored in the high nibble of the header's type/version byte.
enum UtpPacketType {
    ST_DATA = 0,   // regular data packet
    ST_FIN = 1,    // finalize (last packet of) a connection
    ST_STATE = 2,  // state packet (ACK), carries no payload
    ST_RESET = 3,  // forcibly terminate a connection
    ST_SYN = 4,    // initiate a connection
}
#[allow(dead_code)]
#[deriving(Clone)]
// #[packed] guarantees the struct is exactly HEADER_SIZE (20) bytes so it
// can be transmuted to/from a raw byte buffer in `bytes()`.
#[packed]
// The 20-byte uTP packet header. All multi-byte fields are kept in
// network (big-endian) byte order; callers convert with to_be/from_be.
struct UtpPacketHeader {
    type_ver: u8, // type: u4, ver: u4
    extension: u8,
    connection_id: u16,
    timestamp_microseconds: u32,
    timestamp_difference_microseconds: u32,
    wnd_size: u32, // advertised receive window, in bytes
    seq_nr: u16,
    ack_nr: u16,
}
impl UtpPacketHeader {
    /// Set type of packet to the specified type.
    fn set_type(&mut self, t: UtpPacketType) {
        // Keep the version nibble (low 4 bits), replace the type nibble.
        let version = 0x0F & self.type_ver;
        self.type_ver = t as u8 << 4 | version;
    }
    /// Extract the packet type from the high nibble of `type_ver`.
    fn get_type(&self) -> UtpPacketType {
        // NOTE(review): transmute assumes the nibble is a valid
        // UtpPacketType discriminant (0..4); a corrupt packet would produce
        // an invalid enum value — confirm callers validate input.
        let t: UtpPacketType = unsafe { transmute(self.type_ver >> 4) };
        t
    }
    /// Extract the protocol version from the low nibble of `type_ver`.
    fn get_version(&self) -> u8 {
        self.type_ver & 0x0F
    }
    /// Return a copy of this header with the receive window replaced,
    /// stored in network byte order.
    fn wnd_size(&self, new_wnd_size: u32) -> UtpPacketHeader {
        UtpPacketHeader {
            wnd_size: new_wnd_size.to_be(),
            .. self.clone()
        }
    }
    /// Return packet header as a slice of bytes.
    fn bytes(&self) -> &[u8] {
        // Safe only because the struct is #[packed] and exactly
        // HEADER_SIZE bytes, with fields already in network order.
        let buf: &[u8, ..HEADER_SIZE] = unsafe { transmute(self) };
        return buf.as_slice();
    }
    // Header length is fixed by the protocol.
    fn len(&self) -> uint {
        return HEADER_SIZE;
    }
    /// Read byte buffer and return corresponding packet header.
    /// It assumes the fields are in network (big-endian) byte order,
    /// preserving it.
    fn decode(buf: &[u8]) -> UtpPacketHeader {
        // Byte ranges below are inclusive: e.g. buf[2..3] are the two
        // bytes of connection_id.
        UtpPacketHeader {
            type_ver: buf[0],
            extension: buf[1],
            connection_id: u8_to_unsigned_be!(buf[2..3] -> u16),
            timestamp_microseconds: u8_to_unsigned_be!(buf[4..7] -> u32),
            timestamp_difference_microseconds: u8_to_unsigned_be!(buf[8..11] -> u32),
            wnd_size: u8_to_unsigned_be!(buf[12..15] -> u32),
            seq_nr: u8_to_unsigned_be!(buf[16..17] -> u16),
            ack_nr: u8_to_unsigned_be!(buf[18..19] -> u16),
        }
    }
}
impl fmt::Show for UtpPacketHeader {
    // Debug-print every header field, converting each from network byte
    // order to the host's native order for readability.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(type: {}, version: {}, extension: {}, \
                connection_id: {}, timestamp_microseconds: {}, \
                timestamp_difference_microseconds: {}, wnd_size: {}, \
                seq_nr: {}, ack_nr: {})",
                self.get_type(),
                Int::from_be(self.get_version()),
                Int::from_be(self.extension),
                Int::from_be(self.connection_id),
                Int::from_be(self.timestamp_microseconds),
                Int::from_be(self.timestamp_difference_microseconds),
                Int::from_be(self.wnd_size),
                Int::from_be(self.seq_nr),
                Int::from_be(self.ack_nr),
        )
    }
}
#[allow(dead_code)]
// A complete uTP packet: fixed 20-byte header plus variable-length payload.
struct UtpPacket {
    header: UtpPacketHeader,
    payload: Vec<u8>,
}
impl UtpPacket {
    /// Construct a new, empty packet.
    fn new() -> UtpPacket {
        UtpPacket {
            header: UtpPacketHeader {
                // Defaults to type ST_DATA, protocol version 1.
                type_ver: ST_DATA as u8 << 4 | 1,
                extension: 0,
                connection_id: 0,
                timestamp_microseconds: 0,
                timestamp_difference_microseconds: 0,
                wnd_size: 0,
                seq_nr: 0,
                ack_nr: 0,
            },
            payload: Vec::new(),
        }
    }
    // Set the packet type nibble in the header.
    fn set_type(&mut self, t: UtpPacketType) {
        self.header.set_type(t);
    }
    // TODO: Read up on pointers and ownership
    fn get_type(&self) -> UtpPacketType {
        self.header.get_type()
    }
    // Return a copy of this packet with the advertised receive window
    // replaced (builder-style; payload is cloned).
    fn wnd_size(&self, new_wnd_size: u32) -> UtpPacket {
        UtpPacket {
            header: self.header.wnd_size(new_wnd_size),
            payload: self.payload.clone(),
        }
    }
    /// TODO: return slice
    // Serialize header followed by payload into a single byte vector.
    fn bytes(&self) -> Vec<u8> {
        let mut buf = Vec::with_capacity(self.len());
        buf.push_all(self.header.bytes());
        buf.push_all(self.payload.as_slice());
        return buf;
    }
    // Total on-the-wire length: fixed header plus payload.
    fn len(&self) -> uint {
        self.header.len() + self.payload.len()
    }
    /// Decode a byte slice and construct the equivalent UtpPacket.
    ///
    /// Note that this method makes no attempt to guess the payload size, saving
    /// all except the initial 20 bytes corresponding to the header as payload.
    /// It's the caller's responsability to use an appropriately sized buffer.
    fn decode(buf: &[u8]) -> UtpPacket {
        UtpPacket {
            header: UtpPacketHeader::decode(buf),
            payload: Vec::from_slice(buf.slice(HEADER_SIZE, buf.len()))
        }
    }
}
impl Clone for UtpPacket {
    /// Duplicate the packet: the plain-data header is copied and the
    /// payload vector is cloned.
    fn clone(&self) -> UtpPacket {
        let header_copy = self.header;
        let payload_copy = self.payload.clone();
        UtpPacket {
            header: header_copy,
            payload: payload_copy,
        }
    }
}
impl fmt::Show for UtpPacket {
    /// Formatting a packet prints only its header; the payload is omitted.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        let header = &self.header;
        header.fmt(formatter)
    }
}
#[allow(non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
// Connection states of a UtpSocket.
enum UtpSocketState {
    CS_NEW,          // socket bound but no connection yet
    CS_CONNECTED,    // handshake completed
    CS_SYN_SENT,     // SYN sent, waiting for the peer's STATE reply
    CS_FIN_RECEIVED, // peer sent FIN; in-flight packets may remain
    CS_FIN_SENT,     // we sent FIN, awaiting acknowledgement
    CS_RST_RECEIVED, // peer reset the connection
    CS_CLOSED,       // connection fully closed
    CS_EOF,          // FIN processed and buffers drained; next read is EOF
}
/// A uTP (Micro Transport Protocol) socket.
pub struct UtpSocket {
    socket: UdpSocket,             // underlying UDP transport
    connected_to: SocketAddr,      // peer address once connected
    sender_connection_id: u16,     // connection id stamped on outgoing packets
    receiver_connection_id: u16,   // connection id expected on incoming packets
    seq_nr: u16,                   // next sequence number to send (host order)
    ack_nr: u16,                   // last in-order sequence number received
    state: UtpSocketState,
    // Received but not acknowledged packets
    incoming_buffer: Vec<UtpPacket>,
    // Sent but not yet acknowledged packets
    send_buffer: Vec<UtpPacket>,
    duplicate_ack_count: uint,     // consecutive duplicate ACKs (3 => fast resend)
    last_acked: u16,               // highest ack_nr the peer has confirmed
    rtt: int,                      // smoothed round-trip time estimate (us)
    rtt_variance: int,             // RTT variance estimate (us)
    timeout: int,                  // current receive timeout (ms)
}
// Build a STATE (ACK) reply for `$header`, advertise BUF_SIZE as the
// receive window, and send it to `$src`.
//
// NOTE(review): the body references `self` and `try!`, so this macro only
// expands correctly inside UtpSocket methods returning IoResult — it is
// not a standalone macro.
macro_rules! reply_with_ack(
    ($header:expr, $src:expr) => ({
        let resp = self.prepare_reply($header, ST_STATE).wnd_size(BUF_SIZE as u32);
        try!(self.socket.send_to(resp.bytes().as_slice(), $src));
        debug!("sent {}", resp.header);
    })
)
impl UtpSocket {
/// Create a UTP socket from the given address.
#[unstable]
pub fn bind(addr: SocketAddr) -> IoResult<UtpSocket> {
let skt = UdpSocket::bind(addr);
let connection_id = random::<u16>();
match skt {
Ok(x) => Ok(UtpSocket {
socket: x,
connected_to: addr,
receiver_connection_id: connection_id,
sender_connection_id: connection_id + 1,
seq_nr: 1,
ack_nr: 0,
state: CS_NEW,
incoming_buffer: Vec::new(),
send_buffer: Vec::new(),
duplicate_ack_count: 0,
last_acked: 0,
rtt: 0,
rtt_variance: 0,
timeout: 1000,
}),
Err(e) => Err(e)
}
}
/// Open a uTP connection to a remote host by hostname or IP address.
#[unstable]
pub fn connect(mut self, other: SocketAddr) -> IoResult<UtpSocket> {
use std::io::{IoError, ConnectionFailed};
self.connected_to = other;
assert_eq!(self.receiver_connection_id + 1, self.sender_connection_id);
let mut packet = UtpPacket::new();
packet.set_type(ST_SYN);
packet.header.connection_id = self.receiver_connection_id.to_be();
packet.header.seq_nr = self.seq_nr.to_be();
packet.header.timestamp_microseconds = now_microseconds().to_be();
// Send packet
let dst = self.connected_to;
let _result = self.socket.send_to(packet.bytes().as_slice(), dst);
debug!("sent {}", packet.header);
self.state = CS_SYN_SENT;
let mut buf = [0, ..BUF_SIZE];
let (_len, addr) = match self.socket.recv_from(buf) {
Ok(v) => v,
Err(e) => fail!("{}", e),
};
assert!(_len == HEADER_SIZE);
assert!(addr == self.connected_to);
let packet = UtpPacket::decode(buf.slice_to(_len));
if packet.get_type() != ST_STATE {
return Err(IoError {
kind: ConnectionFailed,
desc: "The remote peer sent an incorrect reply",
detail: None,
});
}
self.ack_nr = Int::from_be(packet.header.seq_nr);
debug!("connected to: {} {}", addr, self.connected_to);
self.state = CS_CONNECTED;
self.seq_nr += 1;
Ok(self)
}
/// Gracefully close connection to peer.
///
/// This method allows both peers to receive all packets still in
/// flight.
#[unstable]
pub fn close(&mut self) -> IoResult<()> {
let mut packet = UtpPacket::new();
packet.header.connection_id = self.sender_connection_id.to_be();
packet.header.seq_nr = self.seq_nr.to_be();
packet.header.ack_nr = self.ack_nr.to_be();
packet.header.timestamp_microseconds = now_microseconds().to_be();
packet.set_type(ST_FIN);
// Send FIN
let dst = self.connected_to;
try!(self.socket.send_to(packet.bytes().as_slice(), dst));
debug!("sent {}", packet);
self.state = CS_FIN_SENT;
// Receive JAKE
let mut buf = [0u8, ..BUF_SIZE];
try!(self.socket.recv_from(buf));
let resp = UtpPacket::decode(buf);
debug!("received {}", resp);
assert!(resp.get_type() == ST_STATE);
// Set socket state
self.state = CS_CLOSED;
Ok(())
}
/// Receive data from socket.
///
/// On success, returns the number of bytes read and the sender's address.
/// Returns CS_EOF after receiving a FIN packet when the remaining
/// inflight packets are consumed. Subsequent calls return CS_CLOSED.
#[unstable]
pub fn recv_from(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
use std::cmp::min;
use std::io::{IoError, EndOfFile, Closed, TimedOut};
if self.state == CS_EOF {
self.state = CS_CLOSED;
return Err(IoError {
kind: EndOfFile,
desc: "End of file reached",
detail: None,
});
}
if self.state == CS_CLOSED {
return Err(IoError {
kind: Closed,
desc: "Connection closed",
detail: None,
});
}
let mut b = [0, ..BUF_SIZE + HEADER_SIZE];
debug!("setting read timeout of {} ms", self.timeout);
self.socket.set_read_timeout(Some(self.timeout as u64));
let (read, src) = match self.socket.recv_from(b) {
Err(ref e) if e.kind == TimedOut => {
debug!("recv_from timed out");
self.timeout = self.timeout * 2;
self.send_fast_resend_request();
return Ok((0, self.connected_to));
},
Ok(x) => x,
Err(e) => return Err(e),
};
let packet = UtpPacket::decode(b.slice_to(read));
debug!("received {}", packet.header);
if packet.get_type() == ST_RESET {
use std::io::{IoError, ConnectionReset};
return Err(IoError {
kind: ConnectionReset,
desc: "Remote host aborted connection (incorrect connection id)",
detail: None,
});
}
// TODO: move this to handle_packet?
if packet.get_type() == ST_SYN {
self.connected_to = src;
}
// Check if the packet is out of order (that is, it's sequence number
// does not immediately follow the ACK number)
if packet.get_type() != ST_STATE && packet.get_type() != ST_SYN
&& self.ack_nr + 1 < Int::from_be(packet.header.seq_nr) {
debug!("current ack_nr ({}) is behind received packet seq_nr ({})",
self.ack_nr, Int::from_be(packet.header.seq_nr));
// Add to buffer but do not acknowledge until all packets between
// ack_nr + 1 and curr_packet.seq_nr - 1 are received
self.insert_into_buffer(packet);
return Ok((0, self.connected_to));
}
match self.handle_packet(packet.clone()) {
Some(pkt) => {
let pkt = pkt.wnd_size(BUF_SIZE as u32);
try!(self.socket.send_to(pkt.bytes().as_slice(), src));
debug!("sent {}", pkt.header);
},
None => {}
};
for i in range(0u, min(buf.len(), read - HEADER_SIZE)) {
buf[i] = b[i + HEADER_SIZE];
}
// Empty buffer if possible
let mut read = read - HEADER_SIZE;
while !self.incoming_buffer.is_empty() &&
self.ack_nr + 1 == Int::from_be(self.incoming_buffer[0].header.seq_nr) {
let packet = self.incoming_buffer.shift().unwrap();
debug!("Removing packet from buffer: {}", packet);
for i in range(0u, packet.payload.len()) {
buf[read] = packet.payload[i];
read += 1;
}
self.ack_nr = Int::from_be(packet.header.seq_nr);
}
Ok((read, src))
}
#[allow(missing_doc)]
#[deprecated = "renamed to `recv_from`"]
pub fn recvfrom(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
self.recv_from(buf)
}
fn prepare_reply(&self, original: &UtpPacketHeader, t: UtpPacketType) -> UtpPacket {
let mut resp = UtpPacket::new();
resp.set_type(t);
let self_t_micro: u32 = now_microseconds();
let other_t_micro: u32 = Int::from_be(original.timestamp_microseconds);
resp.header.timestamp_microseconds = self_t_micro.to_be();
resp.header.timestamp_difference_microseconds = (self_t_micro - other_t_micro).to_be();
resp.header.connection_id = self.sender_connection_id.to_be();
resp.header.seq_nr = self.seq_nr.to_be();
resp.header.ack_nr = self.ack_nr.to_be();
resp
}
/// Send data on socket to the given address. Returns nothing on success.
//
// # Implementation details
//
// This method inserts packets into the send buffer and keeps trying to
// advance the send window until an ACK corresponding to the last packet is
// received.
//
// Note that the buffer passed to `send_to` might exceed the maximum packet
// size, which will result in the data being split over several packets.
#[unstable]
pub fn send_to(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
use std::io::{IoError, Closed};
if self.state == CS_CLOSED {
return Err(IoError {
kind: Closed,
desc: "Connection closed",
detail: None,
});
}
for chunk in buf.chunks(BUF_SIZE) {
let mut packet = UtpPacket::new();
packet.set_type(ST_DATA);
packet.payload = Vec::from_slice(chunk);
packet.header.timestamp_microseconds = now_microseconds().to_be();
packet.header.seq_nr = self.seq_nr.to_be();
packet.header.ack_nr = self.ack_nr.to_be();
packet.header.connection_id = self.sender_connection_id.to_be();
debug!("Pushing packet into send buffer: {}", packet);
self.send_buffer.push(packet.clone());
try!(self.socket.send_to(packet.bytes().as_slice(), dst));
self.seq_nr += 1;
}
// Consume acknowledgements until latest packet
let mut buf = [0, ..BUF_SIZE];
while self.last_acked < self.seq_nr - 1 {
try!(self.recv_from(buf));
}
Ok(())
}
#[allow(missing_doc)]
#[deprecated = "renamed to `send_to`"]
pub fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
self.send_to(buf, dst)
}
/// Send fast resend request.
///
/// Sends three identical ACK/STATE packets to the remote host, signalling a
/// fast resend request.
fn send_fast_resend_request(&mut self) {
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_STATE);
packet.header.ack_nr = self.ack_nr.to_be();
packet.header.seq_nr = self.seq_nr.to_be();
packet.header.connection_id = self.sender_connection_id.to_be();
for _ in range(0u, 3) {
self.socket.send_to(packet.bytes().as_slice(), self.connected_to);
debug!("sent {}", packet.header);
}
}
/// Handle incoming packet, updating socket state accordingly.
///
/// Returns appropriate reply packet, if needed.
fn handle_packet(&mut self, packet: UtpPacket) -> Option<UtpPacket> {
    // Reset connection if connection id doesn't match and this isn't a SYN
    if packet.get_type() != ST_SYN &&
        !(Int::from_be(packet.header.connection_id) == self.sender_connection_id ||
        Int::from_be(packet.header.connection_id) == self.receiver_connection_id) {
        return Some(self.prepare_reply(&packet.header, ST_RESET));
    }
    // Acknowledge only if the packet strictly follows the previous one;
    // out-of-order packets do not advance `ack_nr`.
    if self.ack_nr + 1 == Int::from_be(packet.header.seq_nr) {
        self.ack_nr = Int::from_be(packet.header.seq_nr);
    }
    match packet.header.get_type() {
        ST_SYN => { // Respond with an ACK and populate own fields
            // Update socket information for new connections: the peer's
            // connection id is adopted as our send id and id + 1 becomes
            // our receive id; our initial sequence number is random.
            self.ack_nr = Int::from_be(packet.header.seq_nr);
            self.seq_nr = random();
            self.receiver_connection_id = Int::from_be(packet.header.connection_id) + 1;
            self.sender_connection_id = Int::from_be(packet.header.connection_id);
            self.state = CS_CONNECTED;
            Some(self.prepare_reply(&packet.header, ST_STATE))
        }
        // Data packets are simply acknowledged with a STATE reply.
        ST_DATA => Some(self.prepare_reply(&packet.header, ST_STATE)),
        ST_FIN => {
            self.state = CS_FIN_RECEIVED;
            // TODO: check if no packets are missing
            // If all packets are received
            self.state = CS_EOF;
            Some(self.prepare_reply(&packet.header, ST_STATE))
        }
        ST_STATE => {
            // RTT estimation: exponentially smoothed RTT (gain 1/8) and
            // mean deviation (gain 1/4), with the retransmission timeout
            // set to rtt + 4 * variance and floored at 500 — the classic
            // RFC 6298-style computation.
            let packet_rtt = Int::from_be(packet.header.timestamp_difference_microseconds) as int;
            let delta = self.rtt - packet_rtt;
            self.rtt_variance += (std::num::abs(delta) - self.rtt_variance) / 4;
            self.rtt += (packet_rtt - self.rtt) / 8;
            self.timeout = std::cmp::max(self.rtt + self.rtt_variance * 4, 500);
            debug!("packet_rtt: {}", packet_rtt);
            debug!("delta: {}", delta);
            debug!("self.rtt_variance: {}", self.rtt_variance);
            debug!("self.rtt: {}", self.rtt);
            debug!("self.timeout: {}", self.timeout);
            // Count consecutive ACKs for the same sequence number to
            // detect a fast-resend request from the peer.
            if packet.header.ack_nr == Int::from_be(self.last_acked) {
                self.duplicate_ack_count += 1;
            } else {
                self.last_acked = Int::from_be(packet.header.ack_nr);
                self.duplicate_ack_count = 1;
            }
            // Three duplicate ACKs, must resend packets since `ack_nr + 1`
            if self.duplicate_ack_count == 3 {
                assert!(!self.send_buffer.is_empty());
                match self.send_buffer.iter().position(|pkt| Int::from_be(pkt.header.seq_nr) == Int::from_be(packet.header.ack_nr) + 1) {
                    None => fail!("Received request to resend packets since {} but none was found in send buffer!", Int::from_be(packet.header.ack_nr) + 1),
                    Some(position) => {
                        // NOTE(review): resent packets are popped off the
                        // send buffer here, so they cannot be resent again
                        // if lost a second time — confirm this is intended.
                        for _ in range(0u, position + 1) {
                            let to_send = self.send_buffer.shift().unwrap();
                            debug!("resending: {}", to_send);
                            self.socket.send_to(to_send.bytes().as_slice(), self.connected_to);
                        }
                    },
                }
            }
            // Success, advance send window: drop every buffered packet
            // that the peer has now acknowledged.
            while !self.send_buffer.is_empty() &&
                Int::from_be(self.send_buffer[0].header.seq_nr) <= self.last_acked {
                self.send_buffer.shift();
            }
            None
        },
        ST_RESET => { // TODO
            self.state = CS_RST_RECEIVED;
            None
        },
    }
}
/// Insert a packet into the socket's buffer.
///
/// The packet is inserted in such a way that the buffer is
/// ordered ascendingly by their sequence number. This allows
/// storing packets that were received out of order.
fn insert_into_buffer(&mut self, packet: UtpPacket) {
    let key = Int::from_be(packet.header.seq_nr);
    // Default to appending at the end; move the insertion point forward
    // to the first buffered packet whose sequence number is >= ours.
    let mut position = self.incoming_buffer.len();
    for (idx, stored) in self.incoming_buffer.iter().enumerate() {
        if Int::from_be(stored.header.seq_nr) >= key {
            position = idx;
            break;
        }
    }
    self.incoming_buffer.insert(position, packet);
}
}
impl Clone for UtpSocket {
    /// Clone duplicates the underlying UDP socket handle and the
    /// connection identifiers/sequence state, but deliberately does NOT
    /// copy transient per-connection data: the incoming and send buffers
    /// start out empty and the duplicate-ACK and RTT estimator fields are
    /// reset to their initial values.
    fn clone(&self) -> UtpSocket {
        UtpSocket {
            socket: self.socket.clone(),
            connected_to: self.connected_to,
            receiver_connection_id: self.receiver_connection_id,
            sender_connection_id: self.sender_connection_id,
            seq_nr: self.seq_nr,
            ack_nr: self.ack_nr,
            state: self.state,
            // Buffered packets are not shared with the clone.
            incoming_buffer: Vec::new(),
            send_buffer: Vec::new(),
            duplicate_ack_count: 0,
            last_acked: 0,
            // RTT estimator restarts from scratch; initial timeout 500.
            rtt: 0,
            rtt_variance: 0,
            timeout: 500,
        }
    }
}
/// Stream interface for UtpSocket.
pub struct UtpStream {
    // Underlying datagram-oriented uTP socket driving this stream.
    socket: UtpSocket,
}
impl UtpStream {
    /// Create a uTP stream listening on the given address.
    #[unstable]
    pub fn bind(addr: SocketAddr) -> IoResult<UtpStream> {
        match UtpSocket::bind(addr) {
            Ok(inner) => Ok(UtpStream { socket: inner }),
            Err(e) => Err(e),
        }
    }
    /// Open a uTP connection to a remote host by hostname or IP address.
    #[unstable]
    pub fn connect(dst: SocketAddr) -> IoResult<UtpStream> {
        use std::io::net::ip::Ipv4Addr;
        // Binding to port 0 lets the operating system pick a free port.
        let local = SocketAddr { ip: Ipv4Addr(127,0,0,1), port: 0 };
        let bound = match UtpSocket::bind(local) {
            Ok(s) => s,
            Err(e) => return Err(e),
        };
        match bound.connect(dst) {
            Ok(connected) => Ok(UtpStream { socket: connected }),
            Err(e) => Err(e),
        }
    }
    /// Gracefully close connection to peer.
    ///
    /// This method allows both peers to receive all packets still in
    /// flight.
    #[unstable]
    pub fn close(&mut self) -> IoResult<()> {
        self.socket.close()
    }
}
impl Reader for UtpStream {
    /// Reads from the underlying uTP socket, discarding the source
    /// address and returning only the number of bytes read.
    fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
        match self.socket.recv_from(buf) {
            Err(e) => Err(e),
            Ok((nread, _peer)) => Ok(nread),
        }
    }
}
impl Writer for UtpStream {
    /// Writes the buffer to the peer this stream's socket is connected to.
    fn write(&mut self, buf: &[u8]) -> IoResult<()> {
        // Copy the destination out first; `send_to` borrows the socket
        // mutably.
        let peer = self.socket.connected_to;
        self.socket.send_to(buf, peer)
    }
}
#[cfg(test)]
mod test {
    use super::{UtpSocket, UtpPacket};
    use super::{ST_STATE, ST_FIN, ST_DATA, ST_RESET, ST_SYN};
    use super::{BUF_SIZE, HEADER_SIZE};
    use super::{CS_CONNECTED, CS_NEW, CS_CLOSED, CS_EOF};
    use std::rand::random;

    // Like assert_eq!, but fails with an "expected X, got Y" message.
    macro_rules! expect_eq(
        ($left:expr, $right:expr) => (
            if !($left == $right) {
                fail!("expected {}, got {}", $right, $left);
            }
        );
    )

    // Unwrap an IoResult, failing the test with the error on Err.
    macro_rules! iotry(
        ($e:expr) => (match $e { Ok(e) => e, Err(e) => fail!("{}", e) })
    )

    // Decoding a raw 20-byte STATE header yields the expected field values.
    #[test]
    fn test_packet_decode() {
        let buf = [0x21, 0x00, 0x41, 0xa8, 0x99, 0x2f, 0xd0, 0x2a, 0x9f, 0x4a,
                   0x26, 0x21, 0x00, 0x10, 0x00, 0x00, 0x3a, 0xf2, 0x6c, 0x79];
        let pkt = UtpPacket::decode(buf);
        assert_eq!(pkt.header.get_version(), 1);
        assert_eq!(pkt.header.get_type(), ST_STATE);
        assert_eq!(pkt.header.extension, 0);
        // Multi-byte header fields are stored big-endian on the wire.
        assert_eq!(Int::from_be(pkt.header.connection_id), 16808);
        assert_eq!(Int::from_be(pkt.header.timestamp_microseconds), 2570047530);
        assert_eq!(Int::from_be(pkt.header.timestamp_difference_microseconds), 2672436769);
        assert_eq!(Int::from_be(pkt.header.wnd_size), ::std::num::pow(2u32, 20));
        assert_eq!(Int::from_be(pkt.header.seq_nr), 15090);
        assert_eq!(Int::from_be(pkt.header.ack_nr), 27769);
        assert_eq!(pkt.len(), buf.len());
        assert!(pkt.payload.is_empty());
    }

    // Encoding a DATA packet with a payload produces the expected bytes.
    #[test]
    fn test_packet_encode() {
        let payload = Vec::from_slice("Hello\n".as_bytes());
        let (timestamp, timestamp_diff): (u32, u32) = (15270793, 1707040186);
        let (connection_id, seq_nr, ack_nr): (u16, u16, u16) = (16808, 15090, 17096);
        let window_size: u32 = 1048576;
        let mut pkt = UtpPacket::new();
        pkt.set_type(ST_DATA);
        pkt.header.timestamp_microseconds = timestamp.to_be();
        pkt.header.timestamp_difference_microseconds = timestamp_diff.to_be();
        pkt.header.connection_id = connection_id.to_be();
        pkt.header.seq_nr = seq_nr.to_be();
        pkt.header.ack_nr = ack_nr.to_be();
        pkt.header.wnd_size = window_size.to_be();
        pkt.payload = payload.clone();
        let header = pkt.header;
        // Hand-computed wire encoding of the packet above.
        let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89,
                          0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00,
                          0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c,
                          0x6f, 0x0a];
        assert_eq!(pkt.len(), buf.len());
        assert_eq!(pkt.len(), HEADER_SIZE + payload.len());
        assert_eq!(pkt.payload, payload);
        assert_eq!(header.get_version(), 1);
        assert_eq!(header.get_type(), ST_DATA);
        assert_eq!(header.extension, 0);
        assert_eq!(Int::from_be(header.connection_id), connection_id);
        assert_eq!(Int::from_be(header.seq_nr), seq_nr);
        assert_eq!(Int::from_be(header.ack_nr), ack_nr);
        assert_eq!(Int::from_be(header.wnd_size), window_size);
        assert_eq!(Int::from_be(header.timestamp_microseconds), timestamp);
        assert_eq!(Int::from_be(header.timestamp_difference_microseconds), timestamp_diff);
        assert_eq!(pkt.bytes(), Vec::from_slice(buf));
    }

    // decode followed by bytes() round-trips to the original buffer.
    #[test]
    fn test_reversible() {
        let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89,
                          0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00,
                          0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c,
                          0x6f, 0x0a];
        assert_eq!(UtpPacket::decode(buf).bytes().as_slice(), buf);
    }

    // Full connect handshake between two sockets over IPv4 loopback.
    #[test]
    fn test_socket_ipv4() {
        use std::io::test::next_test_ip4;
        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
        let client = iotry!(UtpSocket::bind(clientAddr));
        let mut server = iotry!(UtpSocket::bind(serverAddr));
        assert!(server.state == CS_NEW);
        assert!(client.state == CS_NEW);
        // Check proper difference in client's send connection id and receive connection id
        assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);
        spawn(proc() {
            let client = iotry!(client.connect(serverAddr));
            assert!(client.state == CS_CONNECTED);
            assert_eq!(client.connected_to, serverAddr);
            drop(client);
        });
        let mut buf = [0u8, ..BUF_SIZE];
        match server.recv_from(buf) {
            e => println!("{}", e),
        }
        // After establishing a new connection, the server's ids are a mirror of the client's.
        assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);
        assert_eq!(server.connected_to, clientAddr);
        assert!(server.state == CS_CONNECTED);
        drop(server);
    }

    // Receiving on a closed socket walks through EOF and then Closed errors.
    #[test]
    fn test_recvfrom_on_closed_socket() {
        use std::io::test::next_test_ip4;
        use std::io::{Closed, EndOfFile};
        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
        let client = iotry!(UtpSocket::bind(clientAddr));
        let mut server = iotry!(UtpSocket::bind(serverAddr));
        assert!(server.state == CS_NEW);
        assert!(client.state == CS_NEW);
        spawn(proc() {
            let mut client = iotry!(client.connect(serverAddr));
            assert!(client.state == CS_CONNECTED);
            assert_eq!(client.close(), Ok(()));
            drop(client);
        });
        // Make the server listen for incoming connections
        let mut buf = [0u8, ..BUF_SIZE];
        let _resp = server.recv_from(buf);
        assert!(server.state == CS_CONNECTED);
        // Closing the connection is fine
        match server.recv_from(buf) {
            Err(e) => fail!("{}", e),
            _ => {},
        }
        expect_eq!(server.state, CS_EOF);
        // Trying to listen on the socket after closing it raises an
        // EOF error
        match server.recv_from(buf) {
            Err(e) => expect_eq!(e.kind, EndOfFile),
            v => fail!("expected {}, got {}", EndOfFile, v),
        }
        expect_eq!(server.state, CS_CLOSED);
        // Trying again raises a Closed error
        match server.recv_from(buf) {
            Err(e) => expect_eq!(e.kind, Closed),
            v => fail!("expected {}, got {}", Closed, v),
        }
        drop(server);
    }

    // Sending on a closed socket raises a Closed error.
    #[test]
    fn test_sendto_on_closed_socket() {
        use std::io::test::next_test_ip4;
        use std::io::Closed;
        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
        let client = iotry!(UtpSocket::bind(clientAddr));
        let mut server = iotry!(UtpSocket::bind(serverAddr));
        assert!(server.state == CS_NEW);
        assert!(client.state == CS_NEW);
        spawn(proc() {
            let client = iotry!(client.connect(serverAddr));
            assert!(client.state == CS_CONNECTED);
            let mut buf = [0u8, ..BUF_SIZE];
            let mut client = client;
            iotry!(client.recv_from(buf));
        });
        // Make the server listen for incoming connections
        let mut buf = [0u8, ..BUF_SIZE];
        let (_read, _src) = iotry!(server.recv_from(buf));
        assert!(server.state == CS_CONNECTED);
        iotry!(server.close());
        expect_eq!(server.state, CS_CLOSED);
        // Trying to send to the socket after closing it raises an
        // error
        match server.send_to(buf, clientAddr) {
            Err(e) => expect_eq!(e.kind, Closed),
            v => fail!("expected {}, got {}", Closed, v),
        }
        drop(server);
    }

    // STATE replies acknowledge but do not advance sequence numbers.
    #[test]
    fn test_acks_on_socket() {
        use std::io::test::next_test_ip4;
        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
        let (tx, rx) = channel();
        let client = iotry!(UtpSocket::bind(clientAddr));
        let server = iotry!(UtpSocket::bind(serverAddr));
        spawn(proc() {
            // Make the server listen for incoming connections
            let mut server = server;
            let mut buf = [0u8, ..BUF_SIZE];
            let _resp = server.recv_from(buf);
            // Report the server's sequence number back to the main task.
            tx.send(server.seq_nr);
            // Close the connection
            iotry!(server.recv_from(buf));
            drop(server);
        });
        let mut client = iotry!(client.connect(serverAddr));
        assert!(client.state == CS_CONNECTED);
        let sender_seq_nr = rx.recv();
        let ack_nr = client.ack_nr;
        assert!(ack_nr != 0);
        assert!(ack_nr == sender_seq_nr);
        assert_eq!(client.close(), Ok(()));
        // The reply to both connect (SYN) and close (FIN) should be
        // STATE packets, which don't increase the sequence number
        // and, hence, the receiver's acknowledgement number.
        assert!(client.ack_nr == ack_nr);
        drop(client);
    }

    // Drives handle_packet directly through connect, data and teardown.
    #[test]
    fn test_handle_packet() {
        use std::io::test::next_test_ip4;
        //fn test_connection_setup() {
        let initial_connection_id: u16 = random();
        let sender_connection_id = initial_connection_id + 1;
        let serverAddr = next_test_ip4();
        let mut socket = iotry!(UtpSocket::bind(serverAddr));
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_SYN);
        packet.header.connection_id = initial_connection_id.to_be();
        let sent = packet.header;
        // Do we have a response?
        let response = socket.handle_packet(packet.clone());
        assert!(response.is_some());
        // Is is of the correct type?
        let response = response.unwrap();
        assert!(response.get_type() == ST_STATE);
        // Same connection id on both ends during connection establishment
        assert!(response.header.connection_id == sent.connection_id);
        // Response acknowledges SYN
        assert!(response.header.ack_nr == sent.seq_nr);
        // No payload?
        assert!(response.payload.is_empty());
        //}
        // ---------------------------------
        // fn test_connection_usage() {
        let old_packet = packet;
        let old_response = response;
        let mut packet = UtpPacket::new();
        packet.set_type(ST_DATA);
        packet.header.connection_id = sender_connection_id.to_be();
        packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
        packet.header.ack_nr = old_response.header.seq_nr;
        let sent = packet.header;
        let response = socket.handle_packet(packet.clone());
        assert!(response.is_some());
        let response = response.unwrap();
        assert!(response.get_type() == ST_STATE);
        // Sender (i.e., who initated connection and sent SYN) has connection id
        // equal to initial connection id + 1
        // Receiver (i.e., who accepted connection) has connection id equal to
        // initial connection id
        assert!(Int::from_be(response.header.connection_id) == initial_connection_id);
        assert!(Int::from_be(response.header.connection_id) == Int::from_be(sent.connection_id) - 1);
        // Previous packets should be ack'ed
        assert!(Int::from_be(response.header.ack_nr) == Int::from_be(sent.seq_nr));
        // Responses with no payload should not increase the sequence number
        assert!(response.payload.is_empty());
        assert!(Int::from_be(response.header.seq_nr) == Int::from_be(old_response.header.seq_nr));
        // }
        //fn test_connection_teardown() {
        let old_packet = packet;
        let old_response = response;
        let mut packet = UtpPacket::new();
        packet.set_type(ST_FIN);
        packet.header.connection_id = sender_connection_id.to_be();
        packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
        packet.header.ack_nr = old_response.header.seq_nr;
        let sent = packet.header;
        let response = socket.handle_packet(packet);
        assert!(response.is_some());
        let response = response.unwrap();
        assert!(response.get_type() == ST_STATE);
        // FIN packets have no payload but the sequence number shouldn't increase
        assert!(Int::from_be(sent.seq_nr) == Int::from_be(old_packet.header.seq_nr) + 1);
        // Nor should the ACK packet's sequence number
        assert!(response.header.seq_nr == old_response.header.seq_nr);
        // FIN should be acknowledged
        assert!(response.header.ack_nr == sent.seq_nr);
        //}
    }

    // Keepalive (STATE) packets must not trigger a reply.
    #[test]
    fn test_response_to_keepalive_ack() {
        use std::io::test::next_test_ip4;
        // Boilerplate test setup
        let initial_connection_id: u16 = random();
        let serverAddr = next_test_ip4();
        let mut socket = iotry!(UtpSocket::bind(serverAddr));
        // Establish connection
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_SYN);
        packet.header.connection_id = initial_connection_id.to_be();
        let response = socket.handle_packet(packet.clone());
        assert!(response.is_some());
        let response = response.unwrap();
        assert!(response.get_type() == ST_STATE);
        let old_packet = packet;
        let old_response = response;
        // Now, send a keepalive packet
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_STATE);
        packet.header.connection_id = initial_connection_id.to_be();
        packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
        packet.header.ack_nr = old_response.header.seq_nr;
        let response = socket.handle_packet(packet.clone());
        assert!(response.is_none());
        // Send a second keepalive packet, identical to the previous one
        let response = socket.handle_packet(packet.clone());
        assert!(response.is_none());
    }

    // A packet with a bogus connection id must be answered with RESET.
    #[test]
    fn test_response_to_wrong_connection_id() {
        use std::io::test::next_test_ip4;
        // Boilerplate test setup
        let initial_connection_id: u16 = random();
        let serverAddr = next_test_ip4();
        let mut socket = iotry!(UtpSocket::bind(serverAddr));
        // Establish connection
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_SYN);
        packet.header.connection_id = initial_connection_id.to_be();
        let response = socket.handle_packet(packet.clone());
        assert!(response.is_some());
        assert!(response.unwrap().get_type() == ST_STATE);
        // Now, disrupt connection with a packet with an incorrect connection id
        let new_connection_id = initial_connection_id.to_le();
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_STATE);
        packet.header.connection_id = new_connection_id;
        let response = socket.handle_packet(packet.clone());
        assert!(response.is_some());
        let response = response.unwrap();
        assert!(response.get_type() == ST_RESET);
        assert!(response.header.ack_nr == packet.header.seq_nr);
    }

    // Connect/close over the stream interface with no payload.
    #[test]
    fn test_utp_stream() {
        use super::UtpStream;
        use std::io::test::next_test_ip4;
        let serverAddr = next_test_ip4();
        let mut server = iotry!(UtpStream::bind(serverAddr));
        spawn(proc() {
            let mut client = iotry!(UtpStream::connect(serverAddr));
            iotry!(client.close());
        });
        iotry!(server.read_to_end());
    }

    // Payload small enough to fit in a single packet.
    #[test]
    fn test_utp_stream_small_data() {
        use super::UtpStream;
        use std::io::test::next_test_ip4;
        // Fits in a packet
        static len: uint = 1024;
        let data = Vec::from_fn(len, |idx| idx as u8);
        expect_eq!(len, data.len());
        let d = data.clone();
        let serverAddr = next_test_ip4();
        let mut server = UtpStream::bind(serverAddr);
        spawn(proc() {
            let mut client = iotry!(UtpStream::connect(serverAddr));
            iotry!(client.write(d.as_slice()));
            iotry!(client.close());
        });
        let read = iotry!(server.read_to_end());
        assert!(!read.is_empty());
        expect_eq!(read.len(), data.len());
        expect_eq!(read, data);
    }

    // Payload large enough to require fragmentation over many packets.
    #[test]
    fn test_utp_stream_large_data() {
        use super::UtpStream;
        use std::io::test::next_test_ip4;
        // Has to be sent over several packets
        static len: uint = 1024 * 1024;
        let data = Vec::from_fn(len, |idx| idx as u8);
        expect_eq!(len, data.len());
        let d = data.clone();
        let serverAddr = next_test_ip4();
        let mut server = UtpStream::bind(serverAddr);
        spawn(proc() {
            let mut client = iotry!(UtpStream::connect(serverAddr));
            iotry!(client.write(d.as_slice()));
            iotry!(client.close());
        });
        let read = iotry!(server.read_to_end());
        assert!(!read.is_empty());
        expect_eq!(read.len(), data.len());
        expect_eq!(read, data);
    }

    // A read after the stream reached EOF must fail with Closed.
    #[test]
    fn test_utp_stream_successive_reads() {
        use super::UtpStream;
        use std::io::test::next_test_ip4;
        use std::io::Closed;
        static len: uint = 1024;
        let data: Vec<u8> = Vec::from_fn(len, |idx| idx as u8);
        expect_eq!(len, data.len());
        let d = data.clone();
        let serverAddr = next_test_ip4();
        let mut server = UtpStream::bind(serverAddr);
        spawn(proc() {
            let mut client = iotry!(UtpStream::connect(serverAddr));
            iotry!(client.write(d.as_slice()));
            iotry!(client.close());
        });
        iotry!(server.read_to_end());
        let mut buf = [0u8, ..4096];
        match server.read(buf) {
            Err(ref e) if e.kind == Closed => {},
            _ => fail!("should have failed with Closed"),
        };
    }

    // handle_packet only acknowledges in-order packets; out-of-order ones
    // are held back until the gap is filled.
    #[test]
    fn test_unordered_packets() {
        use std::io::test::next_test_ip4;
        // Boilerplate test setup
        let initial_connection_id: u16 = random();
        let serverAddr = next_test_ip4();
        let mut socket = iotry!(UtpSocket::bind(serverAddr));
        // Establish connection
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_SYN);
        packet.header.connection_id = initial_connection_id.to_be();
        let response = socket.handle_packet(packet.clone());
        assert!(response.is_some());
        let response = response.unwrap();
        assert!(response.get_type() == ST_STATE);
        let old_packet = packet;
        let old_response = response;
        let mut window: Vec<UtpPacket> = Vec::new();
        // Now, send a keepalive packet
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_DATA);
        packet.header.connection_id = initial_connection_id.to_be();
        packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
        packet.header.ack_nr = old_response.header.seq_nr;
        packet.payload = vec!(1,2,3);
        window.push(packet);
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_DATA);
        packet.header.connection_id = initial_connection_id.to_be();
        packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 2).to_be();
        packet.header.ack_nr = old_response.header.seq_nr;
        packet.payload = vec!(4,5,6);
        window.push(packet);
        // Send packets in reverse order
        let response = socket.handle_packet(window[1].clone());
        assert!(response.is_some());
        let response = response.unwrap();
        // The out-of-order packet must NOT be acknowledged yet.
        assert!(response.header.ack_nr != window[1].header.seq_nr);
        let response = socket.handle_packet(window[0].clone());
        assert!(response.is_some());
    }

    // Out-of-order delivery over real sockets still reassembles in order.
    #[test]
    fn test_socket_unordered_packets() {
        use std::io::test::next_test_ip4;
        use super::UtpStream;
        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
        let client = iotry!(UtpSocket::bind(clientAddr));
        let mut server = iotry!(UtpSocket::bind(serverAddr));
        assert!(server.state == CS_NEW);
        assert!(client.state == CS_NEW);
        // Check proper difference in client's send connection id and receive connection id
        assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);
        spawn(proc() {
            let client = iotry!(client.connect(serverAddr));
            assert!(client.state == CS_CONNECTED);
            let mut s = client.socket;
            let mut window: Vec<UtpPacket> = Vec::new();
            // Build four 3-byte DATA packets followed by a FIN.
            let mut i = 0;
            for data in Vec::from_fn(12, |idx| idx as u8 + 1).as_slice().chunks(3) {
                let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
                packet.set_type(ST_DATA);
                packet.header.connection_id = client.sender_connection_id.to_be();
                packet.header.seq_nr = (client.seq_nr + i).to_be();
                packet.header.ack_nr = client.ack_nr.to_be();
                packet.payload = Vec::from_slice(data);
                window.push(packet);
                i += 1;
            }
            let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
            packet.set_type(ST_FIN);
            packet.header.connection_id = client.sender_connection_id.to_be();
            packet.header.seq_nr = (client.seq_nr + 2).to_be();
            packet.header.ack_nr = client.ack_nr.to_be();
            window.push(packet);
            // Deliberately transmit the window in scrambled order.
            iotry!(s.send_to(window[3].bytes().as_slice(), serverAddr));
            iotry!(s.send_to(window[2].bytes().as_slice(), serverAddr));
            iotry!(s.send_to(window[1].bytes().as_slice(), serverAddr));
            iotry!(s.send_to(window[0].bytes().as_slice(), serverAddr));
            iotry!(s.send_to(window[4].bytes().as_slice(), serverAddr));
            for _ in range(0u, 2) {
                let mut buf = [0, ..BUF_SIZE];
                iotry!(s.recv_from(buf));
            }
        });
        let mut buf = [0u8, ..BUF_SIZE];
        match server.recv_from(buf) {
            e => println!("{}", e),
        }
        // After establishing a new connection, the server's ids are a mirror of the client's.
        assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);
        assert!(server.state == CS_CONNECTED);
        let mut stream = UtpStream { socket: server };
        let expected: Vec<u8> = Vec::from_fn(12, |idx| idx as u8 + 1);
        match stream.read_to_end() {
            Ok(data) => {
                expect_eq!(data.len(), expected.len());
                expect_eq!(data, expected);
            },
            Err(e) => fail!("{}", e),
        }
    }

    // A SYN establishes the connection but must not land in the incoming
    // packet buffer.
    #[test]
    fn test_socket_should_not_buffer_syn_packets() {
        use std::io::test::next_test_ip4;
        use std::io::net::udp::UdpSocket;
        use super::UtpSocket;
        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
        let server = iotry!(UtpSocket::bind(serverAddr));
        let client = iotry!(UdpSocket::bind(clientAddr));
        // Raw, pre-encoded SYN packet sent over a plain UDP socket.
        let test_syn_raw = [0x41, 0x00, 0x41, 0xa7, 0x00, 0x00, 0x00,
                            0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x3a,
                            0xf1, 0x00, 0x00];
        let test_syn_pkt = UtpPacket::decode(test_syn_raw);
        let seq_nr = Int::from_be(test_syn_pkt.header.seq_nr);
        spawn(proc() {
            let mut client = client;
            iotry!(client.send_to(test_syn_raw, serverAddr));
            client.set_timeout(Some(10));
            let mut buf = [0, ..BUF_SIZE];
            let packet = match client.recv_from(buf) {
                Ok((nread, _src)) => UtpPacket::decode(buf.slice_to(nread)),
                Err(e) => fail!("{}", e),
            };
            expect_eq!(packet.header.ack_nr, seq_nr.to_be());
            drop(client);
        });
        let mut server = server;
        let mut buf = [0, ..20];
        iotry!(server.recv_from(buf));
        assert!(server.ack_nr != 0);
        expect_eq!(server.ack_nr, seq_nr);
        assert!(server.incoming_buffer.is_empty());
    }

    // Three duplicate ACKs must make the peer retransmit the lost packet.
    #[test]
    fn test_response_to_triple_ack() {
        use std::io::test::next_test_ip4;
        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
        let mut server = iotry!(UtpSocket::bind(serverAddr));
        let client = iotry!(UtpSocket::bind(clientAddr));
        // Fits in a packet
        static len: uint = 1024;
        let data = Vec::from_fn(len, |idx| idx as u8);
        let d = data.clone();
        expect_eq!(len, data.len());
        spawn(proc() {
            let mut client = iotry!(client.connect(serverAddr));
            iotry!(client.send_to(d.as_slice(), serverAddr));
            iotry!(client.close());
        });
        let mut buf = [0, ..BUF_SIZE];
        // Expect SYN
        iotry!(server.recv_from(buf));
        // Receive data
        let mut data_packet;
        match server.socket.recv_from(buf) {
            Ok((read, _src)) => {
                data_packet = UtpPacket::decode(buf.slice_to(read));
                assert!(data_packet.get_type() == ST_DATA);
                expect_eq!(data_packet.payload, data);
                assert_eq!(data_packet.payload.len(), data.len());
            },
            Err(e) => fail!("{}", e),
        }
        let data_packet = data_packet;
        // Send triple ACK
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_STATE);
        packet.header.seq_nr = server.seq_nr.to_be();
        packet.header.ack_nr = (Int::from_be(data_packet.header.seq_nr) - 1).to_be();
        packet.header.connection_id = server.sender_connection_id.to_be();
        for _ in range(0u, 3) {
            iotry!(server.socket.send_to(packet.bytes().as_slice(), clientAddr));
        }
        // Receive data again and check that it's the same we reported as missing
        match server.socket.recv_from(buf) {
            Ok((0, _)) => fail!("Received 0 bytes from socket"),
            Ok((read, _src)) => {
                let packet = UtpPacket::decode(buf.slice_to(read));
                assert_eq!(packet.get_type(), ST_DATA);
                assert_eq!(Int::from_be(packet.header.seq_nr), Int::from_be(data_packet.header.seq_nr));
                assert!(packet.payload == data_packet.payload);
                let response = server.handle_packet(packet).unwrap();
                iotry!(server.socket.send_to(response.bytes().as_slice(), server.connected_to));
            },
            Err(e) => fail!("{}", e),
        }
        // Receive close
        iotry!(server.recv_from(buf));
    }

    // A lost (here: deliberately discarded) packet is eventually
    // retransmitted after the timeout.
    #[test]
    fn test_socket_timeout_request() {
        use std::io::test::next_test_ip4;
        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
        let client = iotry!(UtpSocket::bind(clientAddr));
        let mut server = iotry!(UtpSocket::bind(serverAddr));
        let len = 512;
        let data = Vec::from_fn(len, |idx| idx as u8);
        let d = data.clone();
        assert!(server.state == CS_NEW);
        assert!(client.state == CS_NEW);
        // Check proper difference in client's send connection id and receive connection id
        assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);
        spawn(proc() {
            let mut client = iotry!(client.connect(serverAddr));
            assert!(client.state == CS_CONNECTED);
            assert_eq!(client.connected_to, serverAddr);
            iotry!(client.send_to(d.as_slice(), serverAddr));
            drop(client);
        });
        let mut buf = [0u8, ..BUF_SIZE];
        match server.recv_from(buf) {
            e => println!("{}", e),
        }
        // After establishing a new connection, the server's ids are a mirror of the client's.
        assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);
        assert_eq!(server.connected_to, clientAddr);
        assert!(server.state == CS_CONNECTED);
        // Purposefully read from UDP socket directly and discard it, in order
        // to behave as if the packet was lost and thus trigger the timeout
        // handling in the *next* call to `UtpSocket.recv_from`.
        iotry!(server.socket.recv_from(buf));
        // Now wait for the previously discarded packet
        loop {
            match server.recv_from(buf) {
                Ok((0, _)) => continue,
                Ok(_) => break,
                Err(e) => fail!("{}", e),
            }
        }
        drop(server);
    }
}
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `version` module gives you tools to create and compare SemVer-compliant versions.
use std::char;
use std::cmp;
use std::fmt::Show;
use std::fmt;
use std::hash;
/// An identifier in the pre-release or build metadata.
///
/// See sections 9 and 10 of the spec for more about pre-release identifers and build metadata.
#[deriving(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Identifier {
    /// An identifier that's solely numbers.
    ///
    /// Listed first so the derived `Ord` ranks numeric identifiers below
    /// alphanumeric ones, matching SemVer precedence rules.
    Numeric(u64),
    /// An identifier with letters and numbers.
    AlphaNumeric(String)
}
impl fmt::Show for Identifier {
    /// Formats the identifier exactly as it appears inside a version
    /// string: numeric identifiers print as the number, alphanumeric
    /// identifiers print verbatim.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            AlphaNumeric(ref text) => text.fmt(f),
            Numeric(ref value) => value.fmt(f)
        }
    }
}
/// Represents a version number conforming to the semantic versioning scheme.
#[deriving(Clone, Eq)]
pub struct Version {
    /// The major version, to be incremented on incompatible changes.
    pub major: uint,
    /// The minor version, to be incremented when functionality is added in a
    /// backwards-compatible manner.
    pub minor: uint,
    /// The patch version, to be incremented when backwards-compatible bug
    /// fixes are made.
    pub patch: uint,
    /// The pre-release version identifier, if one exists.
    /// Empty when the version has no pre-release part.
    pub pre: Vec<Identifier>,
    /// The build metadata, ignored when determining version precedence.
    /// Empty when the version has no build part.
    pub build: Vec<Identifier>,
}
/// A `ParseError` is returned as the `Err` side of a `Result` when a version is attempted
/// to be parsed.
#[deriving(Clone,PartialEq,Show,PartialOrd)]
pub enum ParseError {
    /// All identifiers must be ASCII.
    NonAsciiIdentifier,
    /// The version was mis-parsed: a version was extracted but rendering it
    /// back did not reproduce the input. Carries the parsed version and the
    /// offending input string.
    IncorrectParse(Version, String),
    /// Any other failure.
    GenericFailure,
}
impl Version {
    /// Parse a string into a semver object.
    ///
    /// Returns `Err(NonAsciiIdentifier)` for non-ASCII input,
    /// `Err(GenericFailure)` when no version could be extracted, and
    /// `Err(IncorrectParse(..))` when a version was parsed but its
    /// canonical rendering does not match the (trimmed) input.
    pub fn parse(s: &str) -> Result<Version, ParseError> {
        if !s.is_ascii() {
            return Err(NonAsciiIdentifier)
        }
        // Leading/trailing whitespace is tolerated.
        let s = s.trim();
        let v = parse_iter(&mut s.chars());
        match v {
            Some(v) => {
                // Round-trip check: the parser itself ignores trailing
                // garbage, so reject anything that doesn't re-render to
                // exactly the input.
                if v.to_string().equiv(&s) {
                    Ok(v)
                } else {
                    Err(IncorrectParse(v, s.to_string()))
                }
            }
            None => Err(GenericFailure)
        }
    }
}
impl fmt::Show for Version {
    /// Renders the canonical form: `major.minor.patch`, then
    /// `-pre1.pre2...` if pre-release identifiers exist, then
    /// `+build1.build2...` if build metadata exists.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Statement-position `try!` (a match expression), so no trailing
        // semicolon is required here.
        try!(write!(f, "{}.{}.{}", self.major, self.minor, self.patch))
        if !self.pre.is_empty() {
            try!(write!(f, "-"));
            for (i, x) in self.pre.iter().enumerate() {
                // Dot-separate identifiers after the first.
                if i != 0 { try!(write!(f, ".")) };
                try!(x.fmt(f));
            }
        }
        if !self.build.is_empty() {
            try!(write!(f, "+"));
            for (i, x) in self.build.iter().enumerate() {
                if i != 0 { try!(write!(f, ".")) };
                try!(x.fmt(f));
            }
        }
        Ok(())
    }
}
impl cmp::PartialEq for Version {
    /// Equality compares major, minor, patch and the pre-release
    /// identifiers. Build metadata is deliberately excluded: including it
    /// would allow versions v1 and v2 with !(v1 < v2) && !(v1 > v2) &&
    /// v1 != v2, violating strict total ordering.
    #[inline]
    fn eq(&self, other: &Version) -> bool {
        if self.major != other.major { return false; }
        if self.minor != other.minor { return false; }
        if self.patch != other.patch { return false; }
        self.pre == other.pre
    }
}
impl cmp::PartialOrd for Version {
    // Delegates to the total ordering defined by `Ord`; always Some.
    fn partial_cmp(&self, other: &Version) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl cmp::Ord for Version {
    /// Total order over versions: compare major, then minor, then patch,
    /// then pre-release identifiers. Build metadata never participates.
    fn cmp(&self, other: &Version) -> Ordering {
        match self.major.cmp(&other.major) {
            Equal => {}
            r => return r,
        }
        match self.minor.cmp(&other.minor) {
            Equal => {}
            r => return r,
        }
        match self.patch.cmp(&other.patch) {
            Equal => {}
            r => return r,
        }
        // NB: semver spec says 0.0.0-pre < 0.0.0
        // but the version of ord defined for vec
        // says that [] < [pre] so we alter it here
        match (self.pre.len(), other.pre.len()) {
            (0, 0) => Equal,
            (0, _) => Greater, // release outranks any pre-release
            (_, 0) => Less,
            (_, _) => self.pre.cmp(&other.pre)
        }
    }
}
impl<S: hash::Writer> hash::Hash<S> for Version {
    // Hash the same fields `eq` compares (major, minor, patch, pre) and
    // omit build metadata, so equal versions always hash equally.
    fn hash(&self, into: &mut S) {
        self.major.hash(into);
        self.minor.hash(into);
        self.patch.hash(into);
        self.pre.hash(into);
    }
}
// Accumulate characters from `rdr` while `pred` holds. Returns the
// collected prefix together with the first character that stopped the
// scan (`None` at end of input), so the caller can resume from there.
fn take_nonempty_prefix<T:Iterator<char>>(rdr: &mut T, pred: |char| -> bool)
                        -> (String, Option<char>) {
    let mut prefix = String::new();
    let mut lookahead = rdr.next();
    loop {
        let c = match lookahead {
            Some(c) if pred(c) => c,
            _ => break,
        };
        prefix.push(c);
        lookahead = rdr.next();
    }
    (prefix, lookahead)
}
// Read a run of digits from `rdr` and parse it as a `uint`. The second
// tuple element is the character that stopped the scan; `None` overall
// if there were no digits or the number failed to parse.
fn take_num<T: Iterator<char>>(rdr: &mut T) -> Option<(uint, Option<char>)> {
    let (digits, stop) = take_nonempty_prefix(rdr, char::is_digit);
    from_str::<uint>(digits[]).map(|num| (num, stop))
}
// Scan one pre-release/build identifier. All-digit identifiers become
// `Numeric`; anything containing a letter becomes `AlphaNumeric`. The
// second tuple element is the character that stopped the scan.
fn take_ident<T: Iterator<char>>(rdr: &mut T) -> Option<(Identifier, Option<char>)> {
    let (word, stop) = take_nonempty_prefix(rdr, char::is_alphanumeric);
    if word[].chars().all(char::is_digit) {
        from_str::<u64>(word[]).map(|num| (Numeric(num), stop))
    } else {
        Some((AlphaNumeric(word), stop))
    }
}
// Succeed (with unit) exactly when the lookahead character equals `c`;
// used to require separators like '.' between version components.
fn expect(ch: Option<char>, c: char) -> Option<()> {
    if ch == Some(c) { Some(()) } else { None }
}
// Core parser: consumes `major.minor.patch[-pre][+build]` from the
// character iterator. Trailing garbage is NOT rejected here; the caller
// (`Version::parse`) relies on a round-trip comparison to catch it.
fn parse_iter<T: Iterator<char>>(rdr: &mut T) -> Option<Version> {
    // Chain: number, '.', number, '.', number — any failure yields None.
    let maybe_vers = take_num(rdr).and_then(|(major, ch)| {
        expect(ch, '.').and_then(|_| Some(major))
    }).and_then(|major| {
        take_num(rdr).and_then(|(minor, ch)| {
            expect(ch, '.').and_then(|_| Some((major, minor)))
        })
    }).and_then(|(major, minor)| {
        take_num(rdr).and_then(|(patch, ch)| {
            // `ch` is the lookahead character after the patch number;
            // it decides whether pre-release/build sections follow.
            Some((major, minor, patch, ch))
        })
    });
    let (major, minor, patch, ch) = match maybe_vers {
        Some((a, b, c, d)) => (a, b, c, d),
        None => return None
    };
    let mut pre = vec!();
    let mut build = vec!();
    let mut ch = ch;
    // '-' introduces dot-separated pre-release identifiers.
    if ch == Some('-') {
        loop {
            let (id, c) = match take_ident(rdr) {
                Some((id, c)) => (id, c),
                None => return None
            };
            pre.push(id);
            ch = c;
            if ch != Some('.') { break; }
        }
    }
    // '+' introduces dot-separated build metadata identifiers.
    if ch == Some('+') {
        loop {
            let (id, c) = match take_ident(rdr) {
                Some((id, c)) => (id, c),
                None => return None
            };
            build.push(id);
            ch = c;
            if ch != Some('.') { break; }
        }
    }
    Some(Version {
        major: major,
        minor: minor,
        patch: patch,
        pre: pre,
        build: build,
    })
}
#[cfg(test)]
mod test {
    // Tests covering parsing, equality, ordering, formatting, and the
    // spec-mandated precedence of pre-release identifiers.
    use super::{
        Version,
        Numeric,
        AlphaNumeric,
        IncorrectParse,
        GenericFailure,
    };

    #[test]
    fn test_parse() {
        // Inputs that are not even shaped like a version fail outright.
        assert_eq!(Version::parse(""), Err(GenericFailure));
        assert_eq!(Version::parse(" "), Err(GenericFailure));
        assert_eq!(Version::parse("1"), Err(GenericFailure));
        assert_eq!(Version::parse("1.2"), Err(GenericFailure));
        // NOTE(review): the next three assertions duplicate earlier ones.
        assert_eq!(Version::parse("1.2"), Err(GenericFailure));
        assert_eq!(Version::parse("1"), Err(GenericFailure));
        assert_eq!(Version::parse("1.2"), Err(GenericFailure));
        assert_eq!(Version::parse("1.2.3-"), Err(GenericFailure));
        assert_eq!(Version::parse("a.b.c"), Err(GenericFailure));
        // Trailing garbage parses a prefix but fails the round-trip check,
        // producing IncorrectParse with the successfully parsed portion.
        let version = Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(),
            build: vec!(),
        };
        let error = Err(IncorrectParse(version, "1.2.3 abc".to_string()));
        assert_eq!(Version::parse("1.2.3 abc"), error);
        assert!(Version::parse("1.2.3") == Ok(Version {
            major: 1,
            minor: 2u,
            patch: 3u,
            pre: vec!(),
            build: vec!(),
        }));
        // Surrounding whitespace is trimmed before parsing.
        assert!(Version::parse("  1.2.3  ") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(),
            build: vec!(),
        }));
        assert!(Version::parse("1.2.3-alpha1") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(AlphaNumeric("alpha1".to_string())),
            build: vec!(),
        }));
        assert!(Version::parse("  1.2.3-alpha1  ") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(AlphaNumeric("alpha1".to_string())),
            build: vec!()
        }));
        assert!(Version::parse("1.2.3+build5") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(),
            build: vec!(AlphaNumeric("build5".to_string()))
        }));
        assert!(Version::parse("  1.2.3+build5  ") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(),
            build: vec!(AlphaNumeric("build5".to_string()))
        }));
        assert!(Version::parse("1.2.3-alpha1+build5") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(AlphaNumeric("alpha1".to_string())),
            build: vec!(AlphaNumeric("build5".to_string()))
        }));
        assert!(Version::parse("  1.2.3-alpha1+build5  ") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(AlphaNumeric("alpha1".to_string())),
            build: vec!(AlphaNumeric("build5".to_string()))
        }));
        // Mixed numeric/alphanumeric identifiers in both lists.
        assert!(Version::parse("1.2.3-1.alpha1.9+build5.7.3aedf  ") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(Numeric(1),AlphaNumeric("alpha1".to_string()),Numeric(9)),
            build: vec!(AlphaNumeric("build5".to_string()),
                        Numeric(7),
                        AlphaNumeric("3aedf".to_string()))
        }));
    }

    #[test]
    fn test_eq() {
        assert_eq!(Version::parse("1.2.3"), Version::parse("1.2.3"));
        assert_eq!(Version::parse("1.2.3-alpha1"), Version::parse("1.2.3-alpha1"));
        assert_eq!(Version::parse("1.2.3+build.42"), Version::parse("1.2.3+build.42"));
        assert_eq!(Version::parse("1.2.3-alpha1+42"), Version::parse("1.2.3-alpha1+42"));
        // Build metadata is ignored by equality.
        assert_eq!(Version::parse("1.2.3+23"), Version::parse("1.2.3+42"));
    }

    #[test]
    fn test_ne() {
        assert!(Version::parse("0.0.0") != Version::parse("0.0.1"));
        assert!(Version::parse("0.0.0") != Version::parse("0.1.0"));
        assert!(Version::parse("0.0.0") != Version::parse("1.0.0"));
        assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta"));
    }

    #[test]
    fn test_show() {
        assert_eq!(format!("{}", Version::parse("1.2.3").unwrap()),
                   "1.2.3".to_string());
        assert_eq!(format!("{}", Version::parse("1.2.3-alpha1").unwrap()),
                   "1.2.3-alpha1".to_string());
        assert_eq!(format!("{}", Version::parse("1.2.3+build.42").unwrap()),
                   "1.2.3+build.42".to_string());
        assert_eq!(format!("{}", Version::parse("1.2.3-alpha1+42").unwrap()),
                   "1.2.3-alpha1+42".to_string());
    }

    #[test]
    fn test_to_string() {
        assert_eq!(Version::parse("1.2.3").unwrap().to_string(), "1.2.3".to_string());
        assert_eq!(Version::parse("1.2.3-alpha1").unwrap().to_string(), "1.2.3-alpha1".to_string());
        assert_eq!(Version::parse("1.2.3+build.42").unwrap().to_string(), "1.2.3+build.42".to_string());
        assert_eq!(Version::parse("1.2.3-alpha1+42").unwrap().to_string(), "1.2.3-alpha1+42".to_string());
    }

    #[test]
    fn test_lt() {
        assert!(Version::parse("0.0.0") < Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.0.0") < Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.0") < Version::parse("1.2.3-alpha2"));
        // A pre-release sorts below its release.
        assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3"));
        assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3-alpha2"));
        assert!(!(Version::parse("1.2.3-alpha2") < Version::parse("1.2.3-alpha2")));
        assert!(!(Version::parse("1.2.3+23") < Version::parse("1.2.3+42")));
    }

    #[test]
    fn test_le() {
        assert!(Version::parse("0.0.0") <= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.0.0") <= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.0") <= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.3-alpha1") <= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.3-alpha2") <= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.3+23") <= Version::parse("1.2.3+42"));
    }

    #[test]
    fn test_gt() {
        assert!(Version::parse("1.2.3-alpha2") > Version::parse("0.0.0"));
        assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.0.0"));
        assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0"));
        assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha1"));
        assert!(Version::parse("1.2.3") > Version::parse("1.2.3-alpha2"));
        assert!(!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha2")));
        assert!(!(Version::parse("1.2.3+23") > Version::parse("1.2.3+42")));
    }

    #[test]
    fn test_ge() {
        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("0.0.0"));
        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.0.0"));
        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.0"));
        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha1"));
        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.3+23") >= Version::parse("1.2.3+42"));
    }

    #[test]
    fn test_spec_order() {
        // The exact precedence chain given in section 11 of the SemVer spec;
        // each entry must sort strictly below the next.
        let vs = ["1.0.0-alpha",
                  "1.0.0-alpha.1",
                  "1.0.0-alpha.beta",
                  "1.0.0-beta",
                  "1.0.0-beta.2",
                  "1.0.0-beta.11",
                  "1.0.0-rc.1",
                  "1.0.0"];
        let mut i = 1;
        while i < vs.len() {
            let a = Version::parse(vs[i-1]).unwrap();
            let b = Version::parse(vs[i]).unwrap();
            assert!(a < b);
            i += 1;
        }
    }
}
Don't parse identifiers with leading 0s as numerics

The round-trip check in Version::parse would otherwise fail, because the
`to_string()` implementation of a parsed number strips all leading 0s.
If an identifier starts with a leading 0, parse it as an `AlphaNumeric`
instead, so that a successful parse always round-trips exactly.

Note that the current specification also states:

> Numeric identifiers MUST NOT include leading zeroes
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `version` module gives you tools to create and compare SemVer-compliant versions.
use std::char;
use std::cmp;
use std::fmt::Show;
use std::fmt;
use std::hash;
/// An identifier in the pre-release or build metadata.
///
/// See sections 9 and 10 of the spec for more about pre-release identifiers and build metadata.
#[deriving(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Identifier {
    /// An identifier that's solely numbers.
    Numeric(u64),
    /// An identifier with letters and numbers.
    AlphaNumeric(String)
}
impl fmt::Show for Identifier {
    /// Renders the identifier exactly as it appeared in the source text:
    /// numeric identifiers as plain numbers, alphanumeric ones verbatim.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Numeric(ref n) => n.fmt(f),
            AlphaNumeric(ref s) => s.fmt(f)
        }
    }
}
/// Represents a version number conforming to the semantic versioning scheme.
//
// `PartialEq`, `PartialOrd`, `Ord` and `Hash` are implemented by hand below
// (rather than derived) so that `build` is excluded from comparisons.
#[deriving(Clone, Eq)]
pub struct Version {
    /// The major version, to be incremented on incompatible changes.
    pub major: uint,
    /// The minor version, to be incremented when functionality is added in a
    /// backwards-compatible manner.
    pub minor: uint,
    /// The patch version, to be incremented when backwards-compatible bug
    /// fixes are made.
    pub patch: uint,
    /// The pre-release version identifier, if one exists.
    pub pre: Vec<Identifier>,
    /// The build metadata, ignored when determining version precedence.
    pub build: Vec<Identifier>,
}
/// A `ParseError` is returned as the `Err` side of a `Result` when a version is attempted
/// to be parsed.
#[deriving(Clone,PartialEq,Show,PartialOrd)]
pub enum ParseError {
    /// All identifiers must be ASCII.
    NonAsciiIdentifier,
    /// The version was mis-parsed: a version was produced, but re-printing
    /// it did not reproduce the input (carries the parsed version and the
    /// trimmed original string).
    IncorrectParse(Version, String),
    /// Any other failure.
    GenericFailure,
}
impl Version {
    /// Parse a string into a semver object.
    ///
    /// Leading/trailing whitespace is trimmed first. After parsing, the
    /// result is printed back and compared against the (trimmed) input; any
    /// mismatch — e.g. trailing garbage — yields `IncorrectParse`.
    pub fn parse(s: &str) -> Result<Version, ParseError> {
        if !s.is_ascii() {
            return Err(NonAsciiIdentifier)
        }
        let s = s.trim();
        let v = parse_iter(&mut s.chars());
        match v {
            Some(v) => {
                // Round-trip check: guarantees the parse was lossless.
                if v.to_string().equiv(&s) {
                    Ok(v)
                } else {
                    Err(IncorrectParse(v, s.to_string()))
                }
            }
            None => Err(GenericFailure)
        }
    }
}
impl fmt::Show for Version {
    /// Formats the version in its canonical SemVer text form:
    /// `major.minor.patch`, then `-` plus the dot-joined pre-release
    /// identifiers (if any), then `+` plus the dot-joined build metadata
    /// (if any). `Version::parse` relies on this for its round-trip check.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // FIX: the statement below previously had no trailing semicolon,
        // relying on lenient macro-statement parsing; make it explicit.
        try!(write!(f, "{}.{}.{}", self.major, self.minor, self.patch));
        if !self.pre.is_empty() {
            try!(write!(f, "-"));
            for (i, x) in self.pre.iter().enumerate() {
                if i != 0 { try!(write!(f, ".")) };
                try!(x.fmt(f));
            }
        }
        if !self.build.is_empty() {
            try!(write!(f, "+"));
            for (i, x) in self.build.iter().enumerate() {
                if i != 0 { try!(write!(f, ".")) };
                try!(x.fmt(f));
            }
        }
        Ok(())
    }
}
impl cmp::PartialEq for Version {
    /// Equality over major/minor/patch/pre only.
    #[inline]
    fn eq(&self, other: &Version) -> bool {
        // We should ignore build metadata here, otherwise versions v1 and v2
        // can exist such that !(v1 < v2) && !(v1 > v2) && v1 != v2, which
        // violate strict total ordering rules.
        self.major == other.major &&
            self.minor == other.minor &&
            self.patch == other.patch &&
            self.pre == other.pre
    }
}
// `PartialOrd` simply defers to the total ordering defined by `Ord` below.
impl cmp::PartialOrd for Version {
    fn partial_cmp(&self, other: &Version) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl cmp::Ord for Version {
    // Precedence per the SemVer spec: compare major, minor, patch
    // numerically, then fall back to the pre-release identifiers. Build
    // metadata never participates in ordering (mirroring `PartialEq`).
    fn cmp(&self, other: &Version) -> Ordering {
        match self.major.cmp(&other.major) {
            Equal => {}
            r => return r,
        }
        match self.minor.cmp(&other.minor) {
            Equal => {}
            r => return r,
        }
        match self.patch.cmp(&other.patch) {
            Equal => {}
            r => return r,
        }
        // NB: semver spec says 0.0.0-pre < 0.0.0
        // but the version of ord defined for vec
        // says that [] < [pre] so we alter it here
        match (self.pre.len(), other.pre.len()) {
            (0, 0) => Equal,
            (0, _) => Greater, // a release outranks any pre-release of itself
            (_, 0) => Less,
            (_, _) => self.pre.cmp(&other.pre)
        }
    }
}
// Hash every field except `build`: versions that compare equal (build
// metadata is ignored by `PartialEq`) must also hash identically.
impl<S: hash::Writer> hash::Hash<S> for Version {
    fn hash(&self, into: &mut S) {
        self.major.hash(into);
        self.minor.hash(into);
        self.patch.hash(into);
        self.pre.hash(into);
    }
}
/// Consumes characters from `rdr` while `pred` holds, collecting them into a
/// `String`. Returns the collected prefix together with the first character
/// that was read but rejected (`None` if the input ran out). The rejected
/// character has already been pulled off the iterator, so callers receive it
/// here rather than seeing it again on the next `next()`.
///
/// Despite the name, the returned prefix may be empty (the loop breaks
/// immediately when the very first character fails `pred`); callers such as
/// `take_ident` check for that themselves.
fn take_nonempty_prefix<T:Iterator<char>>(rdr: &mut T, pred: |char| -> bool)
                        -> (String, Option<char>) {
    let mut buf = String::new();
    let mut ch = rdr.next();
    loop {
        match ch {
            None => break,
            Some(c) if !pred(c) => break,
            Some(c) => {
                buf.push(c);
                ch = rdr.next();
            }
        }
    }
    (buf, ch)
}
/// Reads a decimal number from `rdr`. Returns the parsed value plus the first
/// non-digit character (see `take_nonempty_prefix`), or `None` when no digits
/// were present or the digit run did not parse as `uint`.
fn take_num<T: Iterator<char>>(rdr: &mut T) -> Option<(uint, Option<char>)> {
    let (s, ch) = take_nonempty_prefix(rdr, char::is_digit);
    match from_str::<uint>(s[]) {
        None => None,
        Some(i) => Some((i, ch))
    }
}
/// Reads an alphanumeric identifier from `rdr`. Returns the parsed identifier
/// plus the first character that did not belong to it (`None` at end of
/// input), or `None` when no identifier was present.
///
/// All-digit identifiers become `Numeric` — except when they carry a leading
/// zero: the spec forbids leading zeroes in numeric identifiers, and parsing
/// "007" as the number 7 would break `Version::parse`'s round-trip check, so
/// such identifiers stay `AlphaNumeric`.
fn take_ident<T: Iterator<char>>(rdr: &mut T) -> Option<(Identifier, Option<char>)> {
    let (s,ch) = take_nonempty_prefix(rdr, char::is_alphanumeric);
    if s.len() == 0 {
        // Nothing consumed: not a valid identifier (e.g. "1.2.3-").
        None
    } else if s[].chars().all(char::is_digit) && s[].char_at(0) != '0' {
        match from_str::<u64>(s.as_slice()) {
            None => None,
            Some(i) => Some((Numeric(i), ch))
        }
    } else {
        Some((AlphaNumeric(s), ch))
    }
}
/// Succeeds (with `Some(())`) exactly when `ch` holds the expected character
/// `c`; any other character — or end of input (`None`) — yields `None`.
fn expect(ch: Option<char>, c: char) -> Option<()> {
    if ch == Some(c) { Some(()) } else { None }
}
/// Parses a full `major.minor.patch[-pre.ids][+build.ids]` version out of the
/// character iterator. Returns `None` on any malformed input; the caller
/// (`Version::parse`) converts that into a `ParseError`.
fn parse_iter<T: Iterator<char>>(rdr: &mut T) -> Option<Version> {
    // The three leading numeric components; `expect` checks the '.'
    // separator after major and after minor. The character following the
    // patch number is threaded through as `ch`.
    let maybe_vers = take_num(rdr).and_then(|(major, ch)| {
        expect(ch, '.').and_then(|_| Some(major))
    }).and_then(|major| {
        take_num(rdr).and_then(|(minor, ch)| {
            expect(ch, '.').and_then(|_| Some((major, minor)))
        })
    }).and_then(|(major, minor)| {
        take_num(rdr).and_then(|(patch, ch)| {
            Some((major, minor, patch, ch))
        })
    });
    let (major, minor, patch, ch) = match maybe_vers {
        Some((a, b, c, d)) => (a, b, c, d),
        None => return None
    };
    let mut pre = vec!();
    let mut build = vec!();
    let mut ch = ch;
    // '-' introduces a dot-separated list of pre-release identifiers.
    if ch == Some('-') {
        loop {
            let (id, c) = match take_ident(rdr) {
                Some((id, c)) => (id, c),
                None => return None
            };
            pre.push(id);
            ch = c;
            if ch != Some('.') { break; }
        }
    }
    // '+' (either directly after the patch number or terminating the
    // pre-release list) introduces the build metadata identifiers.
    if ch == Some('+') {
        loop {
            let (id, c) = match take_ident(rdr) {
                Some((id, c)) => (id, c),
                None => return None
            };
            build.push(id);
            ch = c;
            if ch != Some('.') { break; }
        }
    }
    Some(Version {
        major: major,
        minor: minor,
        patch: patch,
        pre: pre,
        build: build,
    })
}
#[cfg(test)]
mod test {
    // Tests covering parsing, equality, ordering, formatting, and the
    // spec-mandated precedence of pre-release identifiers.
    use super::{
        Version,
        Numeric,
        AlphaNumeric,
        IncorrectParse,
        GenericFailure,
    };

    #[test]
    fn test_parse() {
        // Inputs that are not even shaped like a version fail outright.
        assert_eq!(Version::parse(""), Err(GenericFailure));
        assert_eq!(Version::parse("  "), Err(GenericFailure));
        assert_eq!(Version::parse("1"), Err(GenericFailure));
        assert_eq!(Version::parse("1.2"), Err(GenericFailure));
        // NOTE(review): the next three assertions duplicate earlier ones.
        assert_eq!(Version::parse("1.2"), Err(GenericFailure));
        assert_eq!(Version::parse("1"), Err(GenericFailure));
        assert_eq!(Version::parse("1.2"), Err(GenericFailure));
        assert_eq!(Version::parse("1.2.3-"), Err(GenericFailure));
        assert_eq!(Version::parse("a.b.c"), Err(GenericFailure));
        // Trailing garbage parses a prefix but fails the round-trip check,
        // producing IncorrectParse with the successfully parsed portion.
        let version = Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(),
            build: vec!(),
        };
        let error = Err(IncorrectParse(version, "1.2.3 abc".to_string()));
        assert_eq!(Version::parse("1.2.3 abc"), error);
        assert!(Version::parse("1.2.3") == Ok(Version {
            major: 1,
            minor: 2u,
            patch: 3u,
            pre: vec!(),
            build: vec!(),
        }));
        // Surrounding whitespace is trimmed before parsing.
        assert!(Version::parse("  1.2.3  ") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(),
            build: vec!(),
        }));
        assert!(Version::parse("1.2.3-alpha1") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(AlphaNumeric("alpha1".to_string())),
            build: vec!(),
        }));
        assert!(Version::parse("  1.2.3-alpha1  ") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(AlphaNumeric("alpha1".to_string())),
            build: vec!()
        }));
        assert!(Version::parse("1.2.3+build5") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(),
            build: vec!(AlphaNumeric("build5".to_string()))
        }));
        assert!(Version::parse("  1.2.3+build5  ") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(),
            build: vec!(AlphaNumeric("build5".to_string()))
        }));
        assert!(Version::parse("1.2.3-alpha1+build5") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(AlphaNumeric("alpha1".to_string())),
            build: vec!(AlphaNumeric("build5".to_string()))
        }));
        assert!(Version::parse("  1.2.3-alpha1+build5  ") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(AlphaNumeric("alpha1".to_string())),
            build: vec!(AlphaNumeric("build5".to_string()))
        }));
        // Mixed numeric/alphanumeric identifiers in both lists.
        assert!(Version::parse("1.2.3-1.alpha1.9+build5.7.3aedf  ") == Ok(Version {
            major: 1u,
            minor: 2u,
            patch: 3u,
            pre: vec!(Numeric(1),AlphaNumeric("alpha1".to_string()),Numeric(9)),
            build: vec!(AlphaNumeric("build5".to_string()),
                        Numeric(7),
                        AlphaNumeric("3aedf".to_string()))
        }));
        // Regression test for the leading-zero rule: "0851523" must stay
        // AlphaNumeric so the parse round-trips.
        assert_eq!(Version::parse("0.4.0-beta.1+0851523"), Ok(Version {
            major: 0,
            minor: 4,
            patch: 0,
            pre: vec![AlphaNumeric("beta".to_string()), Numeric(1)],
            build: vec![AlphaNumeric("0851523".to_string())],
        }));
    }

    #[test]
    fn test_eq() {
        assert_eq!(Version::parse("1.2.3"), Version::parse("1.2.3"));
        assert_eq!(Version::parse("1.2.3-alpha1"), Version::parse("1.2.3-alpha1"));
        assert_eq!(Version::parse("1.2.3+build.42"), Version::parse("1.2.3+build.42"));
        assert_eq!(Version::parse("1.2.3-alpha1+42"), Version::parse("1.2.3-alpha1+42"));
        // Build metadata is ignored by equality.
        assert_eq!(Version::parse("1.2.3+23"), Version::parse("1.2.3+42"));
    }

    #[test]
    fn test_ne() {
        assert!(Version::parse("0.0.0") != Version::parse("0.0.1"));
        assert!(Version::parse("0.0.0") != Version::parse("0.1.0"));
        assert!(Version::parse("0.0.0") != Version::parse("1.0.0"));
        assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta"));
    }

    #[test]
    fn test_show() {
        assert_eq!(format!("{}", Version::parse("1.2.3").unwrap()),
                   "1.2.3".to_string());
        assert_eq!(format!("{}", Version::parse("1.2.3-alpha1").unwrap()),
                   "1.2.3-alpha1".to_string());
        assert_eq!(format!("{}", Version::parse("1.2.3+build.42").unwrap()),
                   "1.2.3+build.42".to_string());
        assert_eq!(format!("{}", Version::parse("1.2.3-alpha1+42").unwrap()),
                   "1.2.3-alpha1+42".to_string());
    }

    #[test]
    fn test_to_string() {
        assert_eq!(Version::parse("1.2.3").unwrap().to_string(), "1.2.3".to_string());
        assert_eq!(Version::parse("1.2.3-alpha1").unwrap().to_string(), "1.2.3-alpha1".to_string());
        assert_eq!(Version::parse("1.2.3+build.42").unwrap().to_string(), "1.2.3+build.42".to_string());
        assert_eq!(Version::parse("1.2.3-alpha1+42").unwrap().to_string(), "1.2.3-alpha1+42".to_string());
    }

    #[test]
    fn test_lt() {
        assert!(Version::parse("0.0.0") < Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.0.0") < Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.0") < Version::parse("1.2.3-alpha2"));
        // A pre-release sorts below its release.
        assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3"));
        assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3-alpha2"));
        assert!(!(Version::parse("1.2.3-alpha2") < Version::parse("1.2.3-alpha2")));
        assert!(!(Version::parse("1.2.3+23") < Version::parse("1.2.3+42")));
    }

    #[test]
    fn test_le() {
        assert!(Version::parse("0.0.0") <= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.0.0") <= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.0") <= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.3-alpha1") <= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.3-alpha2") <= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.3+23") <= Version::parse("1.2.3+42"));
    }

    #[test]
    fn test_gt() {
        assert!(Version::parse("1.2.3-alpha2") > Version::parse("0.0.0"));
        assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.0.0"));
        assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0"));
        assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha1"));
        assert!(Version::parse("1.2.3") > Version::parse("1.2.3-alpha2"));
        assert!(!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha2")));
        assert!(!(Version::parse("1.2.3+23") > Version::parse("1.2.3+42")));
    }

    #[test]
    fn test_ge() {
        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("0.0.0"));
        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.0.0"));
        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.0"));
        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha1"));
        assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha2"));
        assert!(Version::parse("1.2.3+23") >= Version::parse("1.2.3+42"));
    }

    #[test]
    fn test_spec_order() {
        // The exact precedence chain given in section 11 of the SemVer spec;
        // each entry must sort strictly below the next.
        let vs = ["1.0.0-alpha",
                  "1.0.0-alpha.1",
                  "1.0.0-alpha.beta",
                  "1.0.0-beta",
                  "1.0.0-beta.2",
                  "1.0.0-beta.11",
                  "1.0.0-rc.1",
                  "1.0.0"];
        let mut i = 1;
        while i < vs.len() {
            let a = Version::parse(vs[i-1]).unwrap();
            let b = Version::parse(vs[i]).unwrap();
            assert!(a < b);
            i += 1;
        }
    }
}
|
//! Module: zmq
#![crate_name = "zmq"]
#![license = "MIT/ASL2"]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![feature(phase, macro_rules)]
#[phase(plugin, link)]
extern crate log;
extern crate libc;
use libc::{c_int, c_long, c_void, size_t, c_char, int64_t, uint64_t};
use libc::consts::os::posix88;
use std::{mem, ptr, str, slice};
use std::fmt;
/// The ZMQ container that manages all the sockets
type Context_ = *mut c_void;

/// A ZMQ socket
type Socket_ = *mut c_void;

// Size in bytes of libzmq's opaque zmq_msg_t structure, which we reserve
// in-line. NOTE(review): this size differs between libzmq versions --
// confirm against the zmq.h this binding is built against.
static MsgSize_: uint = 48;

/// A message
type Msg_ = [c_char, ..MsgSize_];
// Raw FFI declarations into libzmq; each mirrors the C function of the same
// name (see the zmq man pages for contracts and error conventions: most
// return -1 and set zmq_errno() on failure).
#[link(name = "zmq")]
extern {
    fn zmq_version(major: *mut c_int, minor: *mut c_int, patch: *mut c_int);
    fn zmq_ctx_new() -> Context_;
    fn zmq_ctx_destroy(ctx: Context_) -> c_int;
    fn zmq_errno() -> c_int;
    fn zmq_strerror(errnum: c_int) -> *const c_char;
    fn zmq_socket(ctx: Context_, typ: c_int) -> Socket_;
    fn zmq_close(socket: Socket_) -> c_int;
    fn zmq_getsockopt(socket: Socket_, opt: c_int, optval: *mut c_void, size: *mut size_t) -> c_int;
    fn zmq_setsockopt(socket: Socket_, opt: c_int, optval: *const c_void, size: size_t) -> c_int;
    fn zmq_bind(socket: Socket_, endpoint: *const c_char) -> c_int;
    fn zmq_connect(socket: Socket_, endpoint: *const c_char) -> c_int;
    // NOTE(review): the zmq_msg_* functions are declared with `&Msg_` even
    // though the C API mutates the message (init/recv/close); this leans on
    // the FFI boundary not enforcing Rust aliasing -- confirm intended.
    fn zmq_msg_init(msg: &Msg_) -> c_int;
    fn zmq_msg_init_size(msg: &Msg_, size: size_t) -> c_int;
    fn zmq_msg_data(msg: &Msg_) -> *const u8;
    fn zmq_msg_size(msg: &Msg_) -> size_t;
    fn zmq_msg_close(msg: &Msg_) -> c_int;
    fn zmq_msg_send(msg: &Msg_, socket: Socket_, flags: c_int) -> c_int;
    fn zmq_msg_recv(msg: &Msg_, socket: Socket_, flags: c_int) -> c_int;
    fn zmq_poll(items: *mut PollItem, nitems: c_int, timeout: c_long) -> c_int;
}
/// Socket types
//
// Discriminant values match the ZMQ_* socket-type constants in zmq.h, so a
// variant can be cast straight to c_int when calling zmq_socket.
#[allow(non_camel_case_types)]
#[deriving(Clone, Show)]
pub enum SocketType {
    PAIR = 0,
    PUB = 1,
    SUB = 2,
    REQ = 3,
    REP = 4,
    DEALER = 5,
    ROUTER = 6,
    PULL = 7,
    PUSH = 8,
    XPUB = 9,
    XSUB = 10,
}
// Send/recv flag bits, matching ZMQ_DONTWAIT and ZMQ_SNDMORE in the C API.
pub static DONTWAIT : int = 1;
pub static SNDMORE : int = 2;
/// Raw ZMQ option and message-flag constants, with discriminants matching
/// the ZMQ_* values in zmq.h. Convert with `to_raw`/`from_raw`.
//
// FIX: the `#[allow(non_camel_case_types)]` attribute was duplicated
// (it appeared both before and after `#[deriving(Clone)]`); keep one.
#[allow(non_camel_case_types)]
#[deriving(Clone)]
pub enum Constants {
    ZMQ_AFFINITY = 4,
    ZMQ_IDENTITY = 5,
    ZMQ_SUBSCRIBE = 6,
    ZMQ_UNSUBSCRIBE = 7,
    ZMQ_RATE = 8,
    ZMQ_RECOVERY_IVL = 9,
    ZMQ_MCAST_LOOP = 10,
    ZMQ_SNDBUF = 11,
    ZMQ_RCVBUF = 12,
    ZMQ_RCVMORE = 13,
    ZMQ_FD = 14,
    ZMQ_EVENTS = 15,
    ZMQ_TYPE = 16,
    ZMQ_LINGER = 17,
    ZMQ_RECONNECT_IVL = 18,
    ZMQ_BACKLOG = 19,
    ZMQ_RECOVERY_IVL_MSEC = 20,
    ZMQ_RECONNECT_IVL_MAX = 21,
    ZMQ_MAXMSGSIZE = 22,
    ZMQ_SNDHWM = 23,
    ZMQ_RCVHWM = 24,
    ZMQ_MAX_VSM_SIZE = 30,
    ZMQ_DELIMITER = 31,
    ZMQ_VSM = 32,
    ZMQ_MSG_MORE = 1,
    ZMQ_MSG_SHARED = 128,
    ZMQ_MSG_MASK = 129,
    ZMQ_HAUSNUMERO = 156384712,
}
impl Constants {
    /// Returns the underlying ZMQ_* integer value.
    pub fn to_raw(&self) -> i32 {
        *self as i32
    }
    /// Maps a raw ZMQ_* integer back to its `Constants` variant.
    ///
    /// Inverse of `to_raw` for all defined variants.
    // fails if `raw` is not a valid value
    pub fn from_raw(raw: i32) -> Constants {
        match raw {
            4 => ZMQ_AFFINITY,
            5 => ZMQ_IDENTITY,
            6 => ZMQ_SUBSCRIBE,
            7 => ZMQ_UNSUBSCRIBE,
            8 => ZMQ_RATE,
            9 => ZMQ_RECOVERY_IVL,
            10 => ZMQ_MCAST_LOOP,
            11 => ZMQ_SNDBUF,
            12 => ZMQ_RCVBUF,
            13 => ZMQ_RCVMORE,
            14 => ZMQ_FD,
            15 => ZMQ_EVENTS,
            16 => ZMQ_TYPE,
            17 => ZMQ_LINGER,
            18 => ZMQ_RECONNECT_IVL,
            19 => ZMQ_BACKLOG,
            20 => ZMQ_RECOVERY_IVL_MSEC,
            21 => ZMQ_RECONNECT_IVL_MAX,
            22 => ZMQ_MAXMSGSIZE,
            23 => ZMQ_SNDHWM,
            24 => ZMQ_RCVHWM,
            30 => ZMQ_MAX_VSM_SIZE,
            31 => ZMQ_DELIMITER,
            32 => ZMQ_VSM,
            1 => ZMQ_MSG_MORE,
            128 => ZMQ_MSG_SHARED,
            129 => ZMQ_MSG_MASK,
            156384712 => ZMQ_HAUSNUMERO,
            x => fail!("invalid constant {}", x as int),
        }
    }
}
// Errors a zmq call can report. POSIX-derived variants reuse the platform
// errno values; zmq-specific ones live above ZMQ_HAUSNUMERO (156384712).
#[deriving(Clone, Eq, PartialEq)]
pub enum Error {
    EACCES = posix88::EACCES as int,
    EADDRINUSE = posix88::EADDRINUSE as int,
    EAGAIN = posix88::EAGAIN as int,
    EBUSY = posix88::EBUSY as int,
    ECONNREFUSED = posix88::ECONNREFUSED as int,
    EFAULT = posix88::EFAULT as int,
    EHOSTUNREACH = posix88::EHOSTUNREACH as int,
    EINPROGRESS = posix88::EINPROGRESS as int,
    EINVAL = posix88::EINVAL as int,
    EMFILE = posix88::EMFILE as int,
    EMSGSIZE = posix88::EMSGSIZE as int,
    ENAMETOOLONG = posix88::ENAMETOOLONG as int,
    ENODEV = posix88::ENODEV as int,
    ENOENT = posix88::ENOENT as int,
    ENOMEM = posix88::ENOMEM as int,
    ENOTCONN = posix88::ENOTCONN as int,
    ENOTSOCK = posix88::ENOTSOCK as int,
    EPROTO = posix88::EPROTO as int,
    EPROTONOSUPPORT = posix88::EPROTONOSUPPORT as int,
    // magic number is EHAUSNUMERO + num
    ENOTSUP = 156384713,
    ENOBUFS = 156384715,
    ENETDOWN = 156384716,
    EADDRNOTAVAIL = 156384718,
    // native zmq error codes
    EFSM = 156384763,
    ENOCOMPATPROTO = 156384764,
    ETERM = 156384765,
    EMTHREAD = 156384766,
}
impl Error {
    /// Returns the raw errno value for this error.
    pub fn to_raw(&self) -> i32 {
        *self as i32
    }
    /// Maps a raw errno (POSIX or ZMQ_HAUSNUMERO-based) to an `Error`.
    ///
    /// zmq defines HAUSNUMERO aliases for errnos missing on some platforms;
    /// those aliases are folded onto the matching POSIX variants here.
    /// Fails (with the zmq_strerror text) on an unrecognized code.
    pub fn from_raw(raw: i32) -> Error {
        match raw {
            posix88::EACCES => EACCES,
            posix88::EADDRINUSE => EADDRINUSE,
            posix88::EAGAIN => EAGAIN,
            posix88::EBUSY => EBUSY,
            posix88::ECONNREFUSED => ECONNREFUSED,
            posix88::EFAULT => EFAULT,
            posix88::EHOSTUNREACH => EHOSTUNREACH,
            posix88::EINPROGRESS => EINPROGRESS,
            posix88::EINVAL => EINVAL,
            posix88::EMFILE => EMFILE,
            posix88::EMSGSIZE => EMSGSIZE,
            posix88::ENAMETOOLONG => ENAMETOOLONG,
            posix88::ENODEV => ENODEV,
            posix88::ENOENT => ENOENT,
            posix88::ENOMEM => ENOMEM,
            posix88::ENOTCONN => ENOTCONN,
            posix88::ENOTSOCK => ENOTSOCK,
            posix88::EPROTO => EPROTO,
            posix88::EPROTONOSUPPORT => EPROTONOSUPPORT,
            156384713 => ENOTSUP,
            156384714 => EPROTONOSUPPORT,
            156384715 => ENOBUFS,
            156384716 => ENETDOWN,
            156384717 => EADDRINUSE,
            156384718 => EADDRNOTAVAIL,
            156384719 => ECONNREFUSED,
            156384720 => EINPROGRESS,
            156384721 => ENOTSOCK,
            156384763 => EFSM,
            156384764 => ENOCOMPATPROTO,
            156384765 => ETERM,
            156384766 => EMTHREAD,
            x => {
                unsafe {
                    fail!("unknown error [{}]: {}",
                          x as int,
                          str::raw::from_c_str(zmq_strerror(x))
                    )
                }
            }
        }
    }
}
/// Return the current zeromq version as `(major, minor, patch)`,
/// as reported by the linked libzmq via `zmq_version`.
pub fn version() -> (int, int, int) {
    let mut major = 0;
    let mut minor = 0;
    let mut patch = 0;
    unsafe {
        zmq_version(&mut major, &mut minor, &mut patch);
    }
    (major as int, minor as int, patch as int)
}
/// zmq context, used to create sockets. Is thread safe, and can be safely
/// shared, but dropping it while sockets are still open will cause them to
/// close (see zmq_ctx_destroy(3)).
///
/// For this reason, one should use an Arc to share it, rather than any unsafe
/// trickery you might think up that would call the destructor.
pub struct Context {
    // Owned raw pointer from zmq_ctx_new; destroyed on Drop.
    ctx: Context_,
}
impl Context {
    /// Creates a fresh zmq context via `zmq_ctx_new`.
    pub fn new() -> Context {
        Context {
            ctx: unsafe { zmq_ctx_new() }
        }
    }

    /// Creates a socket of the given type within this context.
    ///
    /// Returns the mapped zmq errno on failure (null socket pointer).
    pub fn socket(&mut self, socket_type: SocketType) -> Result<Socket, Error> {
        let sock = unsafe {zmq_socket(self.ctx, socket_type as c_int)};
        if sock.is_null() {
            return Err(errno_to_error());
        }
        Ok(Socket { sock: sock, closed: false })
    }

    /// Try to destroy the context. This is different than the destructor; the
    /// destructor will loop when zmq_ctx_destroy returns EINTR
    pub fn destroy(&mut self) -> Result<(), Error> {
        if unsafe { zmq_ctx_destroy(self.ctx) } == -1i32 {
            Err(errno_to_error())
        } else {
            Ok(())
        }
    }
}
impl Drop for Context {
    fn drop(&mut self) {
        debug!("context dropped");
        // Retry destroy until it succeeds or reports EFAULT; per
        // zmq_ctx_destroy(3) the call can be interrupted (EINTR) and must
        // then be issued again. NOTE(review): this spins on *any* non-EFAULT
        // error, not just EINTR -- confirm intended.
        let mut e = self.destroy();
        while e.is_err() && (e.unwrap_err() != EFAULT) {
            e = self.destroy();
        }
    }
}
pub struct Socket {
    // Raw zmq socket pointer, owned by this struct.
    sock: Socket_,
    // True once zmq_close has been called, so Drop won't close twice.
    closed: bool
}
impl Drop for Socket {
    fn drop(&mut self) {
        // NOTE(review): failing inside drop aborts when already unwinding --
        // consider logging instead of fail! here.
        match self.close_final() {
            Ok(()) => { debug!("socket dropped") },
            Err(e) => fail!(e.to_string())
        }
    }
}
impl Socket {
    /// Accept connections on a socket.
    pub fn bind(&mut self, endpoint: &str) -> Result<(), Error> {
        let rc = endpoint.with_c_str (|cstr| {
            unsafe {zmq_bind(self.sock, cstr)}
        });
        if rc == -1i32 { Err(errno_to_error()) } else { Ok(()) }
    }

    /// Connect a socket.
    pub fn connect(&mut self, endpoint: &str) -> Result<(), Error> {
        let rc = endpoint.with_c_str (|cstr| {
            unsafe {zmq_connect(self.sock, cstr)}
        });
        if rc == -1i32 { Err(errno_to_error()) } else { Ok(()) }
    }

    /// Send a message
    ///
    /// Copies `data` into a freshly initialized zmq message, sends it with
    /// the given DONTWAIT/SNDMORE flags, and always closes the message.
    pub fn send(&mut self, data: &[u8], flags: int) -> Result<(), Error> {
        unsafe {
            let base_ptr = data.as_ptr();
            let len = data.len();
            // In-line zmq_msg_t storage; initialized by zmq_msg_init_size.
            // NOTE(review): declared immutable yet mutated through the FFI
            // `&Msg_` declarations -- see the extern block note.
            let msg = [0, ..MsgSize_];
            // Copy the data into the message.
            let rc = zmq_msg_init_size(&msg, len as size_t);
            if rc == -1i32 { return Err(errno_to_error()); }
            ptr::copy_memory(zmq_msg_data(&msg) as *mut u8, base_ptr, len);
            let rc = zmq_msg_send(&msg, self.sock, flags as c_int);
            let _ = zmq_msg_close(&msg);
            if rc == -1i32 { Err(errno_to_error()) } else { Ok(()) }
        }
    }

    /// Sends a string as its UTF-8 bytes.
    pub fn send_str(&mut self, data: &str, flags: int) -> Result<(), Error> {
        self.send(data.as_bytes(), flags)
    }

    /// Receive a message into a `Message`. The length passed to zmq_msg_recv
    /// is the length of the buffer.
    pub fn recv(&mut self, msg: &mut Message, flags: int) -> Result<(), Error> {
        let rc = unsafe {
            zmq_msg_recv(&msg.msg, self.sock, flags as c_int)
        };
        if rc == -1i32 {
            Err(errno_to_error())
        } else {
            Ok(())
        }
    }

    /// Receives into a newly allocated `Message`.
    pub fn recv_msg(&mut self, flags: int) -> Result<Message, Error> {
        let mut msg = Message::new();
        match self.recv(&mut msg, flags) {
            Ok(()) => Ok(msg),
            Err(e) => Err(e),
        }
    }

    /// Receives a message and copies its payload into an owned byte vector.
    pub fn recv_bytes(&mut self, flags: int) -> Result<Vec<u8>, Error> {
        match self.recv_msg(flags) {
            Ok(msg) => Ok(msg.to_bytes()),
            Err(e) => Err(e),
        }
    }

    /// Receives a message and converts its payload to an owned `String`.
    pub fn recv_str(&mut self, flags: int) -> Result<String, Error> {
        match self.recv_msg(flags) {
            Ok(msg) => Ok(msg.to_string()),
            Err(e) => Err(e),
        }
    }

    /// Closes the socket, remembering the closed state so Drop (via
    /// `close_final`) will not close it a second time.
    pub fn close(&mut self) -> Result<(), Error> {
        if !self.closed {
            self.closed = true;
            if unsafe { zmq_close(self.sock) } == -1i32 {
                return Err(errno_to_error());
            }
        }
        Ok(())
    }

    /// Close used by Drop: like `close`, but does not update `closed`
    /// (the socket is going away anyway).
    pub fn close_final(&mut self) -> Result<(), Error> {
        if !self.closed {
            if unsafe { zmq_close(self.sock) } == -1i32 {
                return Err(errno_to_error());
            }
        }
        Ok(())
    }

    /// Queries ZMQ_TYPE and maps the raw value back onto `SocketType`.
    pub fn get_socket_type(&self) -> Result<SocketType, Error> {
        getsockopt_int(self.sock, ZMQ_TYPE.to_raw()).map(|ty| {
            match ty {
                0 => PAIR,
                1 => PUB,
                2 => SUB,
                3 => REQ,
                4 => REP,
                5 => DEALER,
                6 => ROUTER,
                7 => PULL,
                8 => PUSH,
                9 => XPUB,
                10 => XSUB,
                _ => fail!("socket type is out of range!")
            }
        })
    }

    /// True when more parts of a multipart message are pending (ZMQ_RCVMORE).
    pub fn get_rcvmore(&self) -> Result<bool, Error> {
        getsockopt_i64(self.sock, ZMQ_RCVMORE.to_raw()).and_then (|o| {
            Ok(o == 1i64)
        })
    }

    // Remaining getters/setters are thin typed wrappers over
    // zmq_getsockopt/zmq_setsockopt for the corresponding ZMQ_* option.
    pub fn get_maxmsgsize(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_MAXMSGSIZE.to_raw())
    }
    pub fn get_sndhwm(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_SNDHWM.to_raw())
    }
    pub fn get_rcvhwm(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_RCVHWM.to_raw())
    }
    pub fn get_affinity(&self) -> Result<u64, Error> {
        getsockopt_u64(self.sock, ZMQ_AFFINITY.to_raw())
    }
    pub fn get_identity(&self) -> Result<Vec<u8>, Error> {
        getsockopt_bytes(self.sock, ZMQ_IDENTITY.to_raw())
    }
    pub fn get_rate(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_RATE.to_raw())
    }
    pub fn get_recovery_ivl(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_RECOVERY_IVL.to_raw())
    }
    pub fn get_recovery_ivl_msec(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_RECOVERY_IVL_MSEC.to_raw())
    }
    pub fn get_mcast_loop(&self) -> Result<bool, Error> {
        getsockopt_i64(self.sock, ZMQ_MCAST_LOOP.to_raw()).and_then(|o| {
            Ok(o == 1i64)
        })
    }
    pub fn get_sndbuf(&self) -> Result<u64, Error> {
        getsockopt_u64(self.sock, ZMQ_SNDBUF.to_raw())
    }
    pub fn get_rcvbuf(&self) -> Result<u64, Error> {
        getsockopt_u64(self.sock, ZMQ_RCVBUF.to_raw())
    }
    pub fn get_linger(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_LINGER.to_raw())
    }
    pub fn get_reconnect_ivl(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_RECONNECT_IVL.to_raw())
    }
    pub fn get_reconnect_ivl_max(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_RECONNECT_IVL_MAX.to_raw())
    }
    pub fn get_backlog(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_BACKLOG.to_raw())
    }
    pub fn get_fd(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_FD.to_raw())
    }
    pub fn get_events(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_EVENTS.to_raw())
    }
    pub fn set_maxmsgsize(&self, value: i64) -> Result<(), Error> {
        setsockopt_i64(self.sock, ZMQ_MAXMSGSIZE.to_raw(), value)
    }
    pub fn set_sndhwm(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_SNDHWM.to_raw(), value)
    }
    pub fn set_rcvhwm(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_RCVHWM.to_raw(), value)
    }
    pub fn set_affinity(&self, value: u64) -> Result<(), Error> {
        setsockopt_u64(self.sock, ZMQ_AFFINITY.to_raw(), value)
    }
    pub fn set_identity(&self, value: &[u8]) -> Result<(), Error> {
        setsockopt_bytes(self.sock, ZMQ_IDENTITY.to_raw(), value)
    }
    pub fn set_subscribe(&self, value: &[u8]) -> Result<(), Error> {
        setsockopt_bytes(self.sock, ZMQ_SUBSCRIBE.to_raw(), value)
    }
    pub fn set_unsubscribe(&self, value: &[u8]) -> Result<(), Error> {
        setsockopt_bytes(self.sock, ZMQ_UNSUBSCRIBE.to_raw(), value)
    }
    pub fn set_rate(&self, value: i64) -> Result<(), Error> {
        setsockopt_i64(self.sock, ZMQ_RATE.to_raw(), value)
    }
    pub fn set_recovery_ivl(&self, value: i64) -> Result<(), Error> {
        setsockopt_i64(self.sock, ZMQ_RECOVERY_IVL.to_raw(), value)
    }
    pub fn set_recovery_ivl_msec(&self, value: i64) -> Result<(), Error> {
        setsockopt_i64(self.sock, ZMQ_RECOVERY_IVL_MSEC.to_raw(), value)
    }
    pub fn set_mcast_loop(&self, value: bool) -> Result<(), Error> {
        // zmq expects the boolean as an i64 0/1.
        let value = if value { 1i64 } else { 0i64 };
        setsockopt_i64(self.sock, ZMQ_MCAST_LOOP.to_raw(), value)
    }
    pub fn set_sndbuf(&self, value: u64) -> Result<(), Error> {
        setsockopt_u64(self.sock, ZMQ_SNDBUF.to_raw(), value)
    }
    pub fn set_rcvbuf(&self, value: u64) -> Result<(), Error> {
        setsockopt_u64(self.sock, ZMQ_RCVBUF.to_raw(), value)
    }
    pub fn set_linger(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_LINGER.to_raw(), value)
    }
    pub fn set_reconnect_ivl(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_RECONNECT_IVL.to_raw(), value)
    }
    pub fn set_reconnect_ivl_max(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_RECONNECT_IVL_MAX.to_raw(), value)
    }
    pub fn set_backlog(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_BACKLOG.to_raw(), value)
    }

    /// Builds a `PollItem` for this socket (for use with zmq_poll),
    /// subscribing to the given event bits.
    pub fn as_poll_item<'a>(&self, events: i16) -> PollItem<'a> {
        PollItem {
            socket: self.sock,
            fd: 0,
            events: events,
            revents: 0
        }
    }
}
/// An owned zmq message, wrapping the opaque C `zmq_msg_t` storage.
pub struct Message {
    // Raw bytes of the C zmq_msg_t struct (see `Msg_`); managed entirely
    // by the zmq_msg_* C functions.
    msg: Msg_
}

impl Drop for Message {
    /// Release the message's C-side storage; errors from zmq_msg_close
    /// are deliberately ignored since drop cannot fail.
    fn drop(&mut self) {
        unsafe {
            let _ = zmq_msg_close(&self.msg);
        }
    }
}

impl Message {
    /// Create an empty, initialized message.
    pub fn new() -> Message {
        unsafe {
            // NOTE(review): the array length constant here is spelled
            // `MsgSize_` — confirm it matches the module's message-size
            // constant used elsewhere (`MSG_SIZE` in the later revision).
            let message = Message { msg: [0, ..MsgSize_] };
            let _ = zmq_msg_init(&message.msg);
            message
        }
    }
    /// Run `f` over a borrowed view of the message payload without copying.
    pub fn with_bytes<T>(&self, f: |&[u8]| -> T) -> T {
        unsafe {
            let data = zmq_msg_data(&self.msg);
            let len = zmq_msg_size(&self.msg) as uint;
            slice::raw::buf_as_slice(data, len, f)
        }
    }
    /// Borrow the payload as a byte slice tied to the message's lifetime.
    pub fn as_bytes<'a>(&'a self) -> &'a [u8] {
        // This is safe because we're constraining the slice to the lifetime of
        // this message.
        unsafe {
            let data = zmq_msg_data(&self.msg);
            let len = zmq_msg_size(&self.msg) as uint;
            ::std::mem::transmute(::std::raw::Slice {
                data: data,
                len: len,
            })
        }
    }
    /// Run `f` over the payload interpreted as UTF-8.
    /// NOTE: unwraps the UTF-8 check, so this fails on non-UTF-8 payloads.
    pub fn with_str<T>(&self, f: |&str| -> T) -> T {
        self.with_bytes(|v| f(str::from_utf8(v).unwrap()))
    }
    /// Borrow the payload as a str, or None if it is not valid UTF-8.
    pub fn as_str<'a>(&'a self) -> Option<&'a str> {
        str::from_utf8(self.as_bytes())
    }
    /// Copy the payload into an owned byte vector.
    pub fn to_bytes(&self) -> Vec<u8> {
        self.with_bytes(|v| v.to_vec())
    }
    /// Copy the payload into an owned String (fails on invalid UTF-8,
    /// via `with_str`).
    pub fn to_string(&self) -> String {
        self.with_str(|s| s.to_string())
    }
}
/// Poll event flags for `PollItem::events` / `revents` (see zmq_poll(3)).
/// Declared `const` rather than `static`: they are immutable compile-time
/// constants, so `const` is the idiomatic zero-cost form (this matches the
/// project's own "static -> const" change noted in its history).
/// At least one message may be received without blocking.
pub const POLLIN : i16 = 1i16;
/// At least one message may be sent without blocking.
pub const POLLOUT : i16 = 2i16;
/// An error condition is present on the socket.
pub const POLLERR : i16 = 4i16;
/// One entry for `poll()`; layout must match C `zmq_pollitem_t`, hence
/// `#[repr(C)]` and the exact field order (socket, fd, events, revents).
// NOTE(review): lifetime parameter `'a` is not used by any field — it
// presumably documents that the item must not outlive its socket; confirm.
#[repr(C)]
pub struct PollItem<'a> {
    socket: Socket_,
    fd: c_int,
    events: i16,
    revents: i16
}

impl<'a> PollItem<'a> {
    /// Events that actually occurred, filled in by `poll()`.
    pub fn get_revents(&self) -> i16 {
        self.revents
    }
}
/// Poll a set of items, blocking up to `timeout` (zmq_poll time units).
/// Returns the number of items with events, or the zmq error on failure.
/// `revents` is updated in place on each item.
pub fn poll<'a>(items: &mut [PollItem<'a>], timeout: i64) -> Result<int, Error> {
    unsafe {
        let rc = zmq_poll(
            items.as_mut_ptr(),
            items.len() as c_int,
            timeout);
        if rc == -1i32 {
            Err(errno_to_error())
        } else {
            Ok(rc as int)
        }
    }
}

impl fmt::Show for Error {
    /// Return the error string for an error.
    // Formats via the C library's zmq_strerror, using the enum's
    // discriminant as the raw errno value.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        unsafe {
            write!(f, "{}",
                   str::raw::from_c_str(zmq_strerror(*self as c_int)))
        }
    }
}
// Generate a numeric getsockopt wrapper: `$name` reads a `$c_ty` from the
// C API and widens/casts it to the public `$ty`.
macro_rules! getsockopt_num(
    ($name:ident, $c_ty:ty, $ty:ty) => (
        fn $name(sock: Socket_, opt: c_int) -> Result<$ty, Error> {
            unsafe {
                let mut value: $c_ty = 0;
                let value_ptr = &mut value as *mut $c_ty;
                // zmq_getsockopt takes the buffer size in/out by pointer.
                let mut size = mem::size_of::<$c_ty>() as size_t;
                if -1 == zmq_getsockopt(sock, opt, value_ptr as *mut c_void, &mut size) {
                    Err(errno_to_error())
                } else {
                    Ok(value as $ty)
                }
            }
        }
    )
)

// Concrete instantiations used by the Socket getters above.
getsockopt_num!(getsockopt_int, c_int, int)
getsockopt_num!(getsockopt_i64, int64_t, i64)
getsockopt_num!(getsockopt_u64, uint64_t, u64)
/// Read a binary (byte-string) socket option via `zmq_getsockopt`.
///
/// Returns the option payload as an owned vector, or the zmq error if the
/// underlying call fails.
fn getsockopt_bytes(sock: Socket_, opt: c_int) -> Result<Vec<u8>, Error> {
    unsafe {
        // The only binary option in zeromq is ZMQ_IDENTITY, which can have
        // a max size of 255 bytes.
        let mut size = 255 as size_t;
        let mut value = Vec::with_capacity(size as uint);
        let r = zmq_getsockopt(
            sock,
            opt,
            value.as_mut_ptr() as *mut c_void,
            &mut size);
        if r == -1i32 {
            Err(errno_to_error())
        } else {
            // BUG FIX: the vector's *length* starts at 0 (only its capacity
            // is 255), so the previous `value.truncate(size)` was a no-op
            // and this function always returned an empty vector. `set_len`
            // exposes the `size` bytes zmq_getsockopt wrote into the buffer;
            // this is sound because zmq guarantees size <= the 255-byte
            // capacity we passed in.
            value.set_len(size as uint);
            Ok(value)
        }
    }
}
// Generate a numeric setsockopt wrapper: `$name` passes a `$ty` value to
// the C API by pointer with its exact byte size.
macro_rules! setsockopt_num(
    ($name:ident, $ty:ty) => (
        fn $name(sock: Socket_, opt: c_int, value: $ty) -> Result<(), Error> {
            unsafe {
                let size = mem::size_of::<$ty>() as size_t;
                if -1 == zmq_setsockopt(sock, opt, (&value as *const $ty) as *const c_void, size) {
                    Err(errno_to_error())
                } else {
                    Ok(())
                }
            }
        }
    )
)

// Concrete instantiations used by the Socket setters above.
setsockopt_num!(setsockopt_int, int)
setsockopt_num!(setsockopt_i64, i64)
setsockopt_num!(setsockopt_u64, u64)

/// Write a binary (byte-string) socket option (e.g. ZMQ_IDENTITY,
/// ZMQ_SUBSCRIBE) via `zmq_setsockopt`.
fn setsockopt_bytes(sock: Socket_, opt: c_int, value: &[u8]) -> Result<(), Error> {
    unsafe {
        let r = zmq_setsockopt(
            sock,
            opt,
            value.as_ptr() as *const c_void,
            value.len() as size_t
        );
        if r == -1i32 {
            Err(errno_to_error())
        } else {
            Ok(())
        }
    }
}

/// Translate the C library's thread-local errno into our `Error` enum.
fn errno_to_error() -> Error {
    Error::from_raw(unsafe { zmq_errno() })
}
Change immutable `static` items to `const`.
//! Module: zmq
#![crate_name = "zmq"]
#![license = "MIT/ASL2"]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![feature(phase, macro_rules)]
#[phase(plugin, link)]
extern crate log;
extern crate libc;
use libc::{c_int, c_long, c_void, size_t, c_char, int64_t, uint64_t};
use libc::consts::os::posix88;
use std::{mem, ptr, str, slice};
use std::fmt;
/// The ZMQ container that manages all the sockets
type Context_ = *mut c_void;
/// A ZMQ socket
type Socket_ = *mut c_void;
/// Size in bytes of the opaque C `zmq_msg_t` struct mirrored by `Msg_`.
/// Must stay in sync with the C definition or message storage corrupts.
const MSG_SIZE: uint = 48;
/// A message
type Msg_ = [c_char, ..MSG_SIZE];
// Raw FFI prototypes mirroring zmq.h. The `&Msg_` parameters lower to
// plain pointers at the FFI boundary where C expects `zmq_msg_t *`.
#[link(name = "zmq")]
extern {
    fn zmq_version(major: *mut c_int, minor: *mut c_int, patch: *mut c_int);
    fn zmq_ctx_new() -> Context_;
    fn zmq_ctx_destroy(ctx: Context_) -> c_int;
    fn zmq_errno() -> c_int;
    fn zmq_strerror(errnum: c_int) -> *const c_char;
    fn zmq_socket(ctx: Context_, typ: c_int) -> Socket_;
    fn zmq_close(socket: Socket_) -> c_int;
    fn zmq_getsockopt(socket: Socket_, opt: c_int, optval: *mut c_void, size: *mut size_t) -> c_int;
    fn zmq_setsockopt(socket: Socket_, opt: c_int, optval: *const c_void, size: size_t) -> c_int;
    fn zmq_bind(socket: Socket_, endpoint: *const c_char) -> c_int;
    fn zmq_connect(socket: Socket_, endpoint: *const c_char) -> c_int;
    fn zmq_msg_init(msg: &Msg_) -> c_int;
    fn zmq_msg_init_size(msg: &Msg_, size: size_t) -> c_int;
    fn zmq_msg_data(msg: &Msg_) -> *const u8;
    fn zmq_msg_size(msg: &Msg_) -> size_t;
    fn zmq_msg_close(msg: &Msg_) -> c_int;
    fn zmq_msg_send(msg: &Msg_, socket: Socket_, flags: c_int) -> c_int;
    fn zmq_msg_recv(msg: &Msg_, socket: Socket_, flags: c_int) -> c_int;
    fn zmq_poll(items: *mut PollItem, nitems: c_int, timeout: c_long) -> c_int;
}
/// Socket types
// Discriminants match the ZMQ_* socket-type constants in zmq.h, so the
// enum can be cast directly to c_int when calling zmq_socket.
#[allow(non_camel_case_types)]
#[deriving(Clone, Show)]
pub enum SocketType {
    PAIR = 0,
    PUB = 1,
    SUB = 2,
    REQ = 3,
    REP = 4,
    DEALER = 5,
    ROUTER = 6,
    PULL = 7,
    PUSH = 8,
    XPUB = 9,
    XSUB = 10,
}

/// Send/recv flag: perform the operation in non-blocking mode.
pub static DONTWAIT : int = 1;
/// Send flag: more message parts follow (multipart messages).
pub static SNDMORE : int = 2;
/// Socket-option and message-flag constants, with discriminants matching
/// the ZMQ_* values in zmq.h.
#[allow(non_camel_case_types)]
#[deriving(Clone)]
#[allow(non_camel_case_types)]
pub enum Constants {
    ZMQ_AFFINITY = 4,
    ZMQ_IDENTITY = 5,
    ZMQ_SUBSCRIBE = 6,
    ZMQ_UNSUBSCRIBE = 7,
    ZMQ_RATE = 8,
    ZMQ_RECOVERY_IVL = 9,
    ZMQ_MCAST_LOOP = 10,
    ZMQ_SNDBUF = 11,
    ZMQ_RCVBUF = 12,
    ZMQ_RCVMORE = 13,
    ZMQ_FD = 14,
    ZMQ_EVENTS = 15,
    ZMQ_TYPE = 16,
    ZMQ_LINGER = 17,
    ZMQ_RECONNECT_IVL = 18,
    ZMQ_BACKLOG = 19,
    ZMQ_RECOVERY_IVL_MSEC = 20,
    ZMQ_RECONNECT_IVL_MAX = 21,
    ZMQ_MAXMSGSIZE = 22,
    ZMQ_SNDHWM = 23,
    ZMQ_RCVHWM = 24,
    ZMQ_MAX_VSM_SIZE = 30,
    ZMQ_DELIMITER = 31,
    ZMQ_VSM = 32,
    ZMQ_MSG_MORE = 1,
    ZMQ_MSG_SHARED = 128,
    ZMQ_MSG_MASK = 129,
    ZMQ_HAUSNUMERO = 156384712,
}

impl Constants {
    /// The raw C value of this constant, suitable for the FFI calls.
    pub fn to_raw(&self) -> i32 {
        *self as i32
    }
    /// Map a raw C value back to the enum.
    // fails if `raw` is not a valid value
    pub fn from_raw(raw: i32) -> Constants {
        match raw {
            4         => ZMQ_AFFINITY,
            5         => ZMQ_IDENTITY,
            6         => ZMQ_SUBSCRIBE,
            7         => ZMQ_UNSUBSCRIBE,
            8         => ZMQ_RATE,
            9         => ZMQ_RECOVERY_IVL,
            10        => ZMQ_MCAST_LOOP,
            11        => ZMQ_SNDBUF,
            12        => ZMQ_RCVBUF,
            13        => ZMQ_RCVMORE,
            14        => ZMQ_FD,
            15        => ZMQ_EVENTS,
            16        => ZMQ_TYPE,
            17        => ZMQ_LINGER,
            18        => ZMQ_RECONNECT_IVL,
            19        => ZMQ_BACKLOG,
            20        => ZMQ_RECOVERY_IVL_MSEC,
            21        => ZMQ_RECONNECT_IVL_MAX,
            22        => ZMQ_MAXMSGSIZE,
            23        => ZMQ_SNDHWM,
            24        => ZMQ_RCVHWM,
            30        => ZMQ_MAX_VSM_SIZE,
            31        => ZMQ_DELIMITER,
            32        => ZMQ_VSM,
            1         => ZMQ_MSG_MORE,
            128       => ZMQ_MSG_SHARED,
            129       => ZMQ_MSG_MASK,
            156384712 => ZMQ_HAUSNUMERO,
            x         => fail!("invalid constant {}", x as int),
        }
    }
}
/// zmq error codes. POSIX errno values are reused where they exist; zmq
/// defines additional codes above ZMQ_HAUSNUMERO (156384712) for errnos
/// that are not portable and for zmq-native conditions.
#[deriving(Clone, Eq, PartialEq)]
pub enum Error {
    EACCES          = posix88::EACCES as int,
    EADDRINUSE      = posix88::EADDRINUSE as int,
    EAGAIN          = posix88::EAGAIN as int,
    EBUSY           = posix88::EBUSY as int,
    ECONNREFUSED    = posix88::ECONNREFUSED as int,
    EFAULT          = posix88::EFAULT as int,
    EHOSTUNREACH    = posix88::EHOSTUNREACH as int,
    EINPROGRESS     = posix88::EINPROGRESS as int,
    EINVAL          = posix88::EINVAL as int,
    EMFILE          = posix88::EMFILE as int,
    EMSGSIZE        = posix88::EMSGSIZE as int,
    ENAMETOOLONG    = posix88::ENAMETOOLONG as int,
    ENODEV          = posix88::ENODEV as int,
    ENOENT          = posix88::ENOENT as int,
    ENOMEM          = posix88::ENOMEM as int,
    ENOTCONN        = posix88::ENOTCONN as int,
    ENOTSOCK        = posix88::ENOTSOCK as int,
    EPROTO          = posix88::EPROTO as int,
    EPROTONOSUPPORT = posix88::EPROTONOSUPPORT as int,
    // magic number is EHAUSNUMERO + num
    ENOTSUP         = 156384713,
    ENOBUFS         = 156384715,
    ENETDOWN        = 156384716,
    EADDRNOTAVAIL   = 156384718,
    // native zmq error codes
    EFSM            = 156384763,
    ENOCOMPATPROTO  = 156384764,
    ETERM           = 156384765,
    EMTHREAD        = 156384766,
}

impl Error {
    /// The raw errno value of this error.
    pub fn to_raw(&self) -> i32 {
        *self as i32
    }
    /// Map a raw errno value back to the enum. Both the POSIX value and
    /// the zmq HAUSNUMERO-based alias map to the same variant.
    /// Fails (task failure) on an unknown code.
    pub fn from_raw(raw: i32) -> Error {
        match raw {
            posix88::EACCES          => EACCES,
            posix88::EADDRINUSE      => EADDRINUSE,
            posix88::EAGAIN          => EAGAIN,
            posix88::EBUSY           => EBUSY,
            posix88::ECONNREFUSED    => ECONNREFUSED,
            posix88::EFAULT          => EFAULT,
            posix88::EHOSTUNREACH    => EHOSTUNREACH,
            posix88::EINPROGRESS     => EINPROGRESS,
            posix88::EINVAL          => EINVAL,
            posix88::EMFILE          => EMFILE,
            pos88_alias_note!(),
            posix88::EMSGSIZE        => EMSGSIZE,
            posix88::ENAMETOOLONG    => ENAMETOOLONG,
            posix88::ENODEV          => ENODEV,
            posix88::ENOENT          => ENOENT,
            posix88::ENOMEM          => ENOMEM,
            posix88::ENOTCONN        => ENOTCONN,
            posix88::ENOTSOCK        => ENOTSOCK,
            posix88::EPROTO          => EPROTO,
            posix88::EPROTONOSUPPORT => EPROTONOSUPPORT,
            156384713                => ENOTSUP,
            156384714                => EPROTONOSUPPORT,
            156384715                => ENOBUFS,
            156384716                => ENETDOWN,
            156384717                => EADDRINUSE,
            156384718                => EADDRNOTAVAIL,
            156384719                => ECONNREFUSED,
            156384720                => EINPROGRESS,
            156384721                => ENOTSOCK,
            156384763                => EFSM,
            156384764                => ENOCOMPATPROTO,
            156384765                => ETERM,
            156384766                => EMTHREAD,
            x => {
                unsafe {
                    fail!("unknown error [{}]: {}",
                          x as int,
                          str::raw::from_c_str(zmq_strerror(x))
                    )
                }
            }
        }
    }
}
/// Return the current zeromq version as (major, minor, patch).
pub fn version() -> (int, int, int) {
    let mut major = 0;
    let mut minor = 0;
    let mut patch = 0;
    unsafe {
        zmq_version(&mut major, &mut minor, &mut patch);
    }
    (major as int, minor as int, patch as int)
}
/// zmq context, used to create sockets. Is thread safe, and can be safely
/// shared, but dropping it while sockets are still open will cause them to
/// close (see zmq_ctx_destroy(3)).
///
/// For this reason, one should use an Arc to share it, rather than any unsafe
/// trickery you might think up that would call the destructor.
pub struct Context {
    // Raw handle returned by zmq_ctx_new.
    ctx: Context_,
}

impl Context {
    /// Allocate a new zmq context via zmq_ctx_new.
    pub fn new() -> Context {
        Context {
            ctx: unsafe { zmq_ctx_new() }
        }
    }

    /// Create a socket of the given type in this context, or return the
    /// zmq error if zmq_socket yields a null handle.
    pub fn socket(&mut self, socket_type: SocketType) -> Result<Socket, Error> {
        let sock = unsafe {zmq_socket(self.ctx, socket_type as c_int)};
        if sock.is_null() {
            return Err(errno_to_error());
        }
        Ok(Socket { sock: sock, closed: false })
    }

    /// Try to destroy the context. This is different than the destructor; the
    /// destructor will loop when zmq_ctx_destroy returns EINTR
    pub fn destroy(&mut self) -> Result<(), Error> {
        if unsafe { zmq_ctx_destroy(self.ctx) } == -1i32 {
            Err(errno_to_error())
        } else {
            Ok(())
        }
    }
}

impl Drop for Context {
    // Retries destroy until it succeeds or the error is EFAULT.
    // NOTE(review): the doc comment on `destroy` says the destructor loops
    // on EINTR, but this condition retries on *any* error except EFAULT —
    // confirm whether `!= EFAULT` or `== EINTR` is the intended predicate.
    fn drop(&mut self) {
        debug!("context dropped");
        let mut e = self.destroy();
        while e.is_err() && (e.unwrap_err() != EFAULT) {
            e = self.destroy();
        }
    }
}
/// A zmq socket handle; closed at most once (tracked by `closed`).
pub struct Socket {
    // Raw handle returned by zmq_socket.
    sock: Socket_,
    // True once zmq_close has been called, so Drop won't double-close.
    closed: bool
}

impl Drop for Socket {
    // NOTE(review): fails (task failure) if zmq_close errors during drop —
    // failing inside drop can abort the process if already unwinding.
    fn drop(&mut self) {
        match self.close_final() {
            Ok(()) => { debug!("socket dropped") },
            Err(e) => fail!(e.to_string())
        }
    }
}
impl Socket {
    /// Accept connections on a socket.
    pub fn bind(&mut self, endpoint: &str) -> Result<(), Error> {
        let rc = endpoint.with_c_str (|cstr| {
            unsafe {zmq_bind(self.sock, cstr)}
        });
        if rc == -1i32 { Err(errno_to_error()) } else { Ok(()) }
    }

    /// Connect a socket.
    pub fn connect(&mut self, endpoint: &str) -> Result<(), Error> {
        let rc = endpoint.with_c_str (|cstr| {
            unsafe {zmq_connect(self.sock, cstr)}
        });
        if rc == -1i32 { Err(errno_to_error()) } else { Ok(()) }
    }

    /// Send a message
    // Builds a temporary zmq_msg_t on the stack, copies `data` into it,
    // hands it to zmq_msg_send, and always closes the msg afterwards.
    pub fn send(&mut self, data: &[u8], flags: int) -> Result<(), Error> {
        unsafe {
            let base_ptr = data.as_ptr();
            let len = data.len();
            let msg = [0, ..MSG_SIZE];
            // Copy the data into the message.
            let rc = zmq_msg_init_size(&msg, len as size_t);
            if rc == -1i32 { return Err(errno_to_error()); }
            ptr::copy_memory(zmq_msg_data(&msg) as *mut u8, base_ptr, len);
            let rc = zmq_msg_send(&msg, self.sock, flags as c_int);
            let _ = zmq_msg_close(&msg);
            if rc == -1i32 { Err(errno_to_error()) } else { Ok(()) }
        }
    }

    /// Send a UTF-8 string as a message.
    pub fn send_str(&mut self, data: &str, flags: int) -> Result<(), Error> {
        self.send(data.as_bytes(), flags)
    }

    /// Receive a message into a `Message`. The length passed to zmq_msg_recv
    /// is the length of the buffer.
    pub fn recv(&mut self, msg: &mut Message, flags: int) -> Result<(), Error> {
        let rc = unsafe {
            zmq_msg_recv(&msg.msg, self.sock, flags as c_int)
        };
        if rc == -1i32 {
            Err(errno_to_error())
        } else {
            Ok(())
        }
    }

    /// Receive into a freshly allocated `Message`.
    pub fn recv_msg(&mut self, flags: int) -> Result<Message, Error> {
        let mut msg = Message::new();
        match self.recv(&mut msg, flags) {
            Ok(()) => Ok(msg),
            Err(e) => Err(e),
        }
    }

    /// Receive a message and copy its payload into an owned byte vector.
    pub fn recv_bytes(&mut self, flags: int) -> Result<Vec<u8>, Error> {
        match self.recv_msg(flags) {
            Ok(msg) => Ok(msg.to_bytes()),
            Err(e) => Err(e),
        }
    }

    /// Receive a message and copy its payload into an owned String
    /// (fails on non-UTF-8 payloads, via Message::to_string).
    pub fn recv_str(&mut self, flags: int) -> Result<String, Error> {
        match self.recv_msg(flags) {
            Ok(msg) => Ok(msg.to_string()),
            Err(e) => Err(e),
        }
    }

    /// Close the socket once; subsequent calls are no-ops.
    // `closed` is set before calling zmq_close, so even on error the
    // socket is never closed twice.
    pub fn close(&mut self) -> Result<(), Error> {
        if !self.closed {
            self.closed = true;
            if unsafe { zmq_close(self.sock) } == -1i32 {
                return Err(errno_to_error());
            }
        }
        Ok(())
    }

    /// Close without updating `closed`; used by Drop.
    pub fn close_final(&mut self) -> Result<(), Error> {
        if !self.closed {
            if unsafe { zmq_close(self.sock) } == -1i32 {
                return Err(errno_to_error());
            }
        }
        Ok(())
    }

    /// The socket's type (ZMQ_TYPE), mapped back to `SocketType`.
    pub fn get_socket_type(&self) -> Result<SocketType, Error> {
        getsockopt_int(self.sock, ZMQ_TYPE.to_raw()).map(|ty| {
            match ty {
                0 => PAIR,
                1 => PUB,
                2 => SUB,
                3 => REQ,
                4 => REP,
                5 => DEALER,
                6 => ROUTER,
                7 => PULL,
                8 => PUSH,
                9 => XPUB,
                10 => XSUB,
                _ => fail!("socket type is out of range!")
            }
        })
    }

    // --- Socket option getters: thin wrappers over zmq_getsockopt ---

    /// True if more parts of a multipart message follow (ZMQ_RCVMORE).
    pub fn get_rcvmore(&self) -> Result<bool, Error> {
        getsockopt_i64(self.sock, ZMQ_RCVMORE.to_raw()).and_then (|o| {
            Ok(o == 1i64)
        })
    }
    pub fn get_maxmsgsize(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_MAXMSGSIZE.to_raw())
    }
    pub fn get_sndhwm(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_SNDHWM.to_raw())
    }
    pub fn get_rcvhwm(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_RCVHWM.to_raw())
    }
    pub fn get_affinity(&self) -> Result<u64, Error> {
        getsockopt_u64(self.sock, ZMQ_AFFINITY.to_raw())
    }
    pub fn get_identity(&self) -> Result<Vec<u8>, Error> {
        getsockopt_bytes(self.sock, ZMQ_IDENTITY.to_raw())
    }
    pub fn get_rate(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_RATE.to_raw())
    }
    pub fn get_recovery_ivl(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_RECOVERY_IVL.to_raw())
    }
    pub fn get_recovery_ivl_msec(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_RECOVERY_IVL_MSEC.to_raw())
    }
    /// Multicast loopback flag decoded from the 0/1 integer option.
    pub fn get_mcast_loop(&self) -> Result<bool, Error> {
        getsockopt_i64(self.sock, ZMQ_MCAST_LOOP.to_raw()).and_then(|o| {
            Ok(o == 1i64)
        })
    }
    pub fn get_sndbuf(&self) -> Result<u64, Error> {
        getsockopt_u64(self.sock, ZMQ_SNDBUF.to_raw())
    }
    pub fn get_rcvbuf(&self) -> Result<u64, Error> {
        getsockopt_u64(self.sock, ZMQ_RCVBUF.to_raw())
    }
    pub fn get_linger(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_LINGER.to_raw())
    }
    pub fn get_reconnect_ivl(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_RECONNECT_IVL.to_raw())
    }
    pub fn get_reconnect_ivl_max(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_RECONNECT_IVL_MAX.to_raw())
    }
    pub fn get_backlog(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_BACKLOG.to_raw())
    }
    pub fn get_fd(&self) -> Result<i64, Error> {
        getsockopt_i64(self.sock, ZMQ_FD.to_raw())
    }
    pub fn get_events(&self) -> Result<int, Error> {
        getsockopt_int(self.sock, ZMQ_EVENTS.to_raw())
    }

    // --- Socket option setters: thin wrappers over zmq_setsockopt ---

    pub fn set_maxmsgsize(&self, value: i64) -> Result<(), Error> {
        setsockopt_i64(self.sock, ZMQ_MAXMSGSIZE.to_raw(), value)
    }
    pub fn set_sndhwm(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_SNDHWM.to_raw(), value)
    }
    pub fn set_rcvhwm(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_RCVHWM.to_raw(), value)
    }
    pub fn set_affinity(&self, value: u64) -> Result<(), Error> {
        setsockopt_u64(self.sock, ZMQ_AFFINITY.to_raw(), value)
    }
    pub fn set_identity(&self, value: &[u8]) -> Result<(), Error> {
        setsockopt_bytes(self.sock, ZMQ_IDENTITY.to_raw(), value)
    }
    pub fn set_subscribe(&self, value: &[u8]) -> Result<(), Error> {
        setsockopt_bytes(self.sock, ZMQ_SUBSCRIBE.to_raw(), value)
    }
    pub fn set_unsubscribe(&self, value: &[u8]) -> Result<(), Error> {
        setsockopt_bytes(self.sock, ZMQ_UNSUBSCRIBE.to_raw(), value)
    }
    pub fn set_rate(&self, value: i64) -> Result<(), Error> {
        setsockopt_i64(self.sock, ZMQ_RATE.to_raw(), value)
    }
    pub fn set_recovery_ivl(&self, value: i64) -> Result<(), Error> {
        setsockopt_i64(self.sock, ZMQ_RECOVERY_IVL.to_raw(), value)
    }
    pub fn set_recovery_ivl_msec(&self, value: i64) -> Result<(), Error> {
        setsockopt_i64(self.sock, ZMQ_RECOVERY_IVL_MSEC.to_raw(), value)
    }
    /// Encode the bool as 0/1 i64 because the C option takes an integer.
    pub fn set_mcast_loop(&self, value: bool) -> Result<(), Error> {
        let value = if value { 1i64 } else { 0i64 };
        setsockopt_i64(self.sock, ZMQ_MCAST_LOOP.to_raw(), value)
    }
    pub fn set_sndbuf(&self, value: u64) -> Result<(), Error> {
        setsockopt_u64(self.sock, ZMQ_SNDBUF.to_raw(), value)
    }
    pub fn set_rcvbuf(&self, value: u64) -> Result<(), Error> {
        setsockopt_u64(self.sock, ZMQ_RCVBUF.to_raw(), value)
    }
    pub fn set_linger(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_LINGER.to_raw(), value)
    }
    pub fn set_reconnect_ivl(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_RECONNECT_IVL.to_raw(), value)
    }
    pub fn set_reconnect_ivl_max(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_RECONNECT_IVL_MAX.to_raw(), value)
    }
    pub fn set_backlog(&self, value: int) -> Result<(), Error> {
        setsockopt_int(self.sock, ZMQ_BACKLOG.to_raw(), value)
    }

    /// Build a `PollItem` for this socket usable with `poll()`.
    /// `fd` is 0 so zmq_poll watches the zmq socket itself, not a raw fd.
    pub fn as_poll_item<'a>(&self, events: i16) -> PollItem<'a> {
        PollItem {
            socket: self.sock,
            fd: 0,
            events: events,
            revents: 0
        }
    }
}
/// An owned zmq message, wrapping the opaque C `zmq_msg_t` storage.
pub struct Message {
    // Raw bytes of the C zmq_msg_t struct (see `Msg_`); managed entirely
    // by the zmq_msg_* C functions.
    msg: Msg_
}

impl Drop for Message {
    /// Release the message's C-side storage; errors from zmq_msg_close
    /// are deliberately ignored since drop cannot fail.
    fn drop(&mut self) {
        unsafe {
            let _ = zmq_msg_close(&self.msg);
        }
    }
}

impl Message {
    /// Create an empty, initialized message.
    pub fn new() -> Message {
        unsafe {
            let message = Message { msg: [0, ..MSG_SIZE] };
            let _ = zmq_msg_init(&message.msg);
            message
        }
    }
    /// Run `f` over a borrowed view of the message payload without copying.
    pub fn with_bytes<T>(&self, f: |&[u8]| -> T) -> T {
        unsafe {
            let data = zmq_msg_data(&self.msg);
            let len = zmq_msg_size(&self.msg) as uint;
            slice::raw::buf_as_slice(data, len, f)
        }
    }
    /// Borrow the payload as a byte slice tied to the message's lifetime.
    pub fn as_bytes<'a>(&'a self) -> &'a [u8] {
        // This is safe because we're constraining the slice to the lifetime of
        // this message.
        unsafe {
            let data = zmq_msg_data(&self.msg);
            let len = zmq_msg_size(&self.msg) as uint;
            ::std::mem::transmute(::std::raw::Slice {
                data: data,
                len: len,
            })
        }
    }
    /// Run `f` over the payload interpreted as UTF-8.
    /// NOTE: unwraps the UTF-8 check, so this fails on non-UTF-8 payloads.
    pub fn with_str<T>(&self, f: |&str| -> T) -> T {
        self.with_bytes(|v| f(str::from_utf8(v).unwrap()))
    }
    /// Borrow the payload as a str, or None if it is not valid UTF-8.
    pub fn as_str<'a>(&'a self) -> Option<&'a str> {
        str::from_utf8(self.as_bytes())
    }
    /// Copy the payload into an owned byte vector.
    pub fn to_bytes(&self) -> Vec<u8> {
        self.with_bytes(|v| v.to_vec())
    }
    /// Copy the payload into an owned String (fails on invalid UTF-8,
    /// via `with_str`).
    pub fn to_string(&self) -> String {
        self.with_str(|s| s.to_string())
    }
}
/// Poll event flags for `PollItem::events` / `revents` (see zmq_poll(3)).
/// Declared `const` rather than `static`: they are immutable compile-time
/// constants, so `const` is the idiomatic zero-cost form (this matches the
/// project's own "static -> const" change noted in its history).
/// At least one message may be received without blocking.
pub const POLLIN : i16 = 1i16;
/// At least one message may be sent without blocking.
pub const POLLOUT : i16 = 2i16;
/// An error condition is present on the socket.
pub const POLLERR : i16 = 4i16;
/// One entry for `poll()`; layout must match C `zmq_pollitem_t`, hence
/// `#[repr(C)]` and the exact field order (socket, fd, events, revents).
// NOTE(review): lifetime parameter `'a` is not used by any field — it
// presumably documents that the item must not outlive its socket; confirm.
#[repr(C)]
pub struct PollItem<'a> {
    socket: Socket_,
    fd: c_int,
    events: i16,
    revents: i16
}

impl<'a> PollItem<'a> {
    /// Events that actually occurred, filled in by `poll()`.
    pub fn get_revents(&self) -> i16 {
        self.revents
    }
}
/// Poll a set of items, blocking up to `timeout` (zmq_poll time units).
/// Returns the number of items with events, or the zmq error on failure.
/// `revents` is updated in place on each item.
pub fn poll<'a>(items: &mut [PollItem<'a>], timeout: i64) -> Result<int, Error> {
    unsafe {
        let rc = zmq_poll(
            items.as_mut_ptr(),
            items.len() as c_int,
            timeout);
        if rc == -1i32 {
            Err(errno_to_error())
        } else {
            Ok(rc as int)
        }
    }
}

impl fmt::Show for Error {
    /// Return the error string for an error.
    // Formats via the C library's zmq_strerror, using the enum's
    // discriminant as the raw errno value.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        unsafe {
            write!(f, "{}",
                   str::raw::from_c_str(zmq_strerror(*self as c_int)))
        }
    }
}
// Generate a numeric getsockopt wrapper: `$name` reads a `$c_ty` from the
// C API and widens/casts it to the public `$ty`.
macro_rules! getsockopt_num(
    ($name:ident, $c_ty:ty, $ty:ty) => (
        fn $name(sock: Socket_, opt: c_int) -> Result<$ty, Error> {
            unsafe {
                let mut value: $c_ty = 0;
                let value_ptr = &mut value as *mut $c_ty;
                // zmq_getsockopt takes the buffer size in/out by pointer.
                let mut size = mem::size_of::<$c_ty>() as size_t;
                if -1 == zmq_getsockopt(sock, opt, value_ptr as *mut c_void, &mut size) {
                    Err(errno_to_error())
                } else {
                    Ok(value as $ty)
                }
            }
        }
    )
)

// Concrete instantiations used by the Socket getters above.
getsockopt_num!(getsockopt_int, c_int, int)
getsockopt_num!(getsockopt_i64, int64_t, i64)
getsockopt_num!(getsockopt_u64, uint64_t, u64)
/// Read a binary (byte-string) socket option via `zmq_getsockopt`.
///
/// Returns the option payload as an owned vector, or the zmq error if the
/// underlying call fails.
fn getsockopt_bytes(sock: Socket_, opt: c_int) -> Result<Vec<u8>, Error> {
    unsafe {
        // The only binary option in zeromq is ZMQ_IDENTITY, which can have
        // a max size of 255 bytes.
        let mut size = 255 as size_t;
        let mut value = Vec::with_capacity(size as uint);
        let r = zmq_getsockopt(
            sock,
            opt,
            value.as_mut_ptr() as *mut c_void,
            &mut size);
        if r == -1i32 {
            Err(errno_to_error())
        } else {
            // BUG FIX: the vector's *length* starts at 0 (only its capacity
            // is 255), so the previous `value.truncate(size)` was a no-op
            // and this function always returned an empty vector. `set_len`
            // exposes the `size` bytes zmq_getsockopt wrote into the buffer;
            // this is sound because zmq guarantees size <= the 255-byte
            // capacity we passed in.
            value.set_len(size as uint);
            Ok(value)
        }
    }
}
// Generate a numeric setsockopt wrapper: `$name` passes a `$ty` value to
// the C API by pointer with its exact byte size.
macro_rules! setsockopt_num(
    ($name:ident, $ty:ty) => (
        fn $name(sock: Socket_, opt: c_int, value: $ty) -> Result<(), Error> {
            unsafe {
                let size = mem::size_of::<$ty>() as size_t;
                if -1 == zmq_setsockopt(sock, opt, (&value as *const $ty) as *const c_void, size) {
                    Err(errno_to_error())
                } else {
                    Ok(())
                }
            }
        }
    )
)

// Concrete instantiations used by the Socket setters above.
setsockopt_num!(setsockopt_int, int)
setsockopt_num!(setsockopt_i64, i64)
setsockopt_num!(setsockopt_u64, u64)

/// Write a binary (byte-string) socket option (e.g. ZMQ_IDENTITY,
/// ZMQ_SUBSCRIBE) via `zmq_setsockopt`.
fn setsockopt_bytes(sock: Socket_, opt: c_int, value: &[u8]) -> Result<(), Error> {
    unsafe {
        let r = zmq_setsockopt(
            sock,
            opt,
            value.as_ptr() as *const c_void,
            value.len() as size_t
        );
        if r == -1i32 {
            Err(errno_to_error())
        } else {
            Ok(())
        }
    }
}

/// Translate the C library's thread-local errno into our `Error` enum.
fn errno_to_error() -> Error {
    Error::from_raw(unsafe { zmq_errno() })
}
|
//! TODO Fill in
use render;
use gdk_pixbuf::Pixbuf;
use glib::translate::ToGlibPtr;
use std::fmt::{self, Display, Formatter};
use std::default::Default;
use std::rc::Rc;
use rlua::{self, Table, Lua, UserData, ToLua, Value};
use super::object::{Object, Objectable};
use super::class::{self, Class, ClassBuilder};
/// Rust-side state for the `awesome` global object.
#[derive(Clone, Debug)]
pub struct AwesomeState {
    // TODO Fill in
    // Placeholder field until real state is added.
    dummy: i32
}

/// Lua-side wrapper: the `awesome` object as a Lua table.
pub struct Awesome<'lua>(Table<'lua>);
impl Default for AwesomeState {
    fn default() -> Self {
        AwesomeState {
            dummy: 0
        }
    }
}

impl <'lua> Awesome<'lua> {
    /// Allocate the `awesome` object in the Lua state.
    fn new(lua: &Lua) -> rlua::Result<Object> {
        // TODO FIXME
        // NOTE(review): allocates with the *button* class, per the TODO —
        // presumably a placeholder until an awesome class exists.
        let class = class::button_class(lua)?;
        Ok(Awesome::allocate(lua, class)?.build())
    }
}
impl Display for AwesomeState {
    // Shows only the value's address; there is no meaningful state yet.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "Awesome: {:p}", self)
    }
}

impl <'lua> ToLua<'lua> for Awesome<'lua> {
    /// Convert to a Lua value by handing back the wrapped table.
    fn to_lua(self, lua: &'lua Lua) -> rlua::Result<Value<'lua>> {
        self.0.to_lua(lua)
    }
}

// Marker impl so AwesomeState can be stored as Lua userdata.
impl UserData for AwesomeState {}
/// Register the `awesome` class with the Lua state: builds the class,
/// installs its methods and properties, and saves it as "awesome".
pub fn init(lua: &Lua) -> rlua::Result<Class> {
    property_setup(lua, method_setup(lua, Class::builder(lua, Some(Rc::new(Awesome::new)), None, None)?)?)?
        .save_class("awesome")?
        .build()
}

/// Install the `awesome.*` methods on the class builder.
fn method_setup<'lua>(lua: &'lua Lua, builder: ClassBuilder<'lua>) -> rlua::Result<ClassBuilder<'lua>> {
    // TODO Do properly
    use super::dummy;
    // "connect_signal" and "quit" are stubbed out with `dummy` for now.
    builder.method("connect_signal".into(), lua.create_function(dummy))?
           .method("register_xproperty".into(), lua.create_function(register_xproperty))?
           .method("xkb_get_group_names".into(), lua.create_function(xkb_get_group_names))?
           .method("restart".into(), lua.create_function(restart))?
           .method("load_image".into(), lua.create_function(load_image))?
           .method("quit".into(), lua.create_function(dummy))
}
/// Registers a new X property
/// This actually does nothing, since this is Wayland.
fn register_xproperty<'lua>(_: &'lua Lua, _: Value<'lua>) -> rlua::Result<()> {
    warn!("register_xproperty not supported");
    Ok(())
}

/// Get layout short names
// Stub: logs a warning and returns unit instead of the group names.
fn xkb_get_group_names<'lua>(_: &'lua Lua, _: ()) -> rlua::Result<()> {
    warn!("xkb_get_group_names not supported");
    Ok(())
}

/// Restart Awesome by restarting the Lua thread
// A failed send is only logged; the Lua call still returns Ok(()).
fn restart<'lua>(_: &'lua Lua, _: ()) -> rlua::Result<()> {
    use lua::{self, LuaQuery};
    if let Err(err) = lua::send(LuaQuery::Restart) {
        warn!("Could not restart Lua thread {:#?}", err);
    }
    Ok(())
}
/// Load an image from the given path
/// Returns either a cairo surface as light user data, nil and an error message
fn load_image<'lua>(lua: &'lua Lua, file_path: String) -> rlua::Result<Value<'lua>> {
    // TODO Move to render module
    let pixbuf = Pixbuf::new_from_file(file_path.as_str())
        .map_err(|err| rlua::Error::RuntimeError(format!("{}", err)))?;
    let surface = render::load_surface_from_pixbuf(pixbuf);
    // UGH, I wanted to do to_glib_full, but that isn't defined apparently
    // So now I have to ignore the lifetime completely and just forget about the surface.
    // NOTE(review): `mem::forget` deliberately leaks the surface so the raw
    // pointer handed to Lua stays valid; nothing visible here ever frees it.
    let surface_ptr = surface.to_glib_none().0;
    ::std::mem::forget(surface);
    rlua::LightUserData(surface_ptr as _).to_lua(lua)
}
/// Install placeholder `awesome.*` properties (version, themes_path,
/// conffile) on the class builder.
fn property_setup<'lua>(lua: &'lua Lua, builder: ClassBuilder<'lua>) -> rlua::Result<ClassBuilder<'lua>> {
    // TODO Do properly
    builder.dummy_property("version".into(), "0".to_lua(lua)?)?
           .dummy_property("themes_path".into(), "/usr/share/awesome/themes".to_lua(lua)?)?
           .dummy_property("conffile".into(), "".to_lua(lua)?)
}

// Wire Awesome up to the Object/Objectable machinery for AwesomeState.
impl_objectable!(Awesome, AwesomeState);
Add a real implementation for `awesome.quit` (terminates the compositor via `rustwlc::terminate`) instead of the `dummy` stub.
//! TODO Fill in
use render;
use gdk_pixbuf::Pixbuf;
use glib::translate::ToGlibPtr;
use std::fmt::{self, Display, Formatter};
use std::default::Default;
use std::rc::Rc;
use rlua::{self, Table, Lua, UserData, ToLua, Value};
use super::object::{Object, Objectable};
use super::class::{self, Class, ClassBuilder};
/// Rust-side state for the `awesome` global object.
#[derive(Clone, Debug)]
pub struct AwesomeState {
    // TODO Fill in
    // Placeholder field until real state is added.
    dummy: i32
}

/// Lua-side wrapper: the `awesome` object as a Lua table.
pub struct Awesome<'lua>(Table<'lua>);
impl Default for AwesomeState {
    fn default() -> Self {
        AwesomeState {
            dummy: 0
        }
    }
}

impl <'lua> Awesome<'lua> {
    /// Allocate the `awesome` object in the Lua state.
    fn new(lua: &Lua) -> rlua::Result<Object> {
        // TODO FIXME
        // NOTE(review): allocates with the *button* class, per the TODO —
        // presumably a placeholder until an awesome class exists.
        let class = class::button_class(lua)?;
        Ok(Awesome::allocate(lua, class)?.build())
    }
}
impl Display for AwesomeState {
    // Shows only the value's address; there is no meaningful state yet.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "Awesome: {:p}", self)
    }
}

impl <'lua> ToLua<'lua> for Awesome<'lua> {
    /// Convert to a Lua value by handing back the wrapped table.
    fn to_lua(self, lua: &'lua Lua) -> rlua::Result<Value<'lua>> {
        self.0.to_lua(lua)
    }
}

// Marker impl so AwesomeState can be stored as Lua userdata.
impl UserData for AwesomeState {}
/// Register the `awesome` class with the Lua state: builds the class,
/// installs its methods and properties, and saves it as "awesome".
pub fn init(lua: &Lua) -> rlua::Result<Class> {
    property_setup(lua, method_setup(lua, Class::builder(lua, Some(Rc::new(Awesome::new)), None, None)?)?)?
        .save_class("awesome")?
        .build()
}

/// Install the `awesome.*` methods on the class builder.
fn method_setup<'lua>(lua: &'lua Lua, builder: ClassBuilder<'lua>) -> rlua::Result<ClassBuilder<'lua>> {
    // TODO Do properly
    use super::dummy;
    // "connect_signal" is still a stub; "quit" has a real implementation.
    builder.method("connect_signal".into(), lua.create_function(dummy))?
           .method("register_xproperty".into(), lua.create_function(register_xproperty))?
           .method("xkb_get_group_names".into(), lua.create_function(xkb_get_group_names))?
           .method("restart".into(), lua.create_function(restart))?
           .method("load_image".into(), lua.create_function(load_image))?
           .method("quit".into(), lua.create_function(quit))
}
/// Registers a new X property
/// This actually does nothing, since this is Wayland.
fn register_xproperty<'lua>(_: &'lua Lua, _: Value<'lua>) -> rlua::Result<()> {
    warn!("register_xproperty not supported");
    Ok(())
}

/// Get layout short names
// Stub: logs a warning and returns unit instead of the group names.
fn xkb_get_group_names<'lua>(_: &'lua Lua, _: ()) -> rlua::Result<()> {
    warn!("xkb_get_group_names not supported");
    Ok(())
}

/// Restart Awesome by restarting the Lua thread
// A failed send is only logged; the Lua call still returns Ok(()).
fn restart<'lua>(_: &'lua Lua, _: ()) -> rlua::Result<()> {
    use lua::{self, LuaQuery};
    if let Err(err) = lua::send(LuaQuery::Restart) {
        warn!("Could not restart Lua thread {:#?}", err);
    }
    Ok(())
}
/// Load an image from the given path
/// Returns either a cairo surface as light user data, nil and an error message
fn load_image<'lua>(lua: &'lua Lua, file_path: String) -> rlua::Result<Value<'lua>> {
    // TODO Move to render module
    let pixbuf = Pixbuf::new_from_file(file_path.as_str())
        .map_err(|err| rlua::Error::RuntimeError(format!("{}", err)))?;
    let surface = render::load_surface_from_pixbuf(pixbuf);
    // UGH, I wanted to do to_glib_full, but that isn't defined apparently
    // So now I have to ignore the lifetime completely and just forget about the surface.
    // NOTE(review): `mem::forget` deliberately leaks the surface so the raw
    // pointer handed to Lua stays valid; nothing visible here ever frees it.
    let surface_ptr = surface.to_glib_none().0;
    ::std::mem::forget(surface);
    rlua::LightUserData(surface_ptr as _).to_lua(lua)
}

/// Quit Awesome by terminating the compositor.
fn quit<'lua>(_: &'lua Lua, _: ()) -> rlua::Result<()> {
    ::rustwlc::terminate();
    Ok(())
}
/// Install placeholder `awesome.*` properties (version, themes_path,
/// conffile) on the class builder.
fn property_setup<'lua>(lua: &'lua Lua, builder: ClassBuilder<'lua>) -> rlua::Result<ClassBuilder<'lua>> {
    // TODO Do properly
    builder.dummy_property("version".into(), "0".to_lua(lua)?)?
           .dummy_property("themes_path".into(), "/usr/share/awesome/themes".to_lua(lua)?)?
           .dummy_property("conffile".into(), "".to_lua(lua)?)
}

// Wire Awesome up to the Object/Objectable machinery for AwesomeState.
impl_objectable!(Awesome, AwesomeState);
|
use alloc::collections::{BTreeSet, VecDeque};
use alloc::sync::{Arc, Weak};
use core::borrow::Borrow;
use core::cmp::{self, Eq, Ordering, PartialEq, PartialOrd};
use core::fmt::{self, Debug};
use core::intrinsics;
use core::ops::{Deref, DerefMut};
use spin::Mutex;
use syscall::{
flag::MapFlags,
error::*,
};
use crate::arch::paging::PAGE_SIZE;
use crate::context::file::FileDescriptor;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::Frame;
use crate::paging::{ActivePageTable, InactivePageTable, PageTableType, Page, PageIter, PhysicalAddress, VirtualAddress, VirtualAddressType};
use crate::paging::entry::EntryFlags;
use crate::paging::mapper::MapperFlushAll;
use crate::paging::temporary_page::TemporaryPage;
/// Round down to the nearest multiple of page size
pub fn round_down_pages(number: usize) -> usize {
number - number % PAGE_SIZE
}
/// Round up to the nearest multiple of the page size.
pub fn round_up_pages(number: usize) -> usize {
    // Biasing by one-less-than-a-page and rounding down is equivalent to
    // rounding up; exact multiples are unchanged.
    let biased = number + (PAGE_SIZE - 1);
    round_down_pages(biased)
}
/// Translate user-facing `MapFlags` protection bits into page-table
/// `EntryFlags`. Mapped pages are always present and user-accessible;
/// PROT_WRITE adds WRITABLE, and the *absence* of PROT_EXEC adds NO_EXECUTE.
pub fn entry_flags(flags: MapFlags) -> EntryFlags {
    let mut result = EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE;

    //TODO: PROT_READ

    if flags.contains(MapFlags::PROT_WRITE) {
        result |= EntryFlags::WRITABLE;
    }

    if !flags.contains(MapFlags::PROT_EXEC) {
        result |= EntryFlags::NO_EXECUTE;
    }

    result
}
/// The set of memory grants belonging to a user context, ordered so that
/// address lookups can use BTreeSet range queries.
#[derive(Debug, Default)]
pub struct UserGrants {
    // Grants ordered by their region (see Grant's Ord via Region).
    pub inner: BTreeSet<Grant>,
}
impl UserGrants {
    /// Returns the grant, if any, which occupies the specified address
    pub fn contains(&self, address: VirtualAddress) -> Option<&Grant> {
        let byte = Region::byte(address);
        // The candidate is the last grant starting at or before `address`;
        // it matches only if it actually covers that byte.
        self.inner
            .range(..=byte)
            .next_back()
            .filter(|existing| existing.occupies(byte))
    }
    /// Returns an iterator over all grants that occupy some part of the
    /// requested region
    pub fn conflicts<'a>(&'a self, requested: Region) -> impl Iterator<Item = &'a Grant> + 'a {
        // Start from the grant overlapping the requested start (if any),
        // then walk forward while regions still intersect the request.
        let start = self.contains(requested.start_address());
        let start_region = start.map(Region::from).unwrap_or(requested);
        self
            .inner
            .range(start_region..)
            .take_while(move |region| !region.intersect(requested).is_empty())
    }
    /// Return a free region with the specified size
    // Simple bump strategy: place the new region just past the highest
    // existing grant (or at the grant offset if none/too low).
    pub fn find_free(&self, size: usize) -> Region {
        // Get last used region
        let last = self.inner.iter().next_back().map(Region::from).unwrap_or(Region::new(VirtualAddress::new(0), 0));
        // At the earliest, start at grant offset
        let address = cmp::max(last.end_address().data(), crate::USER_GRANT_OFFSET);
        // Create new region
        Region::new(VirtualAddress::new(address), size)
    }
    /// Return a free region, respecting the user's hinted address and flags. Address may be null.
    pub fn find_free_at(&mut self, address: VirtualAddress, size: usize, flags: MapFlags) -> Result<Region> {
        if address == VirtualAddress::new(0) {
            // Free hands!
            return Ok(self.find_free(size));
        }
        // The user wished to have this region...
        let mut requested = Region::new(address, size);
        // NOTE(review): this rejects only when the region is out of range
        // AND misaligned — a misaligned address inside the valid range is
        // accepted. Confirm whether `||` was intended here.
        if
            requested.end_address().data() >= crate::PML4_SIZE * 256 // There are 256 PML4 entries reserved for userspace
            && address.data() % PAGE_SIZE != 0
        {
            // ... but it was invalid
            return Err(Error::new(EINVAL));
        }
        if let Some(grant) = self.contains(requested.start_address()) {
            // ... but it already exists
            if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) {
                println!("grant: conflicts with: {:#x} - {:#x}", grant.start_address().data(), grant.end_address().data());
                return Err(Error::new(EEXIST));
            } else if flags.contains(MapFlags::MAP_FIXED) {
                // TODO: Overwrite existing grant
                return Err(Error::new(EOPNOTSUPP));
            } else {
                // TODO: Find grant close to requested address?
                requested = self.find_free(requested.size());
            }
        }
        Ok(requested)
    }
}
// Allow a UserGrants collection to be used directly as its underlying
// BTreeSet of grants.
impl Deref for UserGrants {
    type Target = BTreeSet<Grant>;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl DerefMut for UserGrants {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
/// A contiguous span of virtual memory: a start address plus a byte size.
/// Note that equality and ordering consider only `start` (see the trait
/// impls below), so a `Region` can serve as a lookup key in ordered sets.
#[derive(Clone, Copy)]
pub struct Region {
    /// First virtual address of the span
    start: VirtualAddress,
    /// Length of the span in bytes; may be zero (see `is_empty`)
    size: usize,
}
impl Region {
    /// Create a new region with the given size
    pub fn new(start: VirtualAddress, size: usize) -> Self {
        Self { start, size }
    }

    /// Create a new region spanning exactly one byte
    pub fn byte(address: VirtualAddress) -> Self {
        Self::new(address, 1)
    }

    /// Create a new region spanning between the start and end address
    /// (exclusive end). An end before the start yields an empty region.
    pub fn between(start: VirtualAddress, end: VirtualAddress) -> Self {
        Self::new(
            start,
            end.data().saturating_sub(start.data()),
        )
    }

    /// Return the part of the specified region that intersects with self.
    pub fn intersect(&self, other: Self) -> Self {
        Self::between(
            cmp::max(self.start_address(), other.start_address()),
            cmp::min(self.end_address(), other.end_address()),
        )
    }

    /// Get the start address of the region
    pub fn start_address(&self) -> VirtualAddress {
        self.start
    }

    /// Set the start address of the region
    pub fn set_start_address(&mut self, start: VirtualAddress) {
        self.start = start;
    }

    /// Get the last address in the region (inclusive end)
    pub fn final_address(&self) -> VirtualAddress {
        VirtualAddress::new(self.start.data() + self.size - 1)
    }

    /// Get the start address of the next region (exclusive end)
    pub fn end_address(&self) -> VirtualAddress {
        VirtualAddress::new(self.start.data() + self.size)
    }

    /// Return the exact size of the region
    pub fn size(&self) -> usize {
        self.size
    }

    /// Return true if the size of this region is zero. Grants with such a
    /// region should never exist.
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    /// Set the exact size of the region
    pub fn set_size(&mut self, size: usize) {
        self.size = size;
    }

    /// Round region up to nearest page size
    pub fn round(self) -> Self {
        Self {
            size: round_up_pages(self.size),
            ..self
        }
    }

    /// Return the size of the grant in multiples of the page size
    pub fn full_size(&self) -> usize {
        self.round().size()
    }

    /// Returns true if `other` starts within the region's requested range.
    ///
    /// Fixed: overlap must be judged by where `other` *starts*. The previous
    /// comparison used `other`'s exclusive end address, which failed to
    /// detect a region touching the final byte of `self` (e.g. a byte region
    /// on the last byte, as produced by `UserGrants::contains`); the
    /// `region_collides` test in this file encodes the intended behavior.
    pub fn collides(&self, other: Self) -> bool {
        self.start_address() <= other.start_address()
            && other.start_address().data() - self.start_address().data() < self.size()
    }

    /// Returns true if the address is within the regions's actual range (so,
    /// rounded up to the page size)
    pub fn occupies(&self, other: Self) -> bool {
        self.round().collides(other)
    }

    /// Return all pages containing a chunk of the region
    // NOTE(review): this passes the exclusive `end_address()` to an inclusive
    // page range, so a page-aligned region yields one page past its end.
    // `Memory::pages` uses the final (inclusive) byte instead — confirm
    // whether the extra page is intentional before changing this.
    pub fn pages(&self) -> PageIter {
        Page::range_inclusive(
            Page::containing_address(self.start_address()),
            Page::containing_address(self.end_address())
        )
    }

    /// Returns the region from the start of self until the start of the specified region.
    ///
    /// # Panics
    ///
    /// Panics if the given region starts before self
    pub fn before(self, region: Self) -> Option<Self> {
        assert!(self.start_address() <= region.start_address());
        Some(Self::between(
            self.start_address(),
            region.start_address(),
        )).filter(|reg| !reg.is_empty())
    }

    /// Returns the region from the end of the given region until the end of self.
    ///
    /// # Panics
    ///
    /// Panics if self ends before the given region
    pub fn after(self, region: Self) -> Option<Self> {
        assert!(region.end_address() <= self.end_address());
        Some(Self::between(
            region.end_address(),
            self.end_address(),
        )).filter(|reg| !reg.is_empty())
    }

    /// Re-base address that lives inside this region, onto a new base region
    pub fn rebase(self, new_base: Self, address: VirtualAddress) -> VirtualAddress {
        let offset = address.data() - self.start_address().data();
        let new_start = new_base.start_address().data() + offset;
        VirtualAddress::new(new_start)
    }
}
// NOTE: equality and ordering look only at the start address. This is what
// allows a `BTreeSet<Grant>` (via `Borrow<Region>`) to be probed with plain
// `Region` keys in `UserGrants`, but it also means two regions with the same
// start and different sizes compare equal.
impl PartialEq for Region {
    fn eq(&self, other: &Self) -> bool {
        self.start.eq(&other.start)
    }
}

impl Eq for Region {}

impl PartialOrd for Region {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.start.partial_cmp(&other.start)
    }
}

impl Ord for Region {
    fn cmp(&self, other: &Self) -> Ordering {
        self.start.cmp(&other.start)
    }
}

impl Debug for Region {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}..{:#x} ({:#x} long)", self.start_address().data(), self.end_address().data(), self.size())
    }
}

// Borrow just the address span of a grant, discarding flags and state.
impl<'a> From<&'a Grant> for Region {
    fn from(source: &'a Grant) -> Self {
        source.region
    }
}
/// A mapped span of virtual memory in some address space, together with the
/// flags it was mapped with and whether the backing frames are owned.
#[derive(Debug)]
pub struct Grant {
    /// The virtual address span covered by this grant
    region: Region,
    /// Page table entry flags used when mapping the region
    flags: EntryFlags,
    /// Whether the region is currently mapped; dropping a mapped grant panics
    mapped: bool,
    /// Whether the grant owns its frames (they are freed when it is unmapped)
    owned: bool,
    //TODO: This is probably a very heavy way to keep track of fmap'd files, perhaps move to the context?
    pub desc_opt: Option<FileDescriptor>,
}
impl Grant {
    /// Whether this grant owns its frames; owned frames are freed on unmap
    /// and deep-copied (rather than aliased) by `secret_clone`.
    pub fn is_owned(&self) -> bool {
        self.owned
    }

    /// Get a mutable reference to the region. This is unsafe, because a bad
    /// region could lead to the wrong addresses being unmapped.
    pub unsafe fn region_mut(&mut self) -> &mut Region {
        &mut self.region
    }

    /// Map `size` bytes of physical memory starting at `from` into the
    /// active page table at virtual address `to`. The resulting grant does
    /// not own the frames, so unmapping it leaves the physical memory alone.
    pub fn physmap(from: PhysicalAddress, to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
        let mut active_table = match to.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        let mut flush_all = MapperFlushAll::new();

        let start_page = Page::containing_address(to);
        let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
        for page in Page::range_inclusive(start_page, end_page) {
            // Each page maps to the frame at the same offset within `from`
            let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data() - to.data() + from.data()));
            let result = active_table.map_to(page, frame, flags);
            flush_all.consume(result);
        }

        flush_all.flush(&mut active_table);

        Grant {
            region: Region {
                start: to,
                size,
            },
            flags,
            mapped: true,
            owned: false,
            desc_opt: None,
        }
    }

    /// Allocate fresh frames and map `size` bytes at `to` in the active page
    /// table. The resulting grant owns its frames.
    pub fn map(to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
        let mut active_table = match to.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        let mut flush_all = MapperFlushAll::new();

        let start_page = Page::containing_address(to);
        let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
        for page in Page::range_inclusive(start_page, end_page) {
            let result = active_table.map(page, flags);
            flush_all.consume(result);
        }

        flush_all.flush(&mut active_table);

        Grant {
            region: Region {
                start: to,
                size,
            },
            flags,
            mapped: true,
            owned: true,
            desc_opt: None,
        }
    }

    /// Mirror the already-mapped range `from .. from + size` of the current
    /// address space into `new_table` at address `to`, sharing the frames.
    ///
    /// Bugfix: `from` and `to` may belong to different address spaces
    /// (kernel vs user), so each range must be walked with a mapper of its
    /// own type. Previously the mapper chosen from `to` was also used to
    /// translate `from`, which made `from` appear unmapped and panicked
    /// whenever the two types differed.
    pub fn map_inactive(from: VirtualAddress, to: VirtualAddress, size: usize, flags: EntryFlags, desc_opt: Option<FileDescriptor>, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant {
        // Walk the source range with a mapper matching *its* address type
        let mut active_table = match from.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        //TODO: Do not allocate
        let mut frames = VecDeque::with_capacity(size/PAGE_SIZE);

        let start_page = Page::containing_address(from);
        let end_page = Page::containing_address(VirtualAddress::new(from.data() + size - 1));
        for page in Page::range_inclusive(start_page, end_page) {
            let frame = active_table.translate_page(page).expect("grant references unmapped memory");
            frames.push_back(frame);
        }

        // Map into `new_table` with a mapper matching the destination type
        let mut active_table = match to.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        active_table.with(new_table, temporary_page, |mapper| {
            let start_page = Page::containing_address(to);
            let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
            for page in Page::range_inclusive(start_page, end_page) {
                let frame = frames.pop_front().expect("grant did not find enough frames");
                let result = mapper.map_to(page, frame, flags);
                // Ignore result due to mapping on inactive table
                unsafe { result.ignore(); }
            }
        });

        ipi(IpiKind::Tlb, IpiTarget::Other);

        Grant {
            region: Region {
                start: to,
                size,
            },
            flags,
            mapped: true,
            owned: false,
            desc_opt,
        }
    }

    /// This function should only be used in clone!
    ///
    /// Duplicate the grant at `new_start` in the active table: owned grants
    /// are deep-copied through a temporary writable mapping and then remapped
    /// with the original flags; borrowed grants simply alias the same frames.
    pub fn secret_clone(&self, new_start: VirtualAddress) -> Grant {
        assert!(self.mapped);

        let mut active_table = match new_start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        let mut flush_all = MapperFlushAll::new();

        let start_page = Page::containing_address(self.region.start);
        let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
        for page in Page::range_inclusive(start_page, end_page) {
            //TODO: One function to do both?
            let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
            let frame = active_table.translate_page(page).expect("grant references unmapped memory");

            let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
            if self.owned {
                // Map fresh frames writable so the copy below can fill them
                let result = active_table.map(new_page, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
                flush_all.consume(result);
            } else {
                // Alias the existing frame with the original flags
                let result = active_table.map_to(new_page, frame, flags);
                flush_all.consume(result);
            }
        }

        flush_all.flush(&mut active_table);

        if self.owned {
            unsafe {
                // Copy the contents into the freshly allocated frames
                intrinsics::copy(self.region.start.data() as *const u8, new_start.data() as *mut u8, self.region.size);
            }

            // Restore the source grant's flags on the new mapping
            let mut flush_all = MapperFlushAll::new();

            for page in Page::range_inclusive(start_page, end_page) {
                //TODO: One function to do both?
                let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");

                let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
                let result = active_table.remap(new_page, flags);
                flush_all.consume(result);
            }

            flush_all.flush(&mut active_table);
        }

        Grant {
            region: Region {
                start: new_start,
                size: self.region.size,
            },
            flags: self.flags,
            mapped: true,
            owned: self.owned,
            desc_opt: self.desc_opt.clone()
        }
    }

    /// Move the grant's pages out of the active table into `new_table`,
    /// optionally rebasing them to `new_start`.
    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
        assert!(self.mapped);

        let mut active_table = match new_start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        let mut flush_all = MapperFlushAll::new();

        let start_page = Page::containing_address(self.region.start);
        let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
        for page in Page::range_inclusive(start_page, end_page) {
            //TODO: One function to do both?
            let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
            // Keep the frame (false = do not deallocate) so it can be remapped
            let (result, frame) = active_table.unmap_return(page, false);
            flush_all.consume(result);

            active_table.with(new_table, temporary_page, |mapper| {
                let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
                let result = mapper.map_to(new_page, frame, flags);
                // Ignore result due to mapping on inactive table
                unsafe { result.ignore(); }
            });
        }

        flush_all.flush(&mut active_table);

        self.region.start = new_start;
    }

    /// Page table entry flags this grant was mapped with
    pub fn flags(&self) -> EntryFlags {
        self.flags
    }

    /// Override the mapped state. Unsafe: the caller must guarantee the flag
    /// matches reality, or `unmap`/`drop` will behave incorrectly.
    pub unsafe fn set_mapped(&mut self, mapped: bool) {
        self.mapped = mapped;
    }

    /// Unmap the grant from the active table, freeing owned frames and
    /// closing any associated file descriptor.
    pub fn unmap(mut self) {
        assert!(self.mapped);

        let mut active_table = match self.start_address().get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        let mut flush_all = MapperFlushAll::new();

        let start_page = Page::containing_address(self.start_address());
        let end_page = Page::containing_address(self.final_address());
        for page in Page::range_inclusive(start_page, end_page) {
            let (result, frame) = active_table.unmap_return(page, false);
            if self.owned {
                //TODO: make sure this frame can be safely freed, physical use counter
                crate::memory::deallocate_frames(frame, 1);
            }
            flush_all.consume(result);
        }

        flush_all.flush(&mut active_table);

        if let Some(desc) = self.desc_opt.take() {
            println!("Grant::unmap: close desc {:?}", desc);
            //TODO: This imposes a large cost on unmapping, but that cost cannot be avoided without modifying fmap and funmap
            let _ = desc.close();
        }

        // Satisfy the Drop assertion
        self.mapped = false;
    }

    /// Unmap the grant from `new_table` (an inactive table), freeing owned
    /// frames and closing any associated file descriptor.
    pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
        assert!(self.mapped);

        let mut active_table = match self.start_address().get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        active_table.with(new_table, temporary_page, |mapper| {
            let start_page = Page::containing_address(self.start_address());
            let end_page = Page::containing_address(self.final_address());
            for page in Page::range_inclusive(start_page, end_page) {
                let (result, frame) = mapper.unmap_return(page, false);
                if self.owned {
                    //TODO: make sure this frame can be safely freed, physical use counter
                    crate::memory::deallocate_frames(frame, 1);
                }
                // This is not the active table, so the flush can be ignored
                unsafe { result.ignore(); }
            }
        });

        ipi(IpiKind::Tlb, IpiTarget::Other);

        if let Some(desc) = self.desc_opt.take() {
            println!("Grant::unmap_inactive: close desc {:?}", desc);
            //TODO: This imposes a large cost on unmapping, but that cost cannot be avoided without modifying fmap and funmap
            let _ = desc.close();
        }

        // Satisfy the Drop assertion
        self.mapped = false;
    }

    /// Extract out a region into a separate grant. The return value is as
    /// follows: (before, new split, after). Before and after may be `None`,
    /// which occurs when the split off region is at the start or end of the
    /// page respectively.
    ///
    /// # Panics
    ///
    /// Panics if the start or end addresses of the region is not aligned to the
    /// page size. To round up the size to the nearest page size, use `.round()`
    /// on the region.
    ///
    /// Also panics if the given region isn't completely contained within the
    /// grant. Use `grant.intersect` to find a sub-region that works.
    pub fn extract(mut self, region: Region) -> Option<(Option<Grant>, Grant, Option<Grant>)> {
        assert_eq!(region.start_address().data() % PAGE_SIZE, 0, "split_out must be called on page-size aligned start address");
        assert_eq!(region.size() % PAGE_SIZE, 0, "split_out must be called on page-size aligned end address");

        // The leading and trailing pieces share this grant's flags, state and
        // (cloned) file descriptor.
        let before_grant = self.before(region).map(|region| Grant {
            region,
            flags: self.flags,
            mapped: self.mapped,
            owned: self.owned,
            desc_opt: self.desc_opt.clone(),
        });
        let after_grant = self.after(region).map(|region| Grant {
            region,
            flags: self.flags,
            mapped: self.mapped,
            owned: self.owned,
            desc_opt: self.desc_opt.clone(),
        });

        unsafe {
            // Shrink self down to exactly the requested region
            *self.region_mut() = region;
        }

        Some((before_grant, self, after_grant))
    }
}
// Grants delegate region access and all comparison/lookup behavior to their
// Region; like Region itself, equality and ordering use only the start
// address, which is what lets `BTreeSet<Grant>` be probed with `Region` keys.
impl Deref for Grant {
    type Target = Region;
    fn deref(&self) -> &Self::Target {
        &self.region
    }
}

impl PartialOrd for Grant {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.region.partial_cmp(&other.region)
    }
}

impl Ord for Grant {
    fn cmp(&self, other: &Self) -> Ordering {
        self.region.cmp(&other.region)
    }
}

impl PartialEq for Grant {
    fn eq(&self, other: &Self) -> bool {
        self.region.eq(&other.region)
    }
}

impl Eq for Grant {}

impl Borrow<Region> for Grant {
    fn borrow(&self) -> &Region {
        &self.region
    }
}

impl Drop for Grant {
    fn drop(&mut self) {
        // Leaking a mapped grant would leave stale page table entries behind;
        // callers must unmap (or explicitly mark unmapped) before dropping.
        assert!(!self.mapped, "Grant dropped while still mapped");
    }
}
/// A reference-counted handle to a `Memory` area that can be shared between
/// holders, with one strong owner keeping the area alive.
#[derive(Clone, Debug)]
pub enum SharedMemory {
    /// Strong owner; keeps the memory alive
    Owned(Arc<Mutex<Memory>>),
    /// Weak borrower; using it after the owner is gone panics (see `with`)
    Borrowed(Weak<Mutex<Memory>>)
}
impl SharedMemory {
    /// Run `f` with exclusive (locked) access to the underlying memory.
    ///
    /// # Panics
    ///
    /// Panics if this is a `Borrowed` handle whose owner has been dropped.
    pub fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
        let memory_lock = match *self {
            SharedMemory::Owned(ref memory_lock) => Arc::clone(memory_lock),
            SharedMemory::Borrowed(ref memory_weak) => memory_weak
                .upgrade()
                .expect("SharedMemory::Borrowed no longer valid"),
        };
        let mut memory = memory_lock.lock();
        f(&mut *memory)
    }

    /// Create a non-owning handle to the same memory.
    pub fn borrow(&self) -> SharedMemory {
        let weak = match *self {
            SharedMemory::Owned(ref memory_lock) => Arc::downgrade(memory_lock),
            SharedMemory::Borrowed(ref memory_weak) => memory_weak.clone(),
        };
        SharedMemory::Borrowed(weak)
    }
}
/// An owned, mapped area of virtual memory. The pages are unmapped
/// automatically when the value is dropped.
#[derive(Debug)]
pub struct Memory {
    /// First virtual address of the area
    start: VirtualAddress,
    /// Length of the area in bytes
    size: usize,
    /// Page table entry flags the area is mapped with
    flags: EntryFlags
}
impl Memory {
    /// Allocate and map a fresh area covering `size` bytes at `start`.
    /// When `clear` is true the area is zeroed (flags must include WRITABLE).
    pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, clear: bool) -> Self {
        let mut memory = Memory {
            start,
            size,
            flags,
        };

        memory.map(clear);
        memory
    }

    /// Move this area into an owned, lockable handle that can be shared
    /// between contexts.
    pub fn to_shared(self) -> SharedMemory {
        SharedMemory::Owned(Arc::new(Mutex::new(self)))
    }

    /// First virtual address of the area
    pub fn start_address(&self) -> VirtualAddress {
        self.start
    }

    /// Size of the area in bytes
    pub fn size(&self) -> usize {
        self.size
    }

    /// Page table entry flags the area is currently mapped with
    pub fn flags(&self) -> EntryFlags {
        self.flags
    }

    /// Iterator over every page touched by the area, up to and including the
    /// page holding the final byte.
    pub fn pages(&self) -> PageIter {
        let start_page = Page::containing_address(self.start);
        let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
        Page::range_inclusive(start_page, end_page)
    }

    /// Map all pages of the area into the active page table, optionally
    /// zeroing the freshly mapped memory.
    fn map(&mut self, clear: bool) {
        let mut active_table = match self.start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        let mut flush_all = MapperFlushAll::new();

        for page in self.pages() {
            let result = active_table.map(page, self.flags);
            flush_all.consume(result);
        }

        flush_all.flush(&mut active_table);

        if clear {
            // Zeroing requires write access to the mapping just created
            assert!(self.flags.contains(EntryFlags::WRITABLE));
            unsafe {
                intrinsics::write_bytes(self.start_address().data() as *mut u8, 0, self.size);
            }
        }
    }

    /// Unmap all pages of the area from the active page table.
    fn unmap(&mut self) {
        let mut active_table = match self.start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        let mut flush_all = MapperFlushAll::new();

        for page in self.pages() {
            let result = active_table.unmap(page);
            flush_all.consume(result);
        }

        flush_all.flush(&mut active_table);
    }

    /// A complicated operation to move a piece of memory to a new page table
    /// It also allows for changing the address at the same time
    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
        let mut active_table = match new_start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        let mut flush_all = MapperFlushAll::new();

        for page in self.pages() {
            // Keep the frame (false = don't deallocate) so it can be remapped
            let (result, frame) = active_table.unmap_return(page, false);
            flush_all.consume(result);

            active_table.with(new_table, temporary_page, |mapper| {
                // Preserve each page's offset relative to the area start
                let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.start.data() + new_start.data()));
                let result = mapper.map_to(new_page, frame, self.flags);
                // This is not the active table, so the flush can be ignored
                unsafe { result.ignore(); }
            });
        }

        flush_all.flush(&mut active_table);

        self.start = new_start;
    }

    /// Remap the whole area with new entry flags.
    pub fn remap(&mut self, new_flags: EntryFlags) {
        let mut active_table = match self.start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        let mut flush_all = MapperFlushAll::new();

        for page in self.pages() {
            let result = active_table.remap(page, new_flags);
            flush_all.consume(result);
        }

        flush_all.flush(&mut active_table);

        self.flags = new_flags;
    }

    /// Grow or shrink the area in place to `new_size` bytes, mapping or
    /// unmapping whole pages as needed. When growing with `clear` set, the
    /// newly exposed bytes are zeroed.
    pub fn resize(&mut self, new_size: usize, clear: bool) {
        let mut active_table = match self.start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };

        //TODO: Calculate page changes to minimize operations
        if new_size > self.size {
            let mut flush_all = MapperFlushAll::new();

            let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size));
            let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size - 1));
            for page in Page::range_inclusive(start_page, end_page) {
                // Only map pages not already covered by the old size
                if active_table.translate_page(page).is_none() {
                    let result = active_table.map(page, self.flags);
                    flush_all.consume(result);
                }
            }

            flush_all.flush(&mut active_table);

            if clear {
                unsafe {
                    intrinsics::write_bytes((self.start.data() + self.size) as *mut u8, 0, new_size - self.size);
                }
            }
        } else if new_size < self.size {
            let mut flush_all = MapperFlushAll::new();

            let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size));
            let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
            for page in Page::range_inclusive(start_page, end_page) {
                // Only unmap pages entirely beyond the new size
                if active_table.translate_page(page).is_some() {
                    let result = active_table.unmap(page);
                    flush_all.consume(result);
                }
            }

            flush_all.flush(&mut active_table);
        }

        self.size = new_size;
    }
}
impl Drop for Memory {
    /// Unmap the area's pages when the handle goes out of scope.
    fn drop(&mut self) {
        self.unmap();
    }
}
/// Thread-local storage bookkeeping: a master image plus the per-context
/// memory it is copied into.
#[derive(Debug)]
pub struct Tls {
    /// Address of the master TLS image to copy from
    pub master: VirtualAddress,
    /// Number of bytes to copy out of the master image
    pub file_size: usize,
    /// Backing memory for this context's TLS area
    pub mem: Memory,
    /// Byte offset into `mem` at which the image is placed
    pub offset: usize,
}
impl Tls {
    /// Load TLS data from master
    ///
    /// # Safety
    ///
    /// Both the master image (`file_size` bytes at `master`) and the
    /// destination (`mem` start + `offset`) must be mapped and large enough
    /// for the copy — presumably guaranteed by the context setup; TODO confirm.
    pub unsafe fn load(&mut self) {
        intrinsics::copy(
            self.master.data() as *const u8,
            (self.mem.start_address().data() + self.offset) as *mut u8,
            self.file_size
        );
    }
}
// Fixed: the attribute was `#[cfg(tests)]` (plural), which matches no
// configuration, so this module was silently never compiled — not even under
// `cargo test`. The tests also lacked `use super::*` and passed raw integers
// where `Region::new` takes a `VirtualAddress`.
#[cfg(test)]
mod tests {
    use super::*;

    /// `collides` must report overlap for any region sharing at least one
    /// byte with `self`, and nothing at or past the exclusive end.
    #[test]
    fn region_collides() {
        let region = |start, size| Region::new(VirtualAddress::new(start), size);
        assert!(region(0, 2).collides(region(0, 1)));
        assert!(region(0, 2).collides(region(1, 1)));
        assert!(!region(0, 2).collides(region(2, 1)));
        assert!(!region(0, 2).collides(region(3, 1)));
    }
}
aarch64: context: memory: Grant::map_inactive: Bugfix
When mapping one (from) virtual address range to another (to) virtual
address range, be mindful of which mapper type to use for each range.
Before this change, the same mapper type was used for both ranges. As a
result, if from and to were of different kinds (i.e. not both kernel
virtual addresses or both user virtual addresses), either from or to
would appear to be unmapped and the kernel would panic.
use alloc::collections::{BTreeSet, VecDeque};
use alloc::sync::{Arc, Weak};
use core::borrow::Borrow;
use core::cmp::{self, Eq, Ordering, PartialEq, PartialOrd};
use core::fmt::{self, Debug};
use core::intrinsics;
use core::ops::{Deref, DerefMut};
use spin::Mutex;
use syscall::{
flag::MapFlags,
error::*,
};
use crate::arch::paging::PAGE_SIZE;
use crate::context::file::FileDescriptor;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::Frame;
use crate::paging::{ActivePageTable, InactivePageTable, PageTableType, Page, PageIter, PhysicalAddress, VirtualAddress, VirtualAddressType};
use crate::paging::entry::EntryFlags;
use crate::paging::mapper::MapperFlushAll;
use crate::paging::temporary_page::TemporaryPage;
/// Round down to the nearest multiple of page size
pub fn round_down_pages(number: usize) -> usize {
number - number % PAGE_SIZE
}
/// Round up to the nearest multiple of page size
/// (note: `number` near `usize::MAX` would overflow the addition, as in the
/// original formulation)
pub fn round_up_pages(number: usize) -> usize {
    ((number + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE
}
/// Translate userspace `MapFlags` protection bits into page table
/// `EntryFlags`. Pages are always PRESENT and USER_ACCESSIBLE.
pub fn entry_flags(flags: MapFlags) -> EntryFlags {
    let mut entry_flags = EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE;

    if flags.contains(MapFlags::PROT_WRITE) {
        entry_flags |= EntryFlags::WRITABLE;
    }
    if flags.contains(MapFlags::PROT_READ) {
        //TODO: PROT_READ
    }
    // Executable only when explicitly requested
    if !flags.contains(MapFlags::PROT_EXEC) {
        entry_flags |= EntryFlags::NO_EXECUTE;
    }

    entry_flags
}
/// The set of all grants in a user address space, ordered by start address
/// and probed with `Region` keys via `Borrow<Region>`.
#[derive(Debug, Default)]
pub struct UserGrants {
    // Ordered by Region's start-address-only ordering
    pub inner: BTreeSet<Grant>,
}
impl UserGrants {
    /// Returns the grant, if any, which occupies the specified address
    pub fn contains(&self, address: VirtualAddress) -> Option<&Grant> {
        // Grants order by start address only, so the candidate is the last
        // grant starting at or before `address`; `occupies` then checks the
        // grant actually reaches the queried byte.
        let byte = Region::byte(address);
        self.inner
            .range(..=byte)
            .next_back()
            .filter(|existing| existing.occupies(byte))
    }

    /// Returns an iterator over all grants that occupy some part of the
    /// requested region
    pub fn conflicts<'a>(&'a self, requested: Region) -> impl Iterator<Item = &'a Grant> + 'a {
        // Begin at the grant covering the requested start (or the request
        // itself when that address is free), stop at the first grant that no
        // longer overlaps the request
        let start = self.contains(requested.start_address());
        let start_region = start.map(Region::from).unwrap_or(requested);
        self
            .inner
            .range(start_region..)
            .take_while(move |region| !region.intersect(requested).is_empty())
    }

    /// Return a free region with the specified size
    pub fn find_free(&self, size: usize) -> Region {
        // Get last used region
        let last = self.inner.iter().next_back().map(Region::from).unwrap_or(Region::new(VirtualAddress::new(0), 0));
        // At the earliest, start at grant offset
        let address = cmp::max(last.end_address().data(), crate::USER_GRANT_OFFSET);
        // Create new region
        Region::new(VirtualAddress::new(address), size)
    }

    /// Return a free region, respecting the user's hinted address and flags. Address may be null.
    ///
    /// # Errors
    ///
    /// * `EINVAL` — the hint is misaligned or extends past the userspace limit.
    /// * `EEXIST` — `MAP_FIXED_NOREPLACE` was set and the region is occupied.
    /// * `EOPNOTSUPP` — `MAP_FIXED` was set and the region is occupied
    ///   (overwriting an existing grant is not implemented yet).
    pub fn find_free_at(&mut self, address: VirtualAddress, size: usize, flags: MapFlags) -> Result<Region> {
        if address == VirtualAddress::new(0) {
            // Free hands!
            return Ok(self.find_free(size));
        }

        // The user wished to have this region...
        let mut requested = Region::new(address, size);

        // Reject the hint if EITHER check fails: the region must stay inside
        // the 256 PML4 entries reserved for userspace AND start page-aligned.
        // (Previously `&&`, which accepted a page-aligned out-of-range
        // request as well as a misaligned in-range one.)
        if
            requested.end_address().data() >= crate::PML4_SIZE * 256 // There are 256 PML4 entries reserved for userspace
            || address.data() % PAGE_SIZE != 0
        {
            // ... but it was invalid
            return Err(Error::new(EINVAL));
        }

        if let Some(grant) = self.contains(requested.start_address()) {
            // ... but it already exists
            if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) {
                println!("grant: conflicts with: {:#x} - {:#x}", grant.start_address().data(), grant.end_address().data());
                return Err(Error::new(EEXIST));
            } else if flags.contains(MapFlags::MAP_FIXED) {
                // TODO: Overwrite existing grant
                return Err(Error::new(EOPNOTSUPP));
            } else {
                // TODO: Find grant close to requested address?
                requested = self.find_free(requested.size());
            }
        }

        Ok(requested)
    }
}
// Allow a UserGrants collection to be used directly as its underlying
// BTreeSet of grants.
impl Deref for UserGrants {
    type Target = BTreeSet<Grant>;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl DerefMut for UserGrants {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
/// A contiguous span of virtual memory: a start address plus a byte size.
/// Note that equality and ordering consider only `start` (see the trait
/// impls below), so a `Region` can serve as a lookup key in ordered sets.
#[derive(Clone, Copy)]
pub struct Region {
    /// First virtual address of the span
    start: VirtualAddress,
    /// Length of the span in bytes; may be zero (see `is_empty`)
    size: usize,
}
impl Region {
    /// Create a new region with the given size
    pub fn new(start: VirtualAddress, size: usize) -> Self {
        Self { start, size }
    }

    /// Create a new region spanning exactly one byte
    pub fn byte(address: VirtualAddress) -> Self {
        Self::new(address, 1)
    }

    /// Create a new region spanning between the start and end address
    /// (exclusive end). An end before the start yields an empty region.
    pub fn between(start: VirtualAddress, end: VirtualAddress) -> Self {
        Self::new(
            start,
            end.data().saturating_sub(start.data()),
        )
    }

    /// Return the part of the specified region that intersects with self.
    pub fn intersect(&self, other: Self) -> Self {
        Self::between(
            cmp::max(self.start_address(), other.start_address()),
            cmp::min(self.end_address(), other.end_address()),
        )
    }

    /// Get the start address of the region
    pub fn start_address(&self) -> VirtualAddress {
        self.start
    }

    /// Set the start address of the region
    pub fn set_start_address(&mut self, start: VirtualAddress) {
        self.start = start;
    }

    /// Get the last address in the region (inclusive end)
    pub fn final_address(&self) -> VirtualAddress {
        VirtualAddress::new(self.start.data() + self.size - 1)
    }

    /// Get the start address of the next region (exclusive end)
    pub fn end_address(&self) -> VirtualAddress {
        VirtualAddress::new(self.start.data() + self.size)
    }

    /// Return the exact size of the region
    pub fn size(&self) -> usize {
        self.size
    }

    /// Return true if the size of this region is zero. Grants with such a
    /// region should never exist.
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    /// Set the exact size of the region
    pub fn set_size(&mut self, size: usize) {
        self.size = size;
    }

    /// Round region up to nearest page size
    pub fn round(self) -> Self {
        Self {
            size: round_up_pages(self.size),
            ..self
        }
    }

    /// Return the size of the grant in multiples of the page size
    pub fn full_size(&self) -> usize {
        self.round().size()
    }

    /// Returns true if `other` starts within the region's requested range.
    ///
    /// Fixed: overlap must be judged by where `other` *starts*. The previous
    /// comparison used `other`'s exclusive end address, which failed to
    /// detect a region touching the final byte of `self` (e.g. a byte region
    /// on the last byte, as produced by `UserGrants::contains`); the
    /// `region_collides` test in this file encodes the intended behavior.
    pub fn collides(&self, other: Self) -> bool {
        self.start_address() <= other.start_address()
            && other.start_address().data() - self.start_address().data() < self.size()
    }

    /// Returns true if the address is within the regions's actual range (so,
    /// rounded up to the page size)
    pub fn occupies(&self, other: Self) -> bool {
        self.round().collides(other)
    }

    /// Return all pages containing a chunk of the region
    // NOTE(review): this passes the exclusive `end_address()` to an inclusive
    // page range, so a page-aligned region yields one page past its end.
    // `Memory::pages` uses the final (inclusive) byte instead — confirm
    // whether the extra page is intentional before changing this.
    pub fn pages(&self) -> PageIter {
        Page::range_inclusive(
            Page::containing_address(self.start_address()),
            Page::containing_address(self.end_address())
        )
    }

    /// Returns the region from the start of self until the start of the specified region.
    ///
    /// # Panics
    ///
    /// Panics if the given region starts before self
    pub fn before(self, region: Self) -> Option<Self> {
        assert!(self.start_address() <= region.start_address());
        Some(Self::between(
            self.start_address(),
            region.start_address(),
        )).filter(|reg| !reg.is_empty())
    }

    /// Returns the region from the end of the given region until the end of self.
    ///
    /// # Panics
    ///
    /// Panics if self ends before the given region
    pub fn after(self, region: Self) -> Option<Self> {
        assert!(region.end_address() <= self.end_address());
        Some(Self::between(
            region.end_address(),
            self.end_address(),
        )).filter(|reg| !reg.is_empty())
    }

    /// Re-base address that lives inside this region, onto a new base region
    pub fn rebase(self, new_base: Self, address: VirtualAddress) -> VirtualAddress {
        let offset = address.data() - self.start_address().data();
        let new_start = new_base.start_address().data() + offset;
        VirtualAddress::new(new_start)
    }
}
// NOTE: equality and ordering look only at the start address. This is what
// allows a `BTreeSet<Grant>` (via `Borrow<Region>`) to be probed with plain
// `Region` keys in `UserGrants`, but it also means two regions with the same
// start and different sizes compare equal.
impl PartialEq for Region {
    fn eq(&self, other: &Self) -> bool {
        self.start.eq(&other.start)
    }
}

impl Eq for Region {}

impl PartialOrd for Region {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.start.partial_cmp(&other.start)
    }
}

impl Ord for Region {
    fn cmp(&self, other: &Self) -> Ordering {
        self.start.cmp(&other.start)
    }
}

impl Debug for Region {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}..{:#x} ({:#x} long)", self.start_address().data(), self.end_address().data(), self.size())
    }
}

// Borrow just the address span of a grant, discarding flags and state.
impl<'a> From<&'a Grant> for Region {
    fn from(source: &'a Grant) -> Self {
        source.region
    }
}
/// A mapped span of virtual memory in some address space, together with the
/// flags it was mapped with and whether the backing frames are owned.
#[derive(Debug)]
pub struct Grant {
    /// The virtual address span covered by this grant
    region: Region,
    /// Page table entry flags used when mapping the region
    flags: EntryFlags,
    /// Whether the region is currently mapped; dropping a mapped grant panics
    mapped: bool,
    /// Whether the grant owns its frames (they are freed when it is unmapped)
    owned: bool,
    //TODO: This is probably a very heavy way to keep track of fmap'd files, perhaps move to the context?
    pub desc_opt: Option<FileDescriptor>,
}
impl Grant {
/// Whether this grant owns its frames (owned frames are freed on unmap).
pub fn is_owned(&self) -> bool {
    self.owned
}
/// Get a mutable reference to the region. This is unsafe, because a bad
/// region could lead to the wrong addresses being unmapped.
pub unsafe fn region_mut(&mut self) -> &mut Region {
    &mut self.region
}
/// Map `size` bytes of physical memory starting at `from` into the active
/// page table at virtual address `to`. The resulting grant does not own
/// the frames, so unmapping it leaves the physical memory alone.
pub fn physmap(from: PhysicalAddress, to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
    let mut active_table = match to.get_type() {
        VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
        VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
    };

    let mut flush_all = MapperFlushAll::new();

    let start_page = Page::containing_address(to);
    let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
    for page in Page::range_inclusive(start_page, end_page) {
        // Each page maps to the frame at the same offset within `from`
        let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data() - to.data() + from.data()));
        let result = active_table.map_to(page, frame, flags);
        flush_all.consume(result);
    }

    flush_all.flush(&mut active_table);

    Grant {
        region: Region {
            start: to,
            size,
        },
        flags,
        mapped: true,
        owned: false,
        desc_opt: None,
    }
}
/// Allocate fresh frames and map `size` bytes at `to` in the active page
/// table. The resulting grant owns its frames.
pub fn map(to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
    let mut active_table = match to.get_type() {
        VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
        VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
    };

    let mut flush_all = MapperFlushAll::new();

    let start_page = Page::containing_address(to);
    let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
    for page in Page::range_inclusive(start_page, end_page) {
        let result = active_table.map(page, flags);
        flush_all.consume(result);
    }

    flush_all.flush(&mut active_table);

    Grant {
        region: Region {
            start: to,
            size,
        },
        flags,
        mapped: true,
        owned: true,
        desc_opt: None,
    }
}
pub fn map_inactive(from: VirtualAddress, to: VirtualAddress, size: usize, flags: EntryFlags, desc_opt: Option<FileDescriptor>, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant {
let mut active_table = match from.get_type() {
VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
};
//TODO: Do not allocate
let mut frames = VecDeque::with_capacity(size/PAGE_SIZE);
let start_page = Page::containing_address(from);
let end_page = Page::containing_address(VirtualAddress::new(from.data() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let frame = active_table.translate_page(page).expect("grant references unmapped memory");
frames.push_back(frame);
}
let mut active_table = match to.get_type() {
VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
};
active_table.with(new_table, temporary_page, |mapper| {
let start_page = Page::containing_address(to);
let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let frame = frames.pop_front().expect("grant did not find enough frames");
let result = mapper.map_to(page, frame, flags);
// Ignore result due to mapping on inactive table
unsafe { result.ignore(); }
}
});
ipi(IpiKind::Tlb, IpiTarget::Other);
Grant {
region: Region {
start: to,
size,
},
flags,
mapped: true,
owned: false,
desc_opt,
}
}
/// This function should only be used in clone!
pub fn secret_clone(&self, new_start: VirtualAddress) -> Grant {
assert!(self.mapped);
let mut active_table = match new_start.get_type() {
VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
};
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(self.region.start);
let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let frame = active_table.translate_page(page).expect("grant references unmapped memory");
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
if self.owned {
let result = active_table.map(new_page, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
flush_all.consume(result);
} else {
let result = active_table.map_to(new_page, frame, flags);
flush_all.consume(result);
}
}
flush_all.flush(&mut active_table);
if self.owned {
unsafe {
intrinsics::copy(self.region.start.data() as *const u8, new_start.data() as *mut u8, self.region.size);
}
let mut flush_all = MapperFlushAll::new();
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
let result = active_table.remap(new_page, flags);
flush_all.consume(result);
}
flush_all.flush(&mut active_table);
}
Grant {
region: Region {
start: new_start,
size: self.region.size,
},
flags: self.flags,
mapped: true,
owned: self.owned,
desc_opt: self.desc_opt.clone()
}
}
pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
assert!(self.mapped);
let mut active_table = match new_start.get_type() {
VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
};
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(self.region.start);
let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let (result, frame) = active_table.unmap_return(page, false);
flush_all.consume(result);
active_table.with(new_table, temporary_page, |mapper| {
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
let result = mapper.map_to(new_page, frame, flags);
// Ignore result due to mapping on inactive table
unsafe { result.ignore(); }
});
}
flush_all.flush(&mut active_table);
self.region.start = new_start;
}
pub fn flags(&self) -> EntryFlags {
self.flags
}
pub unsafe fn set_mapped(&mut self, mapped: bool) {
self.mapped = mapped;
}
pub fn unmap(mut self) {
assert!(self.mapped);
let mut active_table = match self.start_address().get_type() {
VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
};
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(self.start_address());
let end_page = Page::containing_address(self.final_address());
for page in Page::range_inclusive(start_page, end_page) {
let (result, frame) = active_table.unmap_return(page, false);
if self.owned {
//TODO: make sure this frame can be safely freed, physical use counter
crate::memory::deallocate_frames(frame, 1);
}
flush_all.consume(result);
}
flush_all.flush(&mut active_table);
if let Some(desc) = self.desc_opt.take() {
println!("Grant::unmap: close desc {:?}", desc);
//TODO: This imposes a large cost on unmapping, but that cost cannot be avoided without modifying fmap and funmap
let _ = desc.close();
}
self.mapped = false;
}
pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
assert!(self.mapped);
let mut active_table = match self.start_address().get_type() {
VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
};
active_table.with(new_table, temporary_page, |mapper| {
let start_page = Page::containing_address(self.start_address());
let end_page = Page::containing_address(self.final_address());
for page in Page::range_inclusive(start_page, end_page) {
let (result, frame) = mapper.unmap_return(page, false);
if self.owned {
//TODO: make sure this frame can be safely freed, physical use counter
crate::memory::deallocate_frames(frame, 1);
}
// This is not the active table, so the flush can be ignored
unsafe { result.ignore(); }
}
});
ipi(IpiKind::Tlb, IpiTarget::Other);
if let Some(desc) = self.desc_opt.take() {
println!("Grant::unmap_inactive: close desc {:?}", desc);
//TODO: This imposes a large cost on unmapping, but that cost cannot be avoided without modifying fmap and funmap
let _ = desc.close();
}
self.mapped = false;
}
/// Extract out a region into a separate grant. The return value is as
/// follows: (before, new split, after). Before and after may be `None`,
/// which occurs when the split off region is at the start or end of the
/// page respectively.
///
/// # Panics
///
/// Panics if the start or end addresses of the region is not aligned to the
/// page size. To round up the size to the nearest page size, use `.round()`
/// on the region.
///
/// Also panics if the given region isn't completely contained within the
/// grant. Use `grant.intersect` to find a sub-region that works.
pub fn extract(mut self, region: Region) -> Option<(Option<Grant>, Grant, Option<Grant>)> {
assert_eq!(region.start_address().data() % PAGE_SIZE, 0, "split_out must be called on page-size aligned start address");
assert_eq!(region.size() % PAGE_SIZE, 0, "split_out must be called on page-size aligned end address");
let before_grant = self.before(region).map(|region| Grant {
region,
flags: self.flags,
mapped: self.mapped,
owned: self.owned,
desc_opt: self.desc_opt.clone(),
});
let after_grant = self.after(region).map(|region| Grant {
region,
flags: self.flags,
mapped: self.mapped,
owned: self.owned,
desc_opt: self.desc_opt.clone(),
});
unsafe {
*self.region_mut() = region;
}
Some((before_grant, self, after_grant))
}
}
impl Deref for Grant {
    type Target = Region;
    /// Grants transparently expose their `Region`'s methods.
    fn deref(&self) -> &Self::Target {
        &self.region
    }
}
impl PartialOrd for Grant {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Delegate to `Ord` so the two orderings can never disagree
        // (canonical impl; see clippy::non_canonical_partial_ord_impl).
        Some(self.cmp(other))
    }
}
impl Ord for Grant {
    /// Grants order exactly like their regions (by start address).
    fn cmp(&self, other: &Self) -> Ordering {
        self.region.cmp(&other.region)
    }
}
impl PartialEq for Grant {
    fn eq(&self, other: &Self) -> bool {
        self.region.eq(&other.region)
    }
}
impl Eq for Grant {}
impl Borrow<Region> for Grant {
    /// Allows ordered collections of grants to be queried by `Region` keys.
    fn borrow(&self) -> &Region {
        &self.region
    }
}
impl Drop for Grant {
    fn drop(&mut self) {
        // Leaking a live mapping is a kernel bug; require an explicit
        // unmap/unmap_inactive (which clear `mapped`) before drop.
        assert!(!self.mapped, "Grant dropped while still mapped");
    }
}
/// Memory shared between contexts: the owner holds the `Arc`, borrowers
/// hold a `Weak` that panics on use if the owner has been dropped.
#[derive(Clone, Debug)]
pub enum SharedMemory {
    Owned(Arc<Mutex<Memory>>),
    Borrowed(Weak<Mutex<Memory>>)
}
impl SharedMemory {
    /// Locks the underlying memory and runs `f` on it.
    ///
    /// Panics if this is a `Borrowed` handle whose owner has already been
    /// dropped.
    pub fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
        // Resolve to a strong handle first so both variants share one
        // lock-and-call path below.
        let memory_lock = match *self {
            SharedMemory::Owned(ref lock) => Arc::clone(lock),
            SharedMemory::Borrowed(ref weak) => {
                weak.upgrade().expect("SharedMemory::Borrowed no longer valid")
            }
        };
        let mut memory = memory_lock.lock();
        f(&mut *memory)
    }
    /// Produces a borrowed (weak) handle to the same memory.
    pub fn borrow(&self) -> SharedMemory {
        let weak = match *self {
            SharedMemory::Owned(ref memory_lock) => Arc::downgrade(memory_lock),
            SharedMemory::Borrowed(ref memory_lock) => memory_lock.clone(),
        };
        SharedMemory::Borrowed(weak)
    }
}
/// An owned, mapped region of virtual memory: pages are mapped on
/// construction (`Memory::new`) and unmapped when the value is dropped.
#[derive(Debug)]
pub struct Memory {
    // First virtual address of the region.
    start: VirtualAddress,
    // Size of the region in bytes.
    size: usize,
    // Page table flags applied to every page.
    flags: EntryFlags
}
impl Memory {
    /// Create and immediately map a region; zeroes it when `clear` is set.
    pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, clear: bool) -> Self {
        let mut memory = Memory {
            start,
            size,
            flags,
        };
        memory.map(clear);
        memory
    }
    /// Wrap this memory in a sharable, lockable handle.
    pub fn to_shared(self) -> SharedMemory {
        SharedMemory::Owned(Arc::new(Mutex::new(self)))
    }
    /// First virtual address of the region.
    pub fn start_address(&self) -> VirtualAddress {
        self.start
    }
    /// Size of the region in bytes.
    pub fn size(&self) -> usize {
        self.size
    }
    /// Page table flags applied to every page.
    pub fn flags(&self) -> EntryFlags {
        self.flags
    }
    /// Inclusive page range covering the whole region.
    pub fn pages(&self) -> PageIter {
        let start_page = Page::containing_address(self.start);
        let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
        Page::range_inclusive(start_page, end_page)
    }
    /// Map every page of the region in the active table; when `clear` is set
    /// the memory is zeroed afterwards (requires WRITABLE flags).
    fn map(&mut self, clear: bool) {
        let mut active_table = match self.start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };
        let mut flush_all = MapperFlushAll::new();
        for page in self.pages() {
            let result = active_table.map(page, self.flags);
            flush_all.consume(result);
        }
        flush_all.flush(&mut active_table);
        if clear {
            // Zeroing through the new mapping only works if it is writable.
            assert!(self.flags.contains(EntryFlags::WRITABLE));
            unsafe {
                intrinsics::write_bytes(self.start_address().data() as *mut u8, 0, self.size);
            }
        }
    }
    /// Unmap every page of the region from the active table.
    fn unmap(&mut self) {
        let mut active_table = match self.start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };
        let mut flush_all = MapperFlushAll::new();
        for page in self.pages() {
            let result = active_table.unmap(page);
            flush_all.consume(result);
        }
        flush_all.flush(&mut active_table);
    }
    /// A complicated operation to move a piece of memory to a new page table
    /// It also allows for changing the address at the same time
    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
        let mut active_table = match new_start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };
        let mut flush_all = MapperFlushAll::new();
        for page in self.pages() {
            // Unmap without freeing; the frame is remapped in `new_table`.
            let (result, frame) = active_table.unmap_return(page, false);
            flush_all.consume(result);
            active_table.with(new_table, temporary_page, |mapper| {
                let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.start.data() + new_start.data()));
                let result = mapper.map_to(new_page, frame, self.flags);
                // This is not the active table, so the flush can be ignored
                unsafe { result.ignore(); }
            });
        }
        flush_all.flush(&mut active_table);
        self.start = new_start;
    }
    /// Change the page flags for the whole region.
    pub fn remap(&mut self, new_flags: EntryFlags) {
        let mut active_table = match self.start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };
        let mut flush_all = MapperFlushAll::new();
        for page in self.pages() {
            let result = active_table.remap(page, new_flags);
            flush_all.consume(result);
        }
        flush_all.flush(&mut active_table);
        self.flags = new_flags;
    }
    /// Grow or shrink the region in place, mapping new pages (optionally
    /// zeroing the added bytes) or unmapping removed ones.
    pub fn resize(&mut self, new_size: usize, clear: bool) {
        let mut active_table = match self.start.get_type() {
            VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) },
            VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) }
        };
        //TODO: Calculate page changes to minimize operations
        if new_size > self.size {
            let mut flush_all = MapperFlushAll::new();
            let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size));
            let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size - 1));
            for page in Page::range_inclusive(start_page, end_page) {
                // Only map pages that are not already present (the old last
                // page may straddle the boundary).
                if active_table.translate_page(page).is_none() {
                    let result = active_table.map(page, self.flags);
                    flush_all.consume(result);
                }
            }
            flush_all.flush(&mut active_table);
            if clear {
                unsafe {
                    intrinsics::write_bytes((self.start.data() + self.size) as *mut u8, 0, new_size - self.size);
                }
            }
        } else if new_size < self.size {
            let mut flush_all = MapperFlushAll::new();
            let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size));
            let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
            for page in Page::range_inclusive(start_page, end_page) {
                if active_table.translate_page(page).is_some() {
                    let result = active_table.unmap(page);
                    flush_all.consume(result);
                }
            }
            flush_all.flush(&mut active_table);
        }
        self.size = new_size;
    }
}
impl Drop for Memory {
    /// Unmaps the region when the handle goes out of scope (RAII).
    fn drop(&mut self) {
        self.unmap();
    }
}
/// Thread-local storage image for a context.
#[derive(Debug)]
pub struct Tls {
    // Address of the master TLS image this copy is initialized from.
    pub master: VirtualAddress,
    // Number of bytes copied from the master image by `load`.
    pub file_size: usize,
    // Backing memory for this context's TLS copy.
    pub mem: Memory,
    // Byte offset inside `mem` where the TLS data is placed.
    pub offset: usize,
}
impl Tls {
    /// Load TLS data from master
    ///
    /// Copies `file_size` bytes from the master image into this context's
    /// TLS area at `offset`. Unsafe: the caller must ensure both the master
    /// image and `mem` are mapped in the current address space.
    pub unsafe fn load(&mut self) {
        intrinsics::copy(
            self.master.data() as *const u8,
            (self.mem.start_address().data() + self.offset) as *mut u8,
            self.file_size
        );
    }
}
// NOTE(review): `#[cfg(tests)]` is not a predicate the test harness sets —
// the standard key is `test` — so this module is never compiled. Given the
// TODO below this may be deliberate (the tests don't build), but confirm;
// renaming to `#[cfg(test)]` would re-enable (and require fixing) them.
#[cfg(tests)]
mod tests {
    // TODO: Get these tests working
    #[test]
    fn region_collides() {
        assert!(Region::new(0, 2).collides(Region::new(0, 1)));
        assert!(Region::new(0, 2).collides(Region::new(1, 1)));
        assert!(!Region::new(0, 2).collides(Region::new(2, 1)));
        assert!(!Region::new(0, 2).collides(Region::new(3, 1)));
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::collections::BTreeMap;
use std::mem;
use bindgen::config::Config;
use bindgen::declarationtyperesolver::DeclarationTypeResolver;
use bindgen::dependencies::Dependencies;
use bindgen::ir::{
AnnotationSet, Cfg, Constant, Enum, OpaqueItem, Path, Static, Struct, Type, Typedef, Union,
};
use bindgen::library::Library;
use bindgen::monomorph::Monomorphs;
/// An item is any type of rust item besides a function
pub trait Item {
    /// Fully qualified path identifying this item.
    fn path(&self) -> &Path;
    /// Last segment of the path.
    fn name(&self) -> &str {
        self.path().name()
    }
    /// Name used when emitting the item; defaults to `name()`.
    fn export_name(&self) -> &str {
        self.name()
    }
    /// The `Cfg` attached to this item, if any.
    fn cfg(&self) -> Option<&Cfg>;
    fn annotations(&self) -> &AnnotationSet;
    fn annotations_mut(&mut self) -> &mut AnnotationSet;
    /// Wraps a clone of `self` in a type-erased `ItemContainer`.
    fn container(&self) -> ItemContainer;
    // The two declaration-type hooks panic by default; only items that
    // participate in declaration resolution override them.
    fn collect_declaration_types(&self, _resolver: &mut DeclarationTypeResolver) {
        unimplemented!()
    }
    fn resolve_declaration_types(&mut self, _resolver: &DeclarationTypeResolver) {
        unimplemented!()
    }
    /// Renaming and dependency collection are no-ops by default.
    fn rename_for_config(&mut self, _config: &Config) {}
    fn add_dependencies(&self, _library: &Library, _out: &mut Dependencies) {}
    /// Only generic items can be monomorphized; everything else panics.
    fn instantiate_monomorph(&self, _generics: &[Type], _library: &Library, _out: &mut Monomorphs) {
        unreachable!("Cannot instantiate {} as a generic.", self.name())
    }
}
/// An owned, type-erased `Item` — one variant per supported item kind.
#[derive(Debug, Clone)]
pub enum ItemContainer {
    Constant(Constant),
    Static(Static),
    OpaqueItem(OpaqueItem),
    Struct(Struct),
    Union(Union),
    Enum(Enum),
    Typedef(Typedef),
}
impl ItemContainer {
pub fn deref(&self) -> &Item {
match self {
&ItemContainer::Constant(ref x) => x,
&ItemContainer::Static(ref x) => x,
&ItemContainer::OpaqueItem(ref x) => x,
&ItemContainer::Struct(ref x) => x,
&ItemContainer::Union(ref x) => x,
&ItemContainer::Enum(ref x) => x,
&ItemContainer::Typedef(ref x) => x,
}
}
}
#[derive(Debug, Clone)]
pub enum ItemValue<T: Item> {
    /// Several `#[cfg]`-gated definitions sharing one path.
    Cfg(Vec<T>),
    /// A single unconditional definition.
    Single(T),
}
/// Items keyed by path; a path may hold several cfg-gated definitions.
#[derive(Debug, Clone)]
pub struct ItemMap<T: Item> {
    data: BTreeMap<Path, ItemValue<T>>,
}
impl<T: Item + Clone> ItemMap<T> {
    /// Creates an empty item map.
    pub fn new() -> ItemMap<T> {
        ItemMap {
            data: BTreeMap::new(),
        }
    }
    /// Re-inserts every item so each is keyed by its current path (needed
    /// after items were renamed in place).
    pub fn rebuild(&mut self) {
        let old = mem::replace(self, ItemMap::new());
        old.for_all_items(|x| {
            self.try_insert(x.clone());
        });
    }
    /// Attempts to insert `item`, returning `true` on success.
    ///
    /// A path may hold several items only when all of them carry a `#[cfg]`;
    /// duplicating an un-cfg'd item or mixing cfg'd and un-cfg'd items under
    /// one path is rejected.
    pub fn try_insert(&mut self, item: T) -> bool {
        match (item.cfg().is_some(), self.data.get_mut(item.path())) {
            (true, Some(&mut ItemValue::Cfg(ref mut items))) => {
                items.push(item);
                return true;
            }
            (false, Some(&mut ItemValue::Cfg(_))) => {
                return false;
            }
            (true, Some(&mut ItemValue::Single(_))) => {
                return false;
            }
            (false, Some(&mut ItemValue::Single(_))) => {
                return false;
            }
            _ => {}
        }
        // No entry for this path yet: create one of the matching shape.
        let path = item.path().clone();
        if item.cfg().is_some() {
            self.data.insert(path, ItemValue::Cfg(vec![item]));
        } else {
            self.data.insert(path, ItemValue::Single(item));
        }
        true
    }
    /// Inserts clones of every item from `other`.
    pub fn extend_with(&mut self, other: &ItemMap<T>) {
        other.for_all_items(|x| {
            self.try_insert(x.clone());
        });
    }
    /// Returns clones of all items as a flat vector, in path order.
    pub fn to_vec(&self) -> Vec<T> {
        let mut result = Vec::with_capacity(self.data.len());
        for container in self.data.values() {
            match container {
                &ItemValue::Cfg(ref items) => result.extend_from_slice(items),
                &ItemValue::Single(ref item) => {
                    result.push(item.clone());
                }
            }
        }
        result
    }
    /// Returns the containers of all items stored under `path`, or `None`
    /// when the path is unknown.
    pub fn get_items(&self, path: &Path) -> Option<Vec<ItemContainer>> {
        Some(match *self.data.get(path)? {
            ItemValue::Cfg(ref items) => items.iter().map(|x| x.container()).collect(),
            ItemValue::Single(ref item) => vec![item.container()],
        })
    }
    /// Removes every item for which `callback` returns `true`.
    ///
    /// Note the inverted sense compared to `Iterator::filter`: matching
    /// items are *discarded* and the rest are kept.
    pub fn filter<F>(&mut self, callback: F)
    where
        F: Fn(&T) -> bool,
    {
        let data = mem::replace(&mut self.data, BTreeMap::new());
        for (name, container) in data {
            match container {
                ItemValue::Cfg(items) => {
                    let mut kept = Vec::new();
                    for item in items {
                        if !callback(&item) {
                            kept.push(item);
                        }
                    }
                    // Drop the entry entirely when nothing survived.
                    if !kept.is_empty() {
                        self.data.insert(name, ItemValue::Cfg(kept));
                    }
                }
                ItemValue::Single(item) => {
                    if !callback(&item) {
                        self.data.insert(name, ItemValue::Single(item));
                    }
                }
            }
        }
    }
    /// Invokes `callback` on every item, in path order.
    pub fn for_all_items<F>(&self, mut callback: F)
    where
        F: FnMut(&T),
    {
        for container in self.data.values() {
            match container {
                &ItemValue::Cfg(ref items) => {
                    for item in items {
                        callback(item);
                    }
                }
                &ItemValue::Single(ref item) => callback(item),
            }
        }
    }
    /// Invokes `callback` on a mutable reference to every item.
    pub fn for_all_items_mut<F>(&mut self, mut callback: F)
    where
        F: FnMut(&mut T),
    {
        for container in self.data.values_mut() {
            match container {
                &mut ItemValue::Cfg(ref mut items) => {
                    for item in items {
                        callback(item);
                    }
                }
                &mut ItemValue::Single(ref mut item) => callback(item),
            }
        }
    }
    /// Invokes `callback` on every item stored under `path`, if any.
    pub fn for_items<F>(&self, path: &Path, mut callback: F)
    where
        F: FnMut(&T),
    {
        match self.data.get(path) {
            Some(&ItemValue::Cfg(ref items)) => {
                for item in items {
                    callback(item);
                }
            }
            Some(&ItemValue::Single(ref item)) => {
                callback(item);
            }
            None => {}
        }
    }
    /// Invokes `callback` on a mutable reference to every item under `path`.
    pub fn for_items_mut<F>(&mut self, path: &Path, mut callback: F)
    where
        F: FnMut(&mut T),
    {
        match self.data.get_mut(path) {
            Some(&mut ItemValue::Cfg(ref mut items)) => {
                for item in items {
                    callback(item);
                }
            }
            Some(&mut ItemValue::Single(ref mut item)) => {
                callback(item);
            }
            None => {}
        }
    }
}
// Update to use dyn when needed
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::collections::BTreeMap;
use std::mem;
use bindgen::config::Config;
use bindgen::declarationtyperesolver::DeclarationTypeResolver;
use bindgen::dependencies::Dependencies;
use bindgen::ir::{
AnnotationSet, Cfg, Constant, Enum, OpaqueItem, Path, Static, Struct, Type, Typedef, Union,
};
use bindgen::library::Library;
use bindgen::monomorph::Monomorphs;
/// An item is any type of rust item besides a function
pub trait Item {
    /// Fully qualified path identifying this item.
    fn path(&self) -> &Path;
    /// Last segment of the path.
    fn name(&self) -> &str {
        self.path().name()
    }
    /// Name used when emitting the item; defaults to `name()`.
    fn export_name(&self) -> &str {
        self.name()
    }
    /// The `Cfg` attached to this item, if any.
    fn cfg(&self) -> Option<&Cfg>;
    fn annotations(&self) -> &AnnotationSet;
    fn annotations_mut(&mut self) -> &mut AnnotationSet;
    /// Wraps a clone of `self` in a type-erased `ItemContainer`.
    fn container(&self) -> ItemContainer;
    // The two declaration-type hooks panic by default; only items that
    // participate in declaration resolution override them.
    fn collect_declaration_types(&self, _resolver: &mut DeclarationTypeResolver) {
        unimplemented!()
    }
    fn resolve_declaration_types(&mut self, _resolver: &DeclarationTypeResolver) {
        unimplemented!()
    }
    /// Renaming and dependency collection are no-ops by default.
    fn rename_for_config(&mut self, _config: &Config) {}
    fn add_dependencies(&self, _library: &Library, _out: &mut Dependencies) {}
    /// Only generic items can be monomorphized; everything else panics.
    fn instantiate_monomorph(&self, _generics: &[Type], _library: &Library, _out: &mut Monomorphs) {
        unreachable!("Cannot instantiate {} as a generic.", self.name())
    }
}
/// An owned, type-erased `Item` — one variant per supported item kind.
#[derive(Debug, Clone)]
pub enum ItemContainer {
    Constant(Constant),
    Static(Static),
    OpaqueItem(OpaqueItem),
    Struct(Struct),
    Union(Union),
    Enum(Enum),
    Typedef(Typedef),
}
impl ItemContainer {
    /// Borrows the contained value as an `Item` trait object.
    pub fn deref(&self) -> &dyn Item {
        match *self {
            ItemContainer::Constant(ref x) => x,
            ItemContainer::Static(ref x) => x,
            ItemContainer::OpaqueItem(ref x) => x,
            ItemContainer::Struct(ref x) => x,
            ItemContainer::Union(ref x) => x,
            ItemContainer::Enum(ref x) => x,
            ItemContainer::Typedef(ref x) => x,
        }
    }
}
#[derive(Debug, Clone)]
pub enum ItemValue<T: Item> {
    /// Several `#[cfg]`-gated definitions sharing one path.
    Cfg(Vec<T>),
    /// A single unconditional definition.
    Single(T),
}
/// Items keyed by path; a path may hold several cfg-gated definitions.
#[derive(Debug, Clone)]
pub struct ItemMap<T: Item> {
    data: BTreeMap<Path, ItemValue<T>>,
}
impl<T: Item + Clone> ItemMap<T> {
    /// Creates an empty item map.
    pub fn new() -> ItemMap<T> {
        ItemMap {
            data: BTreeMap::new(),
        }
    }
    /// Re-inserts every item so each is keyed by its current path (needed
    /// after items were renamed in place).
    pub fn rebuild(&mut self) {
        let old = mem::replace(self, ItemMap::new());
        old.for_all_items(|x| {
            self.try_insert(x.clone());
        });
    }
    /// Attempts to insert `item`, returning `true` on success.
    ///
    /// A path may hold several items only when all of them carry a `#[cfg]`;
    /// duplicating an un-cfg'd item or mixing cfg'd and un-cfg'd items under
    /// one path is rejected.
    pub fn try_insert(&mut self, item: T) -> bool {
        match (item.cfg().is_some(), self.data.get_mut(item.path())) {
            (true, Some(&mut ItemValue::Cfg(ref mut items))) => {
                items.push(item);
                return true;
            }
            (false, Some(&mut ItemValue::Cfg(_))) => {
                return false;
            }
            (true, Some(&mut ItemValue::Single(_))) => {
                return false;
            }
            (false, Some(&mut ItemValue::Single(_))) => {
                return false;
            }
            _ => {}
        }
        // No entry for this path yet: create one of the matching shape.
        let path = item.path().clone();
        if item.cfg().is_some() {
            self.data.insert(path, ItemValue::Cfg(vec![item]));
        } else {
            self.data.insert(path, ItemValue::Single(item));
        }
        true
    }
    /// Inserts clones of every item from `other`.
    pub fn extend_with(&mut self, other: &ItemMap<T>) {
        other.for_all_items(|x| {
            self.try_insert(x.clone());
        });
    }
    /// Returns clones of all items as a flat vector, in path order.
    pub fn to_vec(&self) -> Vec<T> {
        let mut result = Vec::with_capacity(self.data.len());
        for container in self.data.values() {
            match container {
                &ItemValue::Cfg(ref items) => result.extend_from_slice(items),
                &ItemValue::Single(ref item) => {
                    result.push(item.clone());
                }
            }
        }
        result
    }
    /// Returns the containers of all items stored under `path`, or `None`
    /// when the path is unknown.
    pub fn get_items(&self, path: &Path) -> Option<Vec<ItemContainer>> {
        Some(match *self.data.get(path)? {
            ItemValue::Cfg(ref items) => items.iter().map(|x| x.container()).collect(),
            ItemValue::Single(ref item) => vec![item.container()],
        })
    }
    /// Removes every item for which `callback` returns `true`.
    ///
    /// Note the inverted sense compared to `Iterator::filter`: matching
    /// items are *discarded* and the rest are kept.
    pub fn filter<F>(&mut self, callback: F)
    where
        F: Fn(&T) -> bool,
    {
        let data = mem::replace(&mut self.data, BTreeMap::new());
        for (name, container) in data {
            match container {
                ItemValue::Cfg(items) => {
                    let mut kept = Vec::new();
                    for item in items {
                        if !callback(&item) {
                            kept.push(item);
                        }
                    }
                    // Drop the entry entirely when nothing survived.
                    if !kept.is_empty() {
                        self.data.insert(name, ItemValue::Cfg(kept));
                    }
                }
                ItemValue::Single(item) => {
                    if !callback(&item) {
                        self.data.insert(name, ItemValue::Single(item));
                    }
                }
            }
        }
    }
    /// Invokes `callback` on every item, in path order.
    pub fn for_all_items<F>(&self, mut callback: F)
    where
        F: FnMut(&T),
    {
        for container in self.data.values() {
            match container {
                &ItemValue::Cfg(ref items) => {
                    for item in items {
                        callback(item);
                    }
                }
                &ItemValue::Single(ref item) => callback(item),
            }
        }
    }
    /// Invokes `callback` on a mutable reference to every item.
    pub fn for_all_items_mut<F>(&mut self, mut callback: F)
    where
        F: FnMut(&mut T),
    {
        for container in self.data.values_mut() {
            match container {
                &mut ItemValue::Cfg(ref mut items) => {
                    for item in items {
                        callback(item);
                    }
                }
                &mut ItemValue::Single(ref mut item) => callback(item),
            }
        }
    }
    /// Invokes `callback` on every item stored under `path`, if any.
    pub fn for_items<F>(&self, path: &Path, mut callback: F)
    where
        F: FnMut(&T),
    {
        match self.data.get(path) {
            Some(&ItemValue::Cfg(ref items)) => {
                for item in items {
                    callback(item);
                }
            }
            Some(&ItemValue::Single(ref item)) => {
                callback(item);
            }
            None => {}
        }
    }
    /// Invokes `callback` on a mutable reference to every item under `path`.
    pub fn for_items_mut<F>(&mut self, path: &Path, mut callback: F)
    where
        F: FnMut(&mut T),
    {
        match self.data.get_mut(path) {
            Some(&mut ItemValue::Cfg(ref mut items)) => {
                for item in items {
                    callback(item);
                }
            }
            Some(&mut ItemValue::Single(ref mut item)) => {
                callback(item);
            }
            None => {}
        }
    }
}
|
use std::io;
use types::{InputBuf, ParseResult};
use primitives::IntoInner;
use buffer::{
Buffer,
DataSource,
FixedSizeBuffer,
StreamError,
Stream,
};
use buffer::data_source::{IteratorDataSource, ReadDataSource};
// NOTE(review): this is the legacy `bitflags!` invocation style
// (`flags Name: T { ... }` with comma-separated consts); newer bitflags
// releases use a `struct`-based syntax — confirm the pinned version before
// upgrading the dependency.
bitflags!{
    flags ParserState: u64 {
        /// The parser which was last run on the buffer did not manage to complete with the data
        /// available in the buffer.
        const INCOMPLETE = 1,
        /// The buffer did not manage to read any more data from the underlying `Read`
        /// implementation.
        const END_OF_INPUT = 2,
        /// `parse()` should attempt to read more data whenever the `INCOMPLETE` flag is set.
        const AUTOMATIC_FILL = 4,
    }
}
/// Manages a buffer and data source pair, enabling efficient parsing from a streaming source.
#[derive(Debug)]
pub struct Source<S: DataSource, B: Buffer<S::Item>> {
    /// Source reader
    source: S,
    /// Buffered items read from `source` but not yet consumed by a parser
    buffer: B,
    /// The requested amount of bytes to be available for reading from the buffer
    request: usize,
    /// Parser/reader state flags (see `ParserState`)
    state: ParserState,
}
impl<R: io::Read> Source<ReadDataSource<R>, FixedSizeBuffer<u8>> {
    /// Creates a new `Source` from a `Read` instance with the default `FixedSizeBuffer` settings.
    #[inline]
    pub fn new(source: R) -> Self {
        let reader = ReadDataSource::new(source);
        Self::with_buffer(reader, FixedSizeBuffer::new())
    }
}
impl<R: io::Read, B: Buffer<u8>> Source<ReadDataSource<R>, B> {
    /// Creates a new `Source` from `Read` and buffer instances.
    #[inline]
    pub fn from_read(source: R, buffer: B) -> Self {
        let reader = ReadDataSource::new(source);
        Self::with_buffer(reader, buffer)
    }
}
impl<I: Iterator, B: Buffer<I::Item>> Source<IteratorDataSource<I>, B>
    where I::Item: Copy + PartialEq {
    /// Creates a new `Source` from `Iterator` and `Buffer` instances.
    #[inline]
    pub fn from_iter(source: I, buffer: B) -> Self {
        let iter_source = IteratorDataSource::new(source);
        Self::with_buffer(iter_source, buffer)
    }
}
impl<S: DataSource, B: Buffer<S::Item>> Source<S, B> {
    /// Creates a new `Source` from `DataSource` and `Buffer` instances.
    #[inline]
    pub fn with_buffer(source: S, buffer: B) -> Self {
        Source {
            source: source,
            buffer: buffer,
            // No outstanding parser request yet.
            request: 0,
            // Start as incomplete so the first `parse()` triggers a fill.
            state: INCOMPLETE | AUTOMATIC_FILL,
        }
    }
    /// Attempts to fill this source so it contains at least ``request`` bytes.
    ///
    /// Returns the number of items actually read; this may be fewer than
    /// requested if the underlying source is exhausted.
    #[inline]
    fn fill_requested(&mut self, request: usize) -> io::Result<usize> {
        let mut read = 0;
        // `mut` binding was unnecessary: the `&mut B` reference itself is
        // never reassigned.
        let buffer = &mut self.buffer;
        let source = &mut self.source;
        if buffer.len() < request {
            let diff = request - buffer.len();
            buffer.request_space(diff);
            // Keep reading until the request is satisfied or the source
            // yields no more data (a zero-length fill).
            while buffer.len() < request {
                match try!(buffer.fill(source)) {
                    0 => break,
                    n => read += n,
                }
            }
        }
        Ok(read)
    }
    /// Attempts to fill the buffer to satisfy the last call to `parse()`.
    #[inline]
    pub fn fill(&mut self) -> io::Result<usize> {
        // Request one item beyond what is buffered so progress is guaranteed.
        let req = self.buffer.len() + 1;
        self.fill_requested(req).map(|n| {
            self.state.remove(INCOMPLETE);
            // A zero-length read marks end of input; any data clears it.
            if n > 0 {
                self.state.remove(END_OF_INPUT);
            } else {
                self.state.insert(END_OF_INPUT);
            }
            n
        })
    }
    /// Returns the number of bytes left in the buffer which have not yet been parsed.
    #[inline]
    pub fn len(&self) -> usize {
        self.buffer.len()
    }
    /// If the buffer is empty and the reader has reached the end.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.state.contains(END_OF_INPUT) && self.len() == 0
    }
    /// Returns the capacity of the underlying buffer.
    ///
    /// This is the maximum number of input items the buffer can store.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.buffer.capacity()
    }
    /// Borrows the remainder of the buffer.
    #[inline]
    pub fn buffer(&self) -> &[S::Item] {
        &self.buffer
    }
    /// Resets the buffer state, keeping the current buffer contents and cursor position.
    ///
    /// This is useful when streaming data and more data has been made available on a
    /// socket/stream.
    #[inline]
    pub fn reset(&mut self) {
        self.state = ParserState::empty();
    }
    /// Changes the setting automatic fill feature, `true` will make the buffer automatically
    /// call `fill()` on the next call to `parse()` after a `Retry` was encountered.
    // TODO: Make a part of the constructor/builder
    #[inline]
    pub fn set_autofill(&mut self, value: bool) {
        if value {
            self.state.insert(AUTOMATIC_FILL)
        } else {
            self.state.remove(AUTOMATIC_FILL)
        }
    }
}
impl<S: DataSource<Item=u8>, B: Buffer<u8>> io::Read for Source<S, B> {
    /// Reads into `buf`, first pulling more data from the underlying source
    /// when the buffer holds fewer bytes than requested.
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if buf.len() > self.len() {
            try!(self.fill_requested(buf.len()));
        }
        // Copy out of the buffered slice, then mark those bytes consumed.
        (&self.buffer[..]).read(buf).map(|n| {
            self.buffer.consume(n);
            n
        })
    }
}
impl<S: DataSource<Item=u8>, B: Buffer<u8>> io::BufRead for Source<S, B> {
    /// Fills the internal buffer up to its capacity and exposes it.
    #[inline]
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        let cap = self.buffer.capacity();
        try!(self.fill_requested(cap));
        Ok(self.buffer())
    }
    /// Marks `num` buffered bytes as consumed.
    #[inline]
    fn consume(&mut self, num: usize) {
        self.buffer.consume(num)
    }
}
impl<'a, S: DataSource, B: Buffer<S::Item>> Stream<'a, 'a> for Source<S, B>
    where S::Item: 'a {
    type Item = S::Item;
    /// Runs parser `f` on the buffered input, refilling first when the
    /// previous attempt was incomplete and autofill is enabled, and
    /// consuming exactly the input the parser used.
    #[inline]
    fn parse<F, T, E>(&'a mut self, f: F) -> Result<T, StreamError<&'a [Self::Item], E>>
      where F: FnOnce(InputBuf<'a, Self::Item>) -> ParseResult<InputBuf<'a, Self::Item>, T, E>,
            T: 'a,
            E: 'a {
        use primitives::Primitives;
        // Both flags must be set for a refill: an earlier incomplete parse
        // plus the autofill feature being enabled.
        if self.state.contains(INCOMPLETE | AUTOMATIC_FILL) {
            try!(self.fill().map_err(StreamError::IoError));
        }
        if self.is_empty() {
            return Err(StreamError::EndOfInput);
        }
        match f(InputBuf::new(&self.buffer)).into_inner() {
            (remainder, Ok(data)) => {
                if remainder.is_incomplete() && self.state.contains(END_OF_INPUT) {
                    // We can't accept this since we might have hit a premature end
                    self.state.insert(INCOMPLETE);
                    Err(StreamError::Retry)
                } else {
                    // TODO: Do something neater with the remainder
                    // Consume exactly what the parser used.
                    self.buffer.consume(self.buffer.len() - remainder.len());
                    Ok(data)
                }
            },
            (mut remainder, Err(err)) => {
                if remainder.is_incomplete() {
                    // Out of data: a hard error at end of input, otherwise
                    // ask the caller to retry after filling.
                    if self.state.contains(END_OF_INPUT) {
                        Err(StreamError::Incomplete)
                    } else {
                        self.state.insert(INCOMPLETE);
                        Err(StreamError::Retry)
                    }
                } else {
                    // TODO: Do something neater with the remainder
                    // TODO: Detail this behaviour, maybe make it configurable
                    self.buffer.consume(self.buffer.len() - remainder.len());
                    Err(StreamError::ParseError(remainder.consume_remaining(), err))
                }
            },
        }
    }
}
#[cfg(test)]
mod test {
    //! Tests for the buffered `Source` stream: buffer sizing, automatic
    //! refilling after `Retry`, end-of-input detection, and manual filling
    //! with autofill disabled.
    use std::io;

    use types::Input;
    use parsers::{
        Error,
        any,
        take,
    };
    use buffer::{
        FixedSizeBuffer,
        StreamError,
        Stream,
    };
    use buffer::data_source::ReadDataSource;

    use super::*;

    // Shorthand: a `Source` over `source` backed by a fixed-size buffer of
    // `buffer_length` bytes.
    fn buf(source: &[u8], buffer_length: usize) -> Source<ReadDataSource<io::Cursor<&[u8]>>, FixedSizeBuffer<u8>> {
        Source::with_buffer(ReadDataSource::new(io::Cursor::new(source)), FixedSizeBuffer::with_size(buffer_length))
    }

    // A zero-length buffer is unusable and must panic at construction.
    #[test]
    #[should_panic]
    fn bufsize_zero() {
        let _ = buf(&b"this is a test"[..], 0);
    }

    // The default buffer must have a non-zero capacity.
    #[test]
    fn default_bufsize() {
        let b: Source<_, FixedSizeBuffer<_>> = Source::new(io::Cursor::new(&b"test"[..]));

        assert!(b.capacity() > 0);
    }

    // An empty reader reports `EndOfInput` without ever running the parser.
    #[test]
    fn empty_buf() {
        let mut n = 0;
        let mut b = Source::new(io::Cursor::new(&b""[..]));

        let r = b.parse(|i| {
            n += 1;

            take(i, 1).bind(|i, _| i.ret::<_, Error<_>>(true))
        });

        assert_eq!(r, Err(StreamError::EndOfInput));
        assert_eq!(n, 0);
    }

    // 1-byte buffer: every successful parse is followed by a `Retry` which
    // triggers an automatic refill on the next `parse()` call.
    #[test]
    fn fill() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"test"[..], 1);

        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b't'));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 2);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b'e'));
        assert_eq!(n, 3);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b's'));
        assert_eq!(n, 5);
        assert_eq!(m, 3);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 6);
        assert_eq!(m, 3);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b't'));
        assert_eq!(n, 7);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 8);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 8);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 8);
        assert_eq!(m, 4);
    }

    // 2-byte buffer: two single-byte parses succeed per fill.
    #[test]
    fn fill2() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"test"[..], 2);

        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b't'));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b'e'));
        assert_eq!(n, 2);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 3);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b's'));
        assert_eq!(n, 4);
        assert_eq!(m, 3);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b't'));
        assert_eq!(n, 5);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 6);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 6);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 6);
        assert_eq!(m, 4);
    }

    // 3-byte buffer with 2-byte parses: a refill is needed between pairs.
    #[test]
    fn fill3() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"test"[..], 3);

        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Ok(&b"te"[..]));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 2);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Ok(&b"st"[..]));
        assert_eq!(n, 3);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
    }

    // An odd-sized input: the final lone byte can never satisfy `take(2)`,
    // so once the source is exhausted the result is `Incomplete`.
    #[test]
    fn incomplete() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"tes"[..], 2);

        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Ok(&b"te"[..]));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 2);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 3);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Incomplete));
        assert_eq!(n, 4);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Incomplete));
        assert_eq!(n, 5);
        assert_eq!(m, 1);
    }

    // With autofill off, `Retry` requires an explicit `fill()` to make progress.
    #[test]
    fn no_autofill() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"test"[..], 2);

        b.set_autofill(false);

        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 1);
        assert_eq!(m, 0);
        assert_eq!(b.fill().unwrap(), 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Ok(&b"te"[..]));
        assert_eq!(n, 2);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 3);
        assert_eq!(m, 1);
        assert_eq!(b.fill().unwrap(), 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Ok(&b"st"[..]));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 5);
        assert_eq!(m, 2);
        assert_eq!(b.fill().unwrap(), 0);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 5);
        assert_eq!(m, 2);
    }

    // With autofill off, the very first `parse()` also needs a manual `fill()`.
    #[test]
    fn no_autofill_first() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"ab"[..], 1);

        b.set_autofill(false);

        assert_eq!(b.fill().unwrap(), 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b'a'));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 2);
        assert_eq!(m, 1);
        assert_eq!(b.fill().unwrap(), 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b'b'));
        assert_eq!(n, 3);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
        assert_eq!(b.fill().unwrap(), 0);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
    }
}
Stateful: Fixed issue where incomplete was not handled correctly on success
use std::io;
use types::{InputBuf, ParseResult};
use primitives::IntoInner;
use buffer::{
Buffer,
DataSource,
FixedSizeBuffer,
StreamError,
Stream,
};
use buffer::data_source::{IteratorDataSource, ReadDataSource};
// Bit flags tracking the buffering/parsing state of a `Source`.
bitflags!{
    flags ParserState: u64 {
        /// The parser which was last run on the buffer did not manage to complete with the data
        /// available in the buffer.
        const INCOMPLETE = 1,
        /// The buffer did not manage to read any more data from the underlying `Read`
        /// implementation.
        const END_OF_INPUT = 2,
        /// `parse()` should attempt to read more data whenever the `INCOMPLETE` flag is set.
        const AUTOMATIC_FILL = 4,
    }
}
/// Manages a buffer and data source pair, enabling efficient parsing from a streaming source.
#[derive(Debug)]
pub struct Source<S: DataSource, B: Buffer<S::Item>> {
    /// Source reader
    source: S,
    /// Buffer holding items read from `source` but not yet consumed by a parser
    buffer: B,
    /// The requested amount of bytes to be available for reading from the buffer
    // NOTE(review): initialized to 0 in `with_buffer` and never updated in this
    // file — confirm this field is still needed.
    request: usize,
    /// Input state, if end has been reached
    state: ParserState,
}
impl<R: io::Read> Source<ReadDataSource<R>, FixedSizeBuffer<u8>> {
    /// Creates a new `Source` from a `Read` instance with the default `FixedSizeBuffer` settings.
    #[inline]
    pub fn new(source: R) -> Self {
        let buffer = FixedSizeBuffer::new();
        Self::with_buffer(ReadDataSource::new(source), buffer)
    }
}
impl<R: io::Read, B: Buffer<u8>> Source<ReadDataSource<R>, B> {
    /// Creates a new `Source` from `Read` and buffer instances.
    #[inline]
    pub fn from_read(source: R, buffer: B) -> Self {
        let wrapped = ReadDataSource::new(source);
        Self::with_buffer(wrapped, buffer)
    }
}
impl<I: Iterator, B: Buffer<I::Item>> Source<IteratorDataSource<I>, B>
  where I::Item: Copy + PartialEq {
    /// Creates a new `Source` from `Iterator` and `Buffer` instances.
    #[inline]
    pub fn from_iter(source: I, buffer: B) -> Self {
        let wrapped = IteratorDataSource::new(source);
        Self::with_buffer(wrapped, buffer)
    }
}
impl<S: DataSource, B: Buffer<S::Item>> Source<S, B> {
    /// Creates a new `Source` from `DataSource` and `Buffer` instances.
    #[inline]
    pub fn with_buffer(source: S, buffer: B) -> Self {
        Source {
            source: source,
            buffer: buffer,
            request: 0,
            // Start INCOMPLETE so the first `parse()` performs an initial fill
            // when automatic filling is enabled.
            state: INCOMPLETE | AUTOMATIC_FILL,
        }
    }

    /// Attempts to fill this source so it contains at least ``request`` bytes.
    ///
    /// Returns the total number of items read from the data source; zero when
    /// the buffer already holds `request` items or the source is exhausted.
    #[inline]
    fn fill_requested(&mut self, request: usize) -> io::Result<usize> {
        let mut read = 0;
        let mut buffer = &mut self.buffer;
        let source = &mut self.source;

        if buffer.len() < request {
            let diff = request - buffer.len();

            // Make room for the missing amount before reading.
            buffer.request_space(diff);

            // Keep reading until the request is satisfied or the source
            // yields nothing.
            while buffer.len() < request {
                match try!(buffer.fill(source)) {
                    0 => break,
                    n => read = read + n,
                }
            }
        }

        Ok(read)
    }

    /// Attempts to fill the buffer to satisfy the last call to `parse()`.
    ///
    /// Requests one item beyond what is currently buffered; clears INCOMPLETE,
    /// and sets or clears END_OF_INPUT depending on whether anything was read.
    #[inline]
    pub fn fill(&mut self) -> io::Result<usize> {
        let req = self.buffer.len() + 1;

        self.fill_requested(req).map(|n| {
            self.state.remove(INCOMPLETE);

            if n > 0 {
                self.state.remove(END_OF_INPUT);
            } else {
                self.state.insert(END_OF_INPUT);
            }

            n
        })
    }

    /// Returns the number of bytes left in the buffer which have not yet been parsed.
    #[inline]
    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    /// If the buffer is empty and the reader has reached the end.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.state.contains(END_OF_INPUT) && self.len() == 0
    }

    /// Returns the capacity of the underlying buffer.
    ///
    /// This is the maximum number of input items the buffer can store.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.buffer.capacity()
    }

    /// Borrows the remainder of the buffer.
    #[inline]
    pub fn buffer(&self) -> &[S::Item] {
        &self.buffer
    }

    /// Resets the buffer state, keeping the current buffer contents and cursor position.
    ///
    /// This is useful when streaming data and more data has been made available on a
    /// socket/stream.
    #[inline]
    pub fn reset(&mut self) {
        self.state = ParserState::empty();
    }

    /// Changes the setting automatic fill feature, `true` will make the buffer automatically
    /// call `fill()` on the next call to `parse()` after a `Retry` was encountered.
    // TODO: Make a part of the constructor/builder
    #[inline]
    pub fn set_autofill(&mut self, value: bool) {
        if value {
            self.state.insert(AUTOMATIC_FILL)
        } else {
            self.state.remove(AUTOMATIC_FILL)
        }
    }
}
impl<S: DataSource<Item=u8>, B: Buffer<u8>> io::Read for Source<S, B> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if buf.len() > self.len() {
try!(self.fill_requested(buf.len()));
}
(&self.buffer[..]).read(buf).map(|n| {
self.buffer.consume(n);
n
})
}
}
impl<S: DataSource<Item=u8>, B: Buffer<u8>> io::BufRead for Source<S, B> {
    /// Tries to fill the internal buffer to full capacity, then exposes the
    /// buffered-but-unconsumed bytes.
    #[inline]
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        let capacity = self.buffer.capacity();
        match self.fill_requested(capacity) {
            Ok(_)    => Ok(self.buffer()),
            Err(err) => Err(err),
        }
    }

    /// Marks `num` buffered bytes as consumed.
    #[inline]
    fn consume(&mut self, num: usize) {
        self.buffer.consume(num);
    }
}
impl<'a, S: DataSource, B: Buffer<S::Item>> Stream<'a, 'a> for Source<S, B>
  where S::Item: 'a {
    type Item = S::Item;

    /// Runs the parser `f` over the currently buffered data, refilling first
    /// when automatic fill is enabled and the previous attempt was incomplete.
    #[inline]
    fn parse<F, T, E>(&'a mut self, f: F) -> Result<T, StreamError<&'a [Self::Item], E>>
      where F: FnOnce(InputBuf<'a, Self::Item>) -> ParseResult<InputBuf<'a, Self::Item>, T, E>,
            T: 'a,
            E: 'a {
        use primitives::Primitives;

        if self.state.contains(INCOMPLETE | AUTOMATIC_FILL) {
            try!(self.fill().map_err(StreamError::IoError));
        }

        if self.is_empty() {
            return Err(StreamError::EndOfInput);
        }

        match f(InputBuf::new(&self.buffer)).into_inner() {
            (remainder, Ok(data)) => {
                // A success that ran out of buffer is only final once the
                // source itself is exhausted; otherwise more data could still
                // change the outcome, so the caller must retry.
                let premature = remainder.is_incomplete() && !self.state.contains(END_OF_INPUT);
                if premature {
                    self.state.insert(INCOMPLETE);
                    Err(StreamError::Retry)
                } else {
                    // TODO: Do something neater with the remainder
                    let parsed = self.buffer.len() - remainder.len();
                    self.buffer.consume(parsed);
                    Ok(data)
                }
            },
            (mut remainder, Err(err)) => {
                if remainder.is_incomplete() {
                    if self.state.contains(END_OF_INPUT) {
                        // No more data will ever arrive: this is a hard error.
                        Err(StreamError::Incomplete)
                    } else {
                        self.state.insert(INCOMPLETE);
                        Err(StreamError::Retry)
                    }
                } else {
                    // TODO: Do something neater with the remainder
                    // TODO: Detail this behaviour, maybe make it configurable
                    let parsed = self.buffer.len() - remainder.len();
                    self.buffer.consume(parsed);
                    Err(StreamError::ParseError(remainder.consume_remaining(), err))
                }
            },
        }
    }
}
#[cfg(test)]
mod test {
    //! Tests for the buffered `Source` stream: buffer sizing, automatic
    //! refilling after `Retry`, end-of-input detection, incomplete-at-EOF
    //! handling, and manual filling with autofill disabled.
    use std::io;

    use types::Input;
    use parsers::{
        Error,
        any,
        take,
        take_while,
    };
    use buffer::{
        FixedSizeBuffer,
        StreamError,
        Stream,
    };
    use buffer::data_source::ReadDataSource;

    use super::*;

    // Shorthand: a `Source` over `source` backed by a fixed-size buffer of
    // `buffer_length` bytes.
    fn buf(source: &[u8], buffer_length: usize) -> Source<ReadDataSource<io::Cursor<&[u8]>>, FixedSizeBuffer<u8>> {
        Source::with_buffer(ReadDataSource::new(io::Cursor::new(source)), FixedSizeBuffer::with_size(buffer_length))
    }

    // A zero-length buffer is unusable and must panic at construction.
    #[test]
    #[should_panic]
    fn bufsize_zero() {
        let _ = buf(&b"this is a test"[..], 0);
    }

    // The default buffer must have a non-zero capacity.
    #[test]
    fn default_bufsize() {
        let b: Source<_, FixedSizeBuffer<_>> = Source::new(io::Cursor::new(&b"test"[..]));

        assert!(b.capacity() > 0);
    }

    // An empty reader reports `EndOfInput` without ever running the parser.
    #[test]
    fn empty_buf() {
        let mut n = 0;
        let mut b = Source::new(io::Cursor::new(&b""[..]));

        let r = b.parse(|i| {
            n += 1;

            take(i, 1).bind(|i, _| i.ret::<_, Error<_>>(true))
        });

        assert_eq!(r, Err(StreamError::EndOfInput));
        assert_eq!(n, 0);
    }

    // 1-byte buffer: every successful parse is followed by a `Retry` which
    // triggers an automatic refill on the next `parse()` call.
    #[test]
    fn fill() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"test"[..], 1);

        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b't'));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 2);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b'e'));
        assert_eq!(n, 3);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b's'));
        assert_eq!(n, 5);
        assert_eq!(m, 3);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 6);
        assert_eq!(m, 3);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b't'));
        assert_eq!(n, 7);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 8);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 8);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 8);
        assert_eq!(m, 4);
    }

    // 2-byte buffer: two single-byte parses succeed per fill.
    #[test]
    fn fill2() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"test"[..], 2);

        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b't'));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b'e'));
        assert_eq!(n, 2);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 3);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b's'));
        assert_eq!(n, 4);
        assert_eq!(m, 3);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b't'));
        assert_eq!(n, 5);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 6);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 6);
        assert_eq!(m, 4);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 6);
        assert_eq!(m, 4);
    }

    // 3-byte buffer with 2-byte parses: a refill is needed between pairs.
    #[test]
    fn fill3() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"test"[..], 3);

        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Ok(&b"te"[..]));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 2);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Ok(&b"st"[..]));
        assert_eq!(n, 3);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
    }

    // An odd-sized input: the final lone byte can never satisfy `take(2)`,
    // so once the source is exhausted the result is `Incomplete`.
    #[test]
    fn incomplete() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"tes"[..], 2);

        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Ok(&b"te"[..]));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 2);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 3);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Incomplete));
        assert_eq!(n, 4);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Incomplete));
        assert_eq!(n, 5);
        assert_eq!(m, 1);
    }

    // An incomplete *success* (take_while hitting the buffer end) must retry
    // while more data can arrive and be accepted once the source is done.
    #[test]
    fn incomplete2() {
        let mut o = 0;
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"tes"[..], 2);

        assert_eq!(b.parse(|i| { n += 1; take_while(i, |_| { o += 1; o < 2 }).inspect(|_| m += 1) }), Ok(&b"t"[..]));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        o = 0;
        assert_eq!(b.parse(|i| { n += 1; take_while(i, |_| { o += 1; o < 2 }).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 2);
        assert_eq!(m, 2);
        o = 0;
        assert_eq!(b.parse(|i| { n += 1; take_while(i, |_| { o += 1; o < 2 }).inspect(|_| m += 1) }), Ok(&b"e"[..]));
        assert_eq!(n, 3);
        assert_eq!(m, 3);
        o = 0;
        assert_eq!(b.parse(|i| { n += 1; take_while(i, |_| { o += 1; o < 2 }).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 4);
        assert_eq!(m, 4);
        o = 0;
        assert_eq!(b.parse(|i| { n += 1; take_while(i, |_| { o += 1; o < 2 }).inspect(|_| m += 1) }), Ok(&b"s"[..]));
        assert_eq!(n, 5);
        assert_eq!(m, 5);
        o = 0;
        assert_eq!(b.parse(|i| { n += 1; take_while(i, |_| { o += 1; o < 2 }).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 5);
        assert_eq!(m, 5);
    }

    // With autofill off, `Retry` requires an explicit `fill()` to make progress.
    #[test]
    fn no_autofill() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"test"[..], 2);

        b.set_autofill(false);

        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 1);
        assert_eq!(m, 0);
        assert_eq!(b.fill().unwrap(), 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Ok(&b"te"[..]));
        assert_eq!(n, 2);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 3);
        assert_eq!(m, 1);
        assert_eq!(b.fill().unwrap(), 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Ok(&b"st"[..]));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 5);
        assert_eq!(m, 2);
        assert_eq!(b.fill().unwrap(), 0);
        assert_eq!(b.parse(|i| { n += 1; take(i, 2).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 5);
        assert_eq!(m, 2);
    }

    // With autofill off, the very first `parse()` also needs a manual `fill()`.
    #[test]
    fn no_autofill_first() {
        let mut n = 0; // Times it has entered the parsing function
        let mut m = 0; // Times it has managed to get past the request for data
        let mut b = buf(&b"ab"[..], 1);

        b.set_autofill(false);

        assert_eq!(b.fill().unwrap(), 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b'a'));
        assert_eq!(n, 1);
        assert_eq!(m, 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 2);
        assert_eq!(m, 1);
        assert_eq!(b.fill().unwrap(), 1);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Ok(b'b'));
        assert_eq!(n, 3);
        assert_eq!(m, 2);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::Retry));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
        assert_eq!(b.fill().unwrap(), 0);
        assert_eq!(b.parse(|i| { n += 1; any(i).inspect(|_| m += 1) }), Err(StreamError::EndOfInput));
        assert_eq!(n, 4);
        assert_eq!(m, 2);
    }
}
|
// Lumol, an extensible molecular simulation engine
// Copyright (C) 2015-2016 Lumol's contributors — BSD license
//! This module allow to convert from and to the internal unit system.
//!
//! Internal units are:
//!
//! - Angstrom (A) for distances;
//! - femtosecond (fs) for time;
//! - Unified atomic mass unit (u or Da) for mass;
//! - Kelvin (K) for temperature;
//! - Number of particles for quantity of matter;
//! - radian (rad) for angles;
//!
//! Other units are derived from these primitives units. For examples, the
//! internal unit for energy is 1e-4 kJ/mol.
use std::collections::BTreeMap;
use std::f64::consts::PI;
use std::num;
use std::fmt;
use std::error::Error;
use consts::{BOHR_RADIUS, NA};
// Atomic mass unit (1 u = 1 Da, the internal mass unit) expressed in kg
const U_IN_KG : f64 = 1.660538782e-27;
/// Get the conversion factors from a string unit to the internal units.
///
/// Each entry maps a unit string to its value expressed in the internal unit
/// system (A, fs, u, K, number of particles, rad). Derived units (energy,
/// force, pressure) follow from these; e.g. the internal energy unit
/// u A^2 / fs^2 is 1e-4 kJ/mol (see the module docs).
lazy_static!(
    static ref FACTORS: BTreeMap<&'static str, f64> = {
        let mut map = BTreeMap::new();
        // Distances units.
        assert!(map.insert("A", 1.0).is_none());
        assert!(map.insert("Å", 1.0).is_none());
        assert!(map.insert("nm", 10.0).is_none());
        assert!(map.insert("pm", 1e-2).is_none());
        assert!(map.insert("fm", 1e-5).is_none());
        assert!(map.insert("m", 1e10).is_none());
        assert!(map.insert("bohr", BOHR_RADIUS).is_none());
        // Time units.
        assert!(map.insert("fs", 1.0).is_none());
        assert!(map.insert("ps", 1e3).is_none());
        assert!(map.insert("ns", 1e6).is_none());
        // Mass units.
        assert!(map.insert("u", 1.0).is_none());
        assert!(map.insert("Da", 1.0).is_none());
        // BUG FIX: a kilodalton is 1000 Da, not 1 Da (the factor was 1.0).
        assert!(map.insert("kDa", 1e3).is_none());
        assert!(map.insert("g", 1e-3 / U_IN_KG).is_none());
        assert!(map.insert("kg", 1.0 / U_IN_KG).is_none());
        // Temperature units.
        assert!(map.insert("K", 1.0).is_none());
        // Quantity of matter units.
        assert!(map.insert("mol", NA).is_none());
        // Angle units.
        assert!(map.insert("rad", 1.0).is_none());
        assert!(map.insert("deg", PI / 180.0).is_none());
        // Energy units.
        assert!(map.insert("J", 1e-10 / U_IN_KG).is_none());
        assert!(map.insert("kJ", 1e-7 / U_IN_KG).is_none());
        assert!(map.insert("kcal", 4.184 * 1e-7 / U_IN_KG).is_none());
        assert!(map.insert("eV", 1.60217653e-19 * 1e-10 / U_IN_KG).is_none());
        assert!(map.insert("H", 4.35974417e-18 * 1e-10 / U_IN_KG).is_none());
        assert!(map.insert("Ry", 4.35974417e-18 / 2.0 * 1e-10 / U_IN_KG).is_none());
        // Force unit.
        assert!(map.insert("N", 1e-20 / U_IN_KG).is_none());
        // Pressure units.
        assert!(map.insert("Pa", 1e-40 / U_IN_KG).is_none());
        assert!(map.insert("kPa", 1e-37 / U_IN_KG).is_none());
        assert!(map.insert("MPa", 1e-34 / U_IN_KG).is_none());
        assert!(map.insert("bar", 1e-35 / U_IN_KG).is_none());
        assert!(map.insert("atm", 101325.0 * 1e-40 / U_IN_KG).is_none());
        return map;
    };
);
/// Possible error causes when parsing a unit string.
#[derive(Debug)]
pub enum ParseError {
    /// Error while parsing a power in `x^y` expressions
    Power(num::ParseIntError),
    /// Error while parsing the value part of a unit string
    Value(num::ParseFloatError),
    /// Parentheses are not balanced in this unit
    ParenthesesMismatch,
    /// This unit was not found
    NotFound {
        /// The unit that created this error
        unit: String
    },
    /// Any other error, with a human-readable description
    MalformedExpr(String),
}
// Integer parse failures (exponents in `x^y`) become `ParseError::Power`,
// allowing `try!`/`?` on `str::parse::<i32>()`.
impl From<num::ParseIntError> for ParseError {
    fn from(err: num::ParseIntError) -> ParseError {
        ParseError::Power(err)
    }
}
// Float parse failures (the numeric value part) become `ParseError::Value`,
// allowing `try!`/`?` on `str::parse::<f64>()`.
impl From<num::ParseFloatError> for ParseError {
    fn from(err: num::ParseFloatError) -> ParseError {
        ParseError::Value(err)
    }
}
impl fmt::Display for ParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            // Power/Value wrap standard parse errors: delegate to them.
            ParseError::Power(ref err) => err.fmt(f),
            ParseError::Value(ref err) => err.fmt(f),
            ParseError::ParenthesesMismatch => write!(f, "Parentheses are not equilibrated."),
            ParseError::NotFound{ref unit} => write!(f, "Unit '{}' not found.", unit),
            ParseError::MalformedExpr(ref err) => write!(f, "Malformed expression: {}", err),
        }
    }
}
impl Error for ParseError {
    // Static description strings; the richer messages live in `Display`.
    fn description(&self) -> &str {
        match *self {
            ParseError::Power(ref err) => err.description(),
            ParseError::Value(ref err) => err.description(),
            ParseError::ParenthesesMismatch => "Parentheses are not equilibrated.",
            ParseError::NotFound{..} => "Unit not found.",
            ParseError::MalformedExpr(..) => "Malformed expression",
        }
    }
}
/// Possible tokens in unit strings
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Token {
    /// Left parentheses
    LParen,
    /// Right parentheses
    RParen,
    /// '*' token
    Mul,
    /// '/' token
    Div,
    /// '^' token
    Pow,
    /// Any other value: a unit name or a number. Note that `tokenize` skips
    /// whitespace without ending the current value token.
    Value(String)
}
impl Token {
    /// What is the precedence of a specific token
    ///
    /// Higher binds tighter: `^` over `*`/`/`; parentheses are lowest so they
    /// never pop other operators in the shunting-yard loop.
    fn precedence(&self) -> usize {
        match *self {
            Token::LParen | Token::RParen => 0,
            Token::Div | Token::Mul => 10,
            Token::Pow => 20,
            // Values never take part in precedence comparisons.
            Token::Value(..) => internal_error!(
                "invalid call to UnitTok::precedence for values"
            )
        }
    }

    /// Get the string used to build this token in tokenize
    fn as_str(&self) -> &str {
        match *self {
            Token::LParen => "(",
            Token::RParen => ")",
            Token::Div => "/",
            Token::Mul => "*",
            Token::Pow => "^",
            Token::Value(ref value) => value
        }
    }
}
/// Transform a string to a stream of tokens
///
/// Operator characters (`* / ^ ( )`) terminate the value being accumulated;
/// whitespace is silently skipped and does NOT end a value token.
// NOTE(review): because whitespace does not flush the current value,
// "k J" tokenizes the same as "kJ" — confirm this is intended.
fn tokenize(unit: &str) -> Vec<Token> {
    let mut tokens = Vec::new();
    let mut current = String::new();

    for c in unit.chars() {
        let operator = match c {
            '*' => Some(Token::Mul),
            '/' => Some(Token::Div),
            '^' => Some(Token::Pow),
            '(' => Some(Token::LParen),
            ')' => Some(Token::RParen),
            _ => None,
        };

        if let Some(op) = operator {
            // An operator ends the value accumulated so far, if any.
            if !current.is_empty() {
                tokens.push(Token::Value(current.clone()));
                current.clear();
            }
            tokens.push(op);
        } else if !c.is_whitespace() {
            current.push(c);
        }
    }

    // Flush the trailing value token.
    if !current.is_empty() {
        tokens.push(Token::Value(current));
    }

    tokens
}
// Panic message for the (unreachable under shunting-yard invariants) case of
// popping from an operator stack known to be non-empty.
static MISSING_OPERATOR: &'static str = "Oops, sorry explorator, but you felt \
in a space-time hole. We are missing an operator here";
/// Create the AST for unit expression using the Shunting-Yard algorithm.
///
/// Converts the infix token stream to reverse polish notation. See
/// https://en.wikipedia.org/wiki/Shunting-yard_algorithm for a description
/// of the algorithm.
///
/// # Errors
///
/// Returns `ParseError::ParenthesesMismatch` when parentheses are unbalanced.
fn shunting_yard(tokens: Vec<Token>) -> Result<Vec<Token>, ParseError> {
    let mut operators = Vec::new();
    let mut output = Vec::new();
    for token in tokens {
        match token {
            Token::Value(..) => output.push(token),
            Token::Mul | Token::Div | Token::Pow => {
                while !operators.is_empty() {
                    // `Option::cloned` yields an owned `Token` directly, which
                    // avoids the trivial `as &Token` cast (and its `#[allow]`)
                    // that the previous `expect(..).clone()` chain required.
                    let top_operator = operators.last().cloned().expect(MISSING_OPERATOR);
                    // All the operators are left-associative
                    if token.precedence() <= top_operator.precedence() {
                        output.push(operators.pop().expect(MISSING_OPERATOR));
                    } else {
                        break;
                    }
                }
                operators.push(token);
            },
            Token::LParen => operators.push(token),
            Token::RParen => {
                // Pop operators back to the matching opening parenthesis.
                while !operators.is_empty() && operators.last() != Some(&Token::LParen) {
                    output.push(operators.pop().expect(MISSING_OPERATOR))
                }
                if operators.is_empty() || operators.last() != Some(&Token::LParen) {
                    return Err(ParseError::ParenthesesMismatch)
                } else {
                    let _ = operators.pop();
                }
            }
        }
    }
    // Drain the remaining operators; any leftover parenthesis is unbalanced.
    while !operators.is_empty() {
        match *operators.last().expect(MISSING_OPERATOR) {
            Token::LParen | Token::RParen => return Err(ParseError::ParenthesesMismatch),
            _ => output.push(operators.pop().expect(MISSING_OPERATOR))
        }
    }
    Ok(output)
}
/// Possible members in unit expressions
#[derive(Debug, PartialEq)]
enum UnitExpr {
    /// A single value: the conversion factor of one concrete unit
    Val(f64),
    /// Multiplication of left-hand side by right-hand side
    Mul(Box<UnitExpr>, Box<UnitExpr>),
    /// Division of left-hand side by right-hand side
    Div(Box<UnitExpr>, Box<UnitExpr>),
    /// Take the power of the expr by the `i32` value
    Pow(Box<UnitExpr>, i32),
}
impl UnitExpr {
    /// Recursively evaluate a unit expression to its overall conversion factor
    fn eval(&self) -> f64 {
        match *self {
            UnitExpr::Val(v) => v,
            UnitExpr::Mul(ref lhs, ref rhs) => lhs.eval() * rhs.eval(),
            UnitExpr::Div(ref lhs, ref rhs) => lhs.eval() / rhs.eval(),
            UnitExpr::Pow(ref expr, pow) => expr.eval().powi(pow),
        }
    }

    /// Parse a string, and generate the corresponding unit expression
    ///
    /// Tokenizes the input, converts it to reverse polish notation with the
    /// shunting-yard algorithm, then reads back a single expression. Tokens
    /// left over after that expression make the whole input malformed.
    fn parse(unit: &str) -> Result<UnitExpr, ParseError> {
        let tokens = tokenize(unit);
        let mut stream = try!(shunting_yard(tokens));
        let ast = try!(read_expr(&mut stream));
        if stream.is_empty() {
            Ok(ast)
        } else {
            let remaining = stream.iter().map(|t| t.as_str()).collect::<Vec<_>>().join(" ");
            return Err(ParseError::MalformedExpr(
                format!("remaining values after the end of the unit: {}", remaining)
            ))
        }
    }
}
/// Read and pop (recursively) a single expression from the `stream`.
/// The `stream` must be in reverse polish notation.
fn read_expr(stream: &mut Vec<Token>) -> Result<UnitExpr, ParseError> {
if let Some(token) = stream.pop() {
match token {
Token::Value(unit) => {
match FACTORS.get(&*unit) {
Some(&value) => Ok(UnitExpr::Val(value)),
None => Err(ParseError::NotFound{unit: unit})
}
},
Token::Mul => {
let rhs = try!(read_expr(stream).map_err(|err| ParseError::MalformedExpr(
format!("Error in unit at the right of '*': {}", err)
)));
let lhs = try!(read_expr(stream).map_err(|err| ParseError::MalformedExpr(
format!("Error in unit at the left of '*': {}", err)
)));
Ok(UnitExpr::Mul(Box::new(lhs), Box::new(rhs)))
},
Token::Div => {
let rhs = try!(read_expr(stream).map_err(|err| ParseError::MalformedExpr(
format!("Error in unit at the right of '/': {}", err)
)));
let lhs = try!(read_expr(stream).map_err(|err| ParseError::MalformedExpr(
format!("Error in unit at the left of '/': {}", err)
)));
Ok(UnitExpr::Div(Box::new(lhs), Box::new(rhs)))
},
Token::Pow => {
let pow = match stream.pop() {
Some(pow) => match pow {
Token::Value(value) => try!(value.parse()),
_ => return Err(ParseError::MalformedExpr(String::from(
format!("Invalid value after ^: {}", pow.as_str())
)))
},
None => return Err(ParseError::MalformedExpr(String::from(
"Missing value after '^'"
)))
};
let expr = try!(read_expr(stream).map_err(|err| ParseError::MalformedExpr(
format!("Error in unit at the left of '*': {}", err)
)));
Ok(UnitExpr::Pow(Box::new(expr), pow))
}
Token::LParen | Token::RParen => internal_error!(
"there should not be any parenthese here"
)
}
} else {
Err(ParseError::MalformedExpr(String::from("missing a value")))
}
}
/// Convert the numeric value `val` from the unit `unit` to the internal unit.
///
/// ```
/// use lumol::units;
/// let internal = units::from(10.0, "A").unwrap();
/// assert!(internal == 10.0);
/// ```
pub fn from(value: f64, unit: &str) -> Result<f64, ParseError> {
    // Evaluate the unit expression once, then scale the input by it.
    let factor = try!(UnitExpr::parse(unit)).eval();
    Ok(value * factor)
}
/// Parse the string `val` and convert it to the corresponding internal unit
///
/// ```
/// use lumol::units;
/// let internal = units::from_str("10 A").unwrap();
/// assert!(internal == 10.0);
/// ```
pub fn from_str(value: &str) -> Result<f64, ParseError> {
    // Split the input once: the first whitespace-separated field is the
    // numeric value, everything after it is the unit expression.
    let mut parts = value.split_whitespace();
    // Bug fix: the previous code indexed `[0]` into the collected fields and
    // panicked on empty or whitespace-only input. Parsing "" instead yields
    // a ParseError::Value, keeping the function panic-free.
    let number: f64 = try!(parts.next().unwrap_or("").parse());
    let unit = parts.collect::<Vec<&str>>().join(" ");
    // A missing unit means the value is already in internal units.
    let unit = if unit.is_empty() {
        UnitExpr::Val(1.0)
    } else {
        try!(UnitExpr::parse(&unit))
    };
    return Ok(unit.eval() * number);
}
/// Convert the numeric value `val` (in internal units) to the unit `unit`.
///
/// ```
/// use lumol::units;
/// let real = units::to(10.0, "A").unwrap();
/// assert!(real == 10.0);
/// ```
pub fn to(value: f64, unit: &str) -> Result<f64, ParseError> {
    // The conversion factor is the inverse of the one used by `from`.
    let factor = try!(UnitExpr::parse(unit)).eval();
    Ok(value / factor)
}
#[cfg(test)]
mod test {
    use super::*;
    use super::{tokenize, shunting_yard};
    use super::{Token, UnitExpr};
    #[test]
    fn tokens() {
        assert_eq!(tokenize("(")[0], Token::LParen);
        assert_eq!(tokenize(")")[0], Token::RParen);
        assert_eq!(tokenize("*")[0], Token::Mul);
        assert_eq!(tokenize("/")[0], Token::Div);
        assert_eq!(tokenize("^")[0], Token::Pow);
        assert_eq!(tokenize("foo")[0], Token::Value(String::from("foo")));
        assert_eq!(tokenize("45")[0], Token::Value(String::from("45")));
        // Tokenization is purely lexical: unbalanced parentheses are
        // accepted here and only rejected later by shunting_yard.
        assert_eq!(tokenize("(bar/m").len(), 4);
        // Any kind of whitespace (space, tab, newline) separates tokens.
        assert_eq!(tokenize(" ( bar\t/\n m").len(), 4);
    }
    // Helper: run tokenize + shunting_yard and render the resulting
    // reverse-polish stream as a space-separated string for comparison.
    fn ast_str(unit: &str) -> Result<String, ParseError> {
        let tokens = tokenize(unit);
        let ast = try!(shunting_yard(tokens));
        return Ok(ast.iter().map(|t| t.as_str()).collect::<Vec<_>>().join(" "));
    }
    #[test]
    fn ast() {
        assert_eq!(ast_str("").unwrap(), "");
        assert_eq!(ast_str("()").unwrap(), "");
        assert_eq!(ast_str("foo").unwrap(), "foo");
        assert_eq!(ast_str("foo*bar").unwrap(), "foo bar *");
        assert_eq!(ast_str("foo / bar").unwrap(), "foo bar /");
        assert_eq!(ast_str("foo^4").unwrap(), "foo 4 ^");
        assert_eq!(ast_str("bar/foo ^ 4").unwrap(), "bar foo 4 ^ /");
        assert_eq!(ast_str("k*bar /foo^ 4").unwrap(), "k bar * foo 4 ^ /");
    }
    #[test]
    fn ast_errors() {
        assert!(ast_str("(").is_err());
        assert!(ast_str(")").is_err());
        assert!(ast_str("(bar/m").is_err());
        assert!(ast_str("m/K)").is_err());
    }
    #[test]
    fn eval() {
        // Expected values are the conversion factors registered in FACTORS.
        assert_eq!(UnitExpr::parse("Å").unwrap(), UnitExpr::Val(1.0));
        assert_eq!(UnitExpr::parse("nm").unwrap(), UnitExpr::Val(10.0));
        assert_eq!(UnitExpr::parse("bohr/fs").unwrap().eval(), 0.52917720859);
        assert_approx_eq!(UnitExpr::parse("kcal/mol/A^2").unwrap().eval(), 4.184e-4, 1e-12);
        assert_eq!(UnitExpr::parse("(Ry / rad^-3 )").unwrap().eval(), 0.13127498789124938);
        assert_eq!(UnitExpr::parse("bar/(m * fs^2)").unwrap().eval(), 6.0221417942167636e-19);
        assert_eq!(UnitExpr::parse("kJ/mol/deg^2").unwrap().eval(), 0.3282806352310398);
    }
    #[test]
    fn parsing_errrors() {
        assert!(UnitExpr::parse("m^4-8").is_err());
        assert!(UnitExpr::parse("foo ^ bar").is_err());
        assert!(UnitExpr::parse("m^z4").is_err());
        assert!(UnitExpr::parse("HJK").is_err());
    }
    #[test]
    fn unit_from_str() {
        assert_eq!(from_str("10.0 A").unwrap(), 10.0);
        assert_eq!(from_str("10 A").unwrap(), 10.0);
        assert_eq!(from_str("1e1 A").unwrap(), 10.0);
        // A bare number uses an implicit unit factor of 1.
        assert_eq!(from_str("10").unwrap(), 10.0);
        assert!(from_str("10a.0 bar").is_err());
        assert!(from_str("h10").is_err());
    }
    #[test]
    fn unit_to() {
        assert_eq!(to(25.0, "m").unwrap(), 2.5e-9);
        assert_eq!(to(25.0, "bar").unwrap(), 4.1513469550000005e9);
        assert_eq!(to(25.0, "kJ/mol").unwrap(), 249999.99982494753);
    }
}
Fix the "temporary value used while it should have been dropped" warning
// Lumol, an extensible molecular simulation engine
// Copyright (C) 2015-2016 Lumol's contributors — BSD license
//! This module allow to convert from and to the internal unit system.
//!
//! Internal units are:
//!
//! - Angstrom (A) for distances;
//! - femtosecond (fs) for time;
//! - Unified atomic mass unit (u or Da) for mass;
//! - Kelvin (K) for temperature;
//! - Number of particles for quantity of matter;
//! - radian (rad) for angles;
//!
//! Other units are derived from these primitives units. For examples, the
//! internal unit for energy is 1e-4 kJ/mol.
use std::collections::BTreeMap;
use std::f64::consts::PI;
use std::num;
use std::fmt;
use std::error::Error;
use consts::{BOHR_RADIUS, NA};
// Atomic mass unit in kg
const U_IN_KG : f64 = 1.660538782e-27;
/// Get the conversion factors from a string unit to the internal units.
lazy_static!(
    // Each entry maps a unit name to the factor converting 1 <unit> into the
    // internal unit system (A, fs, u, K, rad — see the module docs). The
    // `assert!(...is_none())` pattern makes registering the same name twice
    // a hard failure at initialization time.
    static ref FACTORS: BTreeMap<&'static str, f64> = {
        let mut map = BTreeMap::new();
        // Distances units.
        assert!(map.insert("A", 1.0).is_none());
        assert!(map.insert("Å", 1.0).is_none());
        assert!(map.insert("nm", 10.0).is_none());
        assert!(map.insert("pm", 1e-2).is_none());
        assert!(map.insert("fm", 1e-5).is_none());
        assert!(map.insert("m", 1e10).is_none());
        assert!(map.insert("bohr", BOHR_RADIUS).is_none());
        // Time units.
        assert!(map.insert("fs", 1.0).is_none());
        assert!(map.insert("ps", 1e3).is_none());
        assert!(map.insert("ns", 1e6).is_none());
        // Mass units.
        assert!(map.insert("u", 1.0).is_none());
        assert!(map.insert("Da", 1.0).is_none());
        // NOTE(review): "kDa" maps to 1.0, same as "Da" — looks like it
        // should be 1e3; confirm against upstream before relying on it.
        assert!(map.insert("kDa", 1.0).is_none());
        assert!(map.insert("g", 1e-3 / U_IN_KG).is_none());
        assert!(map.insert("kg", 1.0 / U_IN_KG).is_none());
        // Temperature units.
        assert!(map.insert("K", 1.0).is_none());
        // Quantity of matter units.
        assert!(map.insert("mol", NA).is_none());
        // Angle units.
        assert!(map.insert("rad", 1.0).is_none());
        assert!(map.insert("deg", PI / 180.0).is_none());
        // Energy units.
        assert!(map.insert("J", 1e-10 / U_IN_KG).is_none());
        assert!(map.insert("kJ", 1e-7 / U_IN_KG).is_none());
        assert!(map.insert("kcal", 4.184 * 1e-7 / U_IN_KG).is_none());
        assert!(map.insert("eV", 1.60217653e-19 * 1e-10 / U_IN_KG).is_none());
        assert!(map.insert("H", 4.35974417e-18 * 1e-10 / U_IN_KG).is_none());
        assert!(map.insert("Ry", 4.35974417e-18 / 2.0 * 1e-10 / U_IN_KG).is_none());
        // Force unit.
        assert!(map.insert("N", 1e-20 / U_IN_KG).is_none());
        // Pressure units.
        assert!(map.insert("Pa", 1e-40 / U_IN_KG).is_none());
        assert!(map.insert("kPa", 1e-37 / U_IN_KG).is_none());
        assert!(map.insert("MPa", 1e-34 / U_IN_KG).is_none());
        assert!(map.insert("bar", 1e-35 / U_IN_KG).is_none());
        assert!(map.insert("atm", 101325.0 * 1e-40 / U_IN_KG).is_none());
        return map;
    };
);
/// Possible error causes when parsing an unit string.
#[derive(Debug)]
pub enum ParseError {
    /// Error while parsing a power in `x^y` expressions
    Power(num::ParseIntError),
    /// Error while parsing the value part of an unit string
    Value(num::ParseFloatError),
    /// Parentheses are not balanced in this unit
    ParenthesesMismatch,
    /// This unit was not found
    NotFound {
        /// The unit that created this error
        unit: String
    },
    /// Any other error
    MalformedExpr(String),
}
// These From impls let `try!` convert the std parse errors returned when
// reading exponents (i32) and numeric values (f64) into ParseError.
impl From<num::ParseIntError> for ParseError {
    fn from(err: num::ParseIntError) -> ParseError {
        ParseError::Power(err)
    }
}
impl From<num::ParseFloatError> for ParseError {
    fn from(err: num::ParseFloatError) -> ParseError {
        ParseError::Value(err)
    }
}
// User-facing messages; wrapped errors delegate to their own Display.
impl fmt::Display for ParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ParseError::Power(ref err) => err.fmt(f),
            ParseError::Value(ref err) => err.fmt(f),
            ParseError::ParenthesesMismatch => write!(f, "Parentheses are not equilibrated."),
            ParseError::NotFound{ref unit} => write!(f, "Unit '{}' not found.", unit),
            ParseError::MalformedExpr(ref err) => write!(f, "Malformed expression: {}", err),
        }
    }
}
impl Error for ParseError {
    fn description(&self) -> &str {
        match *self {
            ParseError::Power(ref err) => err.description(),
            ParseError::Value(ref err) => err.description(),
            ParseError::ParenthesesMismatch => "Parentheses are not equilibrated.",
            ParseError::NotFound{..} => "Unit not found.",
            ParseError::MalformedExpr(..) => "Malformed expression",
        }
    }
}
/// Possible tokens in unit strings
// Operators and parentheses are single characters; any other run of
// non-whitespace characters becomes a `Value` (see `tokenize`).
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Token {
    /// Left parentheses
    LParen,
    /// Right parentheses
    RParen,
    /// '*' token
    Mul,
    /// '/' token
    Div,
    /// '^' token
    Pow,
    /// Any other whitespaces separated value
    Value(String)
}
impl Token {
    /// Operator precedence used by the shunting-yard algorithm: a higher
    /// value binds tighter. Parentheses get the lowest value so they are
    /// never popped by precedence comparison.
    fn precedence(&self) -> usize {
        match *self {
            Token::Value(..) => internal_error!(
                "invalid call to UnitTok::precedence for values"
            ),
            Token::Pow => 20,
            Token::Mul | Token::Div => 10,
            Token::LParen | Token::RParen => 0,
        }
    }
    /// The exact string that `tokenize` turned into this token.
    fn as_str(&self) -> &str {
        match *self {
            Token::Value(ref value) => value,
            Token::Mul => "*",
            Token::Div => "/",
            Token::Pow => "^",
            Token::LParen => "(",
            Token::RParen => ")",
        }
    }
}
/// Transform a string to a stream of tokens.
///
/// Operator characters always terminate the value being accumulated;
/// whitespace separates values but produces no token of its own.
fn tokenize(unit: &str) -> Vec<Token> {
    let mut tokens = Vec::new();
    let mut current = String::new();
    for c in unit.chars() {
        // Map single-character operators to their token, if any.
        let operator = match c {
            '*' => Some(Token::Mul),
            '/' => Some(Token::Div),
            '^' => Some(Token::Pow),
            '(' => Some(Token::LParen),
            ')' => Some(Token::RParen),
            _ => None,
        };
        if let Some(operator) = operator {
            // Flush the pending value before emitting the operator.
            if !current.is_empty() {
                tokens.push(Token::Value(current.clone()));
                current.clear();
            }
            tokens.push(operator);
        } else if !c.is_whitespace() {
            current.push(c);
        }
        // Whitespace is silently skipped; it only delimits values.
    }
    // Emit the trailing value, if any.
    if !current.is_empty() {
        tokens.push(Token::Value(current));
    }
    return tokens;
}
static MISSING_OPERATOR: &'static str = "Oops, sorry explorator, but you felt \
in a space-time hole. We are missing an operator here";
/// Create the AST for unit expression using the Shunting-Yard algorithm.
///
/// See <https://en.wikipedia.org/wiki/Shunting-yard_algorithm> for a
/// description of the algorithm. Returns the tokens reordered in reverse
/// polish notation, or `ParseError::ParenthesesMismatch` when parentheses
/// are unbalanced.
fn shunting_yard(tokens: Vec<Token>) -> Result<Vec<Token>, ParseError> {
    // Annotating the element type up front lets the compiler resolve the
    // method calls in the loop below without the trivial `as &Token` cast
    // (and its #[allow(trivial_casts)]) that was previously needed.
    let mut operators: Vec<Token> = Vec::new();
    let mut output = Vec::new();
    for token in tokens {
        match token {
            // Values go straight to the output stream.
            Token::Value(..) => output.push(token),
            Token::Mul | Token::Div | Token::Pow => {
                while !operators.is_empty() {
                    let top_operator = operators.last().expect(MISSING_OPERATOR).clone();
                    // All the operators are left-associative
                    if token.precedence() <= top_operator.precedence() {
                        output.push(operators.pop().expect(MISSING_OPERATOR));
                    } else {
                        break;
                    }
                }
                operators.push(token);
            },
            Token::LParen => operators.push(token),
            Token::RParen => {
                // Pop operators until the matching '(' is found.
                while !operators.is_empty() && operators.last() != Some(&Token::LParen) {
                    output.push(operators.pop().expect(MISSING_OPERATOR))
                }
                if operators.is_empty() || operators.last() != Some(&Token::LParen) {
                    return Err(ParseError::ParenthesesMismatch)
                } else {
                    let _ = operators.pop();
                }
            }
        }
    }
    // Flush remaining operators; a leftover parenthesis means the input
    // was unbalanced.
    while !operators.is_empty() {
        match *operators.last().expect(MISSING_OPERATOR) {
            Token::LParen | Token::RParen => return Err(ParseError::ParenthesesMismatch),
            _ => output.push(operators.pop().expect(MISSING_OPERATOR))
        }
    }
    return Ok(output);
}
/// Possible members in unit expressions
// The tree is built by `read_expr` from the RPN stream produced by
// `shunting_yard`, and collapsed to a conversion factor by `eval`.
#[derive(Debug, PartialEq)]
enum UnitExpr {
    /// A single value
    Val(f64),
    /// Multiplication of left-hand side by right-hand side
    Mul(Box<UnitExpr>, Box<UnitExpr>),
    /// Division of left-hand side by right-hand side
    Div(Box<UnitExpr>, Box<UnitExpr>),
    /// Take the power of the expr by the `i32` value
    Pow(Box<UnitExpr>, i32),
}
impl UnitExpr {
    /// Recursively collapse this expression tree to a single conversion
    /// factor.
    fn eval(&self) -> f64 {
        match *self {
            UnitExpr::Val(value) => value,
            UnitExpr::Mul(ref left, ref right) => left.eval() * right.eval(),
            UnitExpr::Div(ref left, ref right) => left.eval() / right.eval(),
            UnitExpr::Pow(ref inner, power) => inner.eval().powi(power),
        }
    }
    /// Parse `unit` into the corresponding unit expression tree.
    ///
    /// The input is tokenized, reordered into reverse polish notation, and
    /// then folded into a single `UnitExpr`. Any tokens left over after one
    /// full expression has been read make the whole input invalid.
    fn parse(unit: &str) -> Result<UnitExpr, ParseError> {
        let mut stream = try!(shunting_yard(tokenize(unit)));
        let ast = try!(read_expr(&mut stream));
        if !stream.is_empty() {
            let leftover = stream.iter().map(|t| t.as_str()).collect::<Vec<_>>().join(" ");
            return Err(ParseError::MalformedExpr(
                format!("remaining values after the end of the unit: {}", leftover)
            ));
        }
        Ok(ast)
    }
}
/// Read and pop (recursively) a single expression from the `stream`.
/// The `stream` must be in reverse polish notation.
fn read_expr(stream: &mut Vec<Token>) -> Result<UnitExpr, ParseError> {
if let Some(token) = stream.pop() {
match token {
Token::Value(unit) => {
match FACTORS.get(&*unit) {
Some(&value) => Ok(UnitExpr::Val(value)),
None => Err(ParseError::NotFound{unit: unit})
}
},
Token::Mul => {
let rhs = try!(read_expr(stream).map_err(|err| ParseError::MalformedExpr(
format!("Error in unit at the right of '*': {}", err)
)));
let lhs = try!(read_expr(stream).map_err(|err| ParseError::MalformedExpr(
format!("Error in unit at the left of '*': {}", err)
)));
Ok(UnitExpr::Mul(Box::new(lhs), Box::new(rhs)))
},
Token::Div => {
let rhs = try!(read_expr(stream).map_err(|err| ParseError::MalformedExpr(
format!("Error in unit at the right of '/': {}", err)
)));
let lhs = try!(read_expr(stream).map_err(|err| ParseError::MalformedExpr(
format!("Error in unit at the left of '/': {}", err)
)));
Ok(UnitExpr::Div(Box::new(lhs), Box::new(rhs)))
},
Token::Pow => {
let pow = match stream.pop() {
Some(pow) => match pow {
Token::Value(value) => try!(value.parse()),
_ => return Err(ParseError::MalformedExpr(String::from(
format!("Invalid value after ^: {}", pow.as_str())
)))
},
None => return Err(ParseError::MalformedExpr(String::from(
"Missing value after '^'"
)))
};
let expr = try!(read_expr(stream).map_err(|err| ParseError::MalformedExpr(
format!("Error in unit at the left of '*': {}", err)
)));
Ok(UnitExpr::Pow(Box::new(expr), pow))
}
Token::LParen | Token::RParen => internal_error!(
"there should not be any parenthese here"
)
}
} else {
Err(ParseError::MalformedExpr(String::from("missing a value")))
}
}
/// Convert the numeric value `val` from the unit `unit` to the internal unit.
///
/// ```
/// use lumol::units;
/// let internal = units::from(10.0, "A").unwrap();
/// assert!(internal == 10.0);
/// ```
pub fn from(value: f64, unit: &str) -> Result<f64, ParseError> {
    // Evaluate the unit expression once, then scale the input by it.
    let factor = try!(UnitExpr::parse(unit)).eval();
    Ok(value * factor)
}
/// Parse the string `val` and convert it to the corresponding internal unit
///
/// ```
/// use lumol::units;
/// let internal = units::from_str("10 A").unwrap();
/// assert!(internal == 10.0);
/// ```
pub fn from_str(value: &str) -> Result<f64, ParseError> {
    // Split the input once: the first whitespace-separated field is the
    // numeric value, everything after it is the unit expression.
    let mut parts = value.split_whitespace();
    // Bug fix: the previous code indexed `[0]` into the collected fields and
    // panicked on empty or whitespace-only input. Parsing "" instead yields
    // a ParseError::Value, keeping the function panic-free.
    let number: f64 = try!(parts.next().unwrap_or("").parse());
    let unit = parts.collect::<Vec<&str>>().join(" ");
    // A missing unit means the value is already in internal units.
    let unit = if unit.is_empty() {
        UnitExpr::Val(1.0)
    } else {
        try!(UnitExpr::parse(&unit))
    };
    return Ok(unit.eval() * number);
}
/// Convert the numeric value `val` (in internal units) to the unit `unit`.
///
/// ```
/// use lumol::units;
/// let real = units::to(10.0, "A").unwrap();
/// assert!(real == 10.0);
/// ```
pub fn to(value: f64, unit: &str) -> Result<f64, ParseError> {
    // The conversion factor is the inverse of the one used by `from`.
    let factor = try!(UnitExpr::parse(unit)).eval();
    Ok(value / factor)
}
#[cfg(test)]
mod test {
    use super::*;
    use super::{tokenize, shunting_yard};
    use super::{Token, UnitExpr};
    #[test]
    fn tokens() {
        assert_eq!(tokenize("(")[0], Token::LParen);
        assert_eq!(tokenize(")")[0], Token::RParen);
        assert_eq!(tokenize("*")[0], Token::Mul);
        assert_eq!(tokenize("/")[0], Token::Div);
        assert_eq!(tokenize("^")[0], Token::Pow);
        assert_eq!(tokenize("foo")[0], Token::Value(String::from("foo")));
        assert_eq!(tokenize("45")[0], Token::Value(String::from("45")));
        // Tokenization is purely lexical: unbalanced parentheses are
        // accepted here and only rejected later by shunting_yard.
        assert_eq!(tokenize("(bar/m").len(), 4);
        // Any kind of whitespace (space, tab, newline) separates tokens.
        assert_eq!(tokenize(" ( bar\t/\n m").len(), 4);
    }
    // Helper: run tokenize + shunting_yard and render the resulting
    // reverse-polish stream as a space-separated string for comparison.
    fn ast_str(unit: &str) -> Result<String, ParseError> {
        let tokens = tokenize(unit);
        let ast = try!(shunting_yard(tokens));
        return Ok(ast.iter().map(|t| t.as_str()).collect::<Vec<_>>().join(" "));
    }
    #[test]
    fn ast() {
        assert_eq!(ast_str("").unwrap(), "");
        assert_eq!(ast_str("()").unwrap(), "");
        assert_eq!(ast_str("foo").unwrap(), "foo");
        assert_eq!(ast_str("foo*bar").unwrap(), "foo bar *");
        assert_eq!(ast_str("foo / bar").unwrap(), "foo bar /");
        assert_eq!(ast_str("foo^4").unwrap(), "foo 4 ^");
        assert_eq!(ast_str("bar/foo ^ 4").unwrap(), "bar foo 4 ^ /");
        assert_eq!(ast_str("k*bar /foo^ 4").unwrap(), "k bar * foo 4 ^ /");
    }
    #[test]
    fn ast_errors() {
        assert!(ast_str("(").is_err());
        assert!(ast_str(")").is_err());
        assert!(ast_str("(bar/m").is_err());
        assert!(ast_str("m/K)").is_err());
    }
    #[test]
    fn eval() {
        // Expected values are the conversion factors registered in FACTORS.
        assert_eq!(UnitExpr::parse("Å").unwrap(), UnitExpr::Val(1.0));
        assert_eq!(UnitExpr::parse("nm").unwrap(), UnitExpr::Val(10.0));
        assert_eq!(UnitExpr::parse("bohr/fs").unwrap().eval(), 0.52917720859);
        assert_approx_eq!(UnitExpr::parse("kcal/mol/A^2").unwrap().eval(), 4.184e-4, 1e-12);
        assert_eq!(UnitExpr::parse("(Ry / rad^-3 )").unwrap().eval(), 0.13127498789124938);
        assert_eq!(UnitExpr::parse("bar/(m * fs^2)").unwrap().eval(), 6.0221417942167636e-19);
        assert_eq!(UnitExpr::parse("kJ/mol/deg^2").unwrap().eval(), 0.3282806352310398);
    }
    #[test]
    fn parsing_errrors() {
        assert!(UnitExpr::parse("m^4-8").is_err());
        assert!(UnitExpr::parse("foo ^ bar").is_err());
        assert!(UnitExpr::parse("m^z4").is_err());
        assert!(UnitExpr::parse("HJK").is_err());
    }
    #[test]
    fn unit_from_str() {
        assert_eq!(from_str("10.0 A").unwrap(), 10.0);
        assert_eq!(from_str("10 A").unwrap(), 10.0);
        assert_eq!(from_str("1e1 A").unwrap(), 10.0);
        // A bare number uses an implicit unit factor of 1.
        assert_eq!(from_str("10").unwrap(), 10.0);
        assert!(from_str("10a.0 bar").is_err());
        assert!(from_str("h10").is_err());
    }
    #[test]
    fn unit_to() {
        assert_eq!(to(25.0, "m").unwrap(), 2.5e-9);
        assert_eq!(to(25.0, "bar").unwrap(), 4.1513469550000005e9);
        assert_eq!(to(25.0, "kJ/mol").unwrap(), 249999.99982494753);
    }
}
|
// Copyright 2022 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
cfg_if::cfg_if! {
if #[cfg(unix)] {
use std::net;
use base::RawDescriptor;
#[cfg(feature = "gpu")]
use devices::virtio::GpuDisplayParameters;
use devices::virtio::vhost::user::device::parse_wayland_sock;
#[cfg(feature = "gpu")]
use super::sys::config::parse_gpu_display_options;
use super::sys::config::{
parse_coiommu_params, VfioCommand, parse_vfio, parse_vfio_platform,
};
use super::config::SharedDir;
} else if #[cfg(windows)] {
use crate::crosvm::sys::config::IrqChipKind;
}
}
use std::collections::BTreeMap;
use std::path::PathBuf;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use arch::MsrConfig;
use arch::Pstore;
use arch::VcpuAffinity;
use argh::FromArgs;
use base::getpid;
use devices::virtio::block::block::DiskOption;
#[cfg(feature = "audio")]
use devices::virtio::snd::parameters::Parameters as SndParameters;
use devices::virtio::vhost::user::device;
#[cfg(any(feature = "video-decoder", feature = "video-encoder"))]
use devices::virtio::VideoBackendType;
#[cfg(feature = "audio")]
use devices::Ac97Parameters;
use devices::PflashParameters;
use devices::SerialHardware;
use devices::SerialParameters;
use devices::StubPciParameters;
use hypervisor::ProtectionType;
use resources::AddressRange;
use vm_control::BatteryType;
#[cfg(any(feature = "video-decoder", feature = "video-encoder"))]
use super::config::parse_video_options;
#[cfg(feature = "gpu")]
use super::sys::config::parse_gpu_options;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
use super::sys::config::parse_gpu_render_server_options;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
use super::sys::GpuRenderServerParameters;
use crate::crosvm::config::numbered_disk_option;
#[cfg(feature = "audio")]
use crate::crosvm::config::parse_ac97_options;
use crate::crosvm::config::parse_battery_options;
use crate::crosvm::config::parse_bus_id_addr;
use crate::crosvm::config::parse_cpu_affinity;
use crate::crosvm::config::parse_cpu_capacity;
use crate::crosvm::config::parse_cpu_set;
#[cfg(feature = "direct")]
use crate::crosvm::config::parse_direct_io_options;
use crate::crosvm::config::parse_file_backed_mapping;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::crosvm::config::parse_memory_region;
use crate::crosvm::config::parse_mmio_address_range;
#[cfg(feature = "direct")]
use crate::crosvm::config::parse_pcie_root_port_params;
use crate::crosvm::config::parse_pflash_parameters;
#[cfg(feature = "plugin")]
use crate::crosvm::config::parse_plugin_mount_option;
use crate::crosvm::config::parse_pstore;
use crate::crosvm::config::parse_serial_options;
use crate::crosvm::config::parse_stub_pci_parameters;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::crosvm::config::parse_userspace_msr_options;
#[cfg(feature = "plugin")]
use crate::crosvm::config::BindMount;
#[cfg(feature = "direct")]
use crate::crosvm::config::DirectIoOption;
use crate::crosvm::config::Executable;
use crate::crosvm::config::FileBackedMappingParameters;
#[cfg(feature = "plugin")]
use crate::crosvm::config::GidMap;
#[cfg(feature = "direct")]
use crate::crosvm::config::HostPcieRootPortParameters;
use crate::crosvm::config::HypervisorKind;
use crate::crosvm::config::TouchDeviceOption;
use crate::crosvm::config::VhostUserFsOption;
use crate::crosvm::config::VhostUserOption;
use crate::crosvm::config::VhostUserWlOption;
use crate::crosvm::config::VvuOption;
// NOTE(review): with argh's FromArgs derive, the `///` doc comments on these
// types and fields appear to become the CLI `--help` text — treat them as
// user-facing strings, not internal documentation; confirm before rewording.
#[derive(FromArgs)]
/// crosvm
pub struct CrosvmCmdlineArgs {
    #[argh(switch)]
    /// use extended exit status
    pub extended_status: bool,
    #[argh(option, default = r#"String::from("info")"#)]
    /// specify log level, eg "off", "error", "debug,disk=off", etc
    pub log_level: String,
    #[argh(switch)]
    /// disable output to syslog
    pub no_syslog: bool,
    #[argh(subcommand)]
    pub command: Command,
}
// Subcommands available on every platform; feature-gated entries disappear
// from the CLI when the corresponding cargo feature is disabled.
#[allow(clippy::large_enum_variant)]
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum CrossPlatformCommands {
    #[cfg(feature = "balloon")]
    Balloon(BalloonCommand),
    #[cfg(feature = "balloon")]
    BalloonStats(BalloonStatsCommand),
    Battery(BatteryCommand),
    #[cfg(feature = "composite-disk")]
    CreateComposite(CreateCompositeCommand),
    CreateQcow2(CreateQcow2Command),
    Device(DeviceCommand),
    Disk(DiskCommand),
    MakeRT(MakeRTCommand),
    Resume(ResumeCommand),
    Run(RunCommand),
    Stop(StopCommand),
    Suspend(SuspendCommand),
    Powerbtn(PowerbtnCommand),
    Sleepbtn(SleepCommand),
    Gpe(GpeCommand),
    Usb(UsbCommand),
    Version(VersionCommand),
    Vfio(VfioCrosvmCommand),
}
// Top-level command set: the cross-platform subcommands above plus the
// platform-specific ones from `super::sys`.
#[allow(clippy::large_enum_variant)]
#[derive(argh_helpers::FlattenSubcommand)]
pub enum Command {
    CrossPlatform(CrossPlatformCommands),
    Sys(super::sys::cmdline::Commands),
}
// Control subcommands: each takes the running VM's control socket path as
// its final positional `VM_SOCKET` argument.
#[derive(FromArgs)]
#[argh(subcommand, name = "balloon")]
/// Set balloon size of the crosvm instance to `SIZE` bytes
pub struct BalloonCommand {
    #[argh(positional, arg_name = "SIZE")]
    /// amount of bytes
    pub num_bytes: u64,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(argh::FromArgs)]
#[argh(subcommand, name = "balloon_stats")]
/// Prints virtio balloon statistics for a `VM_SOCKET`
pub struct BalloonStatsCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "battery")]
/// Modify battery
pub struct BatteryCommand {
    #[argh(positional, arg_name = "BATTERY_TYPE")]
    /// battery type
    pub battery_type: String,
    #[argh(positional)]
    /// battery property
    /// status | present | health | capacity | aconline
    pub property: String,
    #[argh(positional)]
    /// battery property target
    /// STATUS | PRESENT | HEALTH | CAPACITY | ACONLINE
    pub target: String,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[cfg(feature = "composite-disk")]
#[derive(FromArgs)]
#[argh(subcommand, name = "create_composite")]
/// Create a new composite disk image file
pub struct CreateCompositeCommand {
    #[argh(positional, arg_name = "PATH")]
    /// image path
    pub path: String,
    #[argh(positional, arg_name = "LABEL:PARTITION")]
    /// partitions
    pub partitions: Vec<String>,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "create_qcow2")]
/// Create Qcow2 image given path and size
pub struct CreateQcow2Command {
    #[argh(positional, arg_name = "PATH")]
    /// path to the new qcow2 file to create
    pub file_path: String,
    // `size` and `backing_file` are mutually exclusive per the help text;
    // the validation presumably happens in the command handler.
    #[argh(positional, arg_name = "SIZE")]
    /// desired size of the image in bytes; required if not using --backing-file
    pub size: Option<u64>,
    #[argh(option)]
    /// path to backing file; if specified, the image will be the same size as the backing file, and
    /// SIZE may not be specified
    pub backing_file: Option<String>,
}
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum DiskSubcommand {
    Resize(ResizeDiskSubcommand),
}
#[derive(FromArgs)]
/// resize disk
#[argh(subcommand, name = "resize")]
pub struct ResizeDiskSubcommand {
    #[argh(positional, arg_name = "DISK_INDEX")]
    /// disk index
    pub disk_index: usize,
    #[argh(positional, arg_name = "NEW_SIZE")]
    /// new disk size
    pub disk_size: u64,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "disk")]
/// Manage attached virtual disk devices
pub struct DiskCommand {
    #[argh(subcommand)]
    pub command: DiskSubcommand,
}
// One-shot control commands: each carries only the target VM's control
// socket path (plus the GPE number for `gpe`).
#[derive(FromArgs)]
#[argh(subcommand, name = "make_rt")]
/// Enables real-time vcpu priority for crosvm instances started with `--delay-rt`
pub struct MakeRTCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "resume")]
/// Resumes the crosvm instance
pub struct ResumeCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "stop")]
/// Stops crosvm instances via their control sockets
pub struct StopCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "suspend")]
/// Suspends the crosvm instance
pub struct SuspendCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "powerbtn")]
/// Triggers a power button event in the crosvm instance
pub struct PowerbtnCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "sleepbtn")]
/// Triggers a sleep button event in the crosvm instance
pub struct SleepCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "gpe")]
/// Injects a general-purpose event into the crosvm instance
pub struct GpeCommand {
    #[argh(positional)]
    /// GPE #
    pub gpe: u32,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "usb")]
/// Manage attached virtual USB devices.
pub struct UsbCommand {
    #[argh(subcommand)]
    pub command: UsbSubCommand,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "version")]
/// Show package version.
pub struct VersionCommand {}
#[derive(FromArgs)]
#[argh(subcommand, name = "add")]
/// ADD
pub struct VfioAddSubCommand {
    #[argh(positional)]
    /// path to host's vfio sysfs
    pub vfio_path: PathBuf,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "remove")]
/// REMOVE
pub struct VfioRemoveSubCommand {
    #[argh(positional)]
    /// path to host's vfio sysfs
    pub vfio_path: PathBuf,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum VfioSubCommand {
    Add(VfioAddSubCommand),
    Remove(VfioRemoveSubCommand),
}
#[derive(FromArgs)]
#[argh(subcommand, name = "vfio")]
/// add/remove host vfio pci device into guest
pub struct VfioCrosvmCommand {
    #[argh(subcommand)]
    pub command: VfioSubCommand,
}
#[derive(FromArgs)]
#[argh(subcommand, name = "device")]
/// Start a device process
pub struct DeviceCommand {
    #[argh(subcommand)]
    pub command: DeviceSubcommand,
}
#[derive(FromArgs)]
#[argh(subcommand)]
/// Cross-platform Devices
pub enum CrossPlatformDevicesCommands {
    Block(device::BlockOptions),
    Net(device::NetOptions),
}
// Device subcommands: the cross-platform ones above plus the
// platform-specific ones from `super::sys`.
#[derive(argh_helpers::FlattenSubcommand)]
pub enum DeviceSubcommand {
    CrossPlatform(CrossPlatformDevicesCommands),
    Sys(super::sys::cmdline::DevicesSubcommand),
}
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum UsbSubCommand {
    Attach(UsbAttachCommand),
    Detach(UsbDetachCommand),
    List(UsbListCommand),
}
#[derive(FromArgs)]
/// Attach usb device
#[argh(subcommand, name = "attach")]
pub struct UsbAttachCommand {
    // Tuple order follows the arg name BUS_ID:ADDR:BUS_NUM:DEV_NUM;
    // see parse_bus_id_addr for the exact field semantics.
    #[argh(
        positional,
        arg_name = "BUS_ID:ADDR:BUS_NUM:DEV_NUM",
        from_str_fn(parse_bus_id_addr)
    )]
    pub addr: (u8, u8, u16, u16),
    #[argh(positional)]
    /// usb device path
    pub dev_path: String,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
/// Detach usb device
#[argh(subcommand, name = "detach")]
pub struct UsbDetachCommand {
    #[argh(positional, arg_name = "PORT")]
    /// usb port
    pub port: u8,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
#[derive(FromArgs)]
// Bug fix: the doc comment (which argh renders as the subcommand help text)
// said "Detach usb device" — a copy-paste from UsbDetachCommand — even
// though this is the `list` subcommand.
/// List attached usb devices
#[argh(subcommand, name = "list")]
pub struct UsbListCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
/// Start a new crosvm instance
#[remain::sorted]
#[argh_helpers::pad_description_for_argh]
#[derive(FromArgs)]
#[argh(subcommand, name = "run")]
pub struct RunCommand {
    #[cfg(feature = "audio")]
    #[argh(
        option,
        from_str_fn(parse_ac97_options),
        arg_name = "[backend=BACKEND,capture=true,capture_effect=EFFECT,client_type=TYPE,shm-fd=FD,client-fd=FD,server-fd=FD]"
    )]
    /// comma separated key=value pairs for setting up Ac97 devices.
    /// Can be given more than once.
    /// Possible key values:
    /// backend=(null, cras, vios) - Where to route the audio
    /// device. If not provided, backend will default to
    /// null. `null` for /dev/null, cras for CRAS server
    /// and vios for VioS server.
    /// capture - Enable audio capture
    /// capture_effects - | separated effects to be enabled for
    /// recording. The only supported effect value now is
    /// EchoCancellation or aec.
    /// client_type - Set specific client type for cras backend.
    /// socket_type - Set specific socket type for cras backend.
    /// server - The path to the VIOS server (unix socket)
    pub ac97: Vec<Ac97Parameters>,
    #[argh(option, long = "acpi-table", arg_name = "PATH")]
    /// path to user provided ACPI table
    pub acpi_tables: Vec<PathBuf>,
    #[argh(option)]
    /// path to Android fstab
    pub android_fstab: Option<PathBuf>,
    #[argh(option, arg_name = "N", long = "balloon-bias-mib")]
    /// amount to bias balance of memory between host and guest as the balloon inflates, in mib.
    pub balloon_bias: Option<i64>,
    #[argh(option, arg_name = "PATH")]
    /// path for balloon controller socket.
    pub balloon_control: Option<PathBuf>,
    #[argh(option, from_str_fn(parse_battery_options))]
    /// comma separated key=value pairs for setting up battery
    /// device
    /// Possible key values:
    /// type=goldfish - type of battery emulation, defaults to
    /// goldfish
    pub battery: Option<BatteryType>,
    #[argh(option)]
    /// path to BIOS/firmware ROM
    pub bios: Option<PathBuf>,
    #[argh(option, arg_name = "CID")]
    /// context ID for virtual sockets.
    pub cid: Option<u64>,
    #[cfg(unix)]
    #[argh(
        option,
        arg_name = "unpin_policy=POLICY,unpin_interval=NUM,unpin_limit=NUM,unpin_gen_threshold=NUM",
        from_str_fn(parse_coiommu_params)
    )]
    /// comma separated key=value pairs for setting up coiommu
    /// devices.
    /// Possible key values:
    /// unpin_policy=lru - LRU unpin policy.
    /// unpin_interval=NUM - Unpin interval time in seconds.
    /// unpin_limit=NUM - Unpin limit for each unpin cycle, in
    /// unit of page count. 0 is invalid.
    /// unpin_gen_threshold=NUM - Number of unpin intervals a
    /// pinned page must be busy for to be aged into the
    /// older which is less frequently checked generation.
    pub coiommu: Option<devices::CoIommuParameters>,
    #[argh(
        option,
        arg_name = "CPU=CAP[,CPU=CAP[,...]]",
        from_str_fn(parse_cpu_capacity)
    )]
    /// set the relative capacity of the given CPU (default: no capacity)
    pub cpu_capacity: Option<BTreeMap<usize, u32>>, // CPU index -> capacity
    #[argh(
        option,
        long = "cpu-cluster",
        arg_name = "CPUSET",
        from_str_fn(parse_cpu_set)
    )]
    /// group the given CPUs into a cluster (default: no clusters)
    pub cpu_clusters: Vec<Vec<usize>>,
    #[cfg(feature = "audio_cras")]
    #[argh(
        option,
        arg_name = "[capture=true,client=crosvm,socket=unified,\
        num_output_devices=1,num_input_devices=1,num_output_streams=1,num_input_streams=1]",
        long = "cras-snd"
    )]
    /// comma separated key=value pairs for setting up virtio snd
    /// devices.
    /// Possible key values:
    /// capture=(false,true) - Disable/enable audio capture.
    /// Default is false.
    /// client_type=(crosvm,arcvm,borealis) - Set specific
    /// client type for cras backend. Default is crosvm.
    /// socket_type=(legacy,unified) - Set specific socket type
    /// for cras backend. Default is unified.
    /// num_output_devices=INT - Set number of output PCM
    /// devices.
    /// num_input_devices=INT - Set number of input PCM devices.
    /// num_output_streams=INT - Set number of output PCM
    /// streams per device.
    /// num_input_streams=INT - Set number of input PCM streams
    /// per device.
    pub cras_snds: Vec<SndParameters>,
    #[cfg(feature = "crash-report")]
    #[argh(option, long = "crash-pipe-name", arg_name = "\\\\.\\pipe\\PIPE_NAME")]
    /// the crash handler ipc pipe name.
    pub crash_pipe_name: Option<String>,
    #[argh(switch)]
    /// don't set VCPUs real-time until make-rt command is run
    pub delay_rt: bool,
    #[cfg(feature = "direct")]
    #[argh(option, arg_name = "irq")]
    /// enable interrupt passthrough
    pub direct_edge_irq: Vec<u32>,
    #[cfg(feature = "direct")]
    #[argh(option, arg_name = "gpe")]
    /// enable GPE interrupt and register access passthrough
    pub direct_gpe: Vec<u32>,
    #[cfg(feature = "direct")]
    #[argh(option, arg_name = "irq")]
    /// enable interrupt passthrough
    pub direct_level_irq: Vec<u32>,
    #[cfg(feature = "direct")]
    #[argh(
        option,
        arg_name = "PATH@RANGE[,RANGE[,...]]",
        from_str_fn(parse_direct_io_options)
    )]
    /// path and ranges for direct memory mapped I/O access. RANGE may be decimal or hex (starting with 0x)
    pub direct_mmio: Option<DirectIoOption>,
    #[cfg(feature = "direct")]
    #[argh(
        option,
        arg_name = "PATH@RANGE[,RANGE[,...]]",
        from_str_fn(parse_direct_io_options)
    )]
    /// path and ranges for direct port mapped I/O access. RANGE may be decimal or hex (starting with 0x)
    pub direct_pmio: Option<DirectIoOption>,
    #[argh(switch)]
    /// run all devices in one, non-sandboxed process
    pub disable_sandbox: bool,
    #[argh(switch)]
    /// disable INTx in virtio devices
    pub disable_virtio_intx: bool,
    #[argh(
        option,
        short = 'd',
        long = "disk",
        arg_name = "PATH[,key=value[,key=value[,...]]",
        from_str_fn(numbered_disk_option)
    )]
    /// path to a disk image followed by optional comma-separated
    /// options.
    /// Valid keys:
    /// sparse=BOOL - Indicates whether the disk should support
    /// the discard operation (default: true)
    /// block_size=BYTES - Set the reported block size of the
    /// disk (default: 512)
    /// id=STRING - Set the block device identifier to an ASCII
    /// string, up to 20 characters (default: no ID)
    /// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
    pub disks: Vec<(usize, DiskOption)>,
    #[argh(switch)]
    /// capture keyboard input from the display window
    pub display_window_keyboard: bool,
    #[argh(switch)]
    /// capture mouse input from the display window
    pub display_window_mouse: bool,
    #[argh(option, long = "dmi", arg_name = "DIR")]
    /// directory with smbios_entry_point/DMI files
    pub dmi_path: Option<PathBuf>,
    #[argh(switch)]
    /// expose Power and Performance (PnP) data to guest and guest can show these PnP data
    pub enable_pnp_data: bool,
    #[argh(positional, arg_name = "KERNEL")]
    /// bzImage of kernel to run
    pub executable_path: Option<PathBuf>,
    #[cfg(windows)]
    #[argh(switch, long = "exit-stats")]
    /// gather and display statistics on Vm Exits and Bus Reads/Writes.
    pub exit_stats: bool,
    #[argh(
        option,
        long = "file-backed-mapping",
        arg_name = "addr=NUM,size=SIZE,path=PATH[,offset=NUM][,ro][,rw][,sync]",
        from_str_fn(parse_file_backed_mapping)
    )]
    /// map the given file into guest memory at the specified
    /// address.
    /// Parameters (addr, size, path are required):
    /// addr=NUM - guest physical address to map at
    /// size=NUM - amount of memory to map
    /// path=PATH - path to backing file/device to map
    /// offset=NUM - offset in backing file (default 0)
    /// ro - make the mapping readonly (default)
    /// rw - make the mapping writable
    /// sync - open backing file with O_SYNC
    /// align - whether to adjust addr and size to page
    /// boundaries implicitly
    pub file_backed_mappings: Vec<FileBackedMappingParameters>,
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    #[argh(switch)]
    /// force use of a calibrated TSC cpuid leaf (0x15) even if the hypervisor
    /// doesn't require one.
    pub force_calibrated_tsc_leaf: bool,
    #[cfg(all(target_arch = "x86_64", feature = "gdb"))]
    #[argh(option, arg_name = "PORT")]
    /// (EXPERIMENTAL) gdb on the given port
    pub gdb: Option<u32>,
    #[cfg(feature = "gpu")]
    #[argh(
        option,
        arg_name = "[width=INT,height=INT]",
        from_str_fn(parse_gpu_display_options)
    )]
    /// (EXPERIMENTAL) Comma separated key=value pairs for setting
    /// up a display on the virtio-gpu device
    /// Possible key values:
    /// width=INT - The width of the virtual display connected
    /// to the virtio-gpu.
    /// height=INT - The height of the virtual display
    /// connected to the virtio-gpu
    #[cfg(unix)]
    pub gpu_display: Vec<GpuDisplayParameters>,
    #[cfg(feature = "gpu")]
    #[argh(option, long = "gpu", from_str_fn(parse_gpu_options))]
    /// (EXPERIMENTAL) Comma separated key=value pairs for setting
    /// up a virtio-gpu device
    /// Possible key values:
    /// backend=(2d|virglrenderer|gfxstream) - Which backend to
    /// use for virtio-gpu (determining rendering protocol)
    /// context-types=LIST - The list of supported context
    /// types, separated by ':' (default: no contexts enabled)
    /// width=INT - The width of the virtual display connected
    /// to the virtio-gpu.
    /// height=INT - The height of the virtual display
    /// connected to the virtio-gpu.
    /// egl[=true|=false] - If the backend should use a EGL
    /// context for rendering.
    /// glx[=true|=false] - If the backend should use a GLX
    /// context for rendering.
    /// surfaceless[=true|=false] - If the backend should use a
    /// surfaceless context for rendering.
    /// angle[=true|=false] - If the gfxstream backend should
    /// use ANGLE (OpenGL on Vulkan) as its native OpenGL
    /// driver.
    /// vulkan[=true|=false] - If the backend should support
    /// vulkan
    /// wsi=vk - If the gfxstream backend should use the Vulkan
    /// swapchain to draw on a window
    /// cache-path=PATH - The path to the virtio-gpu device
    /// shader cache.
    /// cache-size=SIZE - The maximum size of the shader cache.
    /// pci-bar-size=SIZE - The size for the PCI BAR in bytes
    /// (default 8gb).
    pub gpu_params: Option<devices::virtio::GpuParameters>,
    #[cfg(all(unix, feature = "gpu", feature = "virgl_renderer_next"))]
    #[argh(option, from_str_fn(parse_gpu_render_server_options))]
    /// (EXPERIMENTAL) Comma separated key=value pairs for setting
    /// up a render server for the virtio-gpu device
    /// Possible key values:
    /// path=PATH - The path to the render server executable.
    /// cache-path=PATH - The path to the render server shader
    /// cache.
    /// cache-size=SIZE - The maximum size of the shader cache
    pub gpu_render_server: Option<GpuRenderServerParameters>,
    #[argh(switch)]
    /// use mirror cpu topology of Host for Guest VM, also copy some cpu feature to Guest VM
    pub host_cpu_topology: bool,
    #[cfg(windows)]
    #[argh(option, long = "host-guid", arg_name = "PATH")]
    /// string representation of the host guid in registry format, for namespacing vsock connections.
    pub host_guid: Option<String>,
    #[cfg(unix)]
    #[argh(option, arg_name = "IP")]
    /// IP address to assign to host tap interface
    pub host_ip: Option<net::Ipv4Addr>,
    #[argh(switch)]
    /// advise the kernel to use Huge Pages for guest memory mappings
    pub hugepages: bool,
    /// hypervisor backend
    #[argh(option)]
    pub hypervisor: Option<HypervisorKind>,
    #[argh(option, long = "init-mem", arg_name = "N")]
    /// amount of guest memory outside the balloon at boot in MiB. (default: --mem)
    pub init_memory: Option<u64>,
    #[argh(option, short = 'i', long = "initrd", arg_name = "PATH")]
    /// initial ramdisk to load
    pub initrd_path: Option<PathBuf>,
    #[cfg(windows)]
    #[argh(option, long = "irqchip", arg_name = "kernel|split|userspace")]
    /// type of interrupt controller emulation. \"split\" is only available for x86 KVM.
    pub irq_chip: Option<IrqChipKind>,
    #[argh(switch)]
    /// allow to enable ITMT scheduling feature in VM. The success of enabling depends on HWP and ACPI CPPC support on hardware
    pub itmt: bool,
    #[cfg(windows)]
    #[argh(option, long = "kernel-log-file", arg_name = "PATH")]
    /// forward hypervisor kernel driver logs for this VM to a file.
    pub kernel_log_file: Option<String>,
    #[cfg(unix)]
    #[argh(option, long = "kvm-device", arg_name = "PATH")]
    /// path to the KVM device. (default /dev/kvm)
    pub kvm_device_path: Option<PathBuf>,
    #[cfg(unix)]
    #[argh(switch)]
    /// disable host swap on guest VM pages.
    pub lock_guest_memory: bool,
    #[cfg(windows)]
    #[argh(option, long = "log-file", arg_name = "PATH")]
    /// redirect logs to the supplied log file at PATH rather than stderr. For multi-process mode, use --logs-directory instead
    pub log_file: Option<String>,
    #[cfg(windows)]
    #[argh(option, long = "logs-directory", arg_name = "PATH")]
    /// path to the logs directory used for crosvm processes. Logs will be sent to stderr if unset, and stderr/stdout will be uncaptured
    pub logs_directory: Option<String>,
    #[cfg(unix)]
    #[argh(option, arg_name = "MAC", long = "mac")]
    /// MAC address for VM
    pub mac_address: Option<net_util::MacAddress>,
    #[argh(option, long = "mem", short = 'm', arg_name = "N")]
    /// amount of guest memory in MiB. (default: 256)
    pub memory: Option<u64>,
    #[argh(
        option,
        long = "mmio-address-range",
        from_str_fn(parse_mmio_address_range)
    )]
    /// MMIO address ranges
    pub mmio_address_ranges: Option<Vec<AddressRange>>,
    #[cfg(unix)]
    #[argh(option, arg_name = "N")]
    /// virtio net virtual queue pairs. (default: 1)
    pub net_vq_pairs: Option<u16>,
    #[cfg(unix)]
    #[argh(option, arg_name = "NETMASK")]
    /// netmask for VM subnet
    pub netmask: Option<net::Ipv4Addr>,
    #[argh(switch)]
    /// don't use virtio-balloon device in the guest
    pub no_balloon: bool,
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    #[argh(switch)]
    /// don't use legacy KBD devices emulation
    pub no_i8042: bool,
    #[argh(switch)]
    /// don't create RNG device in the guest
    pub no_rng: bool,
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    #[argh(switch)]
    /// don't use legacy RTC devices emulation
    pub no_rtc: bool,
    #[argh(switch)]
    /// don't use SMT in the guest
    pub no_smt: bool,
    #[argh(switch)]
    /// don't use usb devices in the guest
    pub no_usb: bool,
    #[argh(option, short = 'p', arg_name = "PARAMS")]
    /// extra kernel or plugin command line arguments. Can be given more than once
    pub params: Vec<String>,
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    #[argh(option, long = "pci-start", arg_name = "pci_low_mmio_start")]
    /// the pci mmio start address below 4G
    pub pci_low_start: Option<u64>,
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    #[argh(
        option,
        arg_name = "mmio_base,mmio_length",
        from_str_fn(parse_memory_region)
    )]
    /// region for PCIe Enhanced Configuration Access Mechanism
    pub pcie_ecam: Option<AddressRange>,
    #[cfg(feature = "direct")]
    #[argh(
        option,
        long = "pcie-root-port",
        arg_name = "PATH[,hp_gpe=NUM]",
        from_str_fn(parse_pcie_root_port_params)
    )]
    /// path to sysfs of host pcie root port and host pcie root port hotplug gpe number
    pub pcie_rp: Vec<HostPcieRootPortParameters>,
    #[argh(switch)]
    /// enable per-VM core scheduling instead of the default one (per-vCPU core scheduling) by
    /// making all vCPU threads share same cookie for core scheduling.
    /// This option is no-op on devices that have neither MDS nor L1TF vulnerability
    pub per_vm_core_scheduling: bool,
    #[argh(
        option,
        long = "pflash",
        arg_name = "path=PATH,[block_size=SIZE]",
        from_str_fn(parse_pflash_parameters)
    )]
    /// comma-separated key-value pair for setting up the pflash device, which provides space to store UEFI variables.
    /// block_size defaults to 4K.
    /// [--pflash <path=PATH,[block_size=SIZE]>]
    pub pflash_parameters: Option<PflashParameters>,
    #[argh(option, arg_name = "PATH")]
    /// path to empty directory to use for sandbox pivot root
    pub pivot_root: Option<PathBuf>,
    #[cfg(feature = "plugin")]
    #[argh(option, arg_name = "PATH")]
    /// absolute path to plugin process to run under crosvm
    pub plugin: Option<PathBuf>,
    #[cfg(feature = "plugin")]
    #[argh(option)]
    /// path to the file listing supplemental GIDs that should be mapped in plugin jail. Can be given more than once
    pub plugin_gid_map_file: Option<PathBuf>,
    #[cfg(feature = "plugin")]
    #[argh(option, long = "plugin-gid-map", arg_name = "GID:GID:INT")]
    /// supplemental GIDs that should be mapped in plugin jail. Can be given more than once
    pub plugin_gid_maps: Vec<GidMap>,
    #[cfg(feature = "plugin")]
    #[argh(option, arg_name = "PATH")]
    /// path to the file listing paths be mounted into the plugin's root filesystem. Can be given more than once
    pub plugin_mount_file: Option<PathBuf>,
    #[cfg(feature = "plugin")]
    #[argh(option, long = "plugin-mount", arg_name = "PATH:PATH:BOOL")]
    /// path to be mounted into the plugin's root filesystem. Can be given more than once
    pub plugin_mounts: Vec<BindMount>,
    #[cfg(feature = "plugin")]
    #[argh(option, arg_name = "PATH")]
    /// absolute path to a directory that will become root filesystem for the plugin process.
    pub plugin_root: Option<PathBuf>,
    #[argh(option, long = "pmem-device", arg_name = "PATH")]
    /// path to a disk image
    pub pmem_devices: Vec<DiskOption>,
    #[argh(switch)]
    /// grant this Guest VM certain privileges to manage Host resources, such as power management
    pub privileged_vm: bool,
    #[cfg(feature = "process-invariants")]
    #[argh(option, long = "process-invariants-handle", arg_name = "PATH")]
    /// shared read-only memory address for a serialized EmulatorProcessInvariants proto
    pub process_invariants_data_handle: Option<u64>,
    #[cfg(feature = "process-invariants")]
    #[argh(option, long = "process-invariants-size", arg_name = "PATH")]
    /// size of the serialized EmulatorProcessInvariants proto pointed at by process-invariants-handle
    pub process_invariants_data_size: Option<usize>,
    #[cfg(windows)]
    #[argh(option, long = "product-channel")]
    /// product channel
    pub product_channel: Option<String>,
    #[cfg(windows)]
    #[argh(option, long = "product-name")]
    /// the product name for file paths.
    pub product_name: Option<String>,
    #[cfg(windows)]
    #[argh(option, long = "product-version")]
    /// product version
    pub product_version: Option<String>,
    #[argh(switch)]
    /// prevent host access to guest memory
    pub protected_vm: bool,
    #[argh(switch)]
    /// (EXPERIMENTAL) prevent host access to guest memory, but don't use protected VM firmware
    protected_vm_without_firmware: bool,
    #[argh(option, arg_name = "path=PATH,size=SIZE", from_str_fn(parse_pstore))]
    /// path to pstore buffer backend file followed by size
    /// [--pstore <path=PATH,size=SIZE>]
    pub pstore: Option<Pstore>,
    #[cfg(windows)]
    #[argh(switch)]
    /// enable virtio-pvclock.
    pub pvclock: bool,
    // Must be `Some` iff `protected_vm == ProtectionType::UnprotectedWithFirmware`.
    #[argh(option, long = "unprotected-vm-with-firmware", arg_name = "PATH")]
    /// (EXPERIMENTAL/FOR DEBUGGING) Use VM firmware, but allow host access to guest memory
    pub pvm_fw: Option<PathBuf>,
    #[argh(
        option,
        arg_name = "PATH[,key=value[,key=value[,...]]",
        short = 'r',
        from_str_fn(numbered_disk_option)
    )]
    /// path to a disk image followed by optional comma-separated
    /// options.
    /// Valid keys:
    /// sparse=BOOL - Indicates whether the disk should support
    /// the discard operation (default: true)
    /// block_size=BYTES - Set the reported block size of the
    /// disk (default: 512)
    /// id=STRING - Set the block device identifier to an ASCII
    /// string, up to 20 characters (default: no ID)
    /// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
    root: Option<(usize, DiskOption)>,
    #[argh(option, arg_name = "CPUSET", from_str_fn(parse_cpu_set))]
    /// comma-separated list of CPUs or CPU ranges to run VCPUs on. (e.g. 0,1-3,5) (default: none)
    pub rt_cpus: Option<Vec<usize>>,
    #[argh(option, long = "rw-pmem-device", arg_name = "PATH")]
    /// path to a writable disk image
    rw_pmem_devices: Vec<DiskOption>,
    #[argh(
        option,
        long = "rwdisk",
        arg_name = "PATH[,key=value[,key=value[,...]]",
        from_str_fn(numbered_disk_option)
    )]
    /// path to a read-write disk image followed by optional
    /// comma-separated options.
    /// Valid keys:
    /// sparse=BOOL - Indicates whether the disk should support
    /// the discard operation (default: true)
    /// block_size=BYTES - Set the reported block size of the
    /// disk (default: 512)
    /// id=STRING - Set the block device identifier to an ASCII
    /// string, up to 20 characters (default: no ID)
    /// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
    rwdisks: Vec<(usize, DiskOption)>,
    #[argh(
        option,
        arg_name = "PATH[,key=value[,key=value[,...]]",
        from_str_fn(numbered_disk_option)
    )]
    /// path to a read-write root disk image followed by optional
    /// comma-separated options.
    /// Valid keys:
    /// sparse=BOOL - Indicates whether the disk should support
    /// the discard operation (default: true)
    /// block_size=BYTES - Set the reported block size of the
    /// disk (default: 512)
    /// id=STRING - Set the block device identifier to an ASCII
    /// string, up to 20 characters (default: no ID)
    /// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
    rwroot: Option<(usize, DiskOption)>,
    #[argh(switch)]
    /// set Low Power S0 Idle Capable Flag for guest Fixed ACPI
    /// Description Table, additionally use enhanced crosvm suspend and resume
    /// routines to perform full guest suspension/resumption
    pub s2idle: bool,
    #[cfg(unix)]
    #[argh(switch)]
    /// instead of seccomp filter failures being fatal, they will be logged instead
    pub seccomp_log_failures: bool,
    #[cfg(unix)]
    #[argh(option, arg_name = "PATH")]
    /// path to seccomp .policy files
    pub seccomp_policy_dir: Option<PathBuf>,
    #[argh(
        option,
        long = "serial",
        arg_name = "type=TYPE,[hardware=HW,num=NUM,path=PATH,input=PATH,console,earlycon,stdin]",
        from_str_fn(parse_serial_options)
    )]
    /// comma separated key=value pairs for setting up serial
    /// devices. Can be given more than once.
    /// Possible key values:
    /// type=(stdout,syslog,sink,file) - Where to route the
    /// serial device
    /// hardware=(serial,virtio-console) - Which type of serial
    /// hardware to emulate. Defaults to 8250 UART (serial).
    /// num=(1,2,3,4) - Serial Device Number. If not provided,
    /// num will default to 1.
    /// path=PATH - The path to the file to write to when
    /// type=file
    /// input=PATH - The path to the file to read from when not
    /// stdin
    /// console - Use this serial device as the guest console.
    /// Can only be given once. Will default to first
    /// serial port if not provided.
    /// earlycon - Use this serial device as the early console.
    /// Can only be given once.
    /// stdin - Direct standard input to this serial device.
    /// Can only be given once. Will default to first serial
    /// port if not provided.
    pub serial_parameters: Vec<SerialParameters>,
    #[cfg(feature = "kiwi")]
    #[argh(option, long = "service-pipe-name", arg_name = "PIPE_NAME")]
    /// the service ipc pipe name. (Prefix \\\\.\\pipe\\ not needed.)
    pub service_pipe_name: Option<String>,
    #[cfg(unix)]
    #[argh(
        option,
        long = "shared-dir",
        arg_name = "PATH:TAG[:type=TYPE:writeback=BOOL:timeout=SECONDS:uidmap=UIDMAP:gidmap=GIDMAP:cache=CACHE:dax=BOOL,posix_acl=BOOL]"
    )]
    /// colon-separated options for configuring a directory to be
    /// shared with the VM. The first field is the directory to be
    /// shared and the second field is the tag that the VM can use
    /// to identify the device. The remaining fields are key=value
    /// pairs that may appear in any order.
    /// Valid keys are:
    /// type=(p9, fs) - Indicates whether the directory should
    /// be shared via virtio-9p or virtio-fs (default: p9).
    /// uidmap=UIDMAP - The uid map to use for the device's
    /// jail in the format "inner outer
    /// count[,inner outer count]"
    /// (default: 0 <current euid> 1).
    /// gidmap=GIDMAP - The gid map to use for the device's
    /// jail in the format "inner outer
    /// count[,inner outer count]"
    /// (default: 0 <current egid> 1).
    /// cache=(never, auto, always) - Indicates whether the VM
    /// can cache the contents of the shared directory
    /// (default: auto). When set to "auto" and the type
    /// is "fs", the VM will use close-to-open consistency
    /// for file contents.
    /// timeout=SECONDS - How long the VM should consider file
    /// attributes and directory entries to be valid
    /// (default: 5). If the VM has exclusive access to the
    /// directory, then this should be a large value. If
    /// the directory can be modified by other processes,
    /// then this should be 0.
    /// writeback=BOOL - Enables writeback caching
    /// (default: false). This is only safe to do when the
    /// VM has exclusive access to the files in a directory.
    /// Additionally, the server should have read
    /// permission for all files as the VM may issue read
    /// requests even for files that are opened write-only.
    /// dax=BOOL - Enables DAX support. Enabling DAX can
    /// improve performance for frequently accessed files
    /// by mapping regions of the file directly into the
    /// VM's memory. There is a cost of slightly increased
    /// latency the first time the file is accessed. Since
    /// the mapping is shared directly from the host kernel's
    /// file cache, enabling DAX can improve performance even
    /// when the guest cache policy is "Never". The default
    /// value for this option is "false".
    /// posix_acl=BOOL - Indicates whether the shared directory
    /// supports POSIX ACLs. This should only be enabled
    /// when the underlying file system supports POSIX ACLs.
    /// The default value for this option is "true".
    pub shared_dirs: Vec<SharedDir>,
    #[cfg(feature = "slirp-ring-capture")]
    #[argh(option, long = "slirp-capture-file", arg_name = "PATH")]
    /// Redirects slirp network packets to the supplied log file rather than the current directory as `slirp_capture_packets.pcap`
    pub slirp_capture_file: Option<String>,
    #[argh(option, short = 's', long = "socket", arg_name = "PATH")]
    /// path to put the control socket. If PATH is a directory, a name will be generated
    pub socket_path: Option<PathBuf>,
    #[cfg(feature = "tpm")]
    #[argh(switch)]
    /// enable a software emulated trusted platform module device
    pub software_tpm: bool,
    #[cfg(feature = "audio")]
    #[argh(option, arg_name = "PATH")]
    /// path to the VioS server socket for setting up virtio-snd devices
    pub sound: Option<PathBuf>,
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    #[argh(switch)]
    /// (EXPERIMENTAL) enable split-irqchip support
    pub split_irqchip: bool,
    #[argh(switch)]
    /// don't allow guest to use pages from the balloon
    pub strict_balloon: bool,
    #[argh(
        option,
        long = "stub-pci-device",
        arg_name = "DOMAIN:BUS:DEVICE.FUNCTION[,vendor=NUM][,device=NUM][,class=NUM][,subsystem_vendor=NUM][,subsystem_device=NUM][,revision=NUM]",
        from_str_fn(parse_stub_pci_parameters)
    )]
    /// comma-separated key=value pairs for setting up a stub PCI
    /// device that just enumerates. The first option in the list
    /// must specify a PCI address to claim.
    /// Optional further parameters
    /// vendor=NUM - PCI vendor ID
    /// device=NUM - PCI device ID
    /// class=NUM - PCI class (including class code, subclass,
    /// and programming interface)
    /// subsystem_vendor=NUM - PCI subsystem vendor ID
    /// subsystem_device=NUM - PCI subsystem device ID
    /// revision=NUM - revision
    pub stub_pci_devices: Vec<StubPciParameters>,
    #[argh(option, arg_name = "N")]
    /// (EXPERIMENTAL) Size of virtio swiotlb buffer in MiB (default: 64 if `--protected-vm` or `--protected-vm-without-firmware` is present)
    pub swiotlb: Option<u64>,
    #[argh(option, arg_name = "TAG")]
    /// when logging to syslog, use the provided tag
    pub syslog_tag: Option<String>,
    #[cfg(unix)]
    #[argh(option)]
    /// file descriptor for configured tap device. A different virtual network card will be added each time this argument is given
    pub tap_fd: Vec<RawDescriptor>,
    #[cfg(unix)]
    #[argh(option)]
    /// name of a configured persistent TAP interface to use for networking. A different virtual network card will be added each time this argument is given
    pub tap_name: Vec<String>,
    #[cfg(target_os = "android")]
    #[argh(option, arg_name = "NAME[,...]")]
    /// comma-separated names of the task profiles to apply to all threads in crosvm including the vCPU threads
    pub task_profiles: Vec<String>,
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    #[argh(
        option,
        arg_name = "INDEX,type=TYPE,action=ACTION,[from=FROM],[filter=FILTER]",
        from_str_fn(parse_userspace_msr_options)
    )]
    /// userspace MSR handling. Takes INDEX of the MSR and how they
    /// are handled.
    /// type=(r|w|rw|wr) - read/write permission control.
    /// action=(pass|emu) - if the control of msr is effective
    /// on host.
    /// from=(cpu0) - source of msr value. if not set, the
    /// source is running CPU.
    /// filter=(yes|no) - if the msr is filtered in KVM.
    pub userspace_msr: Vec<(u32, MsrConfig)>,
    #[argh(
        option,
        long = "cpu-affinity",
        arg_name = "CPUSET",
        from_str_fn(parse_cpu_affinity)
    )]
    /// comma-separated list of CPUs or CPU ranges to run VCPUs on (e.g. 0,1-3,5)
    /// or colon-separated list of assignments of guest to host CPU assignments (e.g. 0=0:1=1:2=2) (default: no mask)
    pub vcpu_affinity: Option<VcpuAffinity>,
    #[argh(option, arg_name = "PATH")]
    /// move all vCPU threads to this CGroup (default: nothing moves)
    pub vcpu_cgroup_path: Option<PathBuf>,
    #[argh(option, long = "cpus", short = 'c')]
    /// number of VCPUs. (default: 1)
    pub vcpu_count: Option<usize>,
    #[cfg(unix)]
    #[argh(
        option,
        arg_name = "PATH[,guest-address=auto|<BUS:DEVICE.FUNCTION>][,iommu=on|off]",
        from_str_fn(parse_vfio)
    )]
    /// path to sysfs of PCI pass through or mdev device.
    /// guest-address=auto|<BUS:DEVICE.FUNCTION> - PCI address
    /// that the device will be assigned in the guest
    /// (default: auto). When set to "auto", the device will
    /// be assigned an address that mirrors its address in
    /// the host.
    /// iommu=on|off - indicates whether to enable virtio IOMMU
    /// for this device
    pub vfio: Vec<VfioCommand>,
    #[cfg(unix)]
    #[argh(option, arg_name = "PATH", from_str_fn(parse_vfio_platform))]
    /// path to sysfs of platform pass through
    pub vfio_platform: Vec<VfioCommand>,
    #[argh(switch)]
    /// use vhost for networking
    pub vhost_net: bool,
    #[cfg(unix)]
    #[argh(option, long = "vhost-net-device", arg_name = "PATH")]
    /// path to the vhost-net device. (default /dev/vhost-net)
    pub vhost_net_device_path: Option<PathBuf>,
    #[argh(option, arg_name = "SOCKET_PATH")]
    /// path to a socket for vhost-user block
    pub vhost_user_blk: Vec<VhostUserOption>,
    #[argh(option, arg_name = "SOCKET_PATH")]
    /// path to a socket for vhost-user console
    pub vhost_user_console: Vec<VhostUserOption>,
    #[argh(option, arg_name = "SOCKET_PATH:TAG")]
    /// path to a socket path for vhost-user fs, and tag for the shared dir
    pub vhost_user_fs: Vec<VhostUserFsOption>,
    #[argh(option, arg_name = "SOCKET_PATH")]
    /// paths to a vhost-user socket for gpu
    pub vhost_user_gpu: Vec<VhostUserOption>,
    #[argh(option, arg_name = "SOCKET_PATH")]
    /// path to a socket for vhost-user mac80211_hwsim
    pub vhost_user_mac80211_hwsim: Option<VhostUserOption>,
    #[argh(option, arg_name = "SOCKET_PATH")]
    /// path to a socket for vhost-user net
    pub vhost_user_net: Vec<VhostUserOption>,
    #[argh(option, arg_name = "SOCKET_PATH")]
    /// path to a socket for vhost-user snd
    pub vhost_user_snd: Vec<VhostUserOption>,
    #[argh(option, arg_name = "SOCKET_PATH")]
    /// path to a socket for vhost-user vsock
    pub vhost_user_vsock: Vec<VhostUserOption>,
    #[argh(option, arg_name = "SOCKET_PATH")]
    /// path to a vhost-user socket for wayland
    pub vhost_user_wl: Option<VhostUserWlOption>,
    #[cfg(unix)]
    #[argh(option, arg_name = "SOCKET_PATH")]
    /// path to a socket for vhost-user vsock
    pub vhost_vsock_device: Option<PathBuf>,
    #[cfg(unix)]
    #[argh(option, arg_name = "FD")]
    /// open FD to the vhost-vsock device, mutually exclusive with vhost-vsock-device
    pub vhost_vsock_fd: Option<RawDescriptor>,
    #[cfg(feature = "video-decoder")]
    #[argh(
        option,
        long = "video-decoder",
        arg_name = "[backend]",
        from_str_fn(parse_video_options)
    )]
    /// (EXPERIMENTAL) enable virtio-video decoder device
    /// Possible backend values: libvda, ffmpeg, vaapi
    pub video_dec: Option<VideoBackendType>,
    #[cfg(feature = "video-encoder")]
    #[argh(
        option,
        long = "video-encoder",
        arg_name = "[backend]",
        from_str_fn(parse_video_options)
    )]
    /// (EXPERIMENTAL) enable virtio-video encoder device
    /// Possible backend values: libvda
    pub video_enc: Option<VideoBackendType>,
    #[argh(option, long = "evdev", arg_name = "PATH")]
    /// path to an event device node. The device will be grabbed (unusable from the host) and made available to the guest with the same configuration it shows on the host
    pub virtio_input_evdevs: Vec<PathBuf>,
    #[argh(option, long = "keyboard", arg_name = "PATH")]
    /// path to a socket from where to read keyboard input events and write status updates to
    pub virtio_keyboard: Vec<PathBuf>,
    #[argh(option, long = "mouse", arg_name = "PATH")]
    /// path to a socket from where to read mouse input events and write status updates to
    pub virtio_mice: Vec<PathBuf>,
    #[argh(option, long = "multi-touch", arg_name = "PATH:WIDTH:HEIGHT")]
    /// path to a socket from where to read multi touch input events (such as those from a touchscreen) and write status updates to, optionally followed by width and height (defaults to 800x1280)
    pub virtio_multi_touch: Vec<TouchDeviceOption>,
    #[argh(option, long = "single-touch", arg_name = "PATH:WIDTH:HEIGHT")]
    /// path to a socket from where to read single touch input events (such as those from a touchscreen) and write status updates to, optionally followed by width and height (defaults to 800x1280)
    pub virtio_single_touch: Vec<TouchDeviceOption>,
    #[cfg(feature = "audio")]
    #[argh(
        option,
        arg_name = "[capture=true,backend=BACKEND,num_output_devices=1,
num_input_devices=1,num_output_streams=1,num_input_streams=1]",
        long = "virtio-snd"
    )]
    /// comma separated key=value pairs for setting up virtio snd
    /// devices.
    /// Possible key values:
    /// capture=(false,true) - Disable/enable audio capture.
    /// Default is false.
    /// backend=(null,[cras]) - Which backend to use for
    /// virtio-snd.
    /// client_type=(crosvm,arcvm,borealis) - Set specific
    /// client type for cras backend. Default is crosvm.
    /// socket_type=(legacy,unified) - Set specific socket type
    /// for cras backend. Default is unified.
    /// num_output_devices=INT - Set number of output PCM
    /// devices.
    /// num_input_devices=INT - Set number of input PCM devices.
    /// num_output_streams=INT - Set number of output PCM
    /// streams per device.
    /// num_input_streams=INT - Set number of input PCM streams
    /// per device.
    pub virtio_snds: Vec<SndParameters>,
    #[argh(option, long = "switches", arg_name = "PATH")]
    /// path to a socket from where to read switch input events and write status updates to
    pub virtio_switches: Vec<PathBuf>,
    #[argh(option, long = "trackpad", arg_name = "PATH:WIDTH:HEIGHT")]
    /// path to a socket from where to read trackpad input events and write status updates to, optionally followed by screen width and height (defaults to 800x1280)
    pub virtio_trackpad: Vec<TouchDeviceOption>,
    #[cfg(all(feature = "tpm", feature = "chromeos", target_arch = "x86_64"))]
    #[argh(switch)]
    /// enable the virtio-tpm connection to vtpm daemon
    pub vtpm_proxy: bool,
    #[argh(
        option,
        arg_name = "SOCKET_PATH[,addr=DOMAIN:BUS:DEVICE.FUNCTION,uuid=UUID]"
    )]
    /// socket path for the Virtio Vhost User proxy device.
    /// Parameters
    /// addr=BUS:DEVICE.FUNCTION - PCI address that the proxy
    /// device will be allocated
    /// (default: automatically allocated)
    /// uuid=UUID - UUID which will be stored in VVU PCI config
    /// space that is readable from guest userspace
    pub vvu_proxy: Vec<VvuOption>,
    #[cfg(unix)]
    #[argh(
        option,
        long = "wayland-sock",
        arg_name = "PATH[,name=NAME]",
        from_str_fn(parse_wayland_sock)
    )]
    /// path to the Wayland socket to use. The unnamed one is used for displaying virtual screens. Named ones are only for IPC
    pub wayland_socket_paths: Vec<(String, PathBuf)>,
    #[argh(option, arg_name = "DISPLAY")]
    /// X11 display name to use
    pub x_display: Option<String>,
}
impl TryFrom<RunCommand> for super::config::Config {
    type Error = String;

    /// Converts the parsed `run` subcommand arguments into a `Config`,
    /// validating option combinations along the way.
    ///
    /// Returns a human-readable error `String` when mutually exclusive
    /// options are combined, a referenced path does not exist, or a device
    /// slot is configured twice.
    fn try_from(cmd: RunCommand) -> Result<Self, Self::Error> {
        let mut cfg = Self::default();
        // TODO: we need to factor out some(?) of the checks into config::validate_config
        // Process arguments
        if let Some(p) = cmd.executable_path {
            cfg.executable_path = Some(Executable::Kernel(p));
        }
        #[cfg(unix)]
        if let Some(p) = cmd.kvm_device_path {
            cfg.kvm_device_path = p;
        }
        #[cfg(unix)]
        if let Some(p) = cmd.vhost_net_device_path {
            if !p.exists() {
                return Err(format!("vhost-net-device path {:?} does not exist", p));
            }
            cfg.vhost_net_device_path = p;
        }
        if let Some(p) = cmd.android_fstab {
            if !p.exists() {
                return Err(format!("android-fstab path {:?} does not exist", p));
            }
            cfg.android_fstab = Some(p);
        }
        cfg.params.extend(cmd.params);
        cfg.per_vm_core_scheduling = cmd.per_vm_core_scheduling;
        cfg.vcpu_count = cmd.vcpu_count;
        cfg.vcpu_affinity = cmd.vcpu_affinity;
        cfg.cpu_clusters = cmd.cpu_clusters;
        if let Some(capacity) = cmd.cpu_capacity {
            cfg.cpu_capacity = capacity;
        }
        cfg.vcpu_cgroup_path = cmd.vcpu_cgroup_path;
        cfg.no_smt = cmd.no_smt;
        if let Some(rt_cpus) = cmd.rt_cpus {
            cfg.rt_cpus = rt_cpus;
        }
        cfg.delay_rt = cmd.delay_rt;
        cfg.memory = cmd.memory;
        #[cfg(target_arch = "aarch64")]
        {
            cfg.swiotlb = cmd.swiotlb;
        }
        cfg.hugepages = cmd.hugepages;
        cfg.hypervisor = cmd.hypervisor;
        #[cfg(unix)]
        {
            cfg.lock_guest_memory = cmd.lock_guest_memory;
        }
        #[cfg(feature = "audio")]
        {
            cfg.ac97_parameters = cmd.ac97;
            cfg.sound = cmd.sound;
        }
        cfg.vhost_user_snd = cmd.vhost_user_snd;
        // Serial devices: each (hardware, num) slot may only be configured
        // once, and at most one device may act as console, earlycon, or be
        // connected to stdin.
        for serial_params in cmd.serial_parameters {
            super::sys::config::check_serial_params(&serial_params)?;
            let num = serial_params.num;
            let key = (serial_params.hardware, num);
            if cfg.serial_parameters.contains_key(&key) {
                // NOTE(review): this message reads truncated (presumably
                // "... already specified"); confirm before changing the
                // user-visible string.
                return Err(format!(
                    "serial hardware {} num {}",
                    serial_params.hardware, num,
                ));
            }
            if serial_params.console {
                for params in cfg.serial_parameters.values() {
                    if params.console {
                        return Err(format!(
                            "{} device {} already set as console",
                            params.hardware, params.num,
                        ));
                    }
                }
            }
            if serial_params.earlycon {
                // Only SerialHardware::Serial supports earlycon= currently.
                match serial_params.hardware {
                    SerialHardware::Serial => {}
                    _ => {
                        return Err(super::config::invalid_value_err(
                            serial_params.hardware.to_string(),
                            String::from("earlycon not supported for hardware"),
                        ));
                    }
                }
                for params in cfg.serial_parameters.values() {
                    if params.earlycon {
                        return Err(format!(
                            "{} device {} already set as earlycon",
                            params.hardware, params.num,
                        ));
                    }
                }
            }
            if serial_params.stdin {
                if let Some(previous_stdin) = cfg.serial_parameters.values().find(|sp| sp.stdin) {
                    return Err(format!(
                        "{} device {} already connected to standard input",
                        previous_stdin.hardware, previous_stdin.num,
                    ));
                }
            }
            cfg.serial_parameters.insert(key, serial_params);
        }
        if cmd.root.is_some() && cmd.rwroot.is_some() {
            return Err("Only one of [root,rwroot] has to be specified".to_string());
        }
        // The root disk is announced to the guest kernel via a
        // "root=/dev/vdX" command line parameter derived from its index.
        let root_disk = if let Some((read_only, (index, mut disk_option))) = cmd
            .root
            .map(|d| (true, d))
            .or(cmd.rwroot.map(|d| (false, d)))
        {
            if index >= 26 {
                // Device letters only span /dev/vda ..= /dev/vdz.
                return Err("ran out of letters to assign to root disk".to_string());
            }
            disk_option.read_only = read_only;
            cfg.params.push(format!(
                "root=/dev/vd{} {}",
                char::from(b'a' + index as u8),
                if read_only { "ro" } else { "rw" }
            ));
            Some((index, disk_option))
        } else {
            None
        };
        // Merge the root disk with --disk/--rwdisk entries and order the
        // final list by the index each option was given with.
        let mut disks = root_disk
            .into_iter()
            .chain(cmd.disks.into_iter().map(|(i, mut d)| {
                d.read_only = true;
                (i, d)
            }))
            .chain(cmd.rwdisks.into_iter().map(|(i, mut d)| {
                d.read_only = false;
                (i, d)
            }))
            .collect::<Vec<_>>();
        disks.sort_by_key(|(i, _)| *i);
        cfg.disks = disks.into_iter().map(|(_, d)| d).collect();
        for (mut pmem, read_only) in cmd
            .pmem_devices
            .into_iter()
            .map(|p| (p, true))
            .chain(cmd.rw_pmem_devices.into_iter().map(|p| (p, false)))
        {
            pmem.read_only = read_only;
            cfg.pmem_devices.push(pmem);
        }
        #[cfg(windows)]
        {
            #[cfg(feature = "crash-report")]
            {
                cfg.crash_pipe_name = cmd.crash_pipe_name;
            }
            cfg.product_name = cmd.product_name;
            cfg.exit_stats = cmd.exit_stats;
            cfg.host_guid = cmd.host_guid;
            cfg.irq_chip = cmd.irq_chip;
            cfg.kernel_log_file = cmd.kernel_log_file;
            cfg.log_file = cmd.log_file;
            cfg.logs_directory = cmd.logs_directory;
            #[cfg(feature = "process-invariants")]
            {
                cfg.process_invariants_data_handle = cmd.process_invariants_data_handle;
                cfg.process_invariants_data_size = cmd.process_invariants_data_size;
            }
            cfg.pvclock = cmd.pvclock;
            #[cfg(feature = "kiwi")]
            {
                cfg.service_pipe_name = cmd.service_pipe_name;
            }
            #[cfg(feature = "slirp-ring-capture")]
            {
                cfg.slirp_capture_file = cmd.slirp_capture_file;
            }
            cfg.syslog_tag = cmd.syslog_tag;
            cfg.product_channel = cmd.product_channel;
            cfg.product_version = cmd.product_version;
        }
        cfg.pstore = cmd.pstore;
        #[cfg(unix)]
        for (name, params) in cmd.wayland_socket_paths {
            if cfg.wayland_socket_paths.contains_key(&name) {
                return Err(format!("wayland socket name already used: '{}'", name));
            }
            cfg.wayland_socket_paths.insert(name, params);
        }
        cfg.x_display = cmd.x_display;
        cfg.display_window_keyboard = cmd.display_window_keyboard;
        cfg.display_window_mouse = cmd.display_window_mouse;
        if let Some(mut socket_path) = cmd.socket_path {
            // A directory argument gets a per-process socket file name.
            if socket_path.is_dir() {
                socket_path.push(format!("crosvm-{}.sock", getpid()));
            }
            cfg.socket_path = Some(socket_path);
        }
        cfg.balloon_control = cmd.balloon_control;
        cfg.cid = cmd.cid;
        #[cfg(feature = "plugin")]
        {
            use std::fs::File;
            use std::io::BufRead;
            use std::io::BufReader;
            if let Some(p) = cmd.plugin {
                if cfg.executable_path.is_some() {
                    return Err(format!(
                        "A VM executable was already specified: {:?}",
                        cfg.executable_path
                    ));
                }
                cfg.executable_path = Some(Executable::Plugin(p));
            }
            cfg.plugin_root = cmd.plugin_root;
            cfg.plugin_mounts = cmd.plugin_mounts;
            if let Some(path) = cmd.plugin_mount_file {
                let file = File::open(path)
                    .map_err(|_| String::from("unable to open `plugin-mount-file` file"))?;
                let reader = BufReader::new(file);
                // One mount per line; '#' starts a comment, blank lines are skipped.
                for l in reader.lines() {
                    let line = l.unwrap();
                    let trimmed_line = line.split_once('#').map_or(&*line, |x| x.0).trim();
                    if !trimmed_line.is_empty() {
                        let mount = parse_plugin_mount_option(trimmed_line)?;
                        cfg.plugin_mounts.push(mount);
                    }
                }
            }
            cfg.plugin_gid_maps = cmd.plugin_gid_maps;
            if let Some(path) = cmd.plugin_gid_map_file {
                let file = File::open(path)
                    .map_err(|_| String::from("unable to open `plugin-gid-map-file` file"))?;
                let reader = BufReader::new(file);
                // Same line format as the mount file above.
                for l in reader.lines() {
                    let line = l.unwrap();
                    let trimmed_line = line.split_once('#').map_or(&*line, |x| x.0).trim();
                    if !trimmed_line.is_empty() {
                        let map = trimmed_line.parse()?;
                        cfg.plugin_gid_maps.push(map);
                    }
                }
            }
        }
        cfg.vhost_net = cmd.vhost_net;
        #[cfg(feature = "tpm")]
        {
            cfg.software_tpm = cmd.software_tpm;
        }
        #[cfg(all(feature = "tpm", feature = "chromeos", target_arch = "x86_64"))]
        {
            cfg.vtpm_proxy = cmd.vtpm_proxy;
        }
        cfg.virtio_single_touch = cmd.virtio_single_touch;
        cfg.virtio_multi_touch = cmd.virtio_multi_touch;
        cfg.virtio_trackpad = cmd.virtio_trackpad;
        cfg.virtio_mice = cmd.virtio_mice;
        cfg.virtio_keyboard = cmd.virtio_keyboard;
        cfg.virtio_switches = cmd.virtio_switches;
        cfg.virtio_input_evdevs = cmd.virtio_input_evdevs;
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            cfg.split_irqchip = cmd.split_irqchip;
        }
        cfg.initrd_path = cmd.initrd_path;
        if cmd.disable_sandbox {
            cfg.jail_config = None;
        }
        if let Some(p) = cmd.bios {
            if cfg.executable_path.is_some() {
                return Err(format!(
                    "A VM executable was already specified: {:?}",
                    cfg.executable_path
                ));
            }
            cfg.executable_path = Some(Executable::Bios(p));
        }
        cfg.pflash_parameters = cmd.pflash_parameters;
        #[cfg(feature = "video-decoder")]
        {
            cfg.video_dec = cmd.video_dec;
        }
        #[cfg(feature = "video-encoder")]
        {
            cfg.video_enc = cmd.video_enc;
        }
        cfg.acpi_tables = cmd.acpi_tables;
        cfg.usb = !cmd.no_usb;
        cfg.rng = !cmd.no_rng;
        cfg.balloon = !cmd.no_balloon;
        #[cfg(feature = "audio")]
        {
            cfg.virtio_snds = cmd.virtio_snds;
        }
        #[cfg(feature = "audio_cras")]
        {
            // cmd.cras_snds is the old parameter for virtio snd with cras backend.
            cfg.virtio_snds
                .extend(cmd.cras_snds.into_iter().map(|s| SndParameters {
                    backend: devices::virtio::parameters::StreamSourceBackend::Sys(
                        devices::virtio::snd::sys::StreamSourceBackend::CRAS,
                    ),
                    ..s
                }));
        }
        #[cfg(feature = "gpu")]
        {
            cfg.gpu_parameters = cmd.gpu_params;
        }
        #[cfg(unix)]
        {
            if cmd.vhost_vsock_device.is_some() && cmd.vhost_vsock_fd.is_some() {
                return Err(
                    "Only one of vhost-vsock-device vhost-vsock-fd has to be specified".to_string(),
                );
            }
            cfg.vhost_vsock_device = cmd.vhost_vsock_device;
            // A raw fd is exposed to the device code as a /proc/self/fd path.
            if let Some(fd) = cmd.vhost_vsock_fd {
                cfg.vhost_vsock_device = Some(PathBuf::from(format!("/proc/self/fd/{}", fd)));
            }
            cfg.shared_dirs = cmd.shared_dirs;
            cfg.host_ip = cmd.host_ip;
            cfg.netmask = cmd.netmask;
            cfg.mac_address = cmd.mac_address;
            cfg.tap_name = cmd.tap_name;
            cfg.tap_fd = cmd.tap_fd;
            cfg.coiommu_param = cmd.coiommu;
            #[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
            {
                cfg.gpu_render_server_parameters = cmd.gpu_render_server;
            }
            if let Some(d) = cmd.seccomp_policy_dir {
                cfg.jail_config
                    .get_or_insert_with(Default::default)
                    .seccomp_policy_dir = Some(d);
            }
            if cmd.seccomp_log_failures {
                cfg.jail_config
                    .get_or_insert_with(Default::default)
                    .seccomp_log_failures = true;
            }
            if let Some(p) = cmd.pivot_root {
                cfg.jail_config
                    .get_or_insert_with(Default::default)
                    .pivot_root = p;
            }
            #[cfg(feature = "gpu")]
            {
                if !cmd.gpu_display.is_empty() {
                    cfg.gpu_parameters
                        .get_or_insert_with(Default::default)
                        .display_params
                        .extend(cmd.gpu_display);
                }
            }
            cfg.net_vq_pairs = cmd.net_vq_pairs;
        }
        // NOTE(review): this only rejects all three protection flags at once;
        // any two together fall through to the if/else priority below.
        // Confirm whether pairwise exclusivity was intended.
        if cmd.protected_vm && cmd.protected_vm_without_firmware && cmd.pvm_fw.is_some() {
            return Err("Only one protection mode has to be specified".to_string());
        }
        if cmd.protected_vm {
            cfg.protected_vm = ProtectionType::Protected;
            // Balloon and USB devices only work for unprotected VMs.
            cfg.balloon = false;
            cfg.usb = false;
            // Protected VMs can't trust the RNG device, so don't provide it.
            cfg.rng = false;
        } else if cmd.protected_vm_without_firmware {
            cfg.protected_vm = ProtectionType::ProtectedWithoutFirmware;
            // Balloon and USB devices only work for unprotected VMs.
            cfg.balloon = false;
            cfg.usb = false;
            // Protected VMs can't trust the RNG device, so don't provide it.
            cfg.rng = false;
        } else if let Some(p) = cmd.pvm_fw {
            if !p.exists() || !p.is_file() {
                return Err(
                    "unprotected-vm-with-firmware path should be an existing file".to_string(),
                );
            }
            // Must be `UnprotectedWithFirmware` (not `Unprotected`) so the
            // provided firmware image is actually loaded and executed instead
            // of jumping straight to the payload (b/240704906).
            cfg.protected_vm = ProtectionType::UnprotectedWithFirmware;
            // Balloon and USB devices only work for unprotected VMs.
            cfg.balloon = false;
            cfg.usb = false;
            // Protected VMs can't trust the RNG device, so don't provide it.
            cfg.rng = false;
            cfg.pvm_fw = Some(p);
        }
        cfg.battery_type = cmd.battery;
        #[cfg(all(target_arch = "x86_64", feature = "gdb"))]
        {
            cfg.gdb = cmd.gdb;
        }
        #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
        {
            cfg.host_cpu_topology = cmd.host_cpu_topology;
            cfg.force_s2idle = cmd.s2idle;
            cfg.pcie_ecam = cmd.pcie_ecam;
            cfg.pci_low_start = cmd.pci_low_start;
            cfg.no_i8042 = cmd.no_i8042;
            cfg.no_rtc = cmd.no_rtc;
            for (index, msr_config) in cmd.userspace_msr {
                if cfg.userspace_msr.insert(index, msr_config).is_some() {
                    return Err(String::from("msr must be unique"));
                }
            }
        }
        // cfg.balloon_bias is in bytes; the flag is given in MiB.
        if let Some(b) = cmd.balloon_bias {
            cfg.balloon_bias = b * 1024 * 1024;
        }
        cfg.vhost_user_blk = cmd.vhost_user_blk;
        cfg.vhost_user_console = cmd.vhost_user_console;
        cfg.vhost_user_gpu = cmd.vhost_user_gpu;
        cfg.vhost_user_mac80211_hwsim = cmd.vhost_user_mac80211_hwsim;
        cfg.vhost_user_net = cmd.vhost_user_net;
        cfg.vhost_user_vsock = cmd.vhost_user_vsock;
        cfg.vhost_user_wl = cmd.vhost_user_wl;
        #[cfg(feature = "direct")]
        {
            cfg.direct_pmio = cmd.direct_pmio;
            cfg.direct_mmio = cmd.direct_mmio;
            cfg.direct_level_irq = cmd.direct_level_irq;
            cfg.direct_edge_irq = cmd.direct_edge_irq;
            cfg.direct_gpe = cmd.direct_gpe;
            cfg.pcie_rp = cmd.pcie_rp;
            cfg.mmio_address_ranges = cmd.mmio_address_ranges.unwrap_or_default();
        }
        cfg.disable_virtio_intx = cmd.disable_virtio_intx;
        cfg.dmi_path = cmd.dmi_path;
        cfg.itmt = cmd.itmt;
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        if cmd.enable_pnp_data && cmd.force_calibrated_tsc_leaf {
            return Err(
                "Only one of [enable_pnp_data,force_calibrated_tsc_leaf] can be specified"
                    .to_string(),
            );
        }
        cfg.enable_pnp_data = cmd.enable_pnp_data;
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            cfg.force_calibrated_tsc_leaf = cmd.force_calibrated_tsc_leaf;
        }
        cfg.privileged_vm = cmd.privileged_vm;
        cfg.stub_pci_devices = cmd.stub_pci_devices;
        cfg.vvu_proxy = cmd.vvu_proxy;
        cfg.file_backed_mappings = cmd.file_backed_mappings;
        cfg.init_memory = cmd.init_memory;
        cfg.strict_balloon = cmd.strict_balloon;
        #[cfg(target_os = "android")]
        {
            cfg.task_profiles = cmd.task_profiles;
        }
        #[cfg(unix)]
        {
            cfg.vfio.extend(cmd.vfio);
            cfg.vfio.extend(cmd.vfio_platform);
        }
        // Now do validation of constructed config
        super::config::validate_config(&mut cfg)?;
        Ok(cfg)
    }
}
crosvm: Fix UnprotectedWithFirmware
Currently, --unprotected-vm-with-firmware does not load the provided
firmware and therefore doesn't execute it either. Instead, it jumps
directly to the provided payload. Fix the config assignment causing this
behavior.
BUG=b:240704906
Change-Id: Iaedaa372d668b2fab70a80252e30f608b48c2099
Signed-off-by: Pierre-Clément Tosi <2823ff726385ec3ac1cbf6edb75016ef1c0f9dc0@google.com>
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/3794252
Reviewed-by: Dmitry Torokhov <10a8c465cefc9bdd6c925e26964d23c90f1141cc@chromium.org>
Reviewed-by: Keiichi Watanabe <815e4e8f7c92a23424d7c3f51ead622bcedbc720@chromium.org>
Reviewed-by: Andrew Walbran <91a7c1b7f9f624e0ed1b099dc85ecb6025941ee9@google.com>
// Copyright 2022 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
cfg_if::cfg_if! {
if #[cfg(unix)] {
use std::net;
use base::RawDescriptor;
#[cfg(feature = "gpu")]
use devices::virtio::GpuDisplayParameters;
use devices::virtio::vhost::user::device::parse_wayland_sock;
#[cfg(feature = "gpu")]
use super::sys::config::parse_gpu_display_options;
use super::sys::config::{
parse_coiommu_params, VfioCommand, parse_vfio, parse_vfio_platform,
};
use super::config::SharedDir;
} else if #[cfg(windows)] {
use crate::crosvm::sys::config::IrqChipKind;
}
}
use std::collections::BTreeMap;
use std::path::PathBuf;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use arch::MsrConfig;
use arch::Pstore;
use arch::VcpuAffinity;
use argh::FromArgs;
use base::getpid;
use devices::virtio::block::block::DiskOption;
#[cfg(feature = "audio")]
use devices::virtio::snd::parameters::Parameters as SndParameters;
use devices::virtio::vhost::user::device;
#[cfg(any(feature = "video-decoder", feature = "video-encoder"))]
use devices::virtio::VideoBackendType;
#[cfg(feature = "audio")]
use devices::Ac97Parameters;
use devices::PflashParameters;
use devices::SerialHardware;
use devices::SerialParameters;
use devices::StubPciParameters;
use hypervisor::ProtectionType;
use resources::AddressRange;
use vm_control::BatteryType;
#[cfg(any(feature = "video-decoder", feature = "video-encoder"))]
use super::config::parse_video_options;
#[cfg(feature = "gpu")]
use super::sys::config::parse_gpu_options;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
use super::sys::config::parse_gpu_render_server_options;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
use super::sys::GpuRenderServerParameters;
use crate::crosvm::config::numbered_disk_option;
#[cfg(feature = "audio")]
use crate::crosvm::config::parse_ac97_options;
use crate::crosvm::config::parse_battery_options;
use crate::crosvm::config::parse_bus_id_addr;
use crate::crosvm::config::parse_cpu_affinity;
use crate::crosvm::config::parse_cpu_capacity;
use crate::crosvm::config::parse_cpu_set;
#[cfg(feature = "direct")]
use crate::crosvm::config::parse_direct_io_options;
use crate::crosvm::config::parse_file_backed_mapping;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::crosvm::config::parse_memory_region;
use crate::crosvm::config::parse_mmio_address_range;
#[cfg(feature = "direct")]
use crate::crosvm::config::parse_pcie_root_port_params;
use crate::crosvm::config::parse_pflash_parameters;
#[cfg(feature = "plugin")]
use crate::crosvm::config::parse_plugin_mount_option;
use crate::crosvm::config::parse_pstore;
use crate::crosvm::config::parse_serial_options;
use crate::crosvm::config::parse_stub_pci_parameters;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::crosvm::config::parse_userspace_msr_options;
#[cfg(feature = "plugin")]
use crate::crosvm::config::BindMount;
#[cfg(feature = "direct")]
use crate::crosvm::config::DirectIoOption;
use crate::crosvm::config::Executable;
use crate::crosvm::config::FileBackedMappingParameters;
#[cfg(feature = "plugin")]
use crate::crosvm::config::GidMap;
#[cfg(feature = "direct")]
use crate::crosvm::config::HostPcieRootPortParameters;
use crate::crosvm::config::HypervisorKind;
use crate::crosvm::config::TouchDeviceOption;
use crate::crosvm::config::VhostUserFsOption;
use crate::crosvm::config::VhostUserOption;
use crate::crosvm::config::VhostUserWlOption;
use crate::crosvm::config::VvuOption;
// Top-level argument set for the crosvm binary, parsed by argh.
// NOTE: with argh, the `///` doc comments below are emitted verbatim as the
// generated --help text, so they are user-visible and must not be reworded
// casually.
#[derive(FromArgs)]
/// crosvm
pub struct CrosvmCmdlineArgs {
#[argh(switch)]
/// use extended exit status
pub extended_status: bool,
// Defaults to "info" when the flag is omitted.
#[argh(option, default = r#"String::from("info")"#)]
/// specify log level, eg "off", "error", "debug,disk=off", etc
pub log_level: String,
#[argh(switch)]
/// disable output to syslog
pub no_syslog: bool,
// The subcommand to dispatch to (run, stop, balloon, ...).
#[argh(subcommand)]
pub command: Command,
}
// Subcommands available on every host OS; some variants are gated on cargo
// features. Each variant wraps the argument struct for one subcommand.
#[allow(clippy::large_enum_variant)]
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum CrossPlatformCommands {
#[cfg(feature = "balloon")]
Balloon(BalloonCommand),
#[cfg(feature = "balloon")]
BalloonStats(BalloonStatsCommand),
Battery(BatteryCommand),
#[cfg(feature = "composite-disk")]
CreateComposite(CreateCompositeCommand),
CreateQcow2(CreateQcow2Command),
Device(DeviceCommand),
Disk(DiskCommand),
MakeRT(MakeRTCommand),
Resume(ResumeCommand),
Run(RunCommand),
Stop(StopCommand),
Suspend(SuspendCommand),
Powerbtn(PowerbtnCommand),
Sleepbtn(SleepCommand),
Gpe(GpeCommand),
Usb(UsbCommand),
Version(VersionCommand),
Vfio(VfioCrosvmCommand),
}
// Full command set: the cross-platform subcommands above merged with the
// OS-specific ones, flattened into a single subcommand namespace.
#[allow(clippy::large_enum_variant)]
#[derive(argh_helpers::FlattenSubcommand)]
pub enum Command {
CrossPlatform(CrossPlatformCommands),
Sys(super::sys::cmdline::Commands),
}
// Arguments for `crosvm balloon`: a target size plus the control socket of
// the running VM instance.
#[derive(FromArgs)]
#[argh(subcommand, name = "balloon")]
/// Set balloon size of the crosvm instance to `SIZE` bytes
pub struct BalloonCommand {
#[argh(positional, arg_name = "SIZE")]
/// amount of bytes
pub num_bytes: u64,
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm balloon_stats`: only the control socket path.
#[derive(argh::FromArgs)]
#[argh(subcommand, name = "balloon_stats")]
/// Prints virtio balloon statistics for a `VM_SOCKET`
pub struct BalloonStatsCommand {
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm battery`: battery type, property name, target value
// and the control socket. All four are free-form strings here; validation
// happens downstream of parsing.
#[derive(FromArgs)]
#[argh(subcommand, name = "battery")]
/// Modify battery
pub struct BatteryCommand {
#[argh(positional, arg_name = "BATTERY_TYPE")]
/// battery type
pub battery_type: String,
#[argh(positional)]
/// battery property
/// status | present | health | capacity | aconline
pub property: String,
#[argh(positional)]
/// battery property target
/// STATUS | PRESENT | HEALTH | CAPACITY | ACONLINE
pub target: String,
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm create_composite` (composite-disk feature only):
// output image path plus any number of LABEL:PARTITION specs.
#[cfg(feature = "composite-disk")]
#[derive(FromArgs)]
#[argh(subcommand, name = "create_composite")]
/// Create a new composite disk image file
pub struct CreateCompositeCommand {
#[argh(positional, arg_name = "PATH")]
/// image path
pub path: String,
#[argh(positional, arg_name = "LABEL:PARTITION")]
/// partitions
pub partitions: Vec<String>,
}
// Arguments for `crosvm create_qcow2`. SIZE and --backing-file are mutually
// exclusive per the help text; that exclusivity is enforced after parsing,
// not by argh itself.
#[derive(FromArgs)]
#[argh(subcommand, name = "create_qcow2")]
/// Create Qcow2 image given path and size
pub struct CreateQcow2Command {
#[argh(positional, arg_name = "PATH")]
/// path to the new qcow2 file to create
pub file_path: String,
#[argh(positional, arg_name = "SIZE")]
/// desired size of the image in bytes; required if not using --backing-file
pub size: Option<u64>,
#[argh(option)]
/// path to backing file; if specified, the image will be the same size as the backing file, and
/// SIZE may not be specified
pub backing_file: Option<String>,
}
// Subcommands of `crosvm disk`; currently only `resize`.
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum DiskSubcommand {
Resize(ResizeDiskSubcommand),
}
// Arguments for `crosvm disk resize`: which attached disk (by index), the
// new size in bytes, and the control socket of the target VM.
#[derive(FromArgs)]
/// resize disk
#[argh(subcommand, name = "resize")]
pub struct ResizeDiskSubcommand {
#[argh(positional, arg_name = "DISK_INDEX")]
/// disk index
pub disk_index: usize,
#[argh(positional, arg_name = "NEW_SIZE")]
/// new disk size
pub disk_size: u64,
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Wrapper for the `crosvm disk` subcommand group.
#[derive(FromArgs)]
#[argh(subcommand, name = "disk")]
/// Manage attached virtual disk devices
pub struct DiskCommand {
#[argh(subcommand)]
pub command: DiskSubcommand,
}
// Arguments for `crosvm make_rt`: only the control socket path.
#[derive(FromArgs)]
#[argh(subcommand, name = "make_rt")]
/// Enables real-time vcpu priority for crosvm instances started with `--delay-rt`
pub struct MakeRTCommand {
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm resume`: only the control socket path.
#[derive(FromArgs)]
#[argh(subcommand, name = "resume")]
/// Resumes the crosvm instance
pub struct ResumeCommand {
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm stop`: only the control socket path.
#[derive(FromArgs)]
#[argh(subcommand, name = "stop")]
/// Stops crosvm instances via their control sockets
pub struct StopCommand {
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm suspend`: only the control socket path.
#[derive(FromArgs)]
#[argh(subcommand, name = "suspend")]
/// Suspends the crosvm instance
pub struct SuspendCommand {
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm powerbtn`: only the control socket path.
#[derive(FromArgs)]
#[argh(subcommand, name = "powerbtn")]
/// Triggers a power button event in the crosvm instance
pub struct PowerbtnCommand {
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm sleepbtn`: only the control socket path.
#[derive(FromArgs)]
#[argh(subcommand, name = "sleepbtn")]
/// Triggers a sleep button event in the crosvm instance
pub struct SleepCommand {
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm gpe`: the GPE number to inject plus the control
// socket path.
#[derive(FromArgs)]
#[argh(subcommand, name = "gpe")]
/// Injects a general-purpose event into the crosvm instance
pub struct GpeCommand {
#[argh(positional)]
/// GPE #
pub gpe: u32,
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Wrapper for the `crosvm usb` subcommand group (attach/detach/list).
#[derive(FromArgs)]
#[argh(subcommand, name = "usb")]
/// Manage attached virtual USB devices.
pub struct UsbCommand {
#[argh(subcommand)]
pub command: UsbSubCommand,
}
// `crosvm version` takes no arguments.
#[derive(FromArgs)]
#[argh(subcommand, name = "version")]
/// Show package version.
pub struct VersionCommand {}
// Arguments for `crosvm vfio add`: sysfs path of the host device to pass
// through, plus the control socket of the target VM.
#[derive(FromArgs)]
#[argh(subcommand, name = "add")]
/// ADD
pub struct VfioAddSubCommand {
#[argh(positional)]
/// path to host's vfio sysfs
pub vfio_path: PathBuf,
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm vfio remove`: mirror of the add subcommand.
#[derive(FromArgs)]
#[argh(subcommand, name = "remove")]
/// REMOVE
pub struct VfioRemoveSubCommand {
#[argh(positional)]
/// path to host's vfio sysfs
pub vfio_path: PathBuf,
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Subcommands of `crosvm vfio`.
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum VfioSubCommand {
Add(VfioAddSubCommand),
Remove(VfioRemoveSubCommand),
}
// Wrapper for the `crosvm vfio` subcommand group.
#[derive(FromArgs)]
#[argh(subcommand, name = "vfio")]
/// add/remove host vfio pci device into guest
pub struct VfioCrosvmCommand {
#[argh(subcommand)]
pub command: VfioSubCommand,
}
// Wrapper for the `crosvm device` subcommand group (vhost-user device
// processes).
#[derive(FromArgs)]
#[argh(subcommand, name = "device")]
/// Start a device process
pub struct DeviceCommand {
#[argh(subcommand)]
pub command: DeviceSubcommand,
}
// Device processes that exist on every host OS; OS-specific ones are merged
// in via `DeviceSubcommand`.
#[derive(FromArgs)]
#[argh(subcommand)]
/// Cross-platform Devices
pub enum CrossPlatformDevicesCommands {
Block(device::BlockOptions),
Net(device::NetOptions),
}
// Flattened union of cross-platform and OS-specific device subcommands.
#[derive(argh_helpers::FlattenSubcommand)]
pub enum DeviceSubcommand {
CrossPlatform(CrossPlatformDevicesCommands),
Sys(super::sys::cmdline::DevicesSubcommand),
}
// Subcommands of `crosvm usb`.
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum UsbSubCommand {
Attach(UsbAttachCommand),
Detach(UsbDetachCommand),
List(UsbListCommand),
}
// Arguments for `crosvm usb attach`. The first positional is parsed by
// `parse_bus_id_addr` into a (bus_id, addr, vendor_id, product_id)-style
// tuple -- NOTE(review): the exact meaning of the four fields is defined by
// that parser; confirm against its implementation before relying on order.
#[derive(FromArgs)]
/// Attach usb device
#[argh(subcommand, name = "attach")]
pub struct UsbAttachCommand {
// NOTE(review): this field has no `///` help text, unlike every other
// argh field in this file; consider adding one (changes --help output).
#[argh(
positional,
arg_name = "BUS_ID:ADDR:BUS_NUM:DEV_NUM",
from_str_fn(parse_bus_id_addr)
)]
pub addr: (u8, u8, u16, u16),
#[argh(positional)]
/// usb device path
pub dev_path: String,
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm usb detach`: the port number of the device to
// detach, plus the control socket path.
#[derive(FromArgs)]
/// Detach usb device
#[argh(subcommand, name = "detach")]
pub struct UsbDetachCommand {
#[argh(positional, arg_name = "PORT")]
/// usb port
pub port: u8,
#[argh(positional, arg_name = "VM_SOCKET")]
/// VM Socket path
pub socket_path: String,
}
// Arguments for `crosvm usb list`: only the control socket path.
#[derive(FromArgs)]
/// List currently attached usb devices
// The description above doubles as the argh-generated --help text. It was a
// copy-paste of the detach command ("Detach usb device"), which misdescribed
// this subcommand; fixed to describe `list`.
#[argh(subcommand, name = "list")]
pub struct UsbListCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
/// Start a new crosvm instance
#[remain::sorted]
#[argh_helpers::pad_description_for_argh]
#[derive(FromArgs)]
#[argh(subcommand, name = "run")]
pub struct RunCommand {
#[cfg(feature = "audio")]
#[argh(
option,
from_str_fn(parse_ac97_options),
arg_name = "[backend=BACKEND,capture=true,capture_effect=EFFECT,client_type=TYPE,shm-fd=FD,client-fd=FD,server-fd=FD]"
)]
/// comma separated key=value pairs for setting up Ac97 devices.
/// Can be given more than once.
/// Possible key values:
/// backend=(null, cras, vios) - Where to route the audio
/// device. If not provided, backend will default to
/// null. `null` for /dev/null, cras for CRAS server
/// and vios for VioS server.
/// capture - Enable audio capture
/// capture_effects - | separated effects to be enabled for
/// recording. The only supported effect value now is
/// EchoCancellation or aec.
/// client_type - Set specific client type for cras backend.
/// socket_type - Set specific socket type for cras backend.
/// server - The to the VIOS server (unix socket)
pub ac97: Vec<Ac97Parameters>,
#[argh(option, long = "acpi-table", arg_name = "PATH")]
/// path to user provided ACPI table
pub acpi_tables: Vec<PathBuf>,
#[argh(option)]
/// path to Android fstab
pub android_fstab: Option<PathBuf>,
#[argh(option, arg_name = "N", long = "balloon-bias-mib")]
/// amount to bias balance of memory between host and guest as the balloon inflates, in mib.
pub balloon_bias: Option<i64>,
#[argh(option, arg_name = "PATH")]
/// path for balloon controller socket.
pub balloon_control: Option<PathBuf>,
#[argh(option, from_str_fn(parse_battery_options))]
/// comma separated key=value pairs for setting up battery
/// device
/// Possible key values:
/// type=goldfish - type of battery emulation, defaults to
/// goldfish
pub battery: Option<BatteryType>,
#[argh(option)]
/// path to BIOS/firmware ROM
pub bios: Option<PathBuf>,
#[argh(option, arg_name = "CID")]
/// context ID for virtual sockets.
pub cid: Option<u64>,
#[cfg(unix)]
#[argh(
option,
arg_name = "unpin_policy=POLICY,unpin_interval=NUM,unpin_limit=NUM,unpin_gen_threshold=NUM",
from_str_fn(parse_coiommu_params)
)]
/// comma separated key=value pairs for setting up coiommu
/// devices.
/// Possible key values:
/// unpin_policy=lru - LRU unpin policy.
/// unpin_interval=NUM - Unpin interval time in seconds.
/// unpin_limit=NUM - Unpin limit for each unpin cycle, in
/// unit of page count. 0 is invalid.
/// unpin_gen_threshold=NUM - Number of unpin intervals a
/// pinned page must be busy for to be aged into the
/// older which is less frequently checked generation.
pub coiommu: Option<devices::CoIommuParameters>,
#[argh(
option,
arg_name = "CPU=CAP[,CPU=CAP[,...]]",
from_str_fn(parse_cpu_capacity)
)]
/// set the relative capacity of the given CPU (default: no capacity)
pub cpu_capacity: Option<BTreeMap<usize, u32>>, // CPU index -> capacity
#[argh(
option,
long = "cpu-cluster",
arg_name = "CPUSET",
from_str_fn(parse_cpu_set)
)]
/// group the given CPUs into a cluster (default: no clusters)
pub cpu_clusters: Vec<Vec<usize>>,
#[cfg(feature = "audio_cras")]
#[argh(
option,
arg_name = "[capture=true,client=crosvm,socket=unified,\
num_output_devices=1,num_input_devices=1,num_output_streams=1,num_input_streams=1]",
long = "cras-snd"
)]
/// comma separated key=value pairs for setting up virtio snd
/// devices.
/// Possible key values:
/// capture=(false,true) - Disable/enable audio capture.
/// Default is false.
/// client_type=(crosvm,arcvm,borealis) - Set specific
/// client type for cras backend. Default is crosvm.
/// socket_type=(legacy,unified) Set specific socket type
/// for cras backend. Default is unified.
/// num_output_devices=INT - Set number of output PCM
/// devices.
/// num_input_devices=INT - Set number of input PCM devices.
/// num_output_streams=INT - Set number of output PCM
/// streams per device.
/// num_input_streams=INT - Set number of input PCM streams
/// per device.
pub cras_snds: Vec<SndParameters>,
#[cfg(feature = "crash-report")]
#[argh(option, long = "crash-pipe-name", arg_name = "\\\\.\\pipe\\PIPE_NAME")]
/// the crash handler ipc pipe name.
pub crash_pipe_name: Option<String>,
#[argh(switch)]
/// don't set VCPUs real-time until make-rt command is run
pub delay_rt: bool,
#[cfg(feature = "direct")]
#[argh(option, arg_name = "irq")]
/// enable interrupt passthrough
pub direct_edge_irq: Vec<u32>,
#[cfg(feature = "direct")]
#[argh(option, arg_name = "gpe")]
/// enable GPE interrupt and register access passthrough
pub direct_gpe: Vec<u32>,
#[cfg(feature = "direct")]
#[argh(option, arg_name = "irq")]
/// enable interrupt passthrough
pub direct_level_irq: Vec<u32>,
#[cfg(feature = "direct")]
#[argh(
option,
arg_name = "PATH@RANGE[,RANGE[,...]]",
from_str_fn(parse_direct_io_options)
)]
/// path and ranges for direct memory mapped I/O access. RANGE may be decimal or hex (starting with 0x)
pub direct_mmio: Option<DirectIoOption>,
#[cfg(feature = "direct")]
#[argh(
option,
arg_name = "PATH@RANGE[,RANGE[,...]]",
from_str_fn(parse_direct_io_options)
)]
/// path and ranges for direct port mapped I/O access. RANGE may be decimal or hex (starting with 0x)
pub direct_pmio: Option<DirectIoOption>,
#[argh(switch)]
/// run all devices in one, non-sandboxed process
pub disable_sandbox: bool,
#[argh(switch)]
/// disable INTx in virtio devices
pub disable_virtio_intx: bool,
#[argh(
option,
short = 'd',
long = "disk",
arg_name = "PATH[,key=value[,key=value[,...]]",
from_str_fn(numbered_disk_option)
)]
/// path to a disk image followed by optional comma-separated
/// options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
    ///     o_direct=BOOL - Use O_DIRECT mode to bypass page cache
pub disks: Vec<(usize, DiskOption)>,
#[argh(switch)]
/// capture keyboard input from the display window
pub display_window_keyboard: bool,
#[argh(switch)]
    /// capture mouse input from the display window
pub display_window_mouse: bool,
#[argh(option, long = "dmi", arg_name = "DIR")]
/// directory with smbios_entry_point/DMI files
pub dmi_path: Option<PathBuf>,
#[argh(switch)]
    /// expose Power and Performance (PnP) data to guest and guest can show these PnP data
pub enable_pnp_data: bool,
#[argh(positional, arg_name = "KERNEL")]
/// bzImage of kernel to run
pub executable_path: Option<PathBuf>,
#[cfg(windows)]
#[argh(switch, long = "exit-stats")]
/// gather and display statistics on Vm Exits and Bus Reads/Writes.
pub exit_stats: bool,
#[argh(
option,
long = "file-backed-mapping",
arg_name = "addr=NUM,size=SIZE,path=PATH[,offset=NUM][,ro][,rw][,sync]",
from_str_fn(parse_file_backed_mapping)
)]
/// map the given file into guest memory at the specified
/// address.
/// Parameters (addr, size, path are required):
/// addr=NUM - guest physical address to map at
/// size=NUM - amount of memory to map
/// path=PATH - path to backing file/device to map
/// offset=NUM - offset in backing file (default 0)
/// ro - make the mapping readonly (default)
/// rw - make the mapping writable
/// sync - open backing file with O_SYNC
/// align - whether to adjust addr and size to page
/// boundaries implicitly
pub file_backed_mappings: Vec<FileBackedMappingParameters>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// force use of a calibrated TSC cpuid leaf (0x15) even if the hypervisor
/// doesn't require one.
pub force_calibrated_tsc_leaf: bool,
#[cfg(all(target_arch = "x86_64", feature = "gdb"))]
#[argh(option, arg_name = "PORT")]
/// (EXPERIMENTAL) gdb on the given port
pub gdb: Option<u32>,
#[cfg(feature = "gpu")]
#[argh(
option,
arg_name = "[width=INT,height=INT]",
from_str_fn(parse_gpu_display_options)
)]
/// (EXPERIMENTAL) Comma separated key=value pairs for setting
/// up a display on the virtio-gpu device
/// Possible key values:
/// width=INT - The width of the virtual display connected
/// to the virtio-gpu.
/// height=INT - The height of the virtual display
/// connected to the virtio-gpu
#[cfg(unix)]
pub gpu_display: Vec<GpuDisplayParameters>,
#[cfg(feature = "gpu")]
#[argh(option, long = "gpu", from_str_fn(parse_gpu_options))]
/// (EXPERIMENTAL) Comma separated key=value pairs for setting
/// up a virtio-gpu device
/// Possible key values:
/// backend=(2d|virglrenderer|gfxstream) - Which backend to
/// use for virtio-gpu (determining rendering protocol)
/// context-types=LIST - The list of supported context
/// types, separated by ':' (default: no contexts enabled)
/// width=INT - The width of the virtual display connected
/// to the virtio-gpu.
/// height=INT - The height of the virtual display
/// connected to the virtio-gpu.
/// egl[=true|=false] - If the backend should use a EGL
/// context for rendering.
/// glx[=true|=false] - If the backend should use a GLX
/// context for rendering.
/// surfaceless[=true|=false] - If the backend should use a
/// surfaceless context for rendering.
/// angle[=true|=false] - If the gfxstream backend should
/// use ANGLE (OpenGL on Vulkan) as its native OpenGL
/// driver.
/// vulkan[=true|=false] - If the backend should support
/// vulkan
/// wsi=vk - If the gfxstream backend should use the Vulkan
/// swapchain to draw on a window
/// cache-path=PATH - The path to the virtio-gpu device
/// shader cache.
/// cache-size=SIZE - The maximum size of the shader cache.
/// pci-bar-size=SIZE - The size for the PCI BAR in bytes
/// (default 8gb).
pub gpu_params: Option<devices::virtio::GpuParameters>,
#[cfg(all(unix, feature = "gpu", feature = "virgl_renderer_next"))]
#[argh(option, from_str_fn(parse_gpu_render_server_options))]
/// (EXPERIMENTAL) Comma separated key=value pairs for setting
/// up a render server for the virtio-gpu device
/// Possible key values:
/// path=PATH - The path to the render server executable.
/// cache-path=PATH - The path to the render server shader
/// cache.
/// cache-size=SIZE - The maximum size of the shader cache
pub gpu_render_server: Option<GpuRenderServerParameters>,
#[argh(switch)]
/// use mirror cpu topology of Host for Guest VM, also copy some cpu feature to Guest VM
pub host_cpu_topology: bool,
#[cfg(windows)]
#[argh(option, long = "host-guid", arg_name = "PATH")]
/// string representation of the host guid in registry format, for namespacing vsock connections.
pub host_guid: Option<String>,
#[cfg(unix)]
#[argh(option, arg_name = "IP")]
/// IP address to assign to host tap interface
pub host_ip: Option<net::Ipv4Addr>,
#[argh(switch)]
/// advise the kernel to use Huge Pages for guest memory mappings
pub hugepages: bool,
/// hypervisor backend
#[argh(option)]
pub hypervisor: Option<HypervisorKind>,
#[argh(option, long = "init-mem", arg_name = "N")]
/// amount of guest memory outside the balloon at boot in MiB. (default: --mem)
pub init_memory: Option<u64>,
#[argh(option, short = 'i', long = "initrd", arg_name = "PATH")]
/// initial ramdisk to load
pub initrd_path: Option<PathBuf>,
#[cfg(windows)]
#[argh(option, long = "irqchip", arg_name = "kernel|split|userspace")]
/// type of interrupt controller emulation. \"split\" is only available for x86 KVM.
pub irq_chip: Option<IrqChipKind>,
#[argh(switch)]
    /// allow the ITMT scheduling feature to be enabled in the VM. Success depends on HWP and ACPI CPPC support on the hardware
pub itmt: bool,
#[cfg(windows)]
#[argh(option, long = "kernel-log-file", arg_name = "PATH")]
/// forward hypervisor kernel driver logs for this VM to a file.
pub kernel_log_file: Option<String>,
#[cfg(unix)]
#[argh(option, long = "kvm-device", arg_name = "PATH")]
/// path to the KVM device. (default /dev/kvm)
pub kvm_device_path: Option<PathBuf>,
#[cfg(unix)]
#[argh(switch)]
/// disable host swap on guest VM pages.
pub lock_guest_memory: bool,
#[cfg(windows)]
#[argh(option, long = "log-file", arg_name = "PATH")]
/// redirect logs to the supplied log file at PATH rather than stderr. For multi-process mode, use --logs-directory instead
pub log_file: Option<String>,
#[cfg(windows)]
#[argh(option, long = "logs-directory", arg_name = "PATH")]
/// path to the logs directory used for crosvm processes. Logs will be sent to stderr if unset, and stderr/stdout will be uncaptured
pub logs_directory: Option<String>,
#[cfg(unix)]
#[argh(option, arg_name = "MAC", long = "mac")]
/// MAC address for VM
pub mac_address: Option<net_util::MacAddress>,
#[argh(option, long = "mem", short = 'm', arg_name = "N")]
/// amount of guest memory in MiB. (default: 256)
pub memory: Option<u64>,
#[argh(
option,
long = "mmio-address-range",
from_str_fn(parse_mmio_address_range)
)]
/// MMIO address ranges
pub mmio_address_ranges: Option<Vec<AddressRange>>,
#[cfg(unix)]
#[argh(option, arg_name = "N")]
/// virtio net virtual queue pairs. (default: 1)
pub net_vq_pairs: Option<u16>,
#[cfg(unix)]
#[argh(option, arg_name = "NETMASK")]
/// netmask for VM subnet
pub netmask: Option<net::Ipv4Addr>,
#[argh(switch)]
/// don't use virtio-balloon device in the guest
pub no_balloon: bool,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// don't use legacy KBD devices emulation
pub no_i8042: bool,
#[argh(switch)]
/// don't create RNG device in the guest
pub no_rng: bool,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// don't use legacy RTC devices emulation
pub no_rtc: bool,
#[argh(switch)]
/// don't use SMT in the guest
pub no_smt: bool,
#[argh(switch)]
/// don't use usb devices in the guest
pub no_usb: bool,
#[argh(option, short = 'p', arg_name = "PARAMS")]
/// extra kernel or plugin command line arguments. Can be given more than once
pub params: Vec<String>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(option, long = "pci-start", arg_name = "pci_low_mmio_start")]
/// the pci mmio start address below 4G
pub pci_low_start: Option<u64>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(
option,
arg_name = "mmio_base,mmio_length",
from_str_fn(parse_memory_region)
)]
/// region for PCIe Enhanced Configuration Access Mechanism
pub pcie_ecam: Option<AddressRange>,
#[cfg(feature = "direct")]
#[argh(
option,
long = "pcie-root-port",
arg_name = "PATH[,hp_gpe=NUM]",
from_str_fn(parse_pcie_root_port_params)
)]
/// path to sysfs of host pcie root port and host pcie root port hotplug gpe number
pub pcie_rp: Vec<HostPcieRootPortParameters>,
#[argh(switch)]
    /// enable per-VM core scheduling instead of the default one (per-vCPU core scheduling) by
/// making all vCPU threads share same cookie for core scheduling.
/// This option is no-op on devices that have neither MDS nor L1TF vulnerability
pub per_vm_core_scheduling: bool,
#[argh(
option,
long = "pflash",
arg_name = "path=PATH,[block_size=SIZE]",
from_str_fn(parse_pflash_parameters)
)]
    /// comma-separated key-value pair for setting up the pflash device, which provides space to store UEFI variables.
/// block_size defaults to 4K.
/// [--pflash <path=PATH,[block_size=SIZE]>]
pub pflash_parameters: Option<PflashParameters>,
#[argh(option, arg_name = "PATH")]
/// path to empty directory to use for sandbox pivot root
pub pivot_root: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option, arg_name = "PATH")]
/// absolute path to plugin process to run under crosvm
pub plugin: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option)]
/// path to the file listing supplemental GIDs that should be mapped in plugin jail. Can be given more than once
pub plugin_gid_map_file: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option, long = "plugin-gid-map", arg_name = "GID:GID:INT")]
/// supplemental GIDs that should be mapped in plugin jail. Can be given more than once
pub plugin_gid_maps: Vec<GidMap>,
#[cfg(feature = "plugin")]
#[argh(option, arg_name = "PATH")]
/// path to the file listing paths be mounted into the plugin's root filesystem. Can be given more than once
pub plugin_mount_file: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option, long = "plugin-mount", arg_name = "PATH:PATH:BOOL")]
/// path to be mounted into the plugin's root filesystem. Can be given more than once
pub plugin_mounts: Vec<BindMount>,
#[cfg(feature = "plugin")]
#[argh(option, arg_name = "PATH")]
/// absolute path to a directory that will become root filesystem for the plugin process.
pub plugin_root: Option<PathBuf>,
#[argh(option, long = "pmem-device", arg_name = "PATH")]
/// path to a disk image
pub pmem_devices: Vec<DiskOption>,
#[argh(switch)]
/// grant this Guest VM certain privileges to manage Host resources, such as power management
pub privileged_vm: bool,
#[cfg(feature = "process-invariants")]
#[argh(option, long = "process-invariants-handle", arg_name = "PATH")]
/// shared read-only memory address for a serialized EmulatorProcessInvariants proto
pub process_invariants_data_handle: Option<u64>,
#[cfg(feature = "process-invariants")]
#[argh(option, long = "process-invariants-size", arg_name = "PATH")]
/// size of the serialized EmulatorProcessInvariants proto pointed at by process-invariants-handle
pub process_invariants_data_size: Option<usize>,
#[cfg(windows)]
#[argh(option, long = "product-channel")]
/// product channel
pub product_channel: Option<String>,
#[cfg(windows)]
#[argh(option, long = "product-name")]
/// the product name for file paths.
pub product_name: Option<String>,
#[cfg(windows)]
#[argh(option, long = "product-version")]
/// product version
pub product_version: Option<String>,
#[argh(switch)]
/// prevent host access to guest memory
pub protected_vm: bool,
#[argh(switch)]
/// (EXPERIMENTAL) prevent host access to guest memory, but don't use protected VM firmware
protected_vm_without_firmware: bool,
#[argh(option, arg_name = "path=PATH,size=SIZE", from_str_fn(parse_pstore))]
/// path to pstore buffer backend file followed by size
/// [--pstore <path=PATH,size=SIZE>]
pub pstore: Option<Pstore>,
#[cfg(windows)]
#[argh(switch)]
/// enable virtio-pvclock.
pub pvclock: bool,
// Must be `Some` iff `protected_vm == ProtectionType::UnprotectedWithFirmware`.
#[argh(option, long = "unprotected-vm-with-firmware", arg_name = "PATH")]
/// (EXPERIMENTAL/FOR DEBUGGING) Use VM firmware, but allow host access to guest memory
pub pvm_fw: Option<PathBuf>,
#[argh(
option,
arg_name = "PATH[,key=value[,key=value[,...]]",
short = 'r',
from_str_fn(numbered_disk_option)
)]
/// path to a disk image followed by optional comma-separated
/// options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
root: Option<(usize, DiskOption)>,
#[argh(option, arg_name = "CPUSET", from_str_fn(parse_cpu_set))]
/// comma-separated list of CPUs or CPU ranges to run VCPUs on. (e.g. 0,1-3,5) (default: none)
pub rt_cpus: Option<Vec<usize>>,
#[argh(option, long = "rw-pmem-device", arg_name = "PATH")]
/// path to a writable disk image
rw_pmem_devices: Vec<DiskOption>,
#[argh(
option,
long = "rwdisk",
arg_name = "PATH[,key=value[,key=value[,...]]",
from_str_fn(numbered_disk_option)
)]
/// path to a read-write disk image followed by optional
/// comma-separated options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
rwdisks: Vec<(usize, DiskOption)>,
#[argh(
option,
arg_name = "PATH[,key=value[,key=value[,...]]",
from_str_fn(numbered_disk_option)
)]
/// path to a read-write root disk image followed by optional
/// comma-separated options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
rwroot: Option<(usize, DiskOption)>,
#[argh(switch)]
/// set Low Power S0 Idle Capable Flag for guest Fixed ACPI
/// Description Table, additionally use enhanced crosvm suspend and resume
/// routines to perform full guest suspension/resumption
pub s2idle: bool,
#[cfg(unix)]
#[argh(switch)]
/// instead of seccomp filter failures being fatal, they will be logged instead
pub seccomp_log_failures: bool,
#[cfg(unix)]
#[argh(option, arg_name = "PATH")]
/// path to seccomp .policy files
pub seccomp_policy_dir: Option<PathBuf>,
#[argh(
option,
long = "serial",
arg_name = "type=TYPE,[hardware=HW,num=NUM,path=PATH,input=PATH,console,earlycon,stdin]",
from_str_fn(parse_serial_options)
)]
/// comma separated key=value pairs for setting up serial
/// devices. Can be given more than once.
/// Possible key values:
/// type=(stdout,syslog,sink,file) - Where to route the
/// serial device
/// hardware=(serial,virtio-console) - Which type of serial
/// hardware to emulate. Defaults to 8250 UART (serial).
/// num=(1,2,3,4) - Serial Device Number. If not provided,
/// num will default to 1.
/// path=PATH - The path to the file to write to when
/// type=file
/// input=PATH - The path to the file to read from when not
/// stdin
/// console - Use this serial device as the guest console.
/// Can only be given once. Will default to first
/// serial port if not provided.
/// earlycon - Use this serial device as the early console.
/// Can only be given once.
/// stdin - Direct standard input to this serial device.
/// Can only be given once. Will default to first serial
/// port if not provided.
pub serial_parameters: Vec<SerialParameters>,
#[cfg(feature = "kiwi")]
#[argh(option, long = "service-pipe-name", arg_name = "PIPE_NAME")]
    /// the service ipc pipe name. (Prefix \\\\.\\pipe\\ not needed.)
pub service_pipe_name: Option<String>,
#[cfg(unix)]
#[argh(
option,
long = "shared-dir",
arg_name = "PATH:TAG[:type=TYPE:writeback=BOOL:timeout=SECONDS:uidmap=UIDMAP:gidmap=GIDMAP:cache=CACHE:dax=BOOL,posix_acl=BOOL]"
)]
/// colon-separated options for configuring a directory to be
/// shared with the VM. The first field is the directory to be
/// shared and the second field is the tag that the VM can use
/// to identify the device. The remaining fields are key=value
/// pairs that may appear in any order.
/// Valid keys are:
/// type=(p9, fs) - Indicates whether the directory should
/// be shared via virtio-9p or virtio-fs (default: p9).
/// uidmap=UIDMAP - The uid map to use for the device's
/// jail in the format "inner outer
/// count[,inner outer count]"
/// (default: 0 <current euid> 1).
/// gidmap=GIDMAP - The gid map to use for the device's
/// jail in the format "inner outer
/// count[,inner outer count]"
/// (default: 0 <current egid> 1).
/// cache=(never, auto, always) - Indicates whether the VM
/// can cache the contents of the shared directory
/// (default: auto). When set to "auto" and the type
/// is "fs", the VM will use close-to-open consistency
/// for file contents.
/// timeout=SECONDS - How long the VM should consider file
/// attributes and directory entries to be valid
/// (default: 5). If the VM has exclusive access to the
/// directory, then this should be a large value. If
/// the directory can be modified by other processes,
/// then this should be 0.
/// writeback=BOOL - Enables writeback caching
/// (default: false). This is only safe to do when the
/// VM has exclusive access to the files in a directory.
/// Additionally, the server should have read
/// permission for all files as the VM may issue read
/// requests even for files that are opened write-only.
/// dax=BOOL - Enables DAX support. Enabling DAX can
/// improve performance for frequently accessed files
/// by mapping regions of the file directly into the
/// VM's memory. There is a cost of slightly increased
/// latency the first time the file is accessed. Since
/// the mapping is shared directly from the host kernel's
/// file cache, enabling DAX can improve performance even
/// when the guest cache policy is "Never". The default
/// value for this option is "false".
/// posix_acl=BOOL - Indicates whether the shared directory
/// supports POSIX ACLs. This should only be enabled
/// when the underlying file system supports POSIX ACLs.
/// The default value for this option is "true".
pub shared_dirs: Vec<SharedDir>,
#[cfg(feature = "slirp-ring-capture")]
#[argh(option, long = "slirp-capture-file", arg_name = "PATH")]
/// Redirects slirp network packets to the supplied log file rather than the current directory as `slirp_capture_packets.pcap`
pub slirp_capture_file: Option<String>,
#[argh(option, short = 's', long = "socket", arg_name = "PATH")]
/// path to put the control socket. If PATH is a directory, a name will be generated
pub socket_path: Option<PathBuf>,
#[cfg(feature = "tpm")]
#[argh(switch)]
/// enable a software emulated trusted platform module device
pub software_tpm: bool,
#[cfg(feature = "audio")]
#[argh(option, arg_name = "PATH")]
/// path to the VioS server socket for setting up virtio-snd devices
pub sound: Option<PathBuf>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// (EXPERIMENTAL) enable split-irqchip support
pub split_irqchip: bool,
#[argh(switch)]
/// don't allow guest to use pages from the balloon
pub strict_balloon: bool,
#[argh(
option,
long = "stub-pci-device",
arg_name = "DOMAIN:BUS:DEVICE.FUNCTION[,vendor=NUM][,device=NUM][,class=NUM][,subsystem_vendor=NUM][,subsystem_device=NUM][,revision=NUM]",
from_str_fn(parse_stub_pci_parameters)
)]
/// comma-separated key=value pairs for setting up a stub PCI
/// device that just enumerates. The first option in the list
/// must specify a PCI address to claim.
/// Optional further parameters
/// vendor=NUM - PCI vendor ID
/// device=NUM - PCI device ID
/// class=NUM - PCI class (including class code, subclass,
/// and programming interface)
/// subsystem_vendor=NUM - PCI subsystem vendor ID
/// subsystem_device=NUM - PCI subsystem device ID
/// revision=NUM - revision
pub stub_pci_devices: Vec<StubPciParameters>,
#[argh(option, arg_name = "N")]
/// (EXPERIMENTAL) Size of virtio swiotlb buffer in MiB (default: 64 if `--protected-vm` or `--protected-vm-without-firmware` is present)
pub swiotlb: Option<u64>,
#[argh(option, arg_name = "TAG")]
/// when logging to syslog, use the provided tag
pub syslog_tag: Option<String>,
#[cfg(unix)]
#[argh(option)]
/// file descriptor for configured tap device. A different virtual network card will be added each time this argument is given
pub tap_fd: Vec<RawDescriptor>,
#[cfg(unix)]
#[argh(option)]
/// name of a configured persistent TAP interface to use for networking. A different virtual network card will be added each time this argument is given
pub tap_name: Vec<String>,
#[cfg(target_os = "android")]
#[argh(option, arg_name = "NAME[,...]")]
/// comma-separated names of the task profiles to apply to all threads in crosvm including the vCPU threads
pub task_profiles: Vec<String>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(
option,
arg_name = "INDEX,type=TYPE,action=ACTION,[from=FROM],[filter=FILTER]",
from_str_fn(parse_userspace_msr_options)
)]
/// userspace MSR handling. Takes INDEX of the MSR and how they
/// are handled.
/// type=(r|w|rw|wr) - read/write permission control.
/// action=(pass|emu) - if the control of msr is effective
/// on host.
/// from=(cpu0) - source of msr value. if not set, the
/// source is running CPU.
/// filter=(yes|no) - if the msr is filtered in KVM.
pub userspace_msr: Vec<(u32, MsrConfig)>,
#[argh(
option,
long = "cpu-affinity",
arg_name = "CPUSET",
from_str_fn(parse_cpu_affinity)
)]
/// comma-separated list of CPUs or CPU ranges to run VCPUs on (e.g. 0,1-3,5)
/// or colon-separated list of assignments of guest to host CPU assignments (e.g. 0=0:1=1:2=2) (default: no mask)
pub vcpu_affinity: Option<VcpuAffinity>,
#[argh(option, arg_name = "PATH")]
/// move all vCPU threads to this CGroup (default: nothing moves)
pub vcpu_cgroup_path: Option<PathBuf>,
#[argh(option, long = "cpus", short = 'c')]
/// number of VCPUs. (default: 1)
pub vcpu_count: Option<usize>,
#[cfg(unix)]
#[argh(
option,
arg_name = "PATH[,guest-address=auto|<BUS:DEVICE.FUNCTION>][,iommu=on|off]",
from_str_fn(parse_vfio)
)]
/// path to sysfs of PCI pass through or mdev device.
/// guest-address=auto|<BUS:DEVICE.FUNCTION> - PCI address
/// that the device will be assigned in the guest
/// (default: auto). When set to "auto", the device will
/// be assigned an address that mirrors its address in
/// the host.
/// iommu=on|off - indicates whether to enable virtio IOMMU
/// for this device
pub vfio: Vec<VfioCommand>,
#[cfg(unix)]
#[argh(option, arg_name = "PATH", from_str_fn(parse_vfio_platform))]
/// path to sysfs of platform pass through
pub vfio_platform: Vec<VfioCommand>,
#[argh(switch)]
/// use vhost for networking
pub vhost_net: bool,
#[cfg(unix)]
#[argh(option, long = "vhost-net-device", arg_name = "PATH")]
/// path to the vhost-net device. (default /dev/vhost-net)
pub vhost_net_device_path: Option<PathBuf>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user block
pub vhost_user_blk: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user console
pub vhost_user_console: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH:TAG")]
/// path to a socket path for vhost-user fs, and tag for the shared dir
pub vhost_user_fs: Vec<VhostUserFsOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// paths to a vhost-user socket for gpu
pub vhost_user_gpu: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user mac80211_hwsim
pub vhost_user_mac80211_hwsim: Option<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user net
pub vhost_user_net: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user snd
pub vhost_user_snd: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user vsock
pub vhost_user_vsock: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a vhost-user socket for wayland
pub vhost_user_wl: Option<VhostUserWlOption>,
#[cfg(unix)]
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user vsock
pub vhost_vsock_device: Option<PathBuf>,
#[cfg(unix)]
#[argh(option, arg_name = "FD")]
/// open FD to the vhost-vsock device, mutually exclusive with vhost-vsock-device
pub vhost_vsock_fd: Option<RawDescriptor>,
#[cfg(feature = "video-decoder")]
#[argh(
option,
long = "video-decoder",
arg_name = "[backend]",
from_str_fn(parse_video_options)
)]
/// (EXPERIMENTAL) enable virtio-video decoder device
/// Possible backend values: libvda, ffmpeg, vaapi
pub video_dec: Option<VideoBackendType>,
#[cfg(feature = "video-encoder")]
#[argh(
option,
long = "video-encoder",
arg_name = "[backend]",
from_str_fn(parse_video_options)
)]
/// (EXPERIMENTAL) enable virtio-video encoder device
/// Possible backend values: libvda
pub video_enc: Option<VideoBackendType>,
#[argh(option, long = "evdev", arg_name = "PATH")]
/// path to an event device node. The device will be grabbed (unusable from the host) and made available to the guest with the same configuration it shows on the host
pub virtio_input_evdevs: Vec<PathBuf>,
#[argh(option, long = "keyboard", arg_name = "PATH")]
/// path to a socket from where to read keyboard input events and write status updates to
pub virtio_keyboard: Vec<PathBuf>,
#[argh(option, long = "mouse", arg_name = "PATH")]
/// path to a socket from where to read mouse input events and write status updates to
pub virtio_mice: Vec<PathBuf>,
#[argh(option, long = "multi-touch", arg_name = "PATH:WIDTH:HEIGHT")]
/// path to a socket from where to read multi touch input events (such as those from a touchscreen) and write status updates to, optionally followed by width and height (defaults to 800x1280)
pub virtio_multi_touch: Vec<TouchDeviceOption>,
#[argh(option, long = "single-touch", arg_name = "PATH:WIDTH:HEIGHT")]
/// path to a socket from where to read single touch input events (such as those from a touchscreen) and write status updates to, optionally followed by width and height (defaults to 800x1280)
pub virtio_single_touch: Vec<TouchDeviceOption>,
#[cfg(feature = "audio")]
#[argh(
option,
arg_name = "[capture=true,backend=BACKEND,num_output_devices=1,
num_input_devices=1,num_output_streams=1,num_input_streams=1]",
long = "virtio-snd"
)]
/// comma separated key=value pairs for setting up virtio snd
/// devices.
/// Possible key values:
/// capture=(false,true) - Disable/enable audio capture.
/// Default is false.
/// backend=(null,[cras]) - Which backend to use for
/// virtio-snd.
/// client_type=(crosvm,arcvm,borealis) - Set specific
/// client type for cras backend. Default is crosvm.
/// socket_type=(legacy,unified) Set specific socket type
/// for cras backend. Default is unified.
/// num_output_devices=INT - Set number of output PCM
/// devices.
/// num_input_devices=INT - Set number of input PCM devices.
/// num_output_streams=INT - Set number of output PCM
/// streams per device.
/// num_input_streams=INT - Set number of input PCM streams
/// per device.
pub virtio_snds: Vec<SndParameters>,
#[argh(option, long = "switches", arg_name = "PATH")]
/// path to a socket from where to read switch input events and write status updates to
pub virtio_switches: Vec<PathBuf>,
#[argh(option, long = "trackpad", arg_name = "PATH:WIDTH:HEIGHT")]
/// path to a socket from where to read trackpad input events and write status updates to, optionally followed by screen width and height (defaults to 800x1280)
pub virtio_trackpad: Vec<TouchDeviceOption>,
#[cfg(all(feature = "tpm", feature = "chromeos", target_arch = "x86_64"))]
#[argh(switch)]
/// enable the virtio-tpm connection to vtpm daemon
pub vtpm_proxy: bool,
#[argh(
option,
arg_name = "SOCKET_PATH[,addr=DOMAIN:BUS:DEVICE.FUNCTION,uuid=UUID]"
)]
/// socket path for the Virtio Vhost User proxy device.
/// Parameters
/// addr=BUS:DEVICE.FUNCTION - PCI address that the proxy
/// device will be allocated
/// (default: automatically allocated)
/// uuid=UUID - UUID which will be stored in VVU PCI config
/// space that is readable from guest userspace
pub vvu_proxy: Vec<VvuOption>,
#[cfg(unix)]
#[argh(
option,
long = "wayland-sock",
arg_name = "PATH[,name=NAME]",
from_str_fn(parse_wayland_sock)
)]
/// path to the Wayland socket to use. The unnamed one is used for displaying virtual screens. Named ones are only for IPC
pub wayland_socket_paths: Vec<(String, PathBuf)>,
#[argh(option, arg_name = "DISPLAY")]
/// X11 display name to use
pub x_display: Option<String>,
}
impl TryFrom<RunCommand> for super::config::Config {
type Error = String;
fn try_from(cmd: RunCommand) -> Result<Self, Self::Error> {
let mut cfg = Self::default();
// TODO: we need to factor out some(?) of the checks into config::validate_config
// Process arguments
if let Some(p) = cmd.executable_path {
cfg.executable_path = Some(Executable::Kernel(p));
}
#[cfg(unix)]
if let Some(p) = cmd.kvm_device_path {
cfg.kvm_device_path = p;
}
#[cfg(unix)]
if let Some(p) = cmd.vhost_net_device_path {
if !p.exists() {
return Err(format!("vhost-net-device path {:?} does not exist", p));
}
cfg.vhost_net_device_path = p;
}
if let Some(p) = cmd.android_fstab {
if !p.exists() {
return Err(format!("android-fstab path {:?} does not exist", p));
}
cfg.android_fstab = Some(p);
}
cfg.params.extend(cmd.params);
cfg.per_vm_core_scheduling = cmd.per_vm_core_scheduling;
cfg.vcpu_count = cmd.vcpu_count;
cfg.vcpu_affinity = cmd.vcpu_affinity;
cfg.cpu_clusters = cmd.cpu_clusters;
if let Some(capacity) = cmd.cpu_capacity {
cfg.cpu_capacity = capacity;
}
cfg.vcpu_cgroup_path = cmd.vcpu_cgroup_path;
cfg.no_smt = cmd.no_smt;
if let Some(rt_cpus) = cmd.rt_cpus {
cfg.rt_cpus = rt_cpus;
}
cfg.delay_rt = cmd.delay_rt;
cfg.memory = cmd.memory;
#[cfg(target_arch = "aarch64")]
{
cfg.swiotlb = cmd.swiotlb;
}
cfg.hugepages = cmd.hugepages;
cfg.hypervisor = cmd.hypervisor;
#[cfg(unix)]
{
cfg.lock_guest_memory = cmd.lock_guest_memory;
}
#[cfg(feature = "audio")]
{
cfg.ac97_parameters = cmd.ac97;
cfg.sound = cmd.sound;
}
cfg.vhost_user_snd = cmd.vhost_user_snd;
for serial_params in cmd.serial_parameters {
super::sys::config::check_serial_params(&serial_params)?;
let num = serial_params.num;
let key = (serial_params.hardware, num);
if cfg.serial_parameters.contains_key(&key) {
return Err(format!(
"serial hardware {} num {}",
serial_params.hardware, num,
));
}
if serial_params.console {
for params in cfg.serial_parameters.values() {
if params.console {
return Err(format!(
"{} device {} already set as console",
params.hardware, params.num,
));
}
}
}
if serial_params.earlycon {
// Only SerialHardware::Serial supports earlycon= currently.
match serial_params.hardware {
SerialHardware::Serial => {}
_ => {
return Err(super::config::invalid_value_err(
serial_params.hardware.to_string(),
String::from("earlycon not supported for hardware"),
));
}
}
for params in cfg.serial_parameters.values() {
if params.earlycon {
return Err(format!(
"{} device {} already set as earlycon",
params.hardware, params.num,
));
}
}
}
if serial_params.stdin {
if let Some(previous_stdin) = cfg.serial_parameters.values().find(|sp| sp.stdin) {
return Err(format!(
"{} device {} already connected to standard input",
previous_stdin.hardware, previous_stdin.num,
));
}
}
cfg.serial_parameters.insert(key, serial_params);
}
if cmd.root.is_some() && cmd.rwroot.is_some() {
return Err("Only one of [root,rwroot] has to be specified".to_string());
}
let root_disk = if let Some((read_only, (index, mut disk_option))) = cmd
.root
.map(|d| (true, d))
.or(cmd.rwroot.map(|d| (false, d)))
{
if index >= 26 {
return Err("ran out of letters for to assign to root disk".to_string());
}
disk_option.read_only = read_only;
cfg.params.push(format!(
"root=/dev/vd{} {}",
char::from(b'a' + index as u8),
if read_only { "ro" } else { "rw" }
));
Some((index, disk_option))
} else {
None
};
let mut disks = root_disk
.into_iter()
.chain(cmd.disks.into_iter().map(|(i, mut d)| {
d.read_only = true;
(i, d)
}))
.chain(cmd.rwdisks.into_iter().map(|(i, mut d)| {
d.read_only = false;
(i, d)
}))
.collect::<Vec<_>>();
disks.sort_by_key(|(i, _)| *i);
cfg.disks = disks.into_iter().map(|(_, d)| d).collect();
for (mut pmem, read_only) in cmd
.pmem_devices
.into_iter()
.map(|p| (p, true))
.chain(cmd.rw_pmem_devices.into_iter().map(|p| (p, false)))
{
pmem.read_only = read_only;
cfg.pmem_devices.push(pmem);
}
#[cfg(windows)]
{
#[cfg(feature = "crash-report")]
{
cfg.crash_pipe_name = cmd.crash_pipe_name;
}
cfg.product_name = cmd.product_name;
cfg.exit_stats = cmd.exit_stats;
cfg.host_guid = cmd.host_guid;
cfg.irq_chip = cmd.irq_chip;
cfg.kernel_log_file = cmd.kernel_log_file;
cfg.log_file = cmd.log_file;
cfg.logs_directory = cmd.logs_directory;
#[cfg(feature = "process-invariants")]
{
cfg.process_invariants_data_handle = cmd.process_invariants_data_handle;
cfg.process_invariants_data_size = cmd.process_invariants_data_size;
}
cfg.pvclock = cmd.pvclock;
#[cfg(feature = "kiwi")]
{
cfg.service_pipe_name = cmd.service_pipe_name;
}
#[cfg(feature = "slirp-ring-capture")]
{
cfg.slirp_capture_file = cmd.slirp_capture_file;
}
cfg.syslog_tag = cmd.syslog_tag;
cfg.product_channel = cmd.product_channel;
cfg.product_version = cmd.product_version;
}
cfg.pstore = cmd.pstore;
#[cfg(unix)]
for (name, params) in cmd.wayland_socket_paths {
if cfg.wayland_socket_paths.contains_key(&name) {
return Err(format!("wayland socket name already used: '{}'", name));
}
cfg.wayland_socket_paths.insert(name, params);
}
cfg.x_display = cmd.x_display;
cfg.display_window_keyboard = cmd.display_window_keyboard;
cfg.display_window_mouse = cmd.display_window_mouse;
if let Some(mut socket_path) = cmd.socket_path {
if socket_path.is_dir() {
socket_path.push(format!("crosvm-{}.sock", getpid()));
}
cfg.socket_path = Some(socket_path);
}
cfg.balloon_control = cmd.balloon_control;
cfg.cid = cmd.cid;
#[cfg(feature = "plugin")]
{
use std::fs::File;
use std::io::BufRead;
use std::io::BufReader;
if let Some(p) = cmd.plugin {
if cfg.executable_path.is_some() {
return Err(format!(
"A VM executable was already specified: {:?}",
cfg.executable_path
));
}
cfg.executable_path = Some(Executable::Plugin(p));
}
cfg.plugin_root = cmd.plugin_root;
cfg.plugin_mounts = cmd.plugin_mounts;
if let Some(path) = cmd.plugin_mount_file {
let file = File::open(path)
.map_err(|_| String::from("unable to open `plugin-mount-file` file"))?;
let reader = BufReader::new(file);
for l in reader.lines() {
let line = l.unwrap();
let trimmed_line = line.split_once('#').map_or(&*line, |x| x.0).trim();
if !trimmed_line.is_empty() {
let mount = parse_plugin_mount_option(trimmed_line)?;
cfg.plugin_mounts.push(mount);
}
}
}
cfg.plugin_gid_maps = cmd.plugin_gid_maps;
if let Some(path) = cmd.plugin_gid_map_file {
let file = File::open(path)
.map_err(|_| String::from("unable to open `plugin-gid-map-file` file"))?;
let reader = BufReader::new(file);
for l in reader.lines() {
let line = l.unwrap();
let trimmed_line = line.split_once('#').map_or(&*line, |x| x.0).trim();
if !trimmed_line.is_empty() {
let map = trimmed_line.parse()?;
cfg.plugin_gid_maps.push(map);
}
}
}
}
cfg.vhost_net = cmd.vhost_net;
#[cfg(feature = "tpm")]
{
cfg.software_tpm = cmd.software_tpm;
}
#[cfg(all(feature = "tpm", feature = "chromeos", target_arch = "x86_64"))]
{
cfg.vtpm_proxy = cmd.vtpm_proxy;
}
cfg.virtio_single_touch = cmd.virtio_single_touch;
cfg.virtio_multi_touch = cmd.virtio_multi_touch;
cfg.virtio_trackpad = cmd.virtio_trackpad;
cfg.virtio_mice = cmd.virtio_mice;
cfg.virtio_keyboard = cmd.virtio_keyboard;
cfg.virtio_switches = cmd.virtio_switches;
cfg.virtio_input_evdevs = cmd.virtio_input_evdevs;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
cfg.split_irqchip = cmd.split_irqchip;
}
cfg.initrd_path = cmd.initrd_path;
if cmd.disable_sandbox {
cfg.jail_config = None;
}
if let Some(p) = cmd.bios {
if cfg.executable_path.is_some() {
return Err(format!(
"A VM executable was already specified: {:?}",
cfg.executable_path
));
}
cfg.executable_path = Some(Executable::Bios(p));
}
cfg.pflash_parameters = cmd.pflash_parameters;
#[cfg(feature = "video-decoder")]
{
cfg.video_dec = cmd.video_dec;
}
#[cfg(feature = "video-encoder")]
{
cfg.video_enc = cmd.video_enc;
}
cfg.acpi_tables = cmd.acpi_tables;
cfg.usb = !cmd.no_usb;
cfg.rng = !cmd.no_rng;
cfg.balloon = !cmd.no_balloon;
#[cfg(feature = "audio")]
{
cfg.virtio_snds = cmd.virtio_snds;
}
#[cfg(feature = "audio_cras")]
{
// cmd.cras_snds is the old parameter for virtio snd with cras backend.
cfg.virtio_snds
.extend(cmd.cras_snds.into_iter().map(|s| SndParameters {
backend: devices::virtio::parameters::StreamSourceBackend::Sys(
devices::virtio::snd::sys::StreamSourceBackend::CRAS,
),
..s
}));
}
#[cfg(feature = "gpu")]
{
cfg.gpu_parameters = cmd.gpu_params;
}
#[cfg(unix)]
{
if cmd.vhost_vsock_device.is_some() && cmd.vhost_vsock_fd.is_some() {
return Err(
"Only one of vhost-vsock-device vhost-vsock-fd has to be specified".to_string(),
);
}
cfg.vhost_vsock_device = cmd.vhost_vsock_device;
if let Some(fd) = cmd.vhost_vsock_fd {
cfg.vhost_vsock_device = Some(PathBuf::from(format!("/proc/self/fd/{}", fd)));
}
cfg.shared_dirs = cmd.shared_dirs;
cfg.host_ip = cmd.host_ip;
cfg.netmask = cmd.netmask;
cfg.mac_address = cmd.mac_address;
cfg.tap_name = cmd.tap_name;
cfg.tap_fd = cmd.tap_fd;
cfg.coiommu_param = cmd.coiommu;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
{
cfg.gpu_render_server_parameters = cmd.gpu_render_server;
}
if let Some(d) = cmd.seccomp_policy_dir {
cfg.jail_config
.get_or_insert_with(Default::default)
.seccomp_policy_dir = Some(d);
}
if cmd.seccomp_log_failures {
cfg.jail_config
.get_or_insert_with(Default::default)
.seccomp_log_failures = true;
}
if let Some(p) = cmd.pivot_root {
cfg.jail_config
.get_or_insert_with(Default::default)
.pivot_root = p;
}
#[cfg(feature = "gpu")]
{
if !cmd.gpu_display.is_empty() {
cfg.gpu_parameters
.get_or_insert_with(Default::default)
.display_params
.extend(cmd.gpu_display);
}
}
cfg.net_vq_pairs = cmd.net_vq_pairs;
}
if cmd.protected_vm && cmd.protected_vm_without_firmware && cmd.pvm_fw.is_some() {
return Err("Only one protection mode has to be specified".to_string());
}
if cmd.protected_vm {
cfg.protected_vm = ProtectionType::Protected;
// Balloon and USB devices only work for unprotected VMs.
cfg.balloon = false;
cfg.usb = false;
// Protected VMs can't trust the RNG device, so don't provide it.
cfg.rng = false;
} else if cmd.protected_vm_without_firmware {
cfg.protected_vm = ProtectionType::ProtectedWithoutFirmware;
// Balloon and USB devices only work for unprotected VMs.
cfg.balloon = false;
cfg.usb = false;
// Protected VMs can't trust the RNG device, so don't provide it.
cfg.rng = false;
} else if let Some(p) = cmd.pvm_fw {
if !p.exists() || !p.is_file() {
return Err(
"unprotected-vm-with-firmware path should be an existing file".to_string(),
);
}
cfg.protected_vm = ProtectionType::UnprotectedWithFirmware;
// Balloon and USB devices only work for unprotected VMs.
cfg.balloon = false;
cfg.usb = false;
// Protected VMs can't trust the RNG device, so don't provide it.
cfg.rng = false;
cfg.pvm_fw = Some(p);
}
cfg.battery_type = cmd.battery;
#[cfg(all(target_arch = "x86_64", feature = "gdb"))]
{
cfg.gdb = cmd.gdb;
}
#[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
{
cfg.host_cpu_topology = cmd.host_cpu_topology;
cfg.force_s2idle = cmd.s2idle;
cfg.pcie_ecam = cmd.pcie_ecam;
cfg.pci_low_start = cmd.pci_low_start;
cfg.no_i8042 = cmd.no_i8042;
cfg.no_rtc = cmd.no_rtc;
for (index, msr_config) in cmd.userspace_msr {
if cfg.userspace_msr.insert(index, msr_config).is_some() {
return Err(String::from("msr must be unique"));
}
}
}
// cfg.balloon_bias is in bytes.
if let Some(b) = cmd.balloon_bias {
cfg.balloon_bias = b * 1024 * 1024;
}
cfg.vhost_user_blk = cmd.vhost_user_blk;
cfg.vhost_user_console = cmd.vhost_user_console;
cfg.vhost_user_gpu = cmd.vhost_user_gpu;
cfg.vhost_user_mac80211_hwsim = cmd.vhost_user_mac80211_hwsim;
cfg.vhost_user_net = cmd.vhost_user_net;
cfg.vhost_user_vsock = cmd.vhost_user_vsock;
cfg.vhost_user_wl = cmd.vhost_user_wl;
#[cfg(feature = "direct")]
{
cfg.direct_pmio = cmd.direct_pmio;
cfg.direct_mmio = cmd.direct_mmio;
cfg.direct_level_irq = cmd.direct_level_irq;
cfg.direct_edge_irq = cmd.direct_edge_irq;
cfg.direct_gpe = cmd.direct_gpe;
cfg.pcie_rp = cmd.pcie_rp;
cfg.mmio_address_ranges = cmd.mmio_address_ranges.unwrap_or_default();
}
cfg.disable_virtio_intx = cmd.disable_virtio_intx;
cfg.dmi_path = cmd.dmi_path;
cfg.itmt = cmd.itmt;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
if cmd.enable_pnp_data && cmd.force_calibrated_tsc_leaf {
return Err(
"Only one of [enable_pnp_data,force_calibrated_tsc_leaf] can be specified"
.to_string(),
);
}
cfg.enable_pnp_data = cmd.enable_pnp_data;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
cfg.force_calibrated_tsc_leaf = cmd.force_calibrated_tsc_leaf;
}
cfg.privileged_vm = cmd.privileged_vm;
cfg.stub_pci_devices = cmd.stub_pci_devices;
cfg.vvu_proxy = cmd.vvu_proxy;
cfg.file_backed_mappings = cmd.file_backed_mappings;
cfg.init_memory = cmd.init_memory;
cfg.strict_balloon = cmd.strict_balloon;
#[cfg(target_os = "android")]
{
cfg.task_profiles = cmd.task_profiles;
}
#[cfg(unix)]
{
cfg.vfio.extend(cmd.vfio);
cfg.vfio.extend(cmd.vfio_platform);
}
// Now do validation of constructed config
super::config::validate_config(&mut cfg)?;
Ok(cfg)
}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
cfg_if::cfg_if! {
if #[cfg(unix)] {
use std::net;
use base::RawDescriptor;
#[cfg(feature = "gpu")]
use devices::virtio::GpuDisplayParameters;
use devices::virtio::vhost::user::device::parse_wayland_sock;
use super::sys::config::{
VfioCommand, parse_vfio, parse_vfio_platform,
};
use super::config::SharedDir;
} else if #[cfg(windows)] {
use crate::crosvm::sys::config::IrqChipKind;
}
}
use std::collections::BTreeMap;
use std::path::PathBuf;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use arch::MsrConfig;
use arch::Pstore;
use arch::VcpuAffinity;
use argh::FromArgs;
use base::getpid;
use devices::virtio::block::block::DiskOption;
#[cfg(any(feature = "video-decoder", feature = "video-encoder"))]
use devices::virtio::device_constants::video::VideoDeviceConfig;
#[cfg(feature = "audio")]
use devices::virtio::snd::parameters::Parameters as SndParameters;
use devices::virtio::vhost::user::device;
#[cfg(feature = "audio")]
use devices::Ac97Parameters;
use devices::PflashParameters;
use devices::SerialHardware;
use devices::SerialParameters;
use devices::StubPciParameters;
use hypervisor::ProtectionType;
use resources::AddressRange;
#[cfg(feature = "gpu")]
use super::sys::config::parse_gpu_options;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
use super::sys::config::parse_gpu_render_server_options;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
use super::sys::GpuRenderServerParameters;
use crate::crosvm::config::numbered_disk_option;
#[cfg(feature = "audio")]
use crate::crosvm::config::parse_ac97_options;
use crate::crosvm::config::parse_bus_id_addr;
use crate::crosvm::config::parse_cpu_affinity;
use crate::crosvm::config::parse_cpu_capacity;
use crate::crosvm::config::parse_cpu_set;
#[cfg(feature = "direct")]
use crate::crosvm::config::parse_direct_io_options;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::crosvm::config::parse_memory_region;
use crate::crosvm::config::parse_mmio_address_range;
#[cfg(feature = "direct")]
use crate::crosvm::config::parse_pcie_root_port_params;
use crate::crosvm::config::parse_pflash_parameters;
#[cfg(feature = "plugin")]
use crate::crosvm::config::parse_plugin_mount_option;
use crate::crosvm::config::parse_pstore;
use crate::crosvm::config::parse_serial_options;
use crate::crosvm::config::parse_stub_pci_parameters;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::crosvm::config::parse_userspace_msr_options;
use crate::crosvm::config::BatteryConfig;
#[cfg(feature = "plugin")]
use crate::crosvm::config::BindMount;
#[cfg(feature = "direct")]
use crate::crosvm::config::DirectIoOption;
use crate::crosvm::config::Executable;
use crate::crosvm::config::FileBackedMappingParameters;
#[cfg(feature = "plugin")]
use crate::crosvm::config::GidMap;
#[cfg(feature = "direct")]
use crate::crosvm::config::HostPcieRootPortParameters;
use crate::crosvm::config::HypervisorKind;
use crate::crosvm::config::TouchDeviceOption;
use crate::crosvm::config::VhostUserFsOption;
use crate::crosvm::config::VhostUserOption;
use crate::crosvm::config::VhostUserWlOption;
use crate::crosvm::config::VvuOption;
// Top-level crosvm command-line arguments, parsed with `argh`.
// NOTE: the `///` doc comments in argh-derived types double as the
// user-visible `--help` text, so they are part of runtime behavior.
#[derive(FromArgs)]
/// crosvm
pub struct CrosvmCmdlineArgs {
    #[argh(switch)]
    /// use extended exit status
    pub extended_status: bool,
    // Defaults to "info"; the string is interpreted by the logging setup.
    #[argh(option, default = r#"String::from("info")"#)]
    /// specify log level, eg "off", "error", "debug,disk=off", etc
    pub log_level: String,
    #[argh(option, arg_name = "TAG")]
    /// when logging to syslog, use the provided tag
    pub syslog_tag: Option<String>,
    #[argh(switch)]
    /// disable output to syslog
    pub no_syslog: bool,
    // The selected subcommand (run, stop, balloon, usb, ...).
    #[argh(subcommand)]
    pub command: Command,
}
// Subcommands available on every platform. Feature-gated variants only
// exist when crosvm is built with the corresponding cargo feature.
#[allow(clippy::large_enum_variant)]
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum CrossPlatformCommands {
    #[cfg(feature = "balloon")]
    Balloon(BalloonCommand),
    #[cfg(feature = "balloon")]
    BalloonStats(BalloonStatsCommand),
    Battery(BatteryCommand),
    #[cfg(feature = "composite-disk")]
    CreateComposite(CreateCompositeCommand),
    #[cfg(feature = "qcow")]
    CreateQcow2(CreateQcow2Command),
    Device(DeviceCommand),
    Disk(DiskCommand),
    MakeRT(MakeRTCommand),
    Resume(ResumeCommand),
    Run(RunCommand),
    Stop(StopCommand),
    Suspend(SuspendCommand),
    Powerbtn(PowerbtnCommand),
    Sleepbtn(SleepCommand),
    Gpe(GpeCommand),
    Usb(UsbCommand),
    Version(VersionCommand),
    Vfio(VfioCrosvmCommand),
}
// Top-level subcommand dispatch: cross-platform commands plus the
// platform-specific (sys) set. `FlattenSubcommand` merges both enums into
// a single flat subcommand namespace for argh.
#[allow(clippy::large_enum_variant)]
#[derive(argh_helpers::FlattenSubcommand)]
pub enum Command {
    CrossPlatform(CrossPlatformCommands),
    Sys(super::sys::cmdline::Commands),
}
// Arguments for `crosvm balloon`: adjust the balloon target size of a
// running VM via its control socket.
#[derive(FromArgs)]
#[argh(subcommand, name = "balloon")]
/// Set balloon size of the crosvm instance to `SIZE` bytes
pub struct BalloonCommand {
    #[argh(positional, arg_name = "SIZE")]
    /// amount of bytes
    pub num_bytes: u64,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm balloon_stats`: query balloon statistics from a
// running VM via its control socket.
#[derive(argh::FromArgs)]
#[argh(subcommand, name = "balloon_stats")]
/// Prints virtio balloon statistics for a `VM_SOCKET`
pub struct BalloonStatsCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm battery`: set a property of the emulated battery
// device. The property/target strings are validated by the command
// handler, not here.
#[derive(FromArgs)]
#[argh(subcommand, name = "battery")]
/// Modify battery
pub struct BatteryCommand {
    #[argh(positional, arg_name = "BATTERY_TYPE")]
    /// battery type
    pub battery_type: String,
    #[argh(positional)]
    /// battery property
    /// status | present | health | capacity | aconline
    pub property: String,
    #[argh(positional)]
    /// battery property target
    /// STATUS | PRESENT | HEALTH | CAPACITY | ACONLINE
    pub target: String,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm create_composite` (only with the "composite-disk"
// feature): build a composite disk image from labeled partition files.
#[cfg(feature = "composite-disk")]
#[derive(FromArgs)]
#[argh(subcommand, name = "create_composite")]
/// Create a new composite disk image file
pub struct CreateCompositeCommand {
    #[argh(positional, arg_name = "PATH")]
    /// image path
    pub path: String,
    // Each entry is a "LABEL:PARTITION" pair, split by the handler.
    #[argh(positional, arg_name = "LABEL:PARTITION")]
    /// partitions
    pub partitions: Vec<String>,
}
// Arguments for `crosvm create_qcow2` (only with the "qcow" feature).
// Exactly one of `size` or `backing_file` must be given; that exclusivity
// is enforced by the command handler, not by argh.
#[cfg(feature = "qcow")]
#[derive(FromArgs)]
#[argh(subcommand, name = "create_qcow2")]
/// Create Qcow2 image given path and size
pub struct CreateQcow2Command {
    #[argh(positional, arg_name = "PATH")]
    /// path to the new qcow2 file to create
    pub file_path: String,
    #[argh(positional, arg_name = "SIZE")]
    /// desired size of the image in bytes; required if not using --backing-file
    pub size: Option<u64>,
    #[argh(option)]
    /// path to backing file; if specified, the image will be the same size as the backing file, and
    /// SIZE may not be specified
    pub backing_file: Option<String>,
}
// Subcommands of `crosvm disk`; currently only `resize`.
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum DiskSubcommand {
    Resize(ResizeDiskSubcommand),
}
// Arguments for `crosvm disk resize`: grow/shrink an attached disk of a
// running VM, addressed by its index on the `run` command line.
#[derive(FromArgs)]
/// resize disk
#[argh(subcommand, name = "resize")]
pub struct ResizeDiskSubcommand {
    #[argh(positional, arg_name = "DISK_INDEX")]
    /// disk index
    pub disk_index: usize,
    #[argh(positional, arg_name = "NEW_SIZE")]
    /// new disk size
    pub disk_size: u64,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Wrapper for the `crosvm disk` subcommand group.
#[derive(FromArgs)]
#[argh(subcommand, name = "disk")]
/// Manage attached virtual disk devices
pub struct DiskCommand {
    #[argh(subcommand)]
    pub command: DiskSubcommand,
}
// Arguments for `crosvm make_rt`: promote vcpus of a VM started with
// `--delay-rt` to real-time priority.
#[derive(FromArgs)]
#[argh(subcommand, name = "make_rt")]
/// Enables real-time vcpu priority for crosvm instances started with `--delay-rt`
pub struct MakeRTCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm resume`: resume a suspended VM via its socket.
#[derive(FromArgs)]
#[argh(subcommand, name = "resume")]
/// Resumes the crosvm instance
pub struct ResumeCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm stop`: shut down a VM via its control socket.
#[derive(FromArgs)]
#[argh(subcommand, name = "stop")]
/// Stops crosvm instances via their control sockets
pub struct StopCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm suspend`: suspend a VM via its control socket.
#[derive(FromArgs)]
#[argh(subcommand, name = "suspend")]
/// Suspends the crosvm instance
pub struct SuspendCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm powerbtn`: inject a power-button press.
#[derive(FromArgs)]
#[argh(subcommand, name = "powerbtn")]
/// Triggers a power button event in the crosvm instance
pub struct PowerbtnCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm sleepbtn`: inject a sleep-button press.
#[derive(FromArgs)]
#[argh(subcommand, name = "sleepbtn")]
/// Triggers a sleep button event in the crosvm instance
pub struct SleepCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm gpe`: inject an ACPI general-purpose event by
// number into a running VM.
#[derive(FromArgs)]
#[argh(subcommand, name = "gpe")]
/// Injects a general-purpose event into the crosvm instance
pub struct GpeCommand {
    #[argh(positional)]
    /// GPE #
    pub gpe: u32,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Wrapper for the `crosvm usb` subcommand group (attach/detach/list).
#[derive(FromArgs)]
#[argh(subcommand, name = "usb")]
/// Manage attached virtual USB devices.
pub struct UsbCommand {
    #[argh(subcommand)]
    pub command: UsbSubCommand,
}
// `crosvm version` takes no arguments; the empty struct only exists to
// give argh a subcommand to dispatch on.
#[derive(FromArgs)]
#[argh(subcommand, name = "version")]
/// Show package version.
pub struct VersionCommand {}
// Arguments for `crosvm vfio add`: hotplug a host VFIO PCI device,
// identified by its sysfs path, into a running VM.
#[derive(FromArgs)]
#[argh(subcommand, name = "add")]
/// ADD
pub struct VfioAddSubCommand {
    #[argh(positional)]
    /// path to host's vfio sysfs
    pub vfio_path: PathBuf,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm vfio remove`: hot-remove a previously attached
// host VFIO PCI device from a running VM.
#[derive(FromArgs)]
#[argh(subcommand, name = "remove")]
/// REMOVE
pub struct VfioRemoveSubCommand {
    #[argh(positional)]
    /// path to host's vfio sysfs
    pub vfio_path: PathBuf,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Subcommands of `crosvm vfio`.
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum VfioSubCommand {
    Add(VfioAddSubCommand),
    Remove(VfioRemoveSubCommand),
}
// Wrapper for the `crosvm vfio` subcommand group.
#[derive(FromArgs)]
#[argh(subcommand, name = "vfio")]
/// add/remove host vfio pci device into guest
pub struct VfioCrosvmCommand {
    #[argh(subcommand)]
    pub command: VfioSubCommand,
}
// Wrapper for `crosvm device`: spawn a standalone vhost-user device
// process (block, net, ...).
#[derive(FromArgs)]
#[argh(subcommand, name = "device")]
/// Start a device process
pub struct DeviceCommand {
    #[argh(subcommand)]
    pub command: DeviceSubcommand,
}
// Device subcommands available on all platforms; `Net` is unix-only.
#[derive(FromArgs)]
#[argh(subcommand)]
/// Cross-platform Devices
pub enum CrossPlatformDevicesCommands {
    Block(device::BlockOptions),
    #[cfg(unix)]
    Net(device::NetOptions),
}
// Merges cross-platform and platform-specific device subcommands into one
// flat namespace, mirroring the top-level `Command` enum.
#[derive(argh_helpers::FlattenSubcommand)]
pub enum DeviceSubcommand {
    CrossPlatform(CrossPlatformDevicesCommands),
    Sys(super::sys::cmdline::DeviceSubcommand),
}
// Subcommands of `crosvm usb`.
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum UsbSubCommand {
    Attach(UsbAttachCommand),
    Detach(UsbDetachCommand),
    List(UsbListCommand),
}
// Arguments for `crosvm usb attach`: attach a host USB device to a
// running VM.
#[derive(FromArgs)]
/// Attach usb device
#[argh(subcommand, name = "attach")]
pub struct UsbAttachCommand {
    // Parsed by `parse_bus_id_addr` from a "BUS_ID:ADDR:BUS_NUM:DEV_NUM"
    // string into (bus_id, addr, vid, pid)-style numeric fields.
    #[argh(
        positional,
        arg_name = "BUS_ID:ADDR:BUS_NUM:DEV_NUM",
        from_str_fn(parse_bus_id_addr)
    )]
    pub addr: (u8, u8, u16, u16),
    #[argh(positional)]
    /// usb device path
    pub dev_path: String,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm usb detach`: detach the device on the given
// virtual USB port from a running VM.
#[derive(FromArgs)]
/// Detach usb device
#[argh(subcommand, name = "detach")]
pub struct UsbDetachCommand {
    #[argh(positional, arg_name = "PORT")]
    /// usb port
    pub port: u8,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Arguments for `crosvm usb list`: list the USB devices currently
// attached to a running VM.
//
// Fix: the subcommand's help text previously read "Detach usb device", a
// copy/paste error from `UsbDetachCommand` — the doc comment is what argh
// prints for `crosvm usb list --help`.
#[derive(FromArgs)]
/// List currently attached usb devices
#[argh(subcommand, name = "list")]
pub struct UsbListCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
/// Start a new crosvm instance
#[remain::sorted]
#[argh_helpers::pad_description_for_argh]
#[derive(FromArgs)]
#[argh(subcommand, name = "run")]
pub struct RunCommand {
#[cfg(feature = "audio")]
#[argh(
option,
from_str_fn(parse_ac97_options),
arg_name = "[backend=BACKEND,capture=true,capture_effect=EFFECT,client_type=TYPE,shm-fd=FD,client-fd=FD,server-fd=FD]"
)]
/// comma separated key=value pairs for setting up Ac97 devices.
/// Can be given more than once.
/// Possible key values:
/// backend=(null, cras) - Where to route the audio
/// device. If not provided, backend will default to
/// null. `null` for /dev/null, cras for CRAS server.
/// capture - Enable audio capture
/// capture_effects - | separated effects to be enabled for
/// recording. The only supported effect value now is
/// EchoCancellation or aec.
/// client_type - Set specific client type for cras backend.
/// socket_type - Set specific socket type for cras backend.
pub ac97: Vec<Ac97Parameters>,
#[argh(option, long = "acpi-table", arg_name = "PATH")]
/// path to user provided ACPI table
pub acpi_tables: Vec<PathBuf>,
#[argh(option)]
/// path to Android fstab
pub android_fstab: Option<PathBuf>,
#[argh(option, arg_name = "N", long = "balloon-bias-mib")]
/// amount to bias balance of memory between host and guest as the balloon inflates, in mib.
pub balloon_bias: Option<i64>,
#[argh(option, arg_name = "PATH")]
/// path for balloon controller socket.
pub balloon_control: Option<PathBuf>,
#[argh(switch)]
/// enable page reporting in balloon.
pub balloon_page_reporting: bool,
#[argh(option)]
/// comma separated key=value pairs for setting up battery
/// device
/// Possible key values:
/// type=goldfish - type of battery emulation, defaults to
/// goldfish
pub battery: Option<BatteryConfig>,
#[argh(option)]
/// path to BIOS/firmware ROM
pub bios: Option<PathBuf>,
#[argh(option, arg_name = "CID")]
/// context ID for virtual sockets.
pub cid: Option<u64>,
#[cfg(unix)]
#[argh(
option,
arg_name = "unpin_policy=POLICY,unpin_interval=NUM,unpin_limit=NUM,unpin_gen_threshold=NUM"
)]
/// comma separated key=value pairs for setting up coiommu
/// devices.
/// Possible key values:
/// unpin_policy=lru - LRU unpin policy.
/// unpin_interval=NUM - Unpin interval time in seconds.
/// unpin_limit=NUM - Unpin limit for each unpin cycle, in
/// unit of page count. 0 is invalid.
/// unpin_gen_threshold=NUM - Number of unpin intervals a
/// pinned page must be busy for to be aged into the
/// older which is less frequently checked generation.
pub coiommu: Option<devices::CoIommuParameters>,
#[argh(
option,
arg_name = "CPU=CAP[,CPU=CAP[,...]]",
from_str_fn(parse_cpu_capacity)
)]
/// set the relative capacity of the given CPU (default: no capacity)
pub cpu_capacity: Option<BTreeMap<usize, u32>>, // CPU index -> capacity
#[argh(
option,
long = "cpu-cluster",
arg_name = "CPUSET",
from_str_fn(parse_cpu_set)
)]
/// group the given CPUs into a cluster (default: no clusters)
pub cpu_clusters: Vec<Vec<usize>>,
#[cfg(feature = "crash-report")]
#[argh(option, long = "crash-pipe-name", arg_name = "\\\\.\\pipe\\PIPE_NAME")]
/// the crash handler ipc pipe name.
pub crash_pipe_name: Option<String>,
#[argh(switch)]
/// don't set VCPUs real-time until make-rt command is run
pub delay_rt: bool,
#[cfg(feature = "direct")]
#[argh(option, arg_name = "irq")]
/// enable interrupt passthrough
pub direct_edge_irq: Vec<u32>,
#[cfg(feature = "direct")]
#[argh(
option,
long = "direct-fixed-event",
arg_name = "event=gbllock|powerbtn|sleepbtn|rtc"
)]
/// enable ACPI fixed event interrupt and register access passthrough
pub direct_fixed_evts: Vec<devices::ACPIPMFixedEvent>,
#[cfg(feature = "direct")]
#[argh(option, arg_name = "gpe")]
/// enable GPE interrupt and register access passthrough
pub direct_gpe: Vec<u32>,
#[cfg(feature = "direct")]
#[argh(option, arg_name = "irq")]
/// enable interrupt passthrough
pub direct_level_irq: Vec<u32>,
#[cfg(feature = "direct")]
#[argh(
option,
arg_name = "PATH@RANGE[,RANGE[,...]]",
from_str_fn(parse_direct_io_options)
)]
/// path and ranges for direct memory mapped I/O access. RANGE may be decimal or hex (starting with 0x)
pub direct_mmio: Option<DirectIoOption>,
#[cfg(feature = "direct")]
#[argh(
option,
arg_name = "PATH@RANGE[,RANGE[,...]]",
from_str_fn(parse_direct_io_options)
)]
/// path and ranges for direct port mapped I/O access. RANGE may be decimal or hex (starting with 0x)
pub direct_pmio: Option<DirectIoOption>,
#[argh(switch)]
/// run all devices in one, non-sandboxed process
pub disable_sandbox: bool,
#[argh(switch)]
/// disable INTx in virtio devices
pub disable_virtio_intx: bool,
#[argh(
option,
short = 'd',
long = "disk",
arg_name = "PATH[,key=value[,key=value[,...]]]",
from_str_fn(numbered_disk_option)
)]
/// path to a disk image followed by optional comma-separated
/// options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache"
pub disks: Vec<(usize, DiskOption)>,
#[argh(switch)]
/// capture keyboard input from the display window
pub display_window_keyboard: bool,
#[argh(switch)]
/// capture mouse input from the display window
pub display_window_mouse: bool,
#[argh(option, long = "dmi", arg_name = "DIR")]
/// directory with smbios_entry_point/DMI files
pub dmi_path: Option<PathBuf>,
#[argh(switch)]
/// expose HWP feature to the guest
pub enable_hwp: bool,
#[argh(switch)]
/// expose Power and Performance (PnP) data to guest and guest can show these PnP data
pub enable_pnp_data: bool,
#[argh(positional, arg_name = "KERNEL")]
/// bzImage of kernel to run
pub executable_path: Option<PathBuf>,
#[cfg(windows)]
#[argh(switch, long = "exit-stats")]
/// gather and display statistics on Vm Exits and Bus Reads/Writes.
pub exit_stats: bool,
#[argh(
option,
long = "file-backed-mapping",
arg_name = "addr=NUM,size=SIZE,path=PATH[,offset=NUM][,rw][,sync]"
)]
/// map the given file into guest memory at the specified
/// address.
/// Parameters (addr, size, path are required):
/// addr=NUM - guest physical address to map at
/// size=NUM - amount of memory to map
/// path=PATH - path to backing file/device to map
/// offset=NUM - offset in backing file (default 0)
/// rw - make the mapping writable (default readonly)
/// sync - open backing file with O_SYNC
/// align - whether to adjust addr and size to page
/// boundaries implicitly
pub file_backed_mappings: Vec<FileBackedMappingParameters>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// force use of a calibrated TSC cpuid leaf (0x15) even if the hypervisor
/// doesn't require one.
pub force_calibrated_tsc_leaf: bool,
#[cfg(feature = "gdb")]
#[argh(option, arg_name = "PORT")]
/// (EXPERIMENTAL) gdb on the given port
pub gdb: Option<u32>,
#[cfg(feature = "gpu")]
#[argh(option)]
/// (EXPERIMENTAL) Comma separated key=value pairs for setting
/// up a display on the virtio-gpu device
/// Possible key values:
/// mode=(borderless_full_screen|windowed) - Whether to show the window on the host in full
/// screen or windowed mode. If not specified, windowed mode is used by default.
/// width=INT - The width of the virtual display connected to the virtio-gpu. Can't be set
/// with the borderless_full_screen display mode.
/// height=INT - The height of the virtual display connected to the virtio-gpu. Can't be set
/// with the borderless_full_screen display mode.
/// hidden[=true|=false] - If the display window is initially hidden.
/// refresh_rate=INT - Force a specific vsync generation rate in hertz on the guest.
#[cfg(unix)]
pub gpu_display: Vec<GpuDisplayParameters>,
#[cfg(feature = "gpu")]
#[argh(option, long = "gpu", from_str_fn(parse_gpu_options))]
/// (EXPERIMENTAL) Comma separated key=value pairs for setting
/// up a virtio-gpu device
/// Possible key values:
/// backend=(2d|virglrenderer|gfxstream) - Which backend to
/// use for virtio-gpu (determining rendering protocol)
/// context-types=LIST - The list of supported context
/// types, separated by ':' (default: no contexts enabled)
/// width=INT - The width of the virtual display connected
/// to the virtio-gpu.
/// height=INT - The height of the virtual display
/// connected to the virtio-gpu.
/// egl[=true|=false] - If the backend should use a EGL
/// context for rendering.
/// glx[=true|=false] - If the backend should use a GLX
/// context for rendering.
/// surfaceless[=true|=false] - If the backend should use a
/// surfaceless context for rendering.
/// angle[=true|=false] - If the gfxstream backend should
/// use ANGLE (OpenGL on Vulkan) as its native OpenGL
/// driver.
/// vulkan[=true|=false] - If the backend should support
/// vulkan
/// wsi=vk - If the gfxstream backend should use the Vulkan
/// swapchain to draw on a window
/// cache-path=PATH - The path to the virtio-gpu device
/// shader cache.
/// cache-size=SIZE - The maximum size of the shader cache.
/// pci-bar-size=SIZE - The size for the PCI BAR in bytes
/// (default 8gb).
pub gpu_params: Option<devices::virtio::GpuParameters>,
#[cfg(all(unix, feature = "gpu", feature = "virgl_renderer_next"))]
#[argh(option, from_str_fn(parse_gpu_render_server_options))]
/// (EXPERIMENTAL) Comma separated key=value pairs for setting
/// up a render server for the virtio-gpu device
/// Possible key values:
/// path=PATH - The path to the render server executable.
/// cache-path=PATH - The path to the render server shader
/// cache.
/// cache-size=SIZE - The maximum size of the shader cache
pub gpu_render_server: Option<GpuRenderServerParameters>,
#[argh(switch)]
/// use mirror cpu topology of Host for Guest VM, also copy some cpu feature to Guest VM
pub host_cpu_topology: bool,
#[cfg(windows)]
#[argh(option, long = "host-guid", arg_name = "PATH")]
/// string representation of the host guid in registry format, for namespacing vsock connections.
pub host_guid: Option<String>,
#[cfg(unix)]
#[argh(option, arg_name = "IP")]
/// IP address to assign to host tap interface
pub host_ip: Option<net::Ipv4Addr>,
#[argh(switch)]
/// advise the kernel to use Huge Pages for guest memory mappings
pub hugepages: bool,
/// hypervisor backend
#[argh(option)]
pub hypervisor: Option<HypervisorKind>,
#[argh(option, long = "init-mem", arg_name = "N")]
/// amount of guest memory outside the balloon at boot in MiB. (default: --mem)
pub init_memory: Option<u64>,
#[argh(option, short = 'i', long = "initrd", arg_name = "PATH")]
/// initial ramdisk to load
pub initrd_path: Option<PathBuf>,
    #[cfg(windows)]
    #[argh(option, long = "irqchip", arg_name = "kernel|split|userspace")]
    /// type of interrupt controller emulation. "split" is only available for x86 KVM.
    pub irq_chip: Option<IrqChipKind>,
    #[argh(switch)]
    /// allow enabling the ITMT scheduling feature in the VM. Successfully enabling it depends on HWP and ACPI CPPC support on the hardware
    pub itmt: bool,
#[cfg(windows)]
#[argh(option, long = "kernel-log-file", arg_name = "PATH")]
/// forward hypervisor kernel driver logs for this VM to a file.
pub kernel_log_file: Option<String>,
#[cfg(unix)]
#[argh(option, long = "kvm-device", arg_name = "PATH")]
/// path to the KVM device. (default /dev/kvm)
pub kvm_device_path: Option<PathBuf>,
#[cfg(unix)]
#[argh(switch)]
/// disable host swap on guest VM pages.
pub lock_guest_memory: bool,
#[cfg(windows)]
#[argh(option, long = "log-file", arg_name = "PATH")]
/// redirect logs to the supplied log file at PATH rather than stderr. For multi-process mode, use --logs-directory instead
pub log_file: Option<String>,
#[cfg(windows)]
#[argh(option, long = "logs-directory", arg_name = "PATH")]
/// path to the logs directory used for crosvm processes. Logs will be sent to stderr if unset, and stderr/stdout will be uncaptured
pub logs_directory: Option<String>,
#[cfg(unix)]
#[argh(option, arg_name = "MAC", long = "mac")]
/// MAC address for VM
pub mac_address: Option<net_util::MacAddress>,
#[argh(option, long = "mem", short = 'm', arg_name = "N")]
/// amount of guest memory in MiB. (default: 256)
pub memory: Option<u64>,
#[argh(
option,
long = "mmio-address-range",
from_str_fn(parse_mmio_address_range)
)]
/// MMIO address ranges
pub mmio_address_ranges: Option<Vec<AddressRange>>,
#[cfg(target_arch = "aarch64")]
#[argh(switch)]
/// enable the Memory Tagging Extension in the guest
pub mte: bool,
#[cfg(unix)]
#[argh(option, arg_name = "N")]
/// virtio net virtual queue pairs. (default: 1)
pub net_vq_pairs: Option<u16>,
#[cfg(unix)]
#[argh(option, arg_name = "NETMASK")]
/// netmask for VM subnet
pub netmask: Option<net::Ipv4Addr>,
#[argh(switch)]
/// don't use virtio-balloon device in the guest
pub no_balloon: bool,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// don't use legacy KBD devices emulation
pub no_i8042: bool,
#[argh(switch)]
/// don't create RNG device in the guest
pub no_rng: bool,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// don't use legacy RTC devices emulation
pub no_rtc: bool,
#[argh(switch)]
/// don't use SMT in the guest
pub no_smt: bool,
#[argh(switch)]
/// don't use usb devices in the guest
pub no_usb: bool,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(option, arg_name = "OEM_STRING")]
/// SMBIOS OEM string values to add to the DMI tables
pub oem_strings: Vec<String>,
#[argh(option, short = 'p', arg_name = "PARAMS")]
/// extra kernel or plugin command line arguments. Can be given more than once
pub params: Vec<String>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(option, long = "pci-start", arg_name = "pci_low_mmio_start")]
/// the pci mmio start address below 4G
pub pci_low_start: Option<u64>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(
option,
arg_name = "mmio_base,mmio_length",
from_str_fn(parse_memory_region)
)]
/// region for PCIe Enhanced Configuration Access Mechanism
pub pcie_ecam: Option<AddressRange>,
#[cfg(feature = "direct")]
#[argh(
option,
long = "pcie-root-port",
arg_name = "PATH[,hp_gpe=NUM]",
from_str_fn(parse_pcie_root_port_params)
)]
/// path to sysfs of host pcie root port and host pcie root port hotplug gpe number
pub pcie_rp: Vec<HostPcieRootPortParameters>,
    #[argh(switch)]
    /// enable per-VM core scheduling instead of the default one (per-vCPU core scheduling) by
    /// making all vCPU threads share the same cookie for core scheduling.
    /// This option is a no-op on devices that have neither the MDS nor the L1TF vulnerability
    pub per_vm_core_scheduling: bool,
    #[argh(
        option,
        long = "pflash",
        arg_name = "path=PATH,[block_size=SIZE]",
        from_str_fn(parse_pflash_parameters)
    )]
    /// comma-separated key=value pairs for setting up the pflash device, which provides space to store UEFI variables.
    /// block_size defaults to 4K.
    /// [--pflash <path=PATH,[block_size=SIZE]>]
    pub pflash_parameters: Option<PflashParameters>,
#[argh(option, arg_name = "PATH")]
/// path to empty directory to use for sandbox pivot root
pub pivot_root: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option, arg_name = "PATH")]
/// absolute path to plugin process to run under crosvm
pub plugin: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option)]
/// path to the file listing supplemental GIDs that should be mapped in plugin jail. Can be given more than once
pub plugin_gid_map_file: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option, long = "plugin-gid-map", arg_name = "GID:GID:INT")]
/// supplemental GIDs that should be mapped in plugin jail. Can be given more than once
pub plugin_gid_maps: Vec<GidMap>,
#[cfg(feature = "plugin")]
#[argh(option, arg_name = "PATH")]
/// path to the file listing paths be mounted into the plugin's root filesystem. Can be given more than once
pub plugin_mount_file: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option, long = "plugin-mount", arg_name = "PATH:PATH:BOOL")]
/// path to be mounted into the plugin's root filesystem. Can be given more than once
pub plugin_mounts: Vec<BindMount>,
#[cfg(feature = "plugin")]
#[argh(option, arg_name = "PATH")]
/// absolute path to a directory that will become root filesystem for the plugin process.
pub plugin_root: Option<PathBuf>,
#[argh(option, long = "pmem-device", arg_name = "PATH")]
/// path to a disk image
pub pmem_devices: Vec<DiskOption>,
#[argh(switch)]
/// grant this Guest VM certain privileges to manage Host resources, such as power management
pub privileged_vm: bool,
#[cfg(feature = "process-invariants")]
#[argh(option, long = "process-invariants-handle", arg_name = "PATH")]
/// shared read-only memory address for a serialized EmulatorProcessInvariants proto
pub process_invariants_data_handle: Option<u64>,
#[cfg(feature = "process-invariants")]
#[argh(option, long = "process-invariants-size", arg_name = "PATH")]
/// size of the serialized EmulatorProcessInvariants proto pointed at by process-invariants-handle
pub process_invariants_data_size: Option<usize>,
#[cfg(windows)]
#[argh(option, long = "product-channel")]
/// product channel
pub product_channel: Option<String>,
#[cfg(windows)]
#[argh(option, long = "product-name")]
/// the product name for file paths.
pub product_name: Option<String>,
#[cfg(windows)]
#[argh(option, long = "product-version")]
/// product version
pub product_version: Option<String>,
#[argh(switch)]
/// prevent host access to guest memory
pub protected_vm: bool,
#[argh(switch)]
/// (EXPERIMENTAL) prevent host access to guest memory, but don't use protected VM firmware
protected_vm_without_firmware: bool,
#[argh(option, arg_name = "path=PATH,size=SIZE", from_str_fn(parse_pstore))]
/// path to pstore buffer backend file followed by size
/// [--pstore <path=PATH,size=SIZE>]
pub pstore: Option<Pstore>,
#[cfg(windows)]
#[argh(switch)]
/// enable virtio-pvclock.
pub pvclock: bool,
#[argh(
option,
arg_name = "PATH[,key=value[,key=value[,...]]]",
short = 'r',
from_str_fn(numbered_disk_option)
)]
/// path to a disk image followed by optional comma-separated
/// options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
root: Option<(usize, DiskOption)>,
#[argh(option, arg_name = "CPUSET", from_str_fn(parse_cpu_set))]
/// comma-separated list of CPUs or CPU ranges to run VCPUs on. (e.g. 0,1-3,5) (default: none)
pub rt_cpus: Option<Vec<usize>>,
#[argh(option, long = "rw-pmem-device", arg_name = "PATH")]
/// path to a writable disk image
rw_pmem_devices: Vec<DiskOption>,
#[argh(
option,
long = "rwdisk",
arg_name = "PATH[,key=value[,key=value[,...]]]",
from_str_fn(numbered_disk_option)
)]
/// path to a read-write disk image followed by optional
/// comma-separated options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
rwdisks: Vec<(usize, DiskOption)>,
#[argh(
option,
arg_name = "PATH[,key=value[,key=value[,...]]]",
from_str_fn(numbered_disk_option)
)]
/// path to a read-write root disk image followed by optional
/// comma-separated options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
rwroot: Option<(usize, DiskOption)>,
#[argh(switch)]
/// set Low Power S0 Idle Capable Flag for guest Fixed ACPI
/// Description Table, additionally use enhanced crosvm suspend and resume
/// routines to perform full guest suspension/resumption
pub s2idle: bool,
#[cfg(unix)]
#[argh(switch)]
/// instead of seccomp filter failures being fatal, they will be logged instead
pub seccomp_log_failures: bool,
#[cfg(unix)]
#[argh(option, arg_name = "PATH")]
/// path to seccomp .policy files
pub seccomp_policy_dir: Option<PathBuf>,
#[argh(
option,
long = "serial",
arg_name = "type=TYPE,[hardware=HW,num=NUM,path=PATH,input=PATH,console,earlycon,stdin]",
from_str_fn(parse_serial_options)
)]
/// comma separated key=value pairs for setting up serial
/// devices. Can be given more than once.
/// Possible key values:
/// type=(stdout,syslog,sink,file) - Where to route the
/// serial device
/// hardware=(serial,virtio-console,debugcon) - Which type
/// of serial hardware to emulate. Defaults to 8250 UART
/// (serial).
/// num=(1,2,3,4) - Serial Device Number. If not provided,
/// num will default to 1.
/// debugcon_port=PORT - Port for the debugcon device to
/// listen to. Defaults to 0x402, which is what OVMF
/// expects.
/// path=PATH - The path to the file to write to when
/// type=file
/// input=PATH - The path to the file to read from when not
/// stdin
/// console - Use this serial device as the guest console.
/// Can only be given once. Will default to first
/// serial port if not provided.
/// earlycon - Use this serial device as the early console.
/// Can only be given once.
/// stdin - Direct standard input to this serial device.
/// Can only be given once. Will default to first serial
/// port if not provided.
pub serial_parameters: Vec<SerialParameters>,
    #[cfg(feature = "kiwi")]
    #[argh(option, long = "service-pipe-name", arg_name = "PIPE_NAME")]
    /// the service IPC pipe name (the \\.\pipe\ prefix is not needed)
    pub service_pipe_name: Option<String>,
#[cfg(unix)]
#[argh(
option,
long = "shared-dir",
arg_name = "PATH:TAG[:type=TYPE:writeback=BOOL:timeout=SECONDS:uidmap=UIDMAP:gidmap=GIDMAP:cache=CACHE:dax=BOOL,posix_acl=BOOL]"
)]
/// colon-separated options for configuring a directory to be
/// shared with the VM. The first field is the directory to be
/// shared and the second field is the tag that the VM can use
/// to identify the device. The remaining fields are key=value
/// pairs that may appear in any order.
/// Valid keys are:
/// type=(p9, fs) - Indicates whether the directory should
/// be shared via virtio-9p or virtio-fs (default: p9).
/// uidmap=UIDMAP - The uid map to use for the device's
/// jail in the format "inner outer
/// count[,inner outer count]"
/// (default: 0 <current euid> 1).
/// gidmap=GIDMAP - The gid map to use for the device's
/// jail in the format "inner outer
/// count[,inner outer count]"
/// (default: 0 <current egid> 1).
/// cache=(never, auto, always) - Indicates whether the VM
/// can cache the contents of the shared directory
/// (default: auto). When set to "auto" and the type
/// is "fs", the VM will use close-to-open consistency
/// for file contents.
/// timeout=SECONDS - How long the VM should consider file
/// attributes and directory entries to be valid
/// (default: 5). If the VM has exclusive access to the
/// directory, then this should be a large value. If
/// the directory can be modified by other processes,
/// then this should be 0.
/// writeback=BOOL - Enables writeback caching
/// (default: false). This is only safe to do when the
/// VM has exclusive access to the files in a directory.
/// Additionally, the server should have read
/// permission for all files as the VM may issue read
/// requests even for files that are opened write-only.
/// dax=BOOL - Enables DAX support. Enabling DAX can
/// improve performance for frequently accessed files
/// by mapping regions of the file directly into the
/// VM's memory. There is a cost of slightly increased
/// latency the first time the file is accessed. Since
/// the mapping is shared directly from the host kernel's
/// file cache, enabling DAX can improve performance even
/// when the guest cache policy is "Never". The default
/// value for this option is "false".
/// posix_acl=BOOL - Indicates whether the shared directory
/// supports POSIX ACLs. This should only be enabled
/// when the underlying file system supports POSIX ACLs.
/// The default value for this option is "true".
pub shared_dirs: Vec<SharedDir>,
#[cfg(feature = "slirp-ring-capture")]
#[argh(option, long = "slirp-capture-file", arg_name = "PATH")]
/// Redirects slirp network packets to the supplied log file rather than the current directory as `slirp_capture_packets.pcap`
pub slirp_capture_file: Option<String>,
#[argh(option, short = 's', long = "socket", arg_name = "PATH")]
/// path to put the control socket. If PATH is a directory, a name will be generated
pub socket_path: Option<PathBuf>,
#[cfg(feature = "tpm")]
#[argh(switch)]
/// enable a software emulated trusted platform module device
pub software_tpm: bool,
#[cfg(feature = "audio")]
#[argh(option, arg_name = "PATH")]
/// path to the VioS server socket for setting up virtio-snd devices
pub sound: Option<PathBuf>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// (EXPERIMENTAL) enable split-irqchip support
pub split_irqchip: bool,
#[argh(switch)]
/// don't allow guest to use pages from the balloon
pub strict_balloon: bool,
#[argh(
option,
long = "stub-pci-device",
arg_name = "DOMAIN:BUS:DEVICE.FUNCTION[,vendor=NUM][,device=NUM][,class=NUM][,subsystem_vendor=NUM][,subsystem_device=NUM][,revision=NUM]",
from_str_fn(parse_stub_pci_parameters)
)]
/// comma-separated key=value pairs for setting up a stub PCI
/// device that just enumerates. The first option in the list
/// must specify a PCI address to claim.
/// Optional further parameters
/// vendor=NUM - PCI vendor ID
/// device=NUM - PCI device ID
/// class=NUM - PCI class (including class code, subclass,
/// and programming interface)
/// subsystem_vendor=NUM - PCI subsystem vendor ID
/// subsystem_device=NUM - PCI subsystem device ID
/// revision=NUM - revision
pub stub_pci_devices: Vec<StubPciParameters>,
#[argh(option, arg_name = "N")]
/// (EXPERIMENTAL) Size of virtio swiotlb buffer in MiB (default: 64 if `--protected-vm` or `--protected-vm-without-firmware` is present)
pub swiotlb: Option<u64>,
#[argh(option, arg_name = "TAG")]
/// when logging to syslog, use the provided tag
pub syslog_tag: Option<String>,
#[cfg(unix)]
#[argh(option)]
/// file descriptor for configured tap device. A different virtual network card will be added each time this argument is given
pub tap_fd: Vec<RawDescriptor>,
#[cfg(unix)]
#[argh(option)]
/// name of a configured persistent TAP interface to use for networking. A different virtual network card will be added each time this argument is given
pub tap_name: Vec<String>,
#[cfg(target_os = "android")]
#[argh(option, arg_name = "NAME[,...]")]
/// comma-separated names of the task profiles to apply to all threads in crosvm including the vCPU threads
pub task_profiles: Vec<String>,
// Must be `Some` iff `protection_type == ProtectionType::UnprotectedWithFirmware`.
#[argh(option, long = "unprotected-vm-with-firmware", arg_name = "PATH")]
/// (EXPERIMENTAL/FOR DEBUGGING) Use VM firmware, but allow host access to guest memory
pub unprotected_vm_with_firmware: Option<PathBuf>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(
option,
arg_name = "INDEX,type=TYPE,action=ACTION,[from=FROM],[filter=FILTER]",
from_str_fn(parse_userspace_msr_options)
)]
/// userspace MSR handling. Takes INDEX of the MSR and how they
/// are handled.
/// type=(r|w|rw|wr) - read/write permission control.
/// action=(pass|emu) - if the control of msr is effective
/// on host.
/// from=(cpu0) - source of msr value. if not set, the
/// source is running CPU.
/// filter=(yes|no) - if the msr is filtered in KVM.
pub userspace_msr: Vec<(u32, MsrConfig)>,
#[argh(
option,
long = "cpu-affinity",
arg_name = "CPUSET",
from_str_fn(parse_cpu_affinity)
)]
/// comma-separated list of CPUs or CPU ranges to run VCPUs on (e.g. 0,1-3,5)
/// or colon-separated list of assignments of guest to host CPU assignments (e.g. 0=0:1=1:2=2) (default: no mask)
pub vcpu_affinity: Option<VcpuAffinity>,
#[argh(option, arg_name = "PATH")]
/// move all vCPU threads to this CGroup (default: nothing moves)
pub vcpu_cgroup_path: Option<PathBuf>,
#[argh(option, long = "cpus", short = 'c')]
/// number of VCPUs. (default: 1)
pub vcpu_count: Option<usize>,
#[cfg(unix)]
#[argh(
option,
arg_name = "PATH[,guest-address=auto|<BUS:DEVICE.FUNCTION>][,iommu=on|off]",
from_str_fn(parse_vfio)
)]
/// path to sysfs of PCI pass through or mdev device.
/// guest-address=auto|<BUS:DEVICE.FUNCTION> - PCI address
/// that the device will be assigned in the guest
/// (default: auto). When set to "auto", the device will
/// be assigned an address that mirrors its address in
/// the host.
/// iommu=on|off - indicates whether to enable virtio IOMMU
/// for this device
pub vfio: Vec<VfioCommand>,
#[cfg(unix)]
#[argh(switch)]
/// isolate all hotplugged passthrough vfio device behind virtio-iommu
pub vfio_isolate_hotplug: bool,
#[cfg(unix)]
#[argh(option, arg_name = "PATH", from_str_fn(parse_vfio_platform))]
/// path to sysfs of platform pass through
pub vfio_platform: Vec<VfioCommand>,
#[argh(switch)]
/// use vhost for networking
pub vhost_net: bool,
#[cfg(unix)]
#[argh(option, long = "vhost-net-device", arg_name = "PATH")]
/// path to the vhost-net device. (default /dev/vhost-net)
pub vhost_net_device_path: Option<PathBuf>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user block
pub vhost_user_blk: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user console
pub vhost_user_console: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH:TAG")]
/// path to a socket path for vhost-user fs, and tag for the shared dir
pub vhost_user_fs: Vec<VhostUserFsOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// paths to a vhost-user socket for gpu
pub vhost_user_gpu: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user mac80211_hwsim
pub vhost_user_mac80211_hwsim: Option<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user net
pub vhost_user_net: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user snd
pub vhost_user_snd: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user video decoder
pub vhost_user_video_decoder: Option<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user vsock
pub vhost_user_vsock: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a vhost-user socket for wayland
pub vhost_user_wl: Option<VhostUserWlOption>,
#[cfg(unix)]
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to the vhost-vsock device. (default /dev/vhost-vsock)
pub vhost_vsock_device: Option<PathBuf>,
#[cfg(unix)]
#[argh(option, arg_name = "FD")]
/// open FD to the vhost-vsock device, mutually exclusive with vhost-vsock-device
pub vhost_vsock_fd: Option<RawDescriptor>,
#[cfg(feature = "video-decoder")]
#[argh(option, long = "video-decoder", arg_name = "[backend]")]
/// (EXPERIMENTAL) enable virtio-video decoder device
/// Possible backend values: libvda, ffmpeg, vaapi
pub video_dec: Option<VideoDeviceConfig>,
#[cfg(feature = "video-encoder")]
#[argh(option, long = "video-encoder", arg_name = "[backend]")]
/// (EXPERIMENTAL) enable virtio-video encoder device
/// Possible backend values: libvda
pub video_enc: Option<VideoDeviceConfig>,
#[argh(option, long = "evdev", arg_name = "PATH")]
/// path to an event device node. The device will be grabbed (unusable from the host) and made available to the guest with the same configuration it shows on the host
pub virtio_input_evdevs: Vec<PathBuf>,
#[argh(option, long = "keyboard", arg_name = "PATH")]
/// path to a socket from where to read keyboard input events and write status updates to
pub virtio_keyboard: Vec<PathBuf>,
#[argh(option, long = "mouse", arg_name = "PATH")]
/// path to a socket from where to read mouse input events and write status updates to
pub virtio_mice: Vec<PathBuf>,
#[argh(option, long = "multi-touch", arg_name = "PATH:WIDTH:HEIGHT")]
/// path to a socket from where to read multi touch input events (such as those from a touchscreen) and write status updates to, optionally followed by width and height (defaults to 800x1280)
pub virtio_multi_touch: Vec<TouchDeviceOption>,
#[argh(option, long = "single-touch", arg_name = "PATH:WIDTH:HEIGHT")]
/// path to a socket from where to read single touch input events (such as those from a touchscreen) and write status updates to, optionally followed by width and height (defaults to 800x1280)
pub virtio_single_touch: Vec<TouchDeviceOption>,
#[cfg(feature = "audio")]
#[argh(
option,
arg_name = "[capture=true,backend=BACKEND,num_output_devices=1,
num_input_devices=1,num_output_streams=1,num_input_streams=1]",
long = "virtio-snd"
)]
/// comma separated key=value pairs for setting up virtio snd
/// devices.
/// Possible key values:
/// capture=(false,true) - Disable/enable audio capture.
/// Default is false.
/// backend=(null,[cras]) - Which backend to use for
/// virtio-snd.
/// client_type=(crosvm,arcvm,borealis) - Set specific
/// client type for cras backend. Default is crosvm.
/// socket_type=(legacy,unified) Set specific socket type
/// for cras backend. Default is unified.
/// num_output_devices=INT - Set number of output PCM
/// devices.
/// num_input_devices=INT - Set number of input PCM devices.
/// num_output_streams=INT - Set number of output PCM
/// streams per device.
/// num_input_streams=INT - Set number of input PCM streams
/// per device.
pub virtio_snds: Vec<SndParameters>,
#[argh(option, long = "switches", arg_name = "PATH")]
/// path to a socket from where to read switch input events and write status updates to
pub virtio_switches: Vec<PathBuf>,
#[argh(option, long = "trackpad", arg_name = "PATH:WIDTH:HEIGHT")]
/// path to a socket from where to read trackpad input events and write status updates to, optionally followed by screen width and height (defaults to 800x1280)
pub virtio_trackpad: Vec<TouchDeviceOption>,
#[cfg(all(feature = "tpm", feature = "chromeos", target_arch = "x86_64"))]
#[argh(switch)]
/// enable the virtio-tpm connection to vtpm daemon
pub vtpm_proxy: bool,
#[argh(
option,
arg_name = "SOCKET_PATH[,addr=DOMAIN:BUS:DEVICE.FUNCTION,uuid=UUID]"
)]
/// socket path for the Virtio Vhost User proxy device.
/// Parameters
/// addr=BUS:DEVICE.FUNCTION - PCI address that the proxy
/// device will be allocated
/// (default: automatically allocated)
/// uuid=UUID - UUID which will be stored in VVU PCI config
/// space that is readable from guest userspace
pub vvu_proxy: Vec<VvuOption>,
#[cfg(unix)]
#[argh(
option,
long = "wayland-sock",
arg_name = "PATH[,name=NAME]",
from_str_fn(parse_wayland_sock)
)]
/// path to the Wayland socket to use. The unnamed one is used for displaying virtual screens. Named ones are only for IPC
pub wayland_socket_paths: Vec<(String, PathBuf)>,
#[argh(option, arg_name = "DISPLAY")]
/// X11 display name to use
pub x_display: Option<String>,
}
impl TryFrom<RunCommand> for super::config::Config {
type Error = String;
fn try_from(cmd: RunCommand) -> Result<Self, Self::Error> {
let mut cfg = Self::default();
// TODO: we need to factor out some(?) of the checks into config::validate_config
// Process arguments
if let Some(p) = cmd.executable_path {
cfg.executable_path = Some(Executable::Kernel(p));
}
#[cfg(unix)]
if let Some(p) = cmd.kvm_device_path {
cfg.kvm_device_path = p;
}
#[cfg(unix)]
if let Some(p) = cmd.vhost_net_device_path {
if !p.exists() {
return Err(format!("vhost-net-device path {:?} does not exist", p));
}
cfg.vhost_net_device_path = p;
}
cfg.android_fstab = cmd.android_fstab;
cfg.params.extend(cmd.params);
cfg.per_vm_core_scheduling = cmd.per_vm_core_scheduling;
cfg.vcpu_count = cmd.vcpu_count;
cfg.vcpu_affinity = cmd.vcpu_affinity;
cfg.cpu_clusters = cmd.cpu_clusters;
if let Some(capacity) = cmd.cpu_capacity {
cfg.cpu_capacity = capacity;
}
cfg.vcpu_cgroup_path = cmd.vcpu_cgroup_path;
cfg.no_smt = cmd.no_smt;
if let Some(rt_cpus) = cmd.rt_cpus {
cfg.rt_cpus = rt_cpus;
}
cfg.delay_rt = cmd.delay_rt;
cfg.memory = cmd.memory;
#[cfg(target_arch = "aarch64")]
{
if cmd.mte && !(cmd.pmem_devices.is_empty() && cmd.rw_pmem_devices.is_empty()) {
return Err(
"--mte cannot be specified together with --pmem-device or --rw-pmem-device"
.to_string(),
);
}
cfg.mte = cmd.mte;
cfg.swiotlb = cmd.swiotlb;
}
cfg.hugepages = cmd.hugepages;
cfg.hypervisor = cmd.hypervisor;
#[cfg(unix)]
{
cfg.lock_guest_memory = cmd.lock_guest_memory;
}
#[cfg(feature = "audio")]
{
cfg.ac97_parameters = cmd.ac97;
cfg.sound = cmd.sound;
}
cfg.vhost_user_snd = cmd.vhost_user_snd;
for serial_params in cmd.serial_parameters {
super::sys::config::check_serial_params(&serial_params)?;
let num = serial_params.num;
let key = (serial_params.hardware, num);
if cfg.serial_parameters.contains_key(&key) {
return Err(format!(
"serial hardware {} num {}",
serial_params.hardware, num,
));
}
if serial_params.console {
for params in cfg.serial_parameters.values() {
if params.console {
return Err(format!(
"{} device {} already set as console",
params.hardware, params.num,
));
}
}
}
if serial_params.earlycon {
// Only SerialHardware::Serial supports earlycon= currently.
match serial_params.hardware {
SerialHardware::Serial => {}
_ => {
return Err(super::config::invalid_value_err(
serial_params.hardware.to_string(),
String::from("earlycon not supported for hardware"),
));
}
}
for params in cfg.serial_parameters.values() {
if params.earlycon {
return Err(format!(
"{} device {} already set as earlycon",
params.hardware, params.num,
));
}
}
}
if serial_params.stdin {
if let Some(previous_stdin) = cfg.serial_parameters.values().find(|sp| sp.stdin) {
return Err(format!(
"{} device {} already connected to standard input",
previous_stdin.hardware, previous_stdin.num,
));
}
}
cfg.serial_parameters.insert(key, serial_params);
}
if cmd.root.is_some() && cmd.rwroot.is_some() {
return Err("Only one of [root,rwroot] has to be specified".to_string());
}
let root_disk = if let Some((read_only, (index, mut disk_option))) = cmd
.root
.map(|d| (true, d))
.or(cmd.rwroot.map(|d| (false, d)))
{
if index >= 26 {
return Err("ran out of letters for to assign to root disk".to_string());
}
disk_option.read_only = read_only;
cfg.params.push(format!(
"root=/dev/vd{} {}",
char::from(b'a' + index as u8),
if read_only { "ro" } else { "rw" }
));
Some((index, disk_option))
} else {
None
};
let mut disks = root_disk
.into_iter()
.chain(cmd.disks.into_iter().map(|(i, mut d)| {
d.read_only = true;
(i, d)
}))
.chain(cmd.rwdisks.into_iter().map(|(i, mut d)| {
d.read_only = false;
(i, d)
}))
.collect::<Vec<_>>();
disks.sort_by_key(|(i, _)| *i);
cfg.disks = disks.into_iter().map(|(_, d)| d).collect();
for (mut pmem, read_only) in cmd
.pmem_devices
.into_iter()
.map(|p| (p, true))
.chain(cmd.rw_pmem_devices.into_iter().map(|p| (p, false)))
{
pmem.read_only = read_only;
cfg.pmem_devices.push(pmem);
}
#[cfg(windows)]
{
#[cfg(feature = "crash-report")]
{
cfg.crash_pipe_name = cmd.crash_pipe_name;
}
cfg.product_name = cmd.product_name;
cfg.exit_stats = cmd.exit_stats;
cfg.host_guid = cmd.host_guid;
cfg.irq_chip = cmd.irq_chip;
cfg.kernel_log_file = cmd.kernel_log_file;
cfg.log_file = cmd.log_file;
cfg.logs_directory = cmd.logs_directory;
#[cfg(feature = "process-invariants")]
{
cfg.process_invariants_data_handle = cmd.process_invariants_data_handle;
cfg.process_invariants_data_size = cmd.process_invariants_data_size;
}
cfg.pvclock = cmd.pvclock;
#[cfg(feature = "kiwi")]
{
cfg.service_pipe_name = cmd.service_pipe_name;
}
#[cfg(feature = "slirp-ring-capture")]
{
cfg.slirp_capture_file = cmd.slirp_capture_file;
}
cfg.syslog_tag = cmd.syslog_tag;
cfg.product_channel = cmd.product_channel;
cfg.product_version = cmd.product_version;
}
cfg.pstore = cmd.pstore;
#[cfg(unix)]
for (name, params) in cmd.wayland_socket_paths {
if cfg.wayland_socket_paths.contains_key(&name) {
return Err(format!("wayland socket name already used: '{}'", name));
}
cfg.wayland_socket_paths.insert(name, params);
}
cfg.x_display = cmd.x_display;
cfg.display_window_keyboard = cmd.display_window_keyboard;
cfg.display_window_mouse = cmd.display_window_mouse;
if let Some(mut socket_path) = cmd.socket_path {
if socket_path.is_dir() {
socket_path.push(format!("crosvm-{}.sock", getpid()));
}
cfg.socket_path = Some(socket_path);
}
cfg.balloon_control = cmd.balloon_control;
cfg.cid = cmd.cid;
#[cfg(feature = "plugin")]
{
use std::fs::File;
use std::io::BufRead;
use std::io::BufReader;
if let Some(p) = cmd.plugin {
if cfg.executable_path.is_some() {
return Err(format!(
"A VM executable was already specified: {:?}",
cfg.executable_path
));
}
cfg.executable_path = Some(Executable::Plugin(p));
}
cfg.plugin_root = cmd.plugin_root;
cfg.plugin_mounts = cmd.plugin_mounts;
if let Some(path) = cmd.plugin_mount_file {
let file = File::open(path)
.map_err(|_| String::from("unable to open `plugin-mount-file` file"))?;
let reader = BufReader::new(file);
for l in reader.lines() {
let line = l.unwrap();
let trimmed_line = line.split_once('#').map_or(&*line, |x| x.0).trim();
if !trimmed_line.is_empty() {
let mount = parse_plugin_mount_option(trimmed_line)?;
cfg.plugin_mounts.push(mount);
}
}
}
cfg.plugin_gid_maps = cmd.plugin_gid_maps;
if let Some(path) = cmd.plugin_gid_map_file {
let file = File::open(path)
.map_err(|_| String::from("unable to open `plugin-gid-map-file` file"))?;
let reader = BufReader::new(file);
for l in reader.lines() {
let line = l.unwrap();
let trimmed_line = line.split_once('#').map_or(&*line, |x| x.0).trim();
if !trimmed_line.is_empty() {
let map = trimmed_line.parse()?;
cfg.plugin_gid_maps.push(map);
}
}
}
}
cfg.vhost_net = cmd.vhost_net;
#[cfg(feature = "tpm")]
{
cfg.software_tpm = cmd.software_tpm;
}
#[cfg(all(feature = "tpm", feature = "chromeos", target_arch = "x86_64"))]
{
cfg.vtpm_proxy = cmd.vtpm_proxy;
}
cfg.virtio_single_touch = cmd.virtio_single_touch;
cfg.virtio_multi_touch = cmd.virtio_multi_touch;
cfg.virtio_trackpad = cmd.virtio_trackpad;
cfg.virtio_mice = cmd.virtio_mice;
cfg.virtio_keyboard = cmd.virtio_keyboard;
cfg.virtio_switches = cmd.virtio_switches;
cfg.virtio_input_evdevs = cmd.virtio_input_evdevs;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
cfg.split_irqchip = cmd.split_irqchip;
}
cfg.initrd_path = cmd.initrd_path;
if cmd.disable_sandbox {
cfg.jail_config = None;
}
if let Some(p) = cmd.bios {
if cfg.executable_path.is_some() {
return Err(format!(
"A VM executable was already specified: {:?}",
cfg.executable_path
));
}
cfg.executable_path = Some(Executable::Bios(p));
}
cfg.pflash_parameters = cmd.pflash_parameters;
#[cfg(feature = "video-decoder")]
{
cfg.video_dec = cmd.video_dec;
}
#[cfg(feature = "video-encoder")]
{
cfg.video_enc = cmd.video_enc;
}
cfg.acpi_tables = cmd.acpi_tables;
cfg.usb = !cmd.no_usb;
cfg.rng = !cmd.no_rng;
cfg.balloon = !cmd.no_balloon;
cfg.balloon_page_reporting = cmd.balloon_page_reporting;
#[cfg(feature = "audio")]
{
cfg.virtio_snds = cmd.virtio_snds;
}
#[cfg(feature = "gpu")]
{
cfg.gpu_parameters = cmd.gpu_params;
}
#[cfg(unix)]
{
if cmd.vhost_vsock_device.is_some() && cmd.vhost_vsock_fd.is_some() {
return Err(
"Only one of vhost-vsock-device vhost-vsock-fd has to be specified".to_string(),
);
}
cfg.vhost_vsock_device = cmd.vhost_vsock_device;
if let Some(fd) = cmd.vhost_vsock_fd {
cfg.vhost_vsock_device = Some(PathBuf::from(format!("/proc/self/fd/{}", fd)));
}
cfg.shared_dirs = cmd.shared_dirs;
cfg.host_ip = cmd.host_ip;
cfg.netmask = cmd.netmask;
cfg.mac_address = cmd.mac_address;
cfg.tap_name = cmd.tap_name;
cfg.tap_fd = cmd.tap_fd;
cfg.coiommu_param = cmd.coiommu;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
{
cfg.gpu_render_server_parameters = cmd.gpu_render_server;
}
if let Some(d) = cmd.seccomp_policy_dir {
cfg.jail_config
.get_or_insert_with(Default::default)
.seccomp_policy_dir = Some(d);
}
if cmd.seccomp_log_failures {
cfg.jail_config
.get_or_insert_with(Default::default)
.seccomp_log_failures = true;
}
if let Some(p) = cmd.pivot_root {
cfg.jail_config
.get_or_insert_with(Default::default)
.pivot_root = p;
}
#[cfg(feature = "gpu")]
{
if !cmd.gpu_display.is_empty() {
cfg.gpu_parameters
.get_or_insert_with(Default::default)
.display_params
.extend(cmd.gpu_display);
}
}
cfg.net_vq_pairs = cmd.net_vq_pairs;
}
let protection_flags = [
cmd.protected_vm,
cmd.protected_vm_without_firmware,
cmd.unprotected_vm_with_firmware.is_some(),
];
if protection_flags.into_iter().filter(|b| *b).count() > 1 {
return Err("Only one protection mode has to be specified".to_string());
}
cfg.protection_type = if cmd.protected_vm {
ProtectionType::Protected
} else if cmd.protected_vm_without_firmware {
ProtectionType::ProtectedWithoutFirmware
} else if let Some(p) = cmd.unprotected_vm_with_firmware {
if !p.exists() || !p.is_file() {
return Err(
"unprotected-vm-with-firmware path should be an existing file".to_string(),
);
}
cfg.pvm_fw = Some(p);
ProtectionType::UnprotectedWithFirmware
} else {
ProtectionType::Unprotected
};
if !matches!(cfg.protection_type, ProtectionType::Unprotected) {
// Balloon and USB devices only work for unprotected VMs.
cfg.balloon = false;
cfg.usb = false;
// Protected VMs can't trust the RNG device, so don't provide it.
cfg.rng = false;
}
cfg.battery_config = cmd.battery;
#[cfg(feature = "gdb")]
{
cfg.gdb = cmd.gdb;
}
#[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
{
cfg.enable_hwp = cmd.enable_hwp;
cfg.host_cpu_topology = cmd.host_cpu_topology;
cfg.force_s2idle = cmd.s2idle;
cfg.pcie_ecam = cmd.pcie_ecam;
cfg.pci_low_start = cmd.pci_low_start;
cfg.no_i8042 = cmd.no_i8042;
cfg.no_rtc = cmd.no_rtc;
cfg.oem_strings = cmd.oem_strings;
if !cfg.oem_strings.is_empty() && cfg.dmi_path.is_some() {
return Err("unable to use oem-strings and dmi-path together".to_string());
}
for (index, msr_config) in cmd.userspace_msr {
if cfg.userspace_msr.insert(index, msr_config).is_some() {
return Err(String::from("msr must be unique"));
}
}
}
// cfg.balloon_bias is in bytes.
if let Some(b) = cmd.balloon_bias {
cfg.balloon_bias = b * 1024 * 1024;
}
cfg.vhost_user_blk = cmd.vhost_user_blk;
cfg.vhost_user_console = cmd.vhost_user_console;
cfg.vhost_user_fs = cmd.vhost_user_fs;
cfg.vhost_user_gpu = cmd.vhost_user_gpu;
cfg.vhost_user_mac80211_hwsim = cmd.vhost_user_mac80211_hwsim;
cfg.vhost_user_net = cmd.vhost_user_net;
cfg.vhost_user_video_dec = cmd.vhost_user_video_decoder;
cfg.vhost_user_vsock = cmd.vhost_user_vsock;
cfg.vhost_user_wl = cmd.vhost_user_wl;
#[cfg(feature = "direct")]
{
cfg.direct_pmio = cmd.direct_pmio;
cfg.direct_mmio = cmd.direct_mmio;
cfg.direct_level_irq = cmd.direct_level_irq;
cfg.direct_edge_irq = cmd.direct_edge_irq;
cfg.direct_gpe = cmd.direct_gpe;
cfg.direct_fixed_evts = cmd.direct_fixed_evts;
cfg.pcie_rp = cmd.pcie_rp;
cfg.mmio_address_ranges = cmd.mmio_address_ranges.unwrap_or_default();
}
cfg.disable_virtio_intx = cmd.disable_virtio_intx;
cfg.dmi_path = cmd.dmi_path;
cfg.itmt = cmd.itmt;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
if cmd.enable_pnp_data && cmd.force_calibrated_tsc_leaf {
return Err(
"Only one of [enable_pnp_data,force_calibrated_tsc_leaf] can be specified"
.to_string(),
);
}
cfg.enable_pnp_data = cmd.enable_pnp_data;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
cfg.force_calibrated_tsc_leaf = cmd.force_calibrated_tsc_leaf;
}
cfg.privileged_vm = cmd.privileged_vm;
cfg.stub_pci_devices = cmd.stub_pci_devices;
cfg.vvu_proxy = cmd.vvu_proxy;
cfg.file_backed_mappings = cmd.file_backed_mappings;
cfg.init_memory = cmd.init_memory;
cfg.strict_balloon = cmd.strict_balloon;
#[cfg(target_os = "android")]
{
cfg.task_profiles = cmd.task_profiles;
}
#[cfg(unix)]
{
cfg.vfio.extend(cmd.vfio);
cfg.vfio.extend(cmd.vfio_platform);
cfg.vfio_isolate_hotplug = cmd.vfio_isolate_hotplug;
}
// Now do validation of constructed config
super::config::validate_config(&mut cfg)?;
Ok(cfg)
}
}
crosvm: handle '--disable-sandbox' after other sandboxing options
The '--disable-sandbox' argument has the effect of setting the jail
configuration to `None`, but other sandboxing options can potentially
recreate it afterwards if they are also specified.
Fix this by handling '--disable-sandbox' after all other sandboxing
options, so the jail configuration always ends up being `None` if that
option is specified.
Reported and fix proposed by Dmitrii Osipenko.
TEST=cargo run with and without --disable-sandbox
Change-Id: I57bed8a3a4fdd543c7f7a24d778ecc16a3ad0d8a
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/3893545
Reviewed-by: Dmitrii Osipenko <d14ae288b3ef7814554c8e7efda3eb3bced64e98@collabora.corp-partner.google.com>
Commit-Queue: Alexandre Courbot <301cd6a3ea50a894b4b51e385c5eb12b9a4ffc0c@chromium.org>
Reviewed-by: Daniel Verkamp <72bc170b46ec491f7bdd4359a1c0bfed274de40c@chromium.org>
Auto-Submit: Alexandre Courbot <301cd6a3ea50a894b4b51e385c5eb12b9a4ffc0c@chromium.org>
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
cfg_if::cfg_if! {
if #[cfg(unix)] {
use std::net;
use base::RawDescriptor;
#[cfg(feature = "gpu")]
use devices::virtio::GpuDisplayParameters;
use devices::virtio::vhost::user::device::parse_wayland_sock;
use super::sys::config::{
VfioCommand, parse_vfio, parse_vfio_platform,
};
use super::config::SharedDir;
} else if #[cfg(windows)] {
use crate::crosvm::sys::config::IrqChipKind;
}
}
use std::collections::BTreeMap;
use std::path::PathBuf;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use arch::MsrConfig;
use arch::Pstore;
use arch::VcpuAffinity;
use argh::FromArgs;
use base::getpid;
use devices::virtio::block::block::DiskOption;
#[cfg(any(feature = "video-decoder", feature = "video-encoder"))]
use devices::virtio::device_constants::video::VideoDeviceConfig;
#[cfg(feature = "audio")]
use devices::virtio::snd::parameters::Parameters as SndParameters;
use devices::virtio::vhost::user::device;
#[cfg(feature = "audio")]
use devices::Ac97Parameters;
use devices::PflashParameters;
use devices::SerialHardware;
use devices::SerialParameters;
use devices::StubPciParameters;
use hypervisor::ProtectionType;
use resources::AddressRange;
#[cfg(feature = "gpu")]
use super::sys::config::parse_gpu_options;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
use super::sys::config::parse_gpu_render_server_options;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
use super::sys::GpuRenderServerParameters;
use crate::crosvm::config::numbered_disk_option;
#[cfg(feature = "audio")]
use crate::crosvm::config::parse_ac97_options;
use crate::crosvm::config::parse_bus_id_addr;
use crate::crosvm::config::parse_cpu_affinity;
use crate::crosvm::config::parse_cpu_capacity;
use crate::crosvm::config::parse_cpu_set;
#[cfg(feature = "direct")]
use crate::crosvm::config::parse_direct_io_options;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::crosvm::config::parse_memory_region;
use crate::crosvm::config::parse_mmio_address_range;
#[cfg(feature = "direct")]
use crate::crosvm::config::parse_pcie_root_port_params;
use crate::crosvm::config::parse_pflash_parameters;
#[cfg(feature = "plugin")]
use crate::crosvm::config::parse_plugin_mount_option;
use crate::crosvm::config::parse_pstore;
use crate::crosvm::config::parse_serial_options;
use crate::crosvm::config::parse_stub_pci_parameters;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::crosvm::config::parse_userspace_msr_options;
use crate::crosvm::config::BatteryConfig;
#[cfg(feature = "plugin")]
use crate::crosvm::config::BindMount;
#[cfg(feature = "direct")]
use crate::crosvm::config::DirectIoOption;
use crate::crosvm::config::Executable;
use crate::crosvm::config::FileBackedMappingParameters;
#[cfg(feature = "plugin")]
use crate::crosvm::config::GidMap;
#[cfg(feature = "direct")]
use crate::crosvm::config::HostPcieRootPortParameters;
use crate::crosvm::config::HypervisorKind;
use crate::crosvm::config::TouchDeviceOption;
use crate::crosvm::config::VhostUserFsOption;
use crate::crosvm::config::VhostUserOption;
use crate::crosvm::config::VhostUserWlOption;
use crate::crosvm::config::VvuOption;
// Top-level crosvm command line, parsed by `argh`.
// NOTE: the `///` doc comments below are not just documentation — argh emits them
// verbatim as the `--help` text, so their wording is user-visible behavior.
#[derive(FromArgs)]
/// crosvm
pub struct CrosvmCmdlineArgs {
    #[argh(switch)]
    /// use extended exit status
    pub extended_status: bool,
    // Default log level is "info"; the string is parsed later (supports per-module
    // filters such as "debug,disk=off").
    #[argh(option, default = r#"String::from("info")"#)]
    /// specify log level, eg "off", "error", "debug,disk=off", etc
    pub log_level: String,
    #[argh(option, arg_name = "TAG")]
    /// when logging to syslog, use the provided tag
    pub syslog_tag: Option<String>,
    #[argh(switch)]
    /// disable output to syslog
    pub no_syslog: bool,
    // The selected subcommand (run, stop, balloon, ...).
    #[argh(subcommand)]
    pub command: Command,
}
// Subcommands available on every host OS. Several variants are compiled in only
// when the corresponding cargo feature is enabled; the clippy allow is needed
// because RunCommand dwarfs the other variants.
#[allow(clippy::large_enum_variant)]
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum CrossPlatformCommands {
    #[cfg(feature = "balloon")]
    Balloon(BalloonCommand),
    #[cfg(feature = "balloon")]
    BalloonStats(BalloonStatsCommand),
    Battery(BatteryCommand),
    #[cfg(feature = "composite-disk")]
    CreateComposite(CreateCompositeCommand),
    #[cfg(feature = "qcow")]
    CreateQcow2(CreateQcow2Command),
    Device(DeviceCommand),
    Disk(DiskCommand),
    MakeRT(MakeRTCommand),
    Resume(ResumeCommand),
    Run(RunCommand),
    Stop(StopCommand),
    Suspend(SuspendCommand),
    Powerbtn(PowerbtnCommand),
    Sleepbtn(SleepCommand),
    Gpe(GpeCommand),
    Usb(UsbCommand),
    Version(VersionCommand),
    Vfio(VfioCrosvmCommand),
}
// Full command set: cross-platform subcommands plus the per-OS ones.
// `FlattenSubcommand` merges the variants of both inner enums into a single
// flat subcommand namespace for argh.
#[allow(clippy::large_enum_variant)]
#[derive(argh_helpers::FlattenSubcommand)]
pub enum Command {
    CrossPlatform(CrossPlatformCommands),
    Sys(super::sys::cmdline::Commands),
}
// `crosvm balloon SIZE VM_SOCKET` — positional order matters for argh parsing.
#[derive(FromArgs)]
#[argh(subcommand, name = "balloon")]
/// Set balloon size of the crosvm instance to `SIZE` bytes
pub struct BalloonCommand {
    #[argh(positional, arg_name = "SIZE")]
    /// amount of bytes
    pub num_bytes: u64,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm balloon_stats VM_SOCKET` — queries balloon statistics over the
// control socket. (Uses the fully-qualified `argh::FromArgs` path, unlike its
// siblings; harmless inconsistency.)
#[derive(argh::FromArgs)]
#[argh(subcommand, name = "balloon_stats")]
/// Prints virtio balloon statistics for a `VM_SOCKET`
pub struct BalloonStatsCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm battery BATTERY_TYPE PROPERTY TARGET VM_SOCKET` — all four arguments
// are positional, so their declaration order below is the CLI contract.
#[derive(FromArgs)]
#[argh(subcommand, name = "battery")]
/// Modify battery
pub struct BatteryCommand {
    #[argh(positional, arg_name = "BATTERY_TYPE")]
    /// battery type
    pub battery_type: String,
    #[argh(positional)]
    /// battery property
    /// status | present | health | capacity | aconline
    pub property: String,
    #[argh(positional)]
    /// battery property target
    /// STATUS | PRESENT | HEALTH | CAPACITY | ACONLINE
    pub target: String,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm create_composite PATH [LABEL:PARTITION...]` — only compiled when the
// "composite-disk" feature is on (matching the cfg on its enum variant).
#[cfg(feature = "composite-disk")]
#[derive(FromArgs)]
#[argh(subcommand, name = "create_composite")]
/// Create a new composite disk image file
pub struct CreateCompositeCommand {
    #[argh(positional, arg_name = "PATH")]
    /// image path
    pub path: String,
    // Remaining positionals; each entry is a "LABEL:PARTITION" pair, split later.
    #[argh(positional, arg_name = "LABEL:PARTITION")]
    /// partitions
    pub partitions: Vec<String>,
}
// `crosvm create_qcow2 PATH [SIZE] [--backing-file PATH]` — SIZE and
// --backing-file are mutually exclusive; that rule is enforced by the command
// handler, not by argh.
#[cfg(feature = "qcow")]
#[derive(FromArgs)]
#[argh(subcommand, name = "create_qcow2")]
/// Create Qcow2 image given path and size
pub struct CreateQcow2Command {
    #[argh(positional, arg_name = "PATH")]
    /// path to the new qcow2 file to create
    pub file_path: String,
    #[argh(positional, arg_name = "SIZE")]
    /// desired size of the image in bytes; required if not using --backing-file
    pub size: Option<u64>,
    #[argh(option)]
    /// path to backing file; if specified, the image will be the same size as the backing file, and
    /// SIZE may not be specified
    pub backing_file: Option<String>,
}
// Subcommands of `crosvm disk`; currently only `resize`.
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum DiskSubcommand {
    Resize(ResizeDiskSubcommand),
}
// `crosvm disk resize DISK_INDEX NEW_SIZE VM_SOCKET` — resizes an attached
// disk on a running VM via its control socket.
#[derive(FromArgs)]
/// resize disk
#[argh(subcommand, name = "resize")]
pub struct ResizeDiskSubcommand {
    // Index into the VM's disk list (order the disks were given to `run`).
    #[argh(positional, arg_name = "DISK_INDEX")]
    /// disk index
    pub disk_index: usize,
    // New size in bytes.
    #[argh(positional, arg_name = "NEW_SIZE")]
    /// new disk size
    pub disk_size: u64,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm disk ...` — wrapper that dispatches to DiskSubcommand.
#[derive(FromArgs)]
#[argh(subcommand, name = "disk")]
/// Manage attached virtual disk devices
pub struct DiskCommand {
    #[argh(subcommand)]
    pub command: DiskSubcommand,
}
// `crosvm make_rt VM_SOCKET` — promotes vCPUs of a `--delay-rt` VM to
// real-time priority.
#[derive(FromArgs)]
#[argh(subcommand, name = "make_rt")]
/// Enables real-time vcpu priority for crosvm instances started with `--delay-rt`
pub struct MakeRTCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm resume VM_SOCKET` — resumes a previously suspended instance.
#[derive(FromArgs)]
#[argh(subcommand, name = "resume")]
/// Resumes the crosvm instance
pub struct ResumeCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm stop VM_SOCKET` — asks the instance to shut down via its control socket.
#[derive(FromArgs)]
#[argh(subcommand, name = "stop")]
/// Stops crosvm instances via their control sockets
pub struct StopCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm suspend VM_SOCKET` — suspends the instance (counterpart of `resume`).
#[derive(FromArgs)]
#[argh(subcommand, name = "suspend")]
/// Suspends the crosvm instance
pub struct SuspendCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm powerbtn VM_SOCKET` — injects a power-button press into the guest.
#[derive(FromArgs)]
#[argh(subcommand, name = "powerbtn")]
/// Triggers a power button event in the crosvm instance
pub struct PowerbtnCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm sleepbtn VM_SOCKET` — injects a sleep-button press into the guest.
// (Struct is named SleepCommand while the CLI name is "sleepbtn".)
#[derive(FromArgs)]
#[argh(subcommand, name = "sleepbtn")]
/// Triggers a sleep button event in the crosvm instance
pub struct SleepCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm gpe GPE VM_SOCKET` — injects an ACPI general-purpose event by number.
#[derive(FromArgs)]
#[argh(subcommand, name = "gpe")]
/// Injects a general-purpose event into the crosvm instance
pub struct GpeCommand {
    // ACPI GPE number to inject.
    #[argh(positional)]
    /// GPE #
    pub gpe: u32,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm usb ...` — wrapper that dispatches to attach/detach/list.
#[derive(FromArgs)]
#[argh(subcommand, name = "usb")]
/// Manage attached virtual USB devices.
pub struct UsbCommand {
    #[argh(subcommand)]
    pub command: UsbSubCommand,
}
// `crosvm version` — no arguments; prints the package version.
#[derive(FromArgs)]
#[argh(subcommand, name = "version")]
/// Show package version.
pub struct VersionCommand {}
// `crosvm vfio add VFIO_PATH VM_SOCKET` — hot-plugs a host VFIO PCI device
// into a running guest.
#[derive(FromArgs)]
#[argh(subcommand, name = "add")]
/// ADD
pub struct VfioAddSubCommand {
    #[argh(positional)]
    /// path to host's vfio sysfs
    pub vfio_path: PathBuf,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm vfio remove VFIO_PATH VM_SOCKET` — hot-unplugs a host VFIO PCI
// device from a running guest.
#[derive(FromArgs)]
#[argh(subcommand, name = "remove")]
/// REMOVE
pub struct VfioRemoveSubCommand {
    #[argh(positional)]
    /// path to host's vfio sysfs
    pub vfio_path: PathBuf,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// Subcommands of `crosvm vfio`.
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum VfioSubCommand {
    Add(VfioAddSubCommand),
    Remove(VfioRemoveSubCommand),
}
// `crosvm vfio ...` — wrapper that dispatches to add/remove.
#[derive(FromArgs)]
#[argh(subcommand, name = "vfio")]
/// add/remove host vfio pci device into guest
pub struct VfioCrosvmCommand {
    #[argh(subcommand)]
    pub command: VfioSubCommand,
}
// `crosvm device ...` — launches a standalone (vhost-user) device process.
#[derive(FromArgs)]
#[argh(subcommand, name = "device")]
/// Start a device process
pub struct DeviceCommand {
    #[argh(subcommand)]
    pub command: DeviceSubcommand,
}
// Device processes available on every platform; `net` is unix-only.
#[derive(FromArgs)]
#[argh(subcommand)]
/// Cross-platform Devices
pub enum CrossPlatformDevicesCommands {
    Block(device::BlockOptions),
    #[cfg(unix)]
    Net(device::NetOptions),
}
// All device subcommands: cross-platform ones merged with the per-OS set,
// flattened into a single namespace (same pattern as `Command`).
#[derive(argh_helpers::FlattenSubcommand)]
pub enum DeviceSubcommand {
    CrossPlatform(CrossPlatformDevicesCommands),
    Sys(super::sys::cmdline::DeviceSubcommand),
}
// Subcommands of `crosvm usb`.
#[derive(FromArgs)]
#[argh(subcommand)]
pub enum UsbSubCommand {
    Attach(UsbAttachCommand),
    Detach(UsbDetachCommand),
    List(UsbListCommand),
}
// `crosvm usb attach BUS_ID:ADDR:BUS_NUM:DEV_NUM DEV_PATH VM_SOCKET` —
// attaches a host USB device to the guest.
#[derive(FromArgs)]
/// Attach usb device
#[argh(subcommand, name = "attach")]
pub struct UsbAttachCommand {
    // Tuple fields presumably map to (bus_id, addr, bus_num, dev_num) per the
    // arg_name string — confirm against parse_bus_id_addr before relying on it.
    #[argh(
        positional,
        arg_name = "BUS_ID:ADDR:BUS_NUM:DEV_NUM",
        from_str_fn(parse_bus_id_addr)
    )]
    pub addr: (u8, u8, u16, u16),
    #[argh(positional)]
    /// usb device path
    pub dev_path: String,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm usb detach PORT VM_SOCKET` — detaches the device on the given
// virtual USB port.
#[derive(FromArgs)]
/// Detach usb device
#[argh(subcommand, name = "detach")]
pub struct UsbDetachCommand {
    #[argh(positional, arg_name = "PORT")]
    /// usb port
    pub port: u8,
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
// `crosvm usb list VM_SOCKET` — lists the USB devices attached to the VM.
// Fix: the `///` summary previously read "Detach usb device" — a copy-paste
// from UsbDetachCommand. argh renders this comment as the subcommand's --help
// description, so the old text misdescribed `crosvm usb list` to users.
#[derive(FromArgs)]
/// List attached USB devices
#[argh(subcommand, name = "list")]
pub struct UsbListCommand {
    #[argh(positional, arg_name = "VM_SOCKET")]
    /// VM Socket path
    pub socket_path: String,
}
/// Start a new crosvm instance
#[remain::sorted]
#[argh_helpers::pad_description_for_argh]
#[derive(FromArgs)]
#[argh(subcommand, name = "run")]
pub struct RunCommand {
#[cfg(feature = "audio")]
#[argh(
option,
from_str_fn(parse_ac97_options),
arg_name = "[backend=BACKEND,capture=true,capture_effect=EFFECT,client_type=TYPE,shm-fd=FD,client-fd=FD,server-fd=FD]"
)]
/// comma separated key=value pairs for setting up Ac97 devices.
/// Can be given more than once.
/// Possible key values:
/// backend=(null, cras) - Where to route the audio
/// device. If not provided, backend will default to
/// null. `null` for /dev/null, cras for CRAS server.
/// capture - Enable audio capture
/// capture_effects - | separated effects to be enabled for
/// recording. The only supported effect value now is
/// EchoCancellation or aec.
/// client_type - Set specific client type for cras backend.
/// socket_type - Set specific socket type for cras backend.
pub ac97: Vec<Ac97Parameters>,
#[argh(option, long = "acpi-table", arg_name = "PATH")]
/// path to user provided ACPI table
pub acpi_tables: Vec<PathBuf>,
#[argh(option)]
/// path to Android fstab
pub android_fstab: Option<PathBuf>,
#[argh(option, arg_name = "N", long = "balloon-bias-mib")]
/// amount to bias balance of memory between host and guest as the balloon inflates, in mib.
pub balloon_bias: Option<i64>,
#[argh(option, arg_name = "PATH")]
/// path for balloon controller socket.
pub balloon_control: Option<PathBuf>,
#[argh(switch)]
/// enable page reporting in balloon.
pub balloon_page_reporting: bool,
#[argh(option)]
/// comma separated key=value pairs for setting up battery
/// device
/// Possible key values:
/// type=goldfish - type of battery emulation, defaults to
/// goldfish
pub battery: Option<BatteryConfig>,
#[argh(option)]
/// path to BIOS/firmware ROM
pub bios: Option<PathBuf>,
#[argh(option, arg_name = "CID")]
/// context ID for virtual sockets.
pub cid: Option<u64>,
#[cfg(unix)]
#[argh(
option,
arg_name = "unpin_policy=POLICY,unpin_interval=NUM,unpin_limit=NUM,unpin_gen_threshold=NUM"
)]
/// comma separated key=value pairs for setting up coiommu
/// devices.
/// Possible key values:
/// unpin_policy=lru - LRU unpin policy.
/// unpin_interval=NUM - Unpin interval time in seconds.
/// unpin_limit=NUM - Unpin limit for each unpin cycle, in
/// unit of page count. 0 is invalid.
/// unpin_gen_threshold=NUM - Number of unpin intervals a
/// pinned page must be busy for to be aged into the
/// older which is less frequently checked generation.
pub coiommu: Option<devices::CoIommuParameters>,
#[argh(
option,
arg_name = "CPU=CAP[,CPU=CAP[,...]]",
from_str_fn(parse_cpu_capacity)
)]
/// set the relative capacity of the given CPU (default: no capacity)
pub cpu_capacity: Option<BTreeMap<usize, u32>>, // CPU index -> capacity
#[argh(
option,
long = "cpu-cluster",
arg_name = "CPUSET",
from_str_fn(parse_cpu_set)
)]
/// group the given CPUs into a cluster (default: no clusters)
pub cpu_clusters: Vec<Vec<usize>>,
#[cfg(feature = "crash-report")]
#[argh(option, long = "crash-pipe-name", arg_name = "\\\\.\\pipe\\PIPE_NAME")]
/// the crash handler ipc pipe name.
pub crash_pipe_name: Option<String>,
#[argh(switch)]
/// don't set VCPUs real-time until make-rt command is run
pub delay_rt: bool,
#[cfg(feature = "direct")]
#[argh(option, arg_name = "irq")]
/// enable interrupt passthrough
pub direct_edge_irq: Vec<u32>,
#[cfg(feature = "direct")]
#[argh(
option,
long = "direct-fixed-event",
arg_name = "event=gbllock|powerbtn|sleepbtn|rtc"
)]
/// enable ACPI fixed event interrupt and register access passthrough
pub direct_fixed_evts: Vec<devices::ACPIPMFixedEvent>,
#[cfg(feature = "direct")]
#[argh(option, arg_name = "gpe")]
/// enable GPE interrupt and register access passthrough
pub direct_gpe: Vec<u32>,
#[cfg(feature = "direct")]
#[argh(option, arg_name = "irq")]
/// enable interrupt passthrough
pub direct_level_irq: Vec<u32>,
#[cfg(feature = "direct")]
#[argh(
option,
arg_name = "PATH@RANGE[,RANGE[,...]]",
from_str_fn(parse_direct_io_options)
)]
/// path and ranges for direct memory mapped I/O access. RANGE may be decimal or hex (starting with 0x)
pub direct_mmio: Option<DirectIoOption>,
#[cfg(feature = "direct")]
#[argh(
option,
arg_name = "PATH@RANGE[,RANGE[,...]]",
from_str_fn(parse_direct_io_options)
)]
/// path and ranges for direct port mapped I/O access. RANGE may be decimal or hex (starting with 0x)
pub direct_pmio: Option<DirectIoOption>,
#[argh(switch)]
/// run all devices in one, non-sandboxed process
pub disable_sandbox: bool,
#[argh(switch)]
/// disable INTx in virtio devices
pub disable_virtio_intx: bool,
#[argh(
option,
short = 'd',
long = "disk",
arg_name = "PATH[,key=value[,key=value[,...]]]",
from_str_fn(numbered_disk_option)
)]
/// path to a disk image followed by optional comma-separated
/// options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache"
pub disks: Vec<(usize, DiskOption)>,
#[argh(switch)]
/// capture keyboard input from the display window
pub display_window_keyboard: bool,
#[argh(switch)]
/// capture keyboard input from the display window
pub display_window_mouse: bool,
#[argh(option, long = "dmi", arg_name = "DIR")]
/// directory with smbios_entry_point/DMI files
pub dmi_path: Option<PathBuf>,
#[argh(switch)]
/// expose HWP feature to the guest
pub enable_hwp: bool,
#[argh(switch)]
/// expose Power and Perfomance (PnP) data to guest and guest can show these PnP data
pub enable_pnp_data: bool,
#[argh(positional, arg_name = "KERNEL")]
/// bzImage of kernel to run
pub executable_path: Option<PathBuf>,
#[cfg(windows)]
#[argh(switch, long = "exit-stats")]
/// gather and display statistics on Vm Exits and Bus Reads/Writes.
pub exit_stats: bool,
#[argh(
option,
long = "file-backed-mapping",
arg_name = "addr=NUM,size=SIZE,path=PATH[,offset=NUM][,rw][,sync]"
)]
/// map the given file into guest memory at the specified
/// address.
/// Parameters (addr, size, path are required):
/// addr=NUM - guest physical address to map at
/// size=NUM - amount of memory to map
/// path=PATH - path to backing file/device to map
/// offset=NUM - offset in backing file (default 0)
/// rw - make the mapping writable (default readonly)
/// sync - open backing file with O_SYNC
/// align - whether to adjust addr and size to page
/// boundaries implicitly
pub file_backed_mappings: Vec<FileBackedMappingParameters>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// force use of a calibrated TSC cpuid leaf (0x15) even if the hypervisor
/// doesn't require one.
pub force_calibrated_tsc_leaf: bool,
#[cfg(feature = "gdb")]
#[argh(option, arg_name = "PORT")]
/// (EXPERIMENTAL) gdb on the given port
pub gdb: Option<u32>,
#[cfg(feature = "gpu")]
#[argh(option)]
/// (EXPERIMENTAL) Comma separated key=value pairs for setting
/// up a display on the virtio-gpu device
/// Possible key values:
/// mode=(borderless_full_screen|windowed) - Whether to show the window on the host in full
/// screen or windowed mode. If not specified, windowed mode is used by default.
/// width=INT - The width of the virtual display connected to the virtio-gpu. Can't be set
/// with the borderless_full_screen display mode.
/// height=INT - The height of the virtual display connected to the virtio-gpu. Can't be set
/// with the borderless_full_screen display mode.
/// hidden[=true|=false] - If the display window is initially hidden.
/// refresh_rate=INT - Force a specific vsync generation rate in hertz on the guest.
#[cfg(unix)]
pub gpu_display: Vec<GpuDisplayParameters>,
#[cfg(feature = "gpu")]
#[argh(option, long = "gpu", from_str_fn(parse_gpu_options))]
/// (EXPERIMENTAL) Comma separated key=value pairs for setting
/// up a virtio-gpu device
/// Possible key values:
/// backend=(2d|virglrenderer|gfxstream) - Which backend to
/// use for virtio-gpu (determining rendering protocol)
/// context-types=LIST - The list of supported context
/// types, separated by ':' (default: no contexts enabled)
/// width=INT - The width of the virtual display connected
/// to the virtio-gpu.
/// height=INT - The height of the virtual display
/// connected to the virtio-gpu.
/// egl[=true|=false] - If the backend should use a EGL
/// context for rendering.
/// glx[=true|=false] - If the backend should use a GLX
/// context for rendering.
/// surfaceless[=true|=false] - If the backend should use a
/// surfaceless context for rendering.
/// angle[=true|=false] - If the gfxstream backend should
/// use ANGLE (OpenGL on Vulkan) as its native OpenGL
/// driver.
/// vulkan[=true|=false] - If the backend should support
/// vulkan
/// wsi=vk - If the gfxstream backend should use the Vulkan
/// swapchain to draw on a window
/// cache-path=PATH - The path to the virtio-gpu device
/// shader cache.
/// cache-size=SIZE - The maximum size of the shader cache.
/// pci-bar-size=SIZE - The size for the PCI BAR in bytes
/// (default 8gb).
pub gpu_params: Option<devices::virtio::GpuParameters>,
#[cfg(all(unix, feature = "gpu", feature = "virgl_renderer_next"))]
#[argh(option, from_str_fn(parse_gpu_render_server_options))]
/// (EXPERIMENTAL) Comma separated key=value pairs for setting
/// up a render server for the virtio-gpu device
/// Possible key values:
/// path=PATH - The path to the render server executable.
/// cache-path=PATH - The path to the render server shader
/// cache.
/// cache-size=SIZE - The maximum size of the shader cache
pub gpu_render_server: Option<GpuRenderServerParameters>,
#[argh(switch)]
/// use mirror cpu topology of Host for Guest VM, also copy some cpu feature to Guest VM
pub host_cpu_topology: bool,
#[cfg(windows)]
#[argh(option, long = "host-guid", arg_name = "PATH")]
/// string representation of the host guid in registry format, for namespacing vsock connections.
pub host_guid: Option<String>,
#[cfg(unix)]
#[argh(option, arg_name = "IP")]
/// IP address to assign to host tap interface
pub host_ip: Option<net::Ipv4Addr>,
#[argh(switch)]
/// advise the kernel to use Huge Pages for guest memory mappings
pub hugepages: bool,
/// hypervisor backend
#[argh(option)]
pub hypervisor: Option<HypervisorKind>,
#[argh(option, long = "init-mem", arg_name = "N")]
/// amount of guest memory outside the balloon at boot in MiB. (default: --mem)
pub init_memory: Option<u64>,
#[argh(option, short = 'i', long = "initrd", arg_name = "PATH")]
/// initial ramdisk to load
pub initrd_path: Option<PathBuf>,
#[cfg(windows)]
#[argh(option, long = "irqchip", arg_name = "kernel|split|userspace")]
/// type of interrupt controller emulation. \"split\" is only available for x86 KVM.
pub irq_chip: Option<IrqChipKind>,
#[argh(switch)]
/// allow to enable ITMT scheduling feature in VM. The success of enabling depends on HWP and ACPI CPPC support on hardware
pub itmt: bool,
#[cfg(windows)]
#[argh(option, long = "kernel-log-file", arg_name = "PATH")]
/// forward hypervisor kernel driver logs for this VM to a file.
pub kernel_log_file: Option<String>,
#[cfg(unix)]
#[argh(option, long = "kvm-device", arg_name = "PATH")]
/// path to the KVM device. (default /dev/kvm)
pub kvm_device_path: Option<PathBuf>,
#[cfg(unix)]
#[argh(switch)]
/// disable host swap on guest VM pages.
pub lock_guest_memory: bool,
#[cfg(windows)]
#[argh(option, long = "log-file", arg_name = "PATH")]
/// redirect logs to the supplied log file at PATH rather than stderr. For multi-process mode, use --logs-directory instead
pub log_file: Option<String>,
#[cfg(windows)]
#[argh(option, long = "logs-directory", arg_name = "PATH")]
/// path to the logs directory used for crosvm processes. Logs will be sent to stderr if unset, and stderr/stdout will be uncaptured
pub logs_directory: Option<String>,
#[cfg(unix)]
#[argh(option, arg_name = "MAC", long = "mac")]
/// MAC address for VM
pub mac_address: Option<net_util::MacAddress>,
#[argh(option, long = "mem", short = 'm', arg_name = "N")]
/// amount of guest memory in MiB. (default: 256)
pub memory: Option<u64>,
#[argh(
option,
long = "mmio-address-range",
from_str_fn(parse_mmio_address_range)
)]
/// MMIO address ranges
pub mmio_address_ranges: Option<Vec<AddressRange>>,
#[cfg(target_arch = "aarch64")]
#[argh(switch)]
/// enable the Memory Tagging Extension in the guest
pub mte: bool,
#[cfg(unix)]
#[argh(option, arg_name = "N")]
/// virtio net virtual queue pairs. (default: 1)
pub net_vq_pairs: Option<u16>,
#[cfg(unix)]
#[argh(option, arg_name = "NETMASK")]
/// netmask for VM subnet
pub netmask: Option<net::Ipv4Addr>,
#[argh(switch)]
/// don't use virtio-balloon device in the guest
pub no_balloon: bool,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// don't use legacy KBD devices emulation
pub no_i8042: bool,
#[argh(switch)]
/// don't create RNG device in the guest
pub no_rng: bool,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// don't use legacy RTC devices emulation
pub no_rtc: bool,
#[argh(switch)]
/// don't use SMT in the guest
pub no_smt: bool,
#[argh(switch)]
/// don't use usb devices in the guest
pub no_usb: bool,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(option, arg_name = "OEM_STRING")]
/// SMBIOS OEM string values to add to the DMI tables
pub oem_strings: Vec<String>,
#[argh(option, short = 'p', arg_name = "PARAMS")]
/// extra kernel or plugin command line arguments. Can be given more than once
pub params: Vec<String>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(option, long = "pci-start", arg_name = "pci_low_mmio_start")]
/// the pci mmio start address below 4G
pub pci_low_start: Option<u64>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(
option,
arg_name = "mmio_base,mmio_length",
from_str_fn(parse_memory_region)
)]
/// region for PCIe Enhanced Configuration Access Mechanism
pub pcie_ecam: Option<AddressRange>,
#[cfg(feature = "direct")]
#[argh(
option,
long = "pcie-root-port",
arg_name = "PATH[,hp_gpe=NUM]",
from_str_fn(parse_pcie_root_port_params)
)]
/// path to sysfs of host pcie root port and host pcie root port hotplug gpe number
pub pcie_rp: Vec<HostPcieRootPortParameters>,
#[argh(switch)]
    /// enable per-VM core scheduling instead of the default one (per-vCPU core scheduling) by
/// making all vCPU threads share same cookie for core scheduling.
/// This option is no-op on devices that have neither MDS nor L1TF vulnerability
pub per_vm_core_scheduling: bool,
#[argh(
option,
long = "pflash",
arg_name = "path=PATH,[block_size=SIZE]",
from_str_fn(parse_pflash_parameters)
)]
    /// comma-separated key-value pair for setting up the pflash device, which provides space to store UEFI variables.
/// block_size defaults to 4K.
/// [--pflash <path=PATH,[block_size=SIZE]>]
pub pflash_parameters: Option<PflashParameters>,
#[argh(option, arg_name = "PATH")]
/// path to empty directory to use for sandbox pivot root
pub pivot_root: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option, arg_name = "PATH")]
/// absolute path to plugin process to run under crosvm
pub plugin: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option)]
/// path to the file listing supplemental GIDs that should be mapped in plugin jail. Can be given more than once
pub plugin_gid_map_file: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option, long = "plugin-gid-map", arg_name = "GID:GID:INT")]
/// supplemental GIDs that should be mapped in plugin jail. Can be given more than once
pub plugin_gid_maps: Vec<GidMap>,
#[cfg(feature = "plugin")]
#[argh(option, arg_name = "PATH")]
/// path to the file listing paths be mounted into the plugin's root filesystem. Can be given more than once
pub plugin_mount_file: Option<PathBuf>,
#[cfg(feature = "plugin")]
#[argh(option, long = "plugin-mount", arg_name = "PATH:PATH:BOOL")]
/// path to be mounted into the plugin's root filesystem. Can be given more than once
pub plugin_mounts: Vec<BindMount>,
#[cfg(feature = "plugin")]
#[argh(option, arg_name = "PATH")]
/// absolute path to a directory that will become root filesystem for the plugin process.
pub plugin_root: Option<PathBuf>,
#[argh(option, long = "pmem-device", arg_name = "PATH")]
/// path to a disk image
pub pmem_devices: Vec<DiskOption>,
#[argh(switch)]
/// grant this Guest VM certain privileges to manage Host resources, such as power management
pub privileged_vm: bool,
#[cfg(feature = "process-invariants")]
#[argh(option, long = "process-invariants-handle", arg_name = "PATH")]
/// shared read-only memory address for a serialized EmulatorProcessInvariants proto
pub process_invariants_data_handle: Option<u64>,
#[cfg(feature = "process-invariants")]
#[argh(option, long = "process-invariants-size", arg_name = "PATH")]
/// size of the serialized EmulatorProcessInvariants proto pointed at by process-invariants-handle
pub process_invariants_data_size: Option<usize>,
#[cfg(windows)]
#[argh(option, long = "product-channel")]
/// product channel
pub product_channel: Option<String>,
#[cfg(windows)]
#[argh(option, long = "product-name")]
/// the product name for file paths.
pub product_name: Option<String>,
#[cfg(windows)]
#[argh(option, long = "product-version")]
/// product version
pub product_version: Option<String>,
#[argh(switch)]
/// prevent host access to guest memory
pub protected_vm: bool,
#[argh(switch)]
/// (EXPERIMENTAL) prevent host access to guest memory, but don't use protected VM firmware
protected_vm_without_firmware: bool,
#[argh(option, arg_name = "path=PATH,size=SIZE", from_str_fn(parse_pstore))]
/// path to pstore buffer backend file followed by size
/// [--pstore <path=PATH,size=SIZE>]
pub pstore: Option<Pstore>,
#[cfg(windows)]
#[argh(switch)]
/// enable virtio-pvclock.
pub pvclock: bool,
#[argh(
option,
arg_name = "PATH[,key=value[,key=value[,...]]]",
short = 'r',
from_str_fn(numbered_disk_option)
)]
/// path to a disk image followed by optional comma-separated
/// options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
root: Option<(usize, DiskOption)>,
#[argh(option, arg_name = "CPUSET", from_str_fn(parse_cpu_set))]
/// comma-separated list of CPUs or CPU ranges to run VCPUs on. (e.g. 0,1-3,5) (default: none)
pub rt_cpus: Option<Vec<usize>>,
#[argh(option, long = "rw-pmem-device", arg_name = "PATH")]
/// path to a writable disk image
rw_pmem_devices: Vec<DiskOption>,
#[argh(
option,
long = "rwdisk",
arg_name = "PATH[,key=value[,key=value[,...]]]",
from_str_fn(numbered_disk_option)
)]
/// path to a read-write disk image followed by optional
/// comma-separated options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
rwdisks: Vec<(usize, DiskOption)>,
#[argh(
option,
arg_name = "PATH[,key=value[,key=value[,...]]]",
from_str_fn(numbered_disk_option)
)]
/// path to a read-write root disk image followed by optional
/// comma-separated options.
/// Valid keys:
/// sparse=BOOL - Indicates whether the disk should support
/// the discard operation (default: true)
/// block_size=BYTES - Set the reported block size of the
/// disk (default: 512)
/// id=STRING - Set the block device identifier to an ASCII
/// string, up to 20 characters (default: no ID)
/// o_direct=BOOL - Use O_DIRECT mode to bypass page cache
rwroot: Option<(usize, DiskOption)>,
#[argh(switch)]
/// set Low Power S0 Idle Capable Flag for guest Fixed ACPI
/// Description Table, additionally use enhanced crosvm suspend and resume
/// routines to perform full guest suspension/resumption
pub s2idle: bool,
#[cfg(unix)]
#[argh(switch)]
/// instead of seccomp filter failures being fatal, they will be logged instead
pub seccomp_log_failures: bool,
#[cfg(unix)]
#[argh(option, arg_name = "PATH")]
/// path to seccomp .policy files
pub seccomp_policy_dir: Option<PathBuf>,
#[argh(
option,
long = "serial",
arg_name = "type=TYPE,[hardware=HW,num=NUM,path=PATH,input=PATH,console,earlycon,stdin]",
from_str_fn(parse_serial_options)
)]
/// comma separated key=value pairs for setting up serial
/// devices. Can be given more than once.
/// Possible key values:
/// type=(stdout,syslog,sink,file) - Where to route the
/// serial device
/// hardware=(serial,virtio-console,debugcon) - Which type
/// of serial hardware to emulate. Defaults to 8250 UART
/// (serial).
/// num=(1,2,3,4) - Serial Device Number. If not provided,
/// num will default to 1.
/// debugcon_port=PORT - Port for the debugcon device to
/// listen to. Defaults to 0x402, which is what OVMF
/// expects.
/// path=PATH - The path to the file to write to when
/// type=file
/// input=PATH - The path to the file to read from when not
/// stdin
/// console - Use this serial device as the guest console.
/// Can only be given once. Will default to first
/// serial port if not provided.
/// earlycon - Use this serial device as the early console.
/// Can only be given once.
/// stdin - Direct standard input to this serial device.
/// Can only be given once. Will default to first serial
/// port if not provided.
pub serial_parameters: Vec<SerialParameters>,
#[cfg(feature = "kiwi")]
#[argh(option, long = "service-pipe-name", arg_name = "PIPE_NAME")]
    /// the service ipc pipe name. (Prefix \\\\.\\pipe\\ not needed.)
pub service_pipe_name: Option<String>,
#[cfg(unix)]
#[argh(
option,
long = "shared-dir",
arg_name = "PATH:TAG[:type=TYPE:writeback=BOOL:timeout=SECONDS:uidmap=UIDMAP:gidmap=GIDMAP:cache=CACHE:dax=BOOL,posix_acl=BOOL]"
)]
/// colon-separated options for configuring a directory to be
/// shared with the VM. The first field is the directory to be
/// shared and the second field is the tag that the VM can use
/// to identify the device. The remaining fields are key=value
/// pairs that may appear in any order.
/// Valid keys are:
/// type=(p9, fs) - Indicates whether the directory should
/// be shared via virtio-9p or virtio-fs (default: p9).
/// uidmap=UIDMAP - The uid map to use for the device's
/// jail in the format "inner outer
/// count[,inner outer count]"
/// (default: 0 <current euid> 1).
/// gidmap=GIDMAP - The gid map to use for the device's
/// jail in the format "inner outer
/// count[,inner outer count]"
/// (default: 0 <current egid> 1).
/// cache=(never, auto, always) - Indicates whether the VM
/// can cache the contents of the shared directory
/// (default: auto). When set to "auto" and the type
/// is "fs", the VM will use close-to-open consistency
/// for file contents.
/// timeout=SECONDS - How long the VM should consider file
/// attributes and directory entries to be valid
/// (default: 5). If the VM has exclusive access to the
/// directory, then this should be a large value. If
/// the directory can be modified by other processes,
/// then this should be 0.
/// writeback=BOOL - Enables writeback caching
/// (default: false). This is only safe to do when the
/// VM has exclusive access to the files in a directory.
/// Additionally, the server should have read
/// permission for all files as the VM may issue read
/// requests even for files that are opened write-only.
/// dax=BOOL - Enables DAX support. Enabling DAX can
/// improve performance for frequently accessed files
/// by mapping regions of the file directly into the
/// VM's memory. There is a cost of slightly increased
/// latency the first time the file is accessed. Since
/// the mapping is shared directly from the host kernel's
/// file cache, enabling DAX can improve performance even
/// when the guest cache policy is "Never". The default
/// value for this option is "false".
/// posix_acl=BOOL - Indicates whether the shared directory
/// supports POSIX ACLs. This should only be enabled
/// when the underlying file system supports POSIX ACLs.
/// The default value for this option is "true".
pub shared_dirs: Vec<SharedDir>,
#[cfg(feature = "slirp-ring-capture")]
#[argh(option, long = "slirp-capture-file", arg_name = "PATH")]
/// Redirects slirp network packets to the supplied log file rather than the current directory as `slirp_capture_packets.pcap`
pub slirp_capture_file: Option<String>,
#[argh(option, short = 's', long = "socket", arg_name = "PATH")]
/// path to put the control socket. If PATH is a directory, a name will be generated
pub socket_path: Option<PathBuf>,
#[cfg(feature = "tpm")]
#[argh(switch)]
/// enable a software emulated trusted platform module device
pub software_tpm: bool,
#[cfg(feature = "audio")]
#[argh(option, arg_name = "PATH")]
/// path to the VioS server socket for setting up virtio-snd devices
pub sound: Option<PathBuf>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(switch)]
/// (EXPERIMENTAL) enable split-irqchip support
pub split_irqchip: bool,
#[argh(switch)]
/// don't allow guest to use pages from the balloon
pub strict_balloon: bool,
#[argh(
option,
long = "stub-pci-device",
arg_name = "DOMAIN:BUS:DEVICE.FUNCTION[,vendor=NUM][,device=NUM][,class=NUM][,subsystem_vendor=NUM][,subsystem_device=NUM][,revision=NUM]",
from_str_fn(parse_stub_pci_parameters)
)]
/// comma-separated key=value pairs for setting up a stub PCI
/// device that just enumerates. The first option in the list
/// must specify a PCI address to claim.
/// Optional further parameters
/// vendor=NUM - PCI vendor ID
/// device=NUM - PCI device ID
/// class=NUM - PCI class (including class code, subclass,
/// and programming interface)
/// subsystem_vendor=NUM - PCI subsystem vendor ID
/// subsystem_device=NUM - PCI subsystem device ID
/// revision=NUM - revision
pub stub_pci_devices: Vec<StubPciParameters>,
#[argh(option, arg_name = "N")]
/// (EXPERIMENTAL) Size of virtio swiotlb buffer in MiB (default: 64 if `--protected-vm` or `--protected-vm-without-firmware` is present)
pub swiotlb: Option<u64>,
#[argh(option, arg_name = "TAG")]
/// when logging to syslog, use the provided tag
pub syslog_tag: Option<String>,
#[cfg(unix)]
#[argh(option)]
/// file descriptor for configured tap device. A different virtual network card will be added each time this argument is given
pub tap_fd: Vec<RawDescriptor>,
#[cfg(unix)]
#[argh(option)]
/// name of a configured persistent TAP interface to use for networking. A different virtual network card will be added each time this argument is given
pub tap_name: Vec<String>,
#[cfg(target_os = "android")]
#[argh(option, arg_name = "NAME[,...]")]
/// comma-separated names of the task profiles to apply to all threads in crosvm including the vCPU threads
pub task_profiles: Vec<String>,
// Must be `Some` iff `protection_type == ProtectionType::UnprotectedWithFirmware`.
#[argh(option, long = "unprotected-vm-with-firmware", arg_name = "PATH")]
/// (EXPERIMENTAL/FOR DEBUGGING) Use VM firmware, but allow host access to guest memory
pub unprotected_vm_with_firmware: Option<PathBuf>,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[argh(
option,
arg_name = "INDEX,type=TYPE,action=ACTION,[from=FROM],[filter=FILTER]",
from_str_fn(parse_userspace_msr_options)
)]
/// userspace MSR handling. Takes INDEX of the MSR and how they
/// are handled.
/// type=(r|w|rw|wr) - read/write permission control.
/// action=(pass|emu) - if the control of msr is effective
/// on host.
/// from=(cpu0) - source of msr value. if not set, the
/// source is running CPU.
/// filter=(yes|no) - if the msr is filtered in KVM.
pub userspace_msr: Vec<(u32, MsrConfig)>,
#[argh(
option,
long = "cpu-affinity",
arg_name = "CPUSET",
from_str_fn(parse_cpu_affinity)
)]
/// comma-separated list of CPUs or CPU ranges to run VCPUs on (e.g. 0,1-3,5)
/// or colon-separated list of assignments of guest to host CPU assignments (e.g. 0=0:1=1:2=2) (default: no mask)
pub vcpu_affinity: Option<VcpuAffinity>,
#[argh(option, arg_name = "PATH")]
/// move all vCPU threads to this CGroup (default: nothing moves)
pub vcpu_cgroup_path: Option<PathBuf>,
#[argh(option, long = "cpus", short = 'c')]
/// number of VCPUs. (default: 1)
pub vcpu_count: Option<usize>,
#[cfg(unix)]
#[argh(
option,
arg_name = "PATH[,guest-address=auto|<BUS:DEVICE.FUNCTION>][,iommu=on|off]",
from_str_fn(parse_vfio)
)]
/// path to sysfs of PCI pass through or mdev device.
/// guest-address=auto|<BUS:DEVICE.FUNCTION> - PCI address
/// that the device will be assigned in the guest
/// (default: auto). When set to "auto", the device will
/// be assigned an address that mirrors its address in
/// the host.
/// iommu=on|off - indicates whether to enable virtio IOMMU
/// for this device
pub vfio: Vec<VfioCommand>,
#[cfg(unix)]
#[argh(switch)]
/// isolate all hotplugged passthrough vfio device behind virtio-iommu
pub vfio_isolate_hotplug: bool,
#[cfg(unix)]
#[argh(option, arg_name = "PATH", from_str_fn(parse_vfio_platform))]
/// path to sysfs of platform pass through
pub vfio_platform: Vec<VfioCommand>,
#[argh(switch)]
/// use vhost for networking
pub vhost_net: bool,
#[cfg(unix)]
#[argh(option, long = "vhost-net-device", arg_name = "PATH")]
/// path to the vhost-net device. (default /dev/vhost-net)
pub vhost_net_device_path: Option<PathBuf>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user block
pub vhost_user_blk: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user console
pub vhost_user_console: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH:TAG")]
/// path to a socket path for vhost-user fs, and tag for the shared dir
pub vhost_user_fs: Vec<VhostUserFsOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// paths to a vhost-user socket for gpu
pub vhost_user_gpu: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user mac80211_hwsim
pub vhost_user_mac80211_hwsim: Option<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user net
pub vhost_user_net: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user snd
pub vhost_user_snd: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user video decoder
pub vhost_user_video_decoder: Option<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a socket for vhost-user vsock
pub vhost_user_vsock: Vec<VhostUserOption>,
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to a vhost-user socket for wayland
pub vhost_user_wl: Option<VhostUserWlOption>,
#[cfg(unix)]
#[argh(option, arg_name = "SOCKET_PATH")]
/// path to the vhost-vsock device. (default /dev/vhost-vsock)
pub vhost_vsock_device: Option<PathBuf>,
#[cfg(unix)]
#[argh(option, arg_name = "FD")]
/// open FD to the vhost-vsock device, mutually exclusive with vhost-vsock-device
pub vhost_vsock_fd: Option<RawDescriptor>,
#[cfg(feature = "video-decoder")]
#[argh(option, long = "video-decoder", arg_name = "[backend]")]
/// (EXPERIMENTAL) enable virtio-video decoder device
/// Possible backend values: libvda, ffmpeg, vaapi
pub video_dec: Option<VideoDeviceConfig>,
#[cfg(feature = "video-encoder")]
#[argh(option, long = "video-encoder", arg_name = "[backend]")]
/// (EXPERIMENTAL) enable virtio-video encoder device
/// Possible backend values: libvda
pub video_enc: Option<VideoDeviceConfig>,
#[argh(option, long = "evdev", arg_name = "PATH")]
/// path to an event device node. The device will be grabbed (unusable from the host) and made available to the guest with the same configuration it shows on the host
pub virtio_input_evdevs: Vec<PathBuf>,
#[argh(option, long = "keyboard", arg_name = "PATH")]
/// path to a socket from where to read keyboard input events and write status updates to
pub virtio_keyboard: Vec<PathBuf>,
#[argh(option, long = "mouse", arg_name = "PATH")]
/// path to a socket from where to read mouse input events and write status updates to
pub virtio_mice: Vec<PathBuf>,
#[argh(option, long = "multi-touch", arg_name = "PATH:WIDTH:HEIGHT")]
/// path to a socket from where to read multi touch input events (such as those from a touchscreen) and write status updates to, optionally followed by width and height (defaults to 800x1280)
pub virtio_multi_touch: Vec<TouchDeviceOption>,
#[argh(option, long = "single-touch", arg_name = "PATH:WIDTH:HEIGHT")]
/// path to a socket from where to read single touch input events (such as those from a touchscreen) and write status updates to, optionally followed by width and height (defaults to 800x1280)
pub virtio_single_touch: Vec<TouchDeviceOption>,
#[cfg(feature = "audio")]
#[argh(
option,
arg_name = "[capture=true,backend=BACKEND,num_output_devices=1,
num_input_devices=1,num_output_streams=1,num_input_streams=1]",
long = "virtio-snd"
)]
/// comma separated key=value pairs for setting up virtio snd
/// devices.
/// Possible key values:
/// capture=(false,true) - Disable/enable audio capture.
/// Default is false.
/// backend=(null,[cras]) - Which backend to use for
/// virtio-snd.
/// client_type=(crosvm,arcvm,borealis) - Set specific
/// client type for cras backend. Default is crosvm.
/// socket_type=(legacy,unified) Set specific socket type
/// for cras backend. Default is unified.
/// num_output_devices=INT - Set number of output PCM
/// devices.
/// num_input_devices=INT - Set number of input PCM devices.
/// num_output_streams=INT - Set number of output PCM
/// streams per device.
/// num_input_streams=INT - Set number of input PCM streams
/// per device.
pub virtio_snds: Vec<SndParameters>,
#[argh(option, long = "switches", arg_name = "PATH")]
/// path to a socket from where to read switch input events and write status updates to
pub virtio_switches: Vec<PathBuf>,
#[argh(option, long = "trackpad", arg_name = "PATH:WIDTH:HEIGHT")]
/// path to a socket from where to read trackpad input events and write status updates to, optionally followed by screen width and height (defaults to 800x1280)
pub virtio_trackpad: Vec<TouchDeviceOption>,
#[cfg(all(feature = "tpm", feature = "chromeos", target_arch = "x86_64"))]
#[argh(switch)]
/// enable the virtio-tpm connection to vtpm daemon
pub vtpm_proxy: bool,
#[argh(
option,
arg_name = "SOCKET_PATH[,addr=DOMAIN:BUS:DEVICE.FUNCTION,uuid=UUID]"
)]
/// socket path for the Virtio Vhost User proxy device.
/// Parameters
/// addr=BUS:DEVICE.FUNCTION - PCI address that the proxy
/// device will be allocated
/// (default: automatically allocated)
/// uuid=UUID - UUID which will be stored in VVU PCI config
/// space that is readable from guest userspace
pub vvu_proxy: Vec<VvuOption>,
#[cfg(unix)]
#[argh(
option,
long = "wayland-sock",
arg_name = "PATH[,name=NAME]",
from_str_fn(parse_wayland_sock)
)]
/// path to the Wayland socket to use. The unnamed one is used for displaying virtual screens. Named ones are only for IPC
pub wayland_socket_paths: Vec<(String, PathBuf)>,
#[argh(option, arg_name = "DISPLAY")]
/// X11 display name to use
pub x_display: Option<String>,
}
impl TryFrom<RunCommand> for super::config::Config {
type Error = String;
fn try_from(cmd: RunCommand) -> Result<Self, Self::Error> {
let mut cfg = Self::default();
// TODO: we need to factor out some(?) of the checks into config::validate_config
// Process arguments
if let Some(p) = cmd.executable_path {
cfg.executable_path = Some(Executable::Kernel(p));
}
#[cfg(unix)]
if let Some(p) = cmd.kvm_device_path {
cfg.kvm_device_path = p;
}
#[cfg(unix)]
if let Some(p) = cmd.vhost_net_device_path {
if !p.exists() {
return Err(format!("vhost-net-device path {:?} does not exist", p));
}
cfg.vhost_net_device_path = p;
}
cfg.android_fstab = cmd.android_fstab;
cfg.params.extend(cmd.params);
cfg.per_vm_core_scheduling = cmd.per_vm_core_scheduling;
cfg.vcpu_count = cmd.vcpu_count;
cfg.vcpu_affinity = cmd.vcpu_affinity;
cfg.cpu_clusters = cmd.cpu_clusters;
if let Some(capacity) = cmd.cpu_capacity {
cfg.cpu_capacity = capacity;
}
cfg.vcpu_cgroup_path = cmd.vcpu_cgroup_path;
cfg.no_smt = cmd.no_smt;
if let Some(rt_cpus) = cmd.rt_cpus {
cfg.rt_cpus = rt_cpus;
}
cfg.delay_rt = cmd.delay_rt;
cfg.memory = cmd.memory;
#[cfg(target_arch = "aarch64")]
{
if cmd.mte && !(cmd.pmem_devices.is_empty() && cmd.rw_pmem_devices.is_empty()) {
return Err(
"--mte cannot be specified together with --pmem-device or --rw-pmem-device"
.to_string(),
);
}
cfg.mte = cmd.mte;
cfg.swiotlb = cmd.swiotlb;
}
cfg.hugepages = cmd.hugepages;
cfg.hypervisor = cmd.hypervisor;
#[cfg(unix)]
{
cfg.lock_guest_memory = cmd.lock_guest_memory;
}
#[cfg(feature = "audio")]
{
cfg.ac97_parameters = cmd.ac97;
cfg.sound = cmd.sound;
}
cfg.vhost_user_snd = cmd.vhost_user_snd;
for serial_params in cmd.serial_parameters {
super::sys::config::check_serial_params(&serial_params)?;
let num = serial_params.num;
let key = (serial_params.hardware, num);
if cfg.serial_parameters.contains_key(&key) {
return Err(format!(
"serial hardware {} num {}",
serial_params.hardware, num,
));
}
if serial_params.console {
for params in cfg.serial_parameters.values() {
if params.console {
return Err(format!(
"{} device {} already set as console",
params.hardware, params.num,
));
}
}
}
if serial_params.earlycon {
// Only SerialHardware::Serial supports earlycon= currently.
match serial_params.hardware {
SerialHardware::Serial => {}
_ => {
return Err(super::config::invalid_value_err(
serial_params.hardware.to_string(),
String::from("earlycon not supported for hardware"),
));
}
}
for params in cfg.serial_parameters.values() {
if params.earlycon {
return Err(format!(
"{} device {} already set as earlycon",
params.hardware, params.num,
));
}
}
}
if serial_params.stdin {
if let Some(previous_stdin) = cfg.serial_parameters.values().find(|sp| sp.stdin) {
return Err(format!(
"{} device {} already connected to standard input",
previous_stdin.hardware, previous_stdin.num,
));
}
}
cfg.serial_parameters.insert(key, serial_params);
}
if cmd.root.is_some() && cmd.rwroot.is_some() {
return Err("Only one of [root,rwroot] has to be specified".to_string());
}
let root_disk = if let Some((read_only, (index, mut disk_option))) = cmd
.root
.map(|d| (true, d))
.or(cmd.rwroot.map(|d| (false, d)))
{
if index >= 26 {
return Err("ran out of letters for to assign to root disk".to_string());
}
disk_option.read_only = read_only;
cfg.params.push(format!(
"root=/dev/vd{} {}",
char::from(b'a' + index as u8),
if read_only { "ro" } else { "rw" }
));
Some((index, disk_option))
} else {
None
};
let mut disks = root_disk
.into_iter()
.chain(cmd.disks.into_iter().map(|(i, mut d)| {
d.read_only = true;
(i, d)
}))
.chain(cmd.rwdisks.into_iter().map(|(i, mut d)| {
d.read_only = false;
(i, d)
}))
.collect::<Vec<_>>();
disks.sort_by_key(|(i, _)| *i);
cfg.disks = disks.into_iter().map(|(_, d)| d).collect();
for (mut pmem, read_only) in cmd
.pmem_devices
.into_iter()
.map(|p| (p, true))
.chain(cmd.rw_pmem_devices.into_iter().map(|p| (p, false)))
{
pmem.read_only = read_only;
cfg.pmem_devices.push(pmem);
}
#[cfg(windows)]
{
#[cfg(feature = "crash-report")]
{
cfg.crash_pipe_name = cmd.crash_pipe_name;
}
cfg.product_name = cmd.product_name;
cfg.exit_stats = cmd.exit_stats;
cfg.host_guid = cmd.host_guid;
cfg.irq_chip = cmd.irq_chip;
cfg.kernel_log_file = cmd.kernel_log_file;
cfg.log_file = cmd.log_file;
cfg.logs_directory = cmd.logs_directory;
#[cfg(feature = "process-invariants")]
{
cfg.process_invariants_data_handle = cmd.process_invariants_data_handle;
cfg.process_invariants_data_size = cmd.process_invariants_data_size;
}
cfg.pvclock = cmd.pvclock;
#[cfg(feature = "kiwi")]
{
cfg.service_pipe_name = cmd.service_pipe_name;
}
#[cfg(feature = "slirp-ring-capture")]
{
cfg.slirp_capture_file = cmd.slirp_capture_file;
}
cfg.syslog_tag = cmd.syslog_tag;
cfg.product_channel = cmd.product_channel;
cfg.product_version = cmd.product_version;
}
cfg.pstore = cmd.pstore;
#[cfg(unix)]
for (name, params) in cmd.wayland_socket_paths {
if cfg.wayland_socket_paths.contains_key(&name) {
return Err(format!("wayland socket name already used: '{}'", name));
}
cfg.wayland_socket_paths.insert(name, params);
}
cfg.x_display = cmd.x_display;
cfg.display_window_keyboard = cmd.display_window_keyboard;
cfg.display_window_mouse = cmd.display_window_mouse;
if let Some(mut socket_path) = cmd.socket_path {
if socket_path.is_dir() {
socket_path.push(format!("crosvm-{}.sock", getpid()));
}
cfg.socket_path = Some(socket_path);
}
cfg.balloon_control = cmd.balloon_control;
cfg.cid = cmd.cid;
#[cfg(feature = "plugin")]
{
use std::fs::File;
use std::io::BufRead;
use std::io::BufReader;
if let Some(p) = cmd.plugin {
if cfg.executable_path.is_some() {
return Err(format!(
"A VM executable was already specified: {:?}",
cfg.executable_path
));
}
cfg.executable_path = Some(Executable::Plugin(p));
}
cfg.plugin_root = cmd.plugin_root;
cfg.plugin_mounts = cmd.plugin_mounts;
if let Some(path) = cmd.plugin_mount_file {
let file = File::open(path)
.map_err(|_| String::from("unable to open `plugin-mount-file` file"))?;
let reader = BufReader::new(file);
for l in reader.lines() {
let line = l.unwrap();
let trimmed_line = line.split_once('#').map_or(&*line, |x| x.0).trim();
if !trimmed_line.is_empty() {
let mount = parse_plugin_mount_option(trimmed_line)?;
cfg.plugin_mounts.push(mount);
}
}
}
cfg.plugin_gid_maps = cmd.plugin_gid_maps;
if let Some(path) = cmd.plugin_gid_map_file {
let file = File::open(path)
.map_err(|_| String::from("unable to open `plugin-gid-map-file` file"))?;
let reader = BufReader::new(file);
for l in reader.lines() {
let line = l.unwrap();
let trimmed_line = line.split_once('#').map_or(&*line, |x| x.0).trim();
if !trimmed_line.is_empty() {
let map = trimmed_line.parse()?;
cfg.plugin_gid_maps.push(map);
}
}
}
}
cfg.vhost_net = cmd.vhost_net;
#[cfg(feature = "tpm")]
{
cfg.software_tpm = cmd.software_tpm;
}
#[cfg(all(feature = "tpm", feature = "chromeos", target_arch = "x86_64"))]
{
cfg.vtpm_proxy = cmd.vtpm_proxy;
}
cfg.virtio_single_touch = cmd.virtio_single_touch;
cfg.virtio_multi_touch = cmd.virtio_multi_touch;
cfg.virtio_trackpad = cmd.virtio_trackpad;
cfg.virtio_mice = cmd.virtio_mice;
cfg.virtio_keyboard = cmd.virtio_keyboard;
cfg.virtio_switches = cmd.virtio_switches;
cfg.virtio_input_evdevs = cmd.virtio_input_evdevs;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
cfg.split_irqchip = cmd.split_irqchip;
}
cfg.initrd_path = cmd.initrd_path;
if let Some(p) = cmd.bios {
if cfg.executable_path.is_some() {
return Err(format!(
"A VM executable was already specified: {:?}",
cfg.executable_path
));
}
cfg.executable_path = Some(Executable::Bios(p));
}
cfg.pflash_parameters = cmd.pflash_parameters;
#[cfg(feature = "video-decoder")]
{
cfg.video_dec = cmd.video_dec;
}
#[cfg(feature = "video-encoder")]
{
cfg.video_enc = cmd.video_enc;
}
cfg.acpi_tables = cmd.acpi_tables;
cfg.usb = !cmd.no_usb;
cfg.rng = !cmd.no_rng;
cfg.balloon = !cmd.no_balloon;
cfg.balloon_page_reporting = cmd.balloon_page_reporting;
#[cfg(feature = "audio")]
{
cfg.virtio_snds = cmd.virtio_snds;
}
#[cfg(feature = "gpu")]
{
cfg.gpu_parameters = cmd.gpu_params;
}
#[cfg(unix)]
{
if cmd.vhost_vsock_device.is_some() && cmd.vhost_vsock_fd.is_some() {
return Err(
"Only one of vhost-vsock-device vhost-vsock-fd has to be specified".to_string(),
);
}
cfg.vhost_vsock_device = cmd.vhost_vsock_device;
if let Some(fd) = cmd.vhost_vsock_fd {
cfg.vhost_vsock_device = Some(PathBuf::from(format!("/proc/self/fd/{}", fd)));
}
cfg.shared_dirs = cmd.shared_dirs;
cfg.host_ip = cmd.host_ip;
cfg.netmask = cmd.netmask;
cfg.mac_address = cmd.mac_address;
cfg.tap_name = cmd.tap_name;
cfg.tap_fd = cmd.tap_fd;
cfg.coiommu_param = cmd.coiommu;
#[cfg(all(feature = "gpu", feature = "virgl_renderer_next"))]
{
cfg.gpu_render_server_parameters = cmd.gpu_render_server;
}
if let Some(d) = cmd.seccomp_policy_dir {
cfg.jail_config
.get_or_insert_with(Default::default)
.seccomp_policy_dir = Some(d);
}
if cmd.seccomp_log_failures {
cfg.jail_config
.get_or_insert_with(Default::default)
.seccomp_log_failures = true;
}
if let Some(p) = cmd.pivot_root {
cfg.jail_config
.get_or_insert_with(Default::default)
.pivot_root = p;
}
#[cfg(feature = "gpu")]
{
if !cmd.gpu_display.is_empty() {
cfg.gpu_parameters
.get_or_insert_with(Default::default)
.display_params
.extend(cmd.gpu_display);
}
}
cfg.net_vq_pairs = cmd.net_vq_pairs;
}
let protection_flags = [
cmd.protected_vm,
cmd.protected_vm_without_firmware,
cmd.unprotected_vm_with_firmware.is_some(),
];
if protection_flags.into_iter().filter(|b| *b).count() > 1 {
return Err("Only one protection mode has to be specified".to_string());
}
cfg.protection_type = if cmd.protected_vm {
ProtectionType::Protected
} else if cmd.protected_vm_without_firmware {
ProtectionType::ProtectedWithoutFirmware
} else if let Some(p) = cmd.unprotected_vm_with_firmware {
if !p.exists() || !p.is_file() {
return Err(
"unprotected-vm-with-firmware path should be an existing file".to_string(),
);
}
cfg.pvm_fw = Some(p);
ProtectionType::UnprotectedWithFirmware
} else {
ProtectionType::Unprotected
};
if !matches!(cfg.protection_type, ProtectionType::Unprotected) {
// Balloon and USB devices only work for unprotected VMs.
cfg.balloon = false;
cfg.usb = false;
// Protected VMs can't trust the RNG device, so don't provide it.
cfg.rng = false;
}
cfg.battery_config = cmd.battery;
#[cfg(feature = "gdb")]
{
cfg.gdb = cmd.gdb;
}
#[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
{
cfg.enable_hwp = cmd.enable_hwp;
cfg.host_cpu_topology = cmd.host_cpu_topology;
cfg.force_s2idle = cmd.s2idle;
cfg.pcie_ecam = cmd.pcie_ecam;
cfg.pci_low_start = cmd.pci_low_start;
cfg.no_i8042 = cmd.no_i8042;
cfg.no_rtc = cmd.no_rtc;
cfg.oem_strings = cmd.oem_strings;
if !cfg.oem_strings.is_empty() && cfg.dmi_path.is_some() {
return Err("unable to use oem-strings and dmi-path together".to_string());
}
for (index, msr_config) in cmd.userspace_msr {
if cfg.userspace_msr.insert(index, msr_config).is_some() {
return Err(String::from("msr must be unique"));
}
}
}
// cfg.balloon_bias is in bytes.
if let Some(b) = cmd.balloon_bias {
cfg.balloon_bias = b * 1024 * 1024;
}
cfg.vhost_user_blk = cmd.vhost_user_blk;
cfg.vhost_user_console = cmd.vhost_user_console;
cfg.vhost_user_fs = cmd.vhost_user_fs;
cfg.vhost_user_gpu = cmd.vhost_user_gpu;
cfg.vhost_user_mac80211_hwsim = cmd.vhost_user_mac80211_hwsim;
cfg.vhost_user_net = cmd.vhost_user_net;
cfg.vhost_user_video_dec = cmd.vhost_user_video_decoder;
cfg.vhost_user_vsock = cmd.vhost_user_vsock;
cfg.vhost_user_wl = cmd.vhost_user_wl;
#[cfg(feature = "direct")]
{
cfg.direct_pmio = cmd.direct_pmio;
cfg.direct_mmio = cmd.direct_mmio;
cfg.direct_level_irq = cmd.direct_level_irq;
cfg.direct_edge_irq = cmd.direct_edge_irq;
cfg.direct_gpe = cmd.direct_gpe;
cfg.direct_fixed_evts = cmd.direct_fixed_evts;
cfg.pcie_rp = cmd.pcie_rp;
cfg.mmio_address_ranges = cmd.mmio_address_ranges.unwrap_or_default();
}
cfg.disable_virtio_intx = cmd.disable_virtio_intx;
cfg.dmi_path = cmd.dmi_path;
cfg.itmt = cmd.itmt;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
if cmd.enable_pnp_data && cmd.force_calibrated_tsc_leaf {
return Err(
"Only one of [enable_pnp_data,force_calibrated_tsc_leaf] can be specified"
.to_string(),
);
}
cfg.enable_pnp_data = cmd.enable_pnp_data;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
cfg.force_calibrated_tsc_leaf = cmd.force_calibrated_tsc_leaf;
}
cfg.privileged_vm = cmd.privileged_vm;
cfg.stub_pci_devices = cmd.stub_pci_devices;
cfg.vvu_proxy = cmd.vvu_proxy;
cfg.file_backed_mappings = cmd.file_backed_mappings;
cfg.init_memory = cmd.init_memory;
cfg.strict_balloon = cmd.strict_balloon;
#[cfg(target_os = "android")]
{
cfg.task_profiles = cmd.task_profiles;
}
#[cfg(unix)]
{
cfg.vfio.extend(cmd.vfio);
cfg.vfio.extend(cmd.vfio_platform);
cfg.vfio_isolate_hotplug = cmd.vfio_isolate_hotplug;
}
// `--disable-sandbox` has the effect of disabling sandboxing altogether, so make sure
// to handle it after other sandboxing options since they implicitly enable it.
if cmd.disable_sandbox {
cfg.jail_config = None;
}
// Now do validation of constructed config
super::config::validate_config(&mut cfg)?;
Ok(cfg)
}
}
|
use errors::wallet::WalletError;
use services::wallet::WalletService;
use std::rc::Rc;
/// Commands accepted by `WalletCommandExecutor`.
///
/// Each variant bundles the operation's arguments with a boxed callback that
/// receives the operation's result (`Ok` or a `WalletError`).
pub enum WalletCommand {
    Create(String, // pool name
           String, // wallet name
           Option<String>, // wallet type
           Option<String>, // wallet config
           Option<String>, // wallet credentials
           Box<Fn(Result<(), WalletError>) + Send>),
    Open(String, // wallet name
         Option<String>, // wallet runtime config
         Option<String>, // wallet credentials
         Box<Fn(Result<i32, WalletError>) + Send>), // callback receives the wallet handle
    Close(i32, // handle
          Box<Fn(Result<(), WalletError>) + Send>),
    Delete(String, // name
           Option<String>, // wallet credentials
           Box<Fn(Result<(), WalletError>) + Send>),
    SetSeqNoForValue(i32, // wallet handle
                     String, // wallet key
                     i32, // sequence number
                     Box<Fn(Result<(), WalletError>) + Send>)
}
/// Executes `WalletCommand`s against a shared, single-threaded `WalletService`.
pub struct WalletCommandExecutor {
    wallet_service: Rc<WalletService> // shared with other executors on this thread
}
impl WalletCommandExecutor {
    /// Creates an executor that dispatches wallet commands to `wallet_service`.
    pub fn new(wallet_service: Rc<WalletService>) -> WalletCommandExecutor {
        WalletCommandExecutor {
            wallet_service: wallet_service
        }
    }

    /// Dispatches `command` to the matching private handler; the result is
    /// delivered synchronously through the callback carried in the variant.
    pub fn execute(&self, command: WalletCommand) {
        match command {
            WalletCommand::Create(pool_name, name, xtype, config, credentials, cb) => {
                info!(target: "wallet_command_executor", "Create command received");
                self.create(&pool_name, &name, xtype.as_ref().map(String::as_str),
                            config.as_ref().map(String::as_str), credentials.as_ref().map(String::as_str), cb);
            }
            WalletCommand::Open(name, runtime_config, credentials, cb) => {
                info!(target: "wallet_command_executor", "Open command received");
                self.open(&name, runtime_config.as_ref().map(String::as_str),
                          credentials.as_ref().map(String::as_str), cb);
            }
            WalletCommand::Close(handle, cb) => {
                info!(target: "wallet_command_executor", "Close command received");
                self.close(handle, cb);
            }
            WalletCommand::Delete(name, credentials, cb) => {
                info!(target: "wallet_command_executor", "Delete command received");
                self.delete(&name, credentials.as_ref().map(String::as_str), cb);
            }
            WalletCommand::SetSeqNoForValue(handle, key, seq_no, cb) => {
                info!(target: "wallet_command_executor", "SetSeqNoForValue command received");
                self.set_seq_no_for_value(handle, &key, seq_no, cb);
            }
        };
    }

    /// Creates a new wallet and reports the outcome via `cb`.
    // NOTE(review): the service is called as (pool_name, xtype, name, ...) while
    // this method declares (pool_name, name, xtype, ...); both revisions in this
    // file do the same, but confirm against WalletService::create's signature.
    fn create(&self,
              pool_name: &str,
              name: &str,
              xtype: Option<&str>,
              config: Option<&str>,
              credentials: Option<&str>,
              cb: Box<Fn(Result<(), WalletError>) + Send>) {
        cb(self.wallet_service.create(pool_name, xtype, name, config, credentials));
    }

    /// Opens an existing wallet; `cb` receives the wallet handle on success.
    fn open(&self,
            name: &str,
            runtime_config: Option<&str>,
            credentials: Option<&str>,
            cb: Box<Fn(Result<i32, WalletError>) + Send>) {
        cb(self.wallet_service.open(name, runtime_config, credentials));
    }

    /// Closes the wallet identified by `handle`.
    fn close(&self,
             handle: i32,
             cb: Box<Fn(Result<(), WalletError>) + Send>) {
        cb(self.wallet_service.close(handle));
    }

    /// Deletes the wallet identified by `name`.
    // Parameter renamed from `handle` to `name`: callers pass the wallet name
    // (see `execute`), and `handle` is reserved for the i32 id everywhere else.
    fn delete(&self,
              name: &str,
              credentials: Option<&str>,
              cb: Box<Fn(Result<(), WalletError>) + Send>) {
        cb(self.wallet_service.delete(name, credentials));
    }

    /// Stores a sequence number for a wallet value.
    // NOTE(review): the service receives key "seq_no::{seq_no}" with `key` as the
    // stored value — this looks inverted relative to the parameter names; verify.
    fn set_seq_no_for_value(&self,
                            handle: i32,
                            key: &str,
                            seq_no: i32,
                            cb: Box<Fn(Result<(), WalletError>) + Send>) {
        cb(self.wallet_service.set(handle, &format!("seq_no::{}", seq_no), key));
    }
}
Wallet refactored: command callbacks now report `SovrinError` (wrapping `WalletError`) instead of exposing `WalletError` directly.
use errors::sovrin::SovrinError;
use services::wallet::WalletService;
use std::rc::Rc;
/// Commands accepted by `WalletCommandExecutor`.
///
/// Each variant bundles the operation's arguments with a boxed callback that
/// receives the operation's result (`Ok` or a `SovrinError`).
pub enum WalletCommand {
    Create(String, // pool name
           String, // wallet name
           Option<String>, // wallet type
           Option<String>, // wallet config
           Option<String>, // wallet credentials
           Box<Fn(Result<(), SovrinError>) + Send>),
    Open(String, // wallet name
         Option<String>, // wallet runtime config
         Option<String>, // wallet credentials
         Box<Fn(Result<i32, SovrinError>) + Send>), // callback receives the wallet handle
    Close(i32, // handle
          Box<Fn(Result<(), SovrinError>) + Send>),
    Delete(String, // name
           Option<String>, // wallet credentials
           Box<Fn(Result<(), SovrinError>) + Send>),
    SetSeqNoForValue(i32, // wallet handle
                     String, // wallet key
                     i32, // sequence number
                     Box<Fn(Result<(), SovrinError>) + Send>)
}
/// Executes `WalletCommand`s against a shared, single-threaded `WalletService`.
pub struct WalletCommandExecutor {
    wallet_service: Rc<WalletService> // shared with other executors on this thread
}
impl WalletCommandExecutor {
pub fn new(wallet_service: Rc<WalletService>) -> WalletCommandExecutor {
WalletCommandExecutor {
wallet_service: wallet_service
}
}
pub fn execute(&self, command: WalletCommand) {
match command {
WalletCommand::Create(pool_name, name, xtype, config, credentials, cb) => {
info!(target: "wallet_command_executor", "Create command received");
self.create(&pool_name, &name, xtype.as_ref().map(String::as_str),
config.as_ref().map(String::as_str), credentials.as_ref().map(String::as_str), cb);
}
WalletCommand::Open(name, runtime_config, credentials, cb) => {
info!(target: "wallet_command_executor", "Open command received");
self.open(&name, runtime_config.as_ref().map(String::as_str),
credentials.as_ref().map(String::as_str), cb);
}
WalletCommand::Close(handle, cb) => {
info!(target: "wallet_command_executor", "Close command received");
self.close(handle, cb);
}
WalletCommand::Delete(name, credentials, cb) => {
info!(target: "wallet_command_executor", "Delete command received");
self.delete(&name, credentials.as_ref().map(String::as_str), cb);
}
WalletCommand::SetSeqNoForValue(handle, key, seq_no, cb) => {
info!(target: "wallet_command_executor", "SetSeqNoForValue command received");
self.set_seq_no_for_value(handle, &key, seq_no, cb);
}
};
}
fn create(&self,
pool_name: &str,
name: &str,
xtype: Option<&str>,
config: Option<&str>,
credentials: Option<&str>,
cb: Box<Fn(Result<(), SovrinError>) + Send>) {
cb(self.wallet_service.create(pool_name, xtype, name, config, credentials)
.map_err(|err| SovrinError::WalletError(err)));
}
fn open(&self,
name: &str,
runtime_config: Option<&str>,
credentials: Option<&str>,
cb: Box<Fn(Result<i32, SovrinError>) + Send>) {
cb(self.wallet_service.open(name, runtime_config, credentials)
.map_err(|err| SovrinError::WalletError(err)));
}
fn close(&self,
handle: i32,
cb: Box<Fn(Result<(), SovrinError>) + Send>) {
cb(self.wallet_service.close(handle)
.map_err(|err| SovrinError::WalletError(err)));
}
fn delete(&self,
handle: &str,
credentials: Option<&str>,
cb: Box<Fn(Result<(), SovrinError>) + Send>) {
cb(self.wallet_service.delete(handle, credentials)
.map_err(|err| SovrinError::WalletError(err)));
}
fn set_seq_no_for_value(&self,
handle: i32,
key: &str,
seq_no: i32,
cb: Box<Fn(Result<(), SovrinError>) + Send>) {
cb(self.wallet_service.set(handle, &format!("seq_no::{}", seq_no), key)
.map_err(|err| SovrinError::WalletError(err)));
}
} |
// notty is a new kind of terminal emulator.
// Copyright (C) 2015 without boats
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::mem;
use datatypes::{Area, Coords, Direction, Region, move_within};
use datatypes::Area::*;
use datatypes::Direction::*;
use datatypes::Movement::To;
/// A double-ended iterator over the screen coordinates covered by an `Area`.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub struct CoordsIter {
    point: Coords,      // next coordinate yielded from the front
    back_point: Coords, // next coordinate yielded from the back
    region: Region,     // region that bounds each movement step
    dir: Direction,     // direction of forward travel
    fin: bool,          // set once the endpoints have met and been yielded
}
impl CoordsIter {
    /// Builds an iterator over every coordinate covered by `area`, given the
    /// cursor position, the screen region, and the tab width (used only for
    /// cursor-movement areas).
    pub fn from_area(area: Area, cursor: Coords, screen: Region, tab: u32) -> CoordsIter {
        use std::cmp;
        match area {
            // Just the cursor's own cell.
            CursorCell => CoordsIter {
                point: cursor,
                back_point: cursor,
                region: screen,
                dir: Right,
                fin: false
            },
            // The cursor's entire row, left edge to right edge.
            CursorRow => CoordsIter {
                point: Coords {x: screen.left, y: cursor.y},
                back_point: Coords {x: screen.right - 1, y: cursor.y},
                region: screen,
                dir: Right,
                fin: false,
            },
            // The cursor's entire column, top edge to bottom edge.
            CursorColumn => CoordsIter {
                point: Coords {x: cursor.x, y: screen.top},
                back_point: Coords {x: cursor.x, y: screen.bottom - 1},
                region: screen,
                dir: Down,
                fin: false,
            },
            // From the cursor to the destination of a movement.
            CursorTo(mov) => CoordsIter {
                point: cursor,
                back_point: move_within(cursor, mov, screen, tab),
                region: screen,
                dir: mov.direction(cursor),
                fin: false,
            },
            CursorBound(coords) if coords == cursor => {
                CoordsIter::from_area(CursorCell, cursor, screen, tab)
            }
            CursorBound(coords) => {
                // Normalize the two corners so the region is well-formed no
                // matter which side of the cursor `coords` lies on, and add 1
                // to the far edges because `Region` treats right/bottom as
                // exclusive (fixes an off-by-one that dropped the final row
                // and column of the bounded rectangle).
                let (l, r) = (cmp::min(coords.x, cursor.x), cmp::max(coords.x, cursor.x) + 1);
                let (t, b) = (cmp::min(coords.y, cursor.y), cmp::max(coords.y, cursor.y) + 1);
                CoordsIter::from_region(Region::new(l, t, r, b))
            }
            WholeScreen => CoordsIter::from_region(screen),
            Bound(region) => CoordsIter::from_region(region),
            // `bottom` is exclusive; an empty range starts already finished.
            Rows(top, bottom) => CoordsIter {
                point: Coords {x: screen.left, y: top },
                back_point: Coords {x: screen.right - 1, y: bottom - 1},
                region: screen,
                dir: Right,
                fin: !(top < bottom),
            },
            // `right` is exclusive; an empty range starts already finished.
            Columns(left, right) => CoordsIter {
                point: Coords {x: left, y: screen.top},
                back_point: Coords {x: right - 1, y: screen.bottom - 1},
                region: screen,
                dir: Down,
                fin: !(left < right),
            },
            // Everything from the cursor's row (inclusive) downward.
            BelowCursor(true) => {
                CoordsIter::from_region(Region { top: cursor.y, ..screen})
            }
            // Cursor on the last row and excluded: nothing to iterate.
            BelowCursor(false) if cursor.y == screen.bottom - 1 => {
                CoordsIter {
                    point: cursor,
                    back_point: cursor,
                    region: screen,
                    dir: Right,
                    fin: true,
                }
            }
            // Everything strictly below the cursor's row.
            BelowCursor(false) => {
                CoordsIter::from_region(Region { top: cursor.y + 1, ..screen })
            }
        }
    }

    /// Iterates the whole of `region` in row-major (rightward) order.
    pub fn from_region(region: Region) -> CoordsIter {
        CoordsIter {
            point: Coords {x: region.left, y: region.top},
            back_point: Coords {x: region.right-1, y: region.bottom-1},
            region: region,
            dir: Right,
            fin: false,
        }
    }
}
impl Iterator for CoordsIter {
    type Item = Coords;

    /// Yields coordinates front-to-back, finishing after `back_point` is emitted.
    fn next(&mut self) -> Option<Coords> {
        if self.fin {
            return None;
        }
        if self.point == self.back_point {
            // The endpoints have met: emit the final cell and mark exhaustion.
            self.fin = true;
            return Some(self.point);
        }
        // Step one cell in `dir` within `region` and yield the pre-step position.
        let stepped = move_within(self.point, To(self.dir, 1, true), self.region, 0);
        Some(mem::replace(&mut self.point, stepped))
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.len();
        (remaining, Some(remaining))
    }
}
impl DoubleEndedIterator for CoordsIter {
    /// Yields coordinates back-to-front, finishing after `point` is emitted.
    fn next_back(&mut self) -> Option<Coords> {
        if self.fin {
            return None;
        }
        if self.point == self.back_point {
            // The endpoints have met: emit the final cell and mark exhaustion.
            self.fin = true;
            return Some(self.point);
        }
        // Step one cell opposite to `dir` and yield the pre-step back position.
        let stepped = move_within(self.back_point, To(self.dir.rev(), 1, true), self.region, 0);
        Some(mem::replace(&mut self.back_point, stepped))
    }
}
impl ExactSizeIterator for CoordsIter {
    /// Counts the cells remaining between `point` and `back_point` (inclusive)
    /// along the wrapping path determined by `dir`.
    ///
    /// Each general case sums three segments: the remainder of the current
    /// row/column, any full rows/columns strictly between the endpoints, and
    /// the partial row/column containing `back_point`.
    fn len(&self) -> usize {
        match self.dir {
            // Same column: just the vertical distance, inclusive.
            Up if self.point.x == self.back_point.x => {
                (self.point.y - self.back_point.y + 1) as usize
            }
            Up => {
                let height = self.region.bottom - self.region.top;
                // Cells from `point` up to the region's top edge (inclusive).
                let first = self.point.y - self.region.top + 1;
                // Whole columns strictly between the two endpoints.
                let mid = (self.point.x - self.back_point.x).saturating_sub(1) * height;
                // Cells from the bottom edge up to `back_point` (inclusive).
                let last = self.region.bottom - self.back_point.y;
                (first + mid + last) as usize
            }
            // Same column: just the vertical distance, inclusive.
            Down if self.point.x == self.back_point.x => {
                (self.back_point.y - self.point.y + 1) as usize
            }
            Down => {
                let height = self.region.bottom - self.region.top;
                let first = self.region.bottom - self.point.y;
                let mid = (self.back_point.x - self.point.x).saturating_sub(1) * height;
                let last = self.back_point.y - self.region.top + 1;
                (first + mid + last) as usize
            }
            // Same row: just the horizontal distance, inclusive.
            Left if self.point.y == self.back_point.y => {
                (self.point.x - self.back_point.x + 1) as usize
            }
            Left => {
                let width = self.region.right - self.region.left;
                let first = self.point.x - self.region.left + 1;
                let mid = (self.point.y - self.back_point.y).saturating_sub(1) * width;
                let last = self.region.right - self.back_point.x;
                (first + mid + last) as usize
            }
            // Same row: just the horizontal distance, inclusive.
            Right if self.point.y == self.back_point.y => {
                (self.back_point.x - self.point.x + 1) as usize
            }
            Right => {
                let width = self.region.right - self.region.left;
                let first = self.region.right - self.point.x;
                let mid = (self.back_point.y - self.point.y).saturating_sub(1) * width;
                let last = self.back_point.x - self.region.left + 1;
                (first + mid + last) as usize
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use datatypes::{Coords, Region};
    use datatypes::Direction::*;

    // Shorthand for a CoordsIter literal: front point, back point, region
    // bounds (left, top, right, bottom), and travel direction.
    macro_rules! iter {
        ($x1:expr, $y1:expr, $x2:expr, $y2:expr, $l:expr, $t: expr, $r:expr, $b:expr, $d:expr) =>
        (CoordsIter {
            point: Coords {x: $x1, y: $y1},
            back_point: Coords {x: $x2, y: $y2},
            region: Region {left: $l, top: $t, right: $r, bottom: $b},
            dir: $d,
            fin: false,
        });
    }

    // Shorthand for a static slice of Coords literals.
    macro_rules! array {
        [$(($x:expr,$y:expr)),*] => [
            &[$(Coords {x: $x, y: $y }),*]
        ];
    }

    // Each case pairs an iterator over a 2x2 region with the exact sequence of
    // coordinates it must produce, covering all four directions.
    static TEST_CASES: &'static [(CoordsIter, &'static [Coords])] = &[
        (iter!(1,1, 0,0, 0,0,2,2, Up), array![(1,1), (1,0), (0,1), (0,0)]),
        (iter!(0,1, 0,0, 0,0,2,2, Up), array![(0,1), (0,0)]),
        (iter!(1,0, 0,1, 0,0,2,2, Up), array![(1,0), (0,1)]),
        (iter!(0,0, 1,1, 0,0,2,2, Down), array![(0,0), (0,1), (1,0), (1,1)]),
        (iter!(0,0, 0,1, 0,0,2,2, Down), array![(0,0), (0,1)]),
        (iter!(0,1, 1,0, 0,0,2,2, Down), array![(0,1), (1,0)]),
        (iter!(1,1, 0,0, 0,0,2,2, Left), array![(1,1), (0,1), (1,0), (0,0)]),
        (iter!(1,0, 0,0, 0,0,2,2, Left), array![(1,0), (0,0)]),
        (iter!(0,1, 1,0, 0,0,2,2, Left), array![(0,1), (1,0)]),
        (iter!(0,0, 1,1, 0,0,2,2, Right), array![(0,0), (1,0), (0,1), (1,1)]),
        (iter!(0,0, 1,0, 0,0,2,2, Right), array![(0,0), (1,0)]),
        (iter!(1,0, 0,1, 0,0,2,2, Right), array![(1,0), (0,1)]),
    ];

    // Forward iteration must visit the expected coordinates in order.
    #[test]
    fn forward_iteration() {
        for &(iter, array) in TEST_CASES {
            assert_eq!(iter.collect::<Vec<_>>(), array.iter().cloned().collect::<Vec<_>>());
        }
    }

    // Reverse iteration must visit the same coordinates in opposite order.
    #[test]
    fn backward_iteration() {
        for &(iter, array) in TEST_CASES {
            assert_eq!(iter.rev().collect::<Vec<_>>(),
                       array.iter().cloned().rev().collect::<Vec<_>>());
        }
    }

    // len() must agree with the number of coordinates actually produced.
    #[test]
    fn length() {
        for &(iter, array) in TEST_CASES {
            assert!(iter.len() == array.len(), "{:?}; {} != {}", iter, iter.len(), array.len());
        }
    }
}
Fix off-by-one error.
// notty is a new kind of terminal emulator.
// Copyright (C) 2015 without boats
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::cmp;
use std::mem;
use datatypes::{Area, Coords, Direction, Region, move_within};
use datatypes::Area::*;
use datatypes::Direction::*;
use datatypes::Movement::To;
/// A double-ended iterator over the screen coordinates covered by an `Area`.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub struct CoordsIter {
    point: Coords,      // next coordinate yielded from the front
    back_point: Coords, // next coordinate yielded from the back
    region: Region,     // region that bounds each movement step
    dir: Direction,     // direction of forward travel
    fin: bool,          // set once the endpoints have met and been yielded
}
impl CoordsIter {
    /// Builds an iterator over every coordinate covered by `area`, given the
    /// cursor position, the screen region, and the tab width (used only for
    /// cursor-movement areas).
    pub fn from_area(area: Area, cursor: Coords, screen: Region, tab: u32) -> CoordsIter {
        match area {
            // Just the cursor's own cell.
            CursorCell => CoordsIter {
                point: cursor,
                back_point: cursor,
                region: screen,
                dir: Right,
                fin: false
            },
            // The cursor's entire row, left edge to right edge.
            CursorRow => CoordsIter {
                point: Coords {x: screen.left, y: cursor.y},
                back_point: Coords {x: screen.right - 1, y: cursor.y},
                region: screen,
                dir: Right,
                fin: false,
            },
            // The cursor's entire column, top edge to bottom edge.
            CursorColumn => CoordsIter {
                point: Coords {x: cursor.x, y: screen.top},
                back_point: Coords {x: cursor.x, y: screen.bottom - 1},
                region: screen,
                dir: Down,
                fin: false,
            },
            // From the cursor to the destination of a movement.
            CursorTo(mov) => CoordsIter {
                point: cursor,
                back_point: move_within(cursor, mov, screen, tab),
                region: screen,
                dir: mov.direction(cursor),
                fin: false,
            },
            CursorBound(coords) if coords == cursor => {
                CoordsIter::from_area(CursorCell, cursor, screen, tab)
            }
            CursorBound(coords) => {
                // Normalize the corners so the region is well-formed no matter
                // which side of the cursor `coords` lies on; +1 makes the
                // exclusive right/bottom edges include the far row and column.
                let (l, r) = (cmp::min(coords.x, cursor.x), cmp::max(coords.x, cursor.x) + 1);
                let (t, b) = (cmp::min(coords.y, cursor.y), cmp::max(coords.y, cursor.y) + 1);
                CoordsIter::from_region(Region::new(l, t, r, b))
            }
            WholeScreen => CoordsIter::from_region(screen),
            Bound(region) => CoordsIter::from_region(region),
            // `bottom` is exclusive; an empty range starts already finished.
            Rows(top, bottom) => CoordsIter {
                point: Coords {x: screen.left, y: top },
                back_point: Coords {x: screen.right - 1, y: bottom - 1},
                region: screen,
                dir: Right,
                fin: !(top < bottom),
            },
            // `right` is exclusive; an empty range starts already finished.
            Columns(left, right) => CoordsIter {
                point: Coords {x: left, y: screen.top},
                back_point: Coords {x: right - 1, y: screen.bottom - 1},
                region: screen,
                dir: Down,
                fin: !(left < right),
            },
            // Everything from the cursor's row (inclusive) downward.
            BelowCursor(true) => {
                CoordsIter::from_region(Region { top: cursor.y, ..screen})
            }
            // Cursor on the last row and excluded: nothing to iterate.
            BelowCursor(false) if cursor.y == screen.bottom - 1 => {
                CoordsIter {
                    point: cursor,
                    back_point: cursor,
                    region: screen,
                    dir: Right,
                    fin: true,
                }
            }
            // Everything strictly below the cursor's row.
            BelowCursor(false) => {
                CoordsIter::from_region(Region { top: cursor.y + 1, ..screen })
            }
        }
    }

    /// Iterates the whole of `region` in row-major (rightward) order.
    pub fn from_region(region: Region) -> CoordsIter {
        CoordsIter {
            point: Coords {x: region.left, y: region.top},
            back_point: Coords {x: region.right-1, y: region.bottom-1},
            region: region,
            dir: Right,
            fin: false,
        }
    }
}
impl Iterator for CoordsIter {
    type Item = Coords;

    /// Yields coordinates front-to-back, finishing after `back_point` is emitted.
    fn next(&mut self) -> Option<Coords> {
        if self.fin {
            return None;
        }
        if self.point == self.back_point {
            // The endpoints have met: emit the final cell and mark exhaustion.
            self.fin = true;
            return Some(self.point);
        }
        // Step one cell in `dir` within `region` and yield the pre-step position.
        let stepped = move_within(self.point, To(self.dir, 1, true), self.region, 0);
        Some(mem::replace(&mut self.point, stepped))
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.len();
        (remaining, Some(remaining))
    }
}
impl DoubleEndedIterator for CoordsIter {
    /// Yields coordinates back-to-front, finishing after `point` is emitted.
    fn next_back(&mut self) -> Option<Coords> {
        if self.fin {
            return None;
        }
        if self.point == self.back_point {
            // The endpoints have met: emit the final cell and mark exhaustion.
            self.fin = true;
            return Some(self.point);
        }
        // Step one cell opposite to `dir` and yield the pre-step back position.
        let stepped = move_within(self.back_point, To(self.dir.rev(), 1, true), self.region, 0);
        Some(mem::replace(&mut self.back_point, stepped))
    }
}
impl ExactSizeIterator for CoordsIter {
    /// Counts the cells remaining between `point` and `back_point` (inclusive)
    /// along the wrapping path determined by `dir`.
    ///
    /// Each general case sums three segments: the remainder of the current
    /// row/column, any full rows/columns strictly between the endpoints, and
    /// the partial row/column containing `back_point`.
    fn len(&self) -> usize {
        match self.dir {
            // Same column: just the vertical distance, inclusive.
            Up if self.point.x == self.back_point.x => {
                (self.point.y - self.back_point.y + 1) as usize
            }
            Up => {
                let height = self.region.bottom - self.region.top;
                // Cells from `point` up to the region's top edge (inclusive).
                let first = self.point.y - self.region.top + 1;
                // Whole columns strictly between the two endpoints.
                let mid = (self.point.x - self.back_point.x).saturating_sub(1) * height;
                // Cells from the bottom edge up to `back_point` (inclusive).
                let last = self.region.bottom - self.back_point.y;
                (first + mid + last) as usize
            }
            // Same column: just the vertical distance, inclusive.
            Down if self.point.x == self.back_point.x => {
                (self.back_point.y - self.point.y + 1) as usize
            }
            Down => {
                let height = self.region.bottom - self.region.top;
                let first = self.region.bottom - self.point.y;
                let mid = (self.back_point.x - self.point.x).saturating_sub(1) * height;
                let last = self.back_point.y - self.region.top + 1;
                (first + mid + last) as usize
            }
            // Same row: just the horizontal distance, inclusive.
            Left if self.point.y == self.back_point.y => {
                (self.point.x - self.back_point.x + 1) as usize
            }
            Left => {
                let width = self.region.right - self.region.left;
                let first = self.point.x - self.region.left + 1;
                let mid = (self.point.y - self.back_point.y).saturating_sub(1) * width;
                let last = self.region.right - self.back_point.x;
                (first + mid + last) as usize
            }
            // Same row: just the horizontal distance, inclusive.
            Right if self.point.y == self.back_point.y => {
                (self.back_point.x - self.point.x + 1) as usize
            }
            Right => {
                let width = self.region.right - self.region.left;
                let first = self.region.right - self.point.x;
                let mid = (self.back_point.y - self.point.y).saturating_sub(1) * width;
                let last = self.back_point.x - self.region.left + 1;
                (first + mid + last) as usize
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use datatypes::{Coords, Region};
    use datatypes::Direction::*;

    // Shorthand for a CoordsIter literal: front point, back point, region
    // bounds (left, top, right, bottom), and travel direction.
    macro_rules! iter {
        ($x1:expr, $y1:expr, $x2:expr, $y2:expr, $l:expr, $t: expr, $r:expr, $b:expr, $d:expr) =>
        (CoordsIter {
            point: Coords {x: $x1, y: $y1},
            back_point: Coords {x: $x2, y: $y2},
            region: Region {left: $l, top: $t, right: $r, bottom: $b},
            dir: $d,
            fin: false,
        });
    }

    // Shorthand for a static slice of Coords literals.
    macro_rules! array {
        [$(($x:expr,$y:expr)),*] => [
            &[$(Coords {x: $x, y: $y }),*]
        ];
    }

    // Each case pairs an iterator over a 2x2 region with the exact sequence of
    // coordinates it must produce, covering all four directions.
    static TEST_CASES: &'static [(CoordsIter, &'static [Coords])] = &[
        (iter!(1,1, 0,0, 0,0,2,2, Up), array![(1,1), (1,0), (0,1), (0,0)]),
        (iter!(0,1, 0,0, 0,0,2,2, Up), array![(0,1), (0,0)]),
        (iter!(1,0, 0,1, 0,0,2,2, Up), array![(1,0), (0,1)]),
        (iter!(0,0, 1,1, 0,0,2,2, Down), array![(0,0), (0,1), (1,0), (1,1)]),
        (iter!(0,0, 0,1, 0,0,2,2, Down), array![(0,0), (0,1)]),
        (iter!(0,1, 1,0, 0,0,2,2, Down), array![(0,1), (1,0)]),
        (iter!(1,1, 0,0, 0,0,2,2, Left), array![(1,1), (0,1), (1,0), (0,0)]),
        (iter!(1,0, 0,0, 0,0,2,2, Left), array![(1,0), (0,0)]),
        (iter!(0,1, 1,0, 0,0,2,2, Left), array![(0,1), (1,0)]),
        (iter!(0,0, 1,1, 0,0,2,2, Right), array![(0,0), (1,0), (0,1), (1,1)]),
        (iter!(0,0, 1,0, 0,0,2,2, Right), array![(0,0), (1,0)]),
        (iter!(1,0, 0,1, 0,0,2,2, Right), array![(1,0), (0,1)]),
    ];

    // Forward iteration must visit the expected coordinates in order.
    #[test]
    fn forward_iteration() {
        for &(iter, array) in TEST_CASES {
            assert_eq!(iter.collect::<Vec<_>>(), array.iter().cloned().collect::<Vec<_>>());
        }
    }

    // Reverse iteration must visit the same coordinates in opposite order.
    #[test]
    fn backward_iteration() {
        for &(iter, array) in TEST_CASES {
            assert_eq!(iter.rev().collect::<Vec<_>>(),
                       array.iter().cloned().rev().collect::<Vec<_>>());
        }
    }

    // len() must agree with the number of coordinates actually produced.
    #[test]
    fn length() {
        for &(iter, array) in TEST_CASES {
            assert!(iter.len() == array.len(), "{:?}; {} != {}", iter, iter.len(), array.len());
        }
    }
}
|
use bitmap_font::BitmapFont;
use line_renderer::LineRenderer;
use text_renderer::TextRenderer;
use gfx;
use gfx::traits::*;
use gfx_texture;
use image;
/// Errors that can occur while constructing a `DebugRenderer`.
// Deriving Debug so callers can `unwrap`/`expect`/log construction failures.
#[derive(Debug)]
pub enum DebugRendererError {
    /// Building the line or text shader program failed.
    ShaderProgramError(gfx::ProgramError),
    /// The bundled bitmap-font image did not decode as RGBA8.
    BitmapFontTextureError,
}
// Lets `try!` convert shader-program failures into DebugRendererError.
impl From<gfx::ProgramError> for DebugRendererError {
    fn from(err: gfx::ProgramError) -> DebugRendererError {
        DebugRendererError::ShaderProgramError(err)
    }
}
/// Queues debug lines and text, then draws them in one `render` call.
pub struct DebugRenderer<R: gfx::Resources> {
    line_renderer: LineRenderer<R>, // batches 3D line segments
    text_renderer: TextRenderer<R>, // batches screen- and world-space text
}
impl<R: gfx::Resources> DebugRenderer<R> {
    /// Builds a `DebugRenderer` from an existing `gfx::Canvas`, deriving the
    /// frame size from the canvas output.
    pub fn from_canvas<
        C: gfx::CommandBuffer<R>,
        F: Factory<R>,
        O: gfx::render::target::Output<R>,
        D: Device<Resources = R, CommandBuffer = C>,
    > (
        canvas: &mut gfx::Canvas<O, D, F>,
        initial_buffer_size: usize,
        bitmap_font: Option<BitmapFont>,
        bitmap_font_texture: Option<gfx::TextureHandle<R>>,
    ) -> Result<DebugRenderer<R>, DebugRendererError> {
        let (w, h) = canvas.output.get_size();
        DebugRenderer::new(&canvas.device, &mut canvas.factory,
                           [w as u32, h as u32], initial_buffer_size,
                           bitmap_font, bitmap_font_texture)
    }

    /// Creates the line and text sub-renderers.
    ///
    /// `bitmap_font` and `bitmap_font_texture` fall back to the bundled Noto
    /// Sans assets when `None`.
    pub fn new<
        C: gfx::CommandBuffer<R>,
        F: Factory<R>,
        D: Device<Resources = R, CommandBuffer = C>,
    > (
        device: &D,
        factory: &mut F,
        frame_size: [u32; 2],
        initial_buffer_size: usize,
        bitmap_font: Option<BitmapFont>,
        bitmap_font_texture: Option<gfx::TextureHandle<R>>,
    ) -> Result<DebugRenderer<R>, DebugRendererError> {
        let device_capabilities = device.get_capabilities();
        let bitmap_font = match bitmap_font {
            Some(f) => f,
            None => BitmapFont::from_string(include_str!("../assets/notosans.fnt")).unwrap()
        };
        let bitmap_font_texture = match bitmap_font_texture {
            Some(t) => t,
            None => {
                // The bundled PNG must decode to RGBA8; any other pixel layout
                // is reported as BitmapFontTextureError.
                if let image::DynamicImage::ImageRgba8(rgba_image) = image::load_from_memory_with_format(include_bytes!("../assets/notosans.png"), image::ImageFormat::PNG).unwrap() {
                    gfx_texture::Texture::from_image(factory, &rgba_image, false, false, false).handle()
                } else {
                    return Err(DebugRendererError::BitmapFontTextureError)
                }
            }
        };
        let line_renderer = try!(LineRenderer::new(*device_capabilities, factory, initial_buffer_size));
        let text_renderer = try!(TextRenderer::new(*device_capabilities, factory, frame_size, initial_buffer_size, bitmap_font, bitmap_font_texture));
        Ok(DebugRenderer {
            line_renderer: line_renderer,
            text_renderer: text_renderer,
        })
    }

    /// Queues a colored 3D line segment for the next render.
    pub fn draw_line(&mut self, start: [f32; 3], end: [f32; 3], color: [f32; 4]) {
        self.line_renderer.draw_line(start, end, color);
    }

    /// Queues text at a fixed screen-space pixel position.
    pub fn draw_text_on_screen (
        &mut self,
        text: &str,
        screen_position: [i32; 2],
        color: [f32; 4],
    ) {
        self.text_renderer.draw_text_on_screen(text, screen_position, color);
    }

    /// Queues text anchored at a world-space position.
    pub fn draw_text_at_position (
        &mut self,
        text: &str,
        world_position: [f32; 3],
        color: [f32; 4],
    ) {
        self.text_renderer.draw_text_at_position(text, world_position, color);
    }

    /// Renders all queued primitives through a `gfx::Canvas`.
    pub fn render_canvas<
        C: gfx::CommandBuffer<R>,
        F: Factory<R>,
        O: gfx::render::target::Output<R>,
        D: Device<Resources = R, CommandBuffer = C>,
    > (
        &mut self,
        canvas: &mut gfx::Canvas<O, D, F>,
        projection: [[f32; 4]; 4],
    ) {
        self.render(&mut canvas.renderer, &mut canvas.factory, &canvas.output, projection);
    }

    /// Flushes queued lines, then text, to `output` using `projection`.
    pub fn render<
        C: gfx::CommandBuffer<R>,
        F: Factory<R>,
        O: gfx::render::target::Output<R>,
    > (
        &mut self,
        renderer: &mut gfx::Renderer<R, C>,
        factory: &mut F,
        output: &O,
        projection: [[f32; 4]; 4],
    ) {
        self.line_renderer.render(renderer, factory, output, projection);
        self.text_renderer.render(renderer, factory, output, projection);
    }
}
Auto-derive debug impl for DebugRendererError
use bitmap_font::BitmapFont;
use line_renderer::LineRenderer;
use text_renderer::TextRenderer;
use gfx;
use gfx::traits::*;
use gfx_texture;
use image;
/// Errors that can occur while constructing a `DebugRenderer`.
#[derive(Debug)]
pub enum DebugRendererError {
    ShaderProgramError(gfx::ProgramError), // shader program build failed
    BitmapFontTextureError, // bundled bitmap-font image was not RGBA8
}
// Lets `try!` convert shader-program failures into DebugRendererError.
impl From<gfx::ProgramError> for DebugRendererError {
    fn from(err: gfx::ProgramError) -> DebugRendererError {
        DebugRendererError::ShaderProgramError(err)
    }
}
/// Queues debug lines and text, then draws them in one `render` call.
pub struct DebugRenderer<R: gfx::Resources> {
    line_renderer: LineRenderer<R>, // batches 3D line segments
    text_renderer: TextRenderer<R>, // batches screen- and world-space text
}
impl<R: gfx::Resources> DebugRenderer<R> {
    /// Builds a `DebugRenderer` from an existing `gfx::Canvas`, deriving the
    /// frame size from the canvas output.
    pub fn from_canvas<
        C: gfx::CommandBuffer<R>,
        F: Factory<R>,
        O: gfx::render::target::Output<R>,
        D: Device<Resources = R, CommandBuffer = C>,
    > (
        canvas: &mut gfx::Canvas<O, D, F>,
        initial_buffer_size: usize,
        bitmap_font: Option<BitmapFont>,
        bitmap_font_texture: Option<gfx::TextureHandle<R>>,
    ) -> Result<DebugRenderer<R>, DebugRendererError> {
        let (w, h) = canvas.output.get_size();
        DebugRenderer::new(&canvas.device, &mut canvas.factory,
                           [w as u32, h as u32], initial_buffer_size,
                           bitmap_font, bitmap_font_texture)
    }

    /// Creates the line and text sub-renderers.
    ///
    /// `bitmap_font` and `bitmap_font_texture` fall back to the bundled Noto
    /// Sans assets when `None`.
    pub fn new<
        C: gfx::CommandBuffer<R>,
        F: Factory<R>,
        D: Device<Resources = R, CommandBuffer = C>,
    > (
        device: &D,
        factory: &mut F,
        frame_size: [u32; 2],
        initial_buffer_size: usize,
        bitmap_font: Option<BitmapFont>,
        bitmap_font_texture: Option<gfx::TextureHandle<R>>,
    ) -> Result<DebugRenderer<R>, DebugRendererError> {
        let device_capabilities = device.get_capabilities();
        let bitmap_font = match bitmap_font {
            Some(f) => f,
            None => BitmapFont::from_string(include_str!("../assets/notosans.fnt")).unwrap()
        };
        let bitmap_font_texture = match bitmap_font_texture {
            Some(t) => t,
            None => {
                // The bundled PNG must decode to RGBA8; any other pixel layout
                // is reported as BitmapFontTextureError.
                if let image::DynamicImage::ImageRgba8(rgba_image) = image::load_from_memory_with_format(include_bytes!("../assets/notosans.png"), image::ImageFormat::PNG).unwrap() {
                    gfx_texture::Texture::from_image(factory, &rgba_image, false, false, false).handle()
                } else {
                    return Err(DebugRendererError::BitmapFontTextureError)
                }
            }
        };
        let line_renderer = try!(LineRenderer::new(*device_capabilities, factory, initial_buffer_size));
        let text_renderer = try!(TextRenderer::new(*device_capabilities, factory, frame_size, initial_buffer_size, bitmap_font, bitmap_font_texture));
        Ok(DebugRenderer {
            line_renderer: line_renderer,
            text_renderer: text_renderer,
        })
    }

    /// Queues a colored 3D line segment for the next render.
    pub fn draw_line(&mut self, start: [f32; 3], end: [f32; 3], color: [f32; 4]) {
        self.line_renderer.draw_line(start, end, color);
    }

    /// Queues text at a fixed screen-space pixel position.
    pub fn draw_text_on_screen (
        &mut self,
        text: &str,
        screen_position: [i32; 2],
        color: [f32; 4],
    ) {
        self.text_renderer.draw_text_on_screen(text, screen_position, color);
    }

    /// Queues text anchored at a world-space position.
    pub fn draw_text_at_position (
        &mut self,
        text: &str,
        world_position: [f32; 3],
        color: [f32; 4],
    ) {
        self.text_renderer.draw_text_at_position(text, world_position, color);
    }

    /// Renders all queued primitives through a `gfx::Canvas`.
    pub fn render_canvas<
        C: gfx::CommandBuffer<R>,
        F: Factory<R>,
        O: gfx::render::target::Output<R>,
        D: Device<Resources = R, CommandBuffer = C>,
    > (
        &mut self,
        canvas: &mut gfx::Canvas<O, D, F>,
        projection: [[f32; 4]; 4],
    ) {
        self.render(&mut canvas.renderer, &mut canvas.factory, &canvas.output, projection);
    }

    /// Flushes queued lines, then text, to `output` using `projection`.
    pub fn render<
        C: gfx::CommandBuffer<R>,
        F: Factory<R>,
        O: gfx::render::target::Output<R>,
    > (
        &mut self,
        renderer: &mut gfx::Renderer<R, C>,
        factory: &mut F,
        output: &O,
        projection: [[f32; 4]; 4],
    ) {
        self.line_renderer.render(renderer, factory, output, projection);
        self.text_renderer.render(renderer, factory, output, projection);
    }
}
|
use metric::{Measured, Line, Grapheme, Metric};
use super::{NodeLink, LeafRepr };
use self::Value::*;
use std::cell::Cell;
use std::convert;
use std::default::Default;
use std::fmt;
use std::ops;
/// A lazily-evaluated, memoized field backed by a `Cell`.
#[derive(Clone)]
struct Lazy<T: Copy>(Cell<Option<T>>);

impl<T: Copy> Lazy<T> {
    /// Returns the cached value, or `None` if it has not been computed yet.
    #[inline]
    pub fn get(&self) -> Option<T> {
        self.0.get()
    }

    /// Returns the cached value, computing and caching it via `f` on first use.
    ///
    /// `f` is invoked at most once over the lifetime of the field; later calls
    /// return the memoized result without evaluating their closure.
    #[inline]
    pub fn get_or_else<F>(&self, f: F) -> T
    where F: FnOnce() -> T {
        match self.0.get() {
            Some(cached) => cached,
            None => {
                let computed = f();
                self.0.set(Some(computed));
                computed
            }
        }
    }

    /// Creates an empty (not-yet-computed) lazy field.
    #[inline]
    pub fn new() -> Self {
        Lazy(Cell::new(None))
    }
}

impl<T: Copy> Default for Lazy<T> {
    /// An empty field, equivalent to `Lazy::new()`.
    fn default() -> Self {
        Lazy::new()
    }
}

impl<T: Copy + fmt::Debug> fmt::Debug for Lazy<T> {
    /// Formats the cached value, or `?` if nothing has been computed yet.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(value) = self.0.get() {
            value.fmt(f)
        } else {
            write!(f, "?")
        }
    }
}
// Generates a metric accessor `$method` that memoizes `self.value.$method()`
// in the node's `Lazy` field `$field` of type `$ty` (used by the Measured impls).
macro_rules! lazy_field {
    ($method: ident, $field: ident, $ty:ty) => {
        #[inline] fn $method(&self) -> $ty {
            self.$field.get_or_else(|| { self.value.$method() })
        }
    }
}
/// A `Node`.
///
/// Pairs a tree [`Value`] with lazy caches of its measures, so each metric
/// (bytes, lines, graphemes) is computed at most once per node.
#[derive(Clone, Debug, Default)]
pub struct Node { len: Lazy<usize> // cached byte length of the subtree
, weight: Lazy<usize> // cached byte weight (left subtree's length)
, line_count: Lazy<Line> // cached line count
, line_weight: Lazy<Line> // cached line weight
, grapheme_count: Lazy<Grapheme> // cached grapheme count
, grapheme_weight: Lazy<Grapheme> // cached grapheme weight
, pub value: Value // the actual leaf/branch payload
}
impl Node {
/// Wraps `value` in a `Node` with all metric caches unset.
pub fn new(value: Value) -> Self {
Node { value: value, ..Default::default() }
}
/// Finds the deepest node fully containing the span of `span_len` units
/// starting at index `i`, returning that node and the span's start index
/// rebased into it.
///
/// NOTE(review): `self.len()` is presumably the byte measure reached via
/// `Deref`/`Measured<usize>`, and `or_zero!` a saturating subtraction —
/// both defined elsewhere in this crate; confirm.
///
/// # Panics
/// Asserts that the span fits inside this node (and, when descending
/// right, inside the right child).
pub fn spanning(&self, i: usize, span_len: usize) -> (&Node, usize)
where Node: Measured<usize> {
assert!(self.len() >= span_len);
match **self {
Branch { ref right, ref left } if <Node as Measured<usize>>::measure_weight(self) < i => {
// if this node is a branch, and the weight is less than the
// index, where the span begins, then the first index of the
// span is on the right side
let span_i = or_zero!(i, left.len());
assert!(or_zero!(right.len(), span_i) >= span_len);
right.spanning(span_i, span_len)
}
, Branch { ref left, .. }
// if the left child is long enough to contain the entire span,
// walk to the left child
if or_zero!(left.len(), i) >= span_len => left.spanning(i, span_len)
, Leaf(_) | Branch {..} =>
// if this function has walked as far as a leaf node,
// then that leaf must be the spanning node. return it;
//
// otherwise, if the node is a branch node and the span is longer
// than the left child, then this node must be the minimum
// spanning node
(self, i)
}
}
}
impl fmt::Display for Node {
/// Writes the node's text content by streaming every leaf string in order
/// (no intermediate allocation of the whole text).
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Chain the Results so the first write error is reported; the fold
// itself still visits every string.
self.strings()
.fold(Ok(()), |r, string| r.and_then(|_| write!(f, "{}", string)))
}
}
/// Conversion into the link type used to connect branch children.
impl convert::Into<NodeLink> for Node {
#[inline] fn into(self) -> NodeLink {
NodeLink::new(self)
}
}
/// A `Node` transparently dereferences to its underlying `Value`, so match
/// expressions can destructure `**node` directly.
impl ops::Deref for Node {
type Target = Value;
fn deref(&self) -> &Value { &self.value }
}
// Byte metric: indices already are byte offsets, so the conversion is the
// identity; length/weight are cached in `len`/`weight`.
impl Measured<usize> for Node {
#[inline] fn to_byte_index(&self, index: usize) -> Option<usize> {
Some(index)
}
lazy_field!(measure, len, usize);
lazy_field!(measure_weight, weight, usize);
}
// Grapheme metric: index conversion is delegated to the value; counts are
// cached in the grapheme fields.
impl Measured<Grapheme> for Node {
#[inline] fn to_byte_index(&self, index: Grapheme) -> Option<usize> {
self.value.to_byte_index(index)
}
lazy_field!(measure, grapheme_count, Grapheme);
lazy_field!(measure_weight, grapheme_weight, Grapheme);
}
// Line metric: index conversion is delegated to the value; counts are cached
// in the line fields.
impl Measured<Line> for Node {
#[inline] fn to_byte_index(&self, index: Line) -> Option<usize> {
self.value.to_byte_index(index)
}
lazy_field!(measure, line_count, Line);
lazy_field!(measure_weight, line_weight, Line);
}
/// Indexes a `Node` by a metric `M`, returning the one-byte `str` slice at
/// measured position `i` of this subtree.
///
/// # Panics
/// Panics if `i >= self.measure()`, or if a leaf cannot convert `i` into a
/// byte index.
impl<M> ops::Index<M> for Node
where M: Metric
    , Node: Measured<M>
    , LeafRepr: Measured<M>
{
    type Output = str;
    fn index(&self, i: M) -> &str {
        let len = self.measure();
        assert!( i < len
               , "Node::index: index {:?} out of bounds (length {:?})", i, len);
        // The branch decision must compare against the node's *weight* (the
        // measure of the left subtree), not the total length: the assert
        // above guarantees `i < len`, so the previous `len < i` guard could
        // never fire and the right child was unreachable.
        let weight = self.measure_weight();
        match **self {
            Leaf(ref string) => {
                let idx = string.to_byte_index(i)
                    .expect("index out of bounds!");
                // NOTE(review): this slices exactly one byte; assumes the
                // metric's byte index always addresses a single-byte unit —
                // confirm for multi-byte graphemes.
                &string[idx..idx + 1]
            }
          , Branch { ref right, .. } if weight <= i =>
                // past the left subtree: descend right, rebasing the index
                // by the left subtree's measure
                &right[i - weight]
          , Branch { ref left, .. } => &left[i]
        }
    }
}
/// A `Node` in the `Rope`'s tree.
///
/// A `Node` is either a `Leaf` holding a `String`, or a
/// a `Branch` concatenating together two `Node`s.
#[derive(Clone, Debug)]
pub enum Value {
/// A leaf node
Leaf(LeafRepr)
, /// A branch concatenating together `l`eft and `r`ight nodes.
Branch { /// The left branch node
left: NodeLink
, /// The right branch node
right: NodeLink }
}
impl Value {
/// Builds a `Branch` value from two child links.
#[inline]
pub fn new_branch(left: NodeLink, right: NodeLink) -> Self {
Branch { left: left, right: right }
}
}
impl<M> Measured<M> for Value
where M: Metric
    , LeafRepr: Measured<M>
    , Node: Measured<M>
{
    /// Metric-to-byte conversion is not yet supported at the `Value` level;
    /// callers go through `Node`/`LeafRepr` instead. The parameter is
    /// underscored to silence the unused-variable warning.
    fn to_byte_index(&self, _index: M) -> Option<usize> {
        unimplemented!()
    }
    /// The measure of a value: a leaf's own measure, or the sum of both
    /// children's measures for a branch.
    fn measure(&self) -> M {
        match *self {
            Leaf(ref r) => r.measure()
          , Branch { ref left, ref right } =>
                left.measure() + right.measure()
        }
    }
    /// The weight of a value: a leaf's own weight, or — for a branch — the
    /// full measure of the *left* subtree only (standard rope weight).
    fn measure_weight(&self) -> M {
        match *self {
            Leaf(ref r) => r.measure_weight()
            // only the left child matters here; binding `right` as before
            // produced an unused-variable warning.
          , Branch { ref left, .. } =>
                left.measure()
        }
    }
}
/// Wrapping a bare `Value` in a `Node` starts with all metric caches unset.
impl convert::Into<Node> for Value {
#[inline] fn into(self) -> Node {
Node::new(self)
}
}
/// The default value is an empty leaf.
impl Default for Value {
fn default() -> Self {
Leaf(LeafRepr::default())
}
}
fix(rope internals): make node fmt::Debug implementations less wordy
use metric::{Measured, Line, Grapheme, Metric};
use super::{NodeLink, LeafRepr };
use self::Value::*;
use std::cell::Cell;
use std::convert;
use std::default::Default;
use std::fmt;
use std::ops;
/// A lazily-evaluated field
///
/// Interior mutability (`Cell`) lets the cache be filled through a shared
/// reference, so metric getters can take `&self`.
#[derive(Clone)]
struct Lazy<T: Copy>(Cell<Option<T>>);

impl<T> Lazy<T>
where T: Copy {
    /// Returns the cached value, or `None` if it has not been computed yet.
    #[inline]
    pub fn get(&self) -> Option<T> { self.0.get() }

    /// Returns the cached value, computing and caching it with `f` on the
    /// first call; `f` is never invoked again afterwards.
    #[inline]
    pub fn get_or_else<F>(&self, f: F) -> T
    where F: FnOnce() -> T {
        self.0.get().unwrap_or_else(|| {
            let computed = f();
            self.0.set(Some(computed));
            computed
        })
    }

    /// Creates an empty (not-yet-computed) `Lazy`.
    #[inline]
    pub fn new() -> Self {
        Lazy(Cell::new(None))
    }
}

impl<T> Default for Lazy<T>
where T: Copy {
    /// The default is an empty cache.
    fn default() -> Self {
        Lazy(Cell::new(None))
    }
}

impl<T> fmt::Debug for Lazy<T>
where T: fmt::Debug
    , T: Copy {
    /// Formats the cached value, or `?` when nothing has been computed yet.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(value) = self.0.get() {
            value.fmt(f)
        } else {
            write!(f, "?")
        }
    }
}
/// Generates a `Measured` trait method that reads the named lazy cache field,
/// falling back to (and caching) the computation on `self.value`.
macro_rules! lazy_field {
($method: ident, $field: ident, $ty:ty) => {
#[inline] fn $method(&self) -> $ty {
// Cache hit returns immediately; a miss delegates to the inner value.
self.$field.get_or_else(|| { self.value.$method() })
}
}
}
/// A `Node`.
///
/// Pairs a tree [`Value`] with lazy caches of its measures, so each metric
/// (bytes, lines, graphemes) is computed at most once per node. `Debug` is
/// implemented manually (below) rather than derived.
#[derive(Clone, Default)]
pub struct Node { len: Lazy<usize> // cached byte length of the subtree
, weight: Lazy<usize> // cached byte weight (left subtree's length)
, line_count: Lazy<Line> // cached line count
, line_weight: Lazy<Line> // cached line weight
, grapheme_count: Lazy<Grapheme> // cached grapheme count
, grapheme_weight: Lazy<Grapheme> // cached grapheme weight
, pub value: Value // the actual leaf/branch payload
}
impl Node {
/// Wraps `value` in a `Node` with all metric caches unset.
pub fn new(value: Value) -> Self {
Node { value: value, ..Default::default() }
}
/// Finds the deepest node fully containing the span of `span_len` units
/// starting at index `i`, returning that node and the span's start index
/// rebased into it.
///
/// NOTE(review): `self.len()` is presumably the byte measure reached via
/// `Deref`/`Measured<usize>`, and `or_zero!` a saturating subtraction —
/// both defined elsewhere in this crate; confirm.
///
/// # Panics
/// Asserts that the span fits inside this node (and, when descending
/// right, inside the right child).
pub fn spanning(&self, i: usize, span_len: usize) -> (&Node, usize)
where Node: Measured<usize> {
assert!(self.len() >= span_len);
match **self {
Branch { ref right, ref left } if < Node as Measured<usize>>::measure_weight(self) < i => {
// if this node is a branch, and the weight is less than the
// index, where the span begins, then the first index of the
// span is on the right side
let span_i = or_zero!(i, left.len());
assert!(or_zero!(right.len(), span_i) >= span_len);
right.spanning(span_i, span_len)
}
, Branch { ref left, .. }
// if the left child is long enough to contain the entire span,
// walk to the left child
if or_zero!(left.len(), i) >= span_len => left.spanning(i, span_len)
, Leaf(_) | Branch {..} =>
// if this function has walked as far as a leaf node,
// then that leaf must be the spanning node. return it;
//
// otherwise, if the node is a branch node and the span is longer
// than the left child, then this node must be the minimum
// spanning node
(self, i)
}
}
}
impl fmt::Display for Node {
/// Writes the node's text content by streaming every leaf string in order
/// (no intermediate allocation of the whole text).
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Chain the Results so the first write error is reported; the fold
// itself still visits every string.
self.strings()
.fold(Ok(()), |r, string| r.and_then(|_| write!(f, "{}", string)))
}
}
impl fmt::Debug for Node {
    /// Formats the node as its value followed by whichever metric caches
    /// have been computed; still-lazy metrics are omitted entirely.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        /// Renders a cached metric as " <label>: <value>," — or nothing at
        /// all when the metric has not been computed yet.
        fn metric<T: fmt::Debug>(label: &str, value: Option<T>) -> String {
            value.map(|v| format!(" {}: {:?},", label, v))
                 .unwrap_or_else(String::new)
        }
        // Each metric gets a distinct label; previously the grapheme and
        // line segments reused "length"/"weight" (ambiguous output) and
        // dropped the separating space after the comma.
        write!( f, "{:?}{}{}{}{}{}{}"
              , self.value
              , metric("length", self.len.get())
              , metric("weight", self.weight.get())
              , metric("grapheme length", self.grapheme_count.get())
              , metric("grapheme weight", self.grapheme_weight.get())
              , metric("line length", self.line_count.get())
              , metric("line weight", self.line_weight.get())
              )
    }
}
/// Conversion into the link type used to connect branch children.
impl convert::Into<NodeLink> for Node {
#[inline] fn into(self) -> NodeLink {
NodeLink::new(self)
}
}
/// A `Node` transparently dereferences to its underlying `Value`, so match
/// expressions can destructure `**node` directly.
impl ops::Deref for Node {
type Target = Value;
fn deref(&self) -> &Value { &self.value }
}
// Byte metric: indices already are byte offsets, so the conversion is the
// identity; length/weight are cached in `len`/`weight`.
impl Measured<usize> for Node {
#[inline] fn to_byte_index(&self, index: usize) -> Option<usize> {
Some(index)
}
lazy_field!(measure, len, usize);
lazy_field!(measure_weight, weight, usize);
}
// Grapheme metric: index conversion is delegated to the value; counts are
// cached in the grapheme fields.
impl Measured<Grapheme> for Node {
#[inline] fn to_byte_index(&self, index: Grapheme) -> Option<usize> {
self.value.to_byte_index(index)
}
lazy_field!(measure, grapheme_count, Grapheme);
lazy_field!(measure_weight, grapheme_weight, Grapheme);
}
// Line metric: index conversion is delegated to the value; counts are cached
// in the line fields.
impl Measured<Line> for Node {
#[inline] fn to_byte_index(&self, index: Line) -> Option<usize> {
self.value.to_byte_index(index)
}
lazy_field!(measure, line_count, Line);
lazy_field!(measure_weight, line_weight, Line);
}
/// Indexes a `Node` by a metric `M`, returning the one-byte `str` slice at
/// measured position `i` of this subtree.
///
/// # Panics
/// Panics if `i >= self.measure()`, or if a leaf cannot convert `i` into a
/// byte index.
impl<M> ops::Index<M> for Node
where M: Metric
    , Node: Measured<M>
    , LeafRepr: Measured<M>
{
    type Output = str;
    fn index(&self, i: M) -> &str {
        let len = self.measure();
        assert!( i < len
               , "Node::index: index {:?} out of bounds (length {:?})", i, len);
        // The branch decision must compare against the node's *weight* (the
        // measure of the left subtree), not the total length: the assert
        // above guarantees `i < len`, so the previous `len < i` guard could
        // never fire and the right child was unreachable.
        let weight = self.measure_weight();
        match **self {
            Leaf(ref string) => {
                let idx = string.to_byte_index(i)
                    .expect("index out of bounds!");
                // NOTE(review): this slices exactly one byte; assumes the
                // metric's byte index always addresses a single-byte unit —
                // confirm for multi-byte graphemes.
                &string[idx..idx + 1]
            }
          , Branch { ref right, .. } if weight <= i =>
                // past the left subtree: descend right, rebasing the index
                // by the left subtree's measure
                &right[i - weight]
          , Branch { ref left, .. } => &left[i]
        }
    }
}
/// A `Node` in the `Rope`'s tree.
///
/// A `Node` is either a `Leaf` holding a `String`, or a
/// a `Branch` concatenating together two `Node`s.
#[derive(Clone)]
pub enum Value {
/// A leaf node
Leaf(LeafRepr)
, /// A branch concatenating together `l`eft and `r`ight nodes.
Branch { /// The left branch node
left: NodeLink
, /// The right branch node
right: NodeLink }
}
impl Value {
/// Builds a `Branch` value from two child links.
#[inline]
pub fn new_branch(left: NodeLink, right: NodeLink) -> Self {
Branch { left: left, right: right }
}
}
impl<M> Measured<M> for Value
where M: Metric
    , LeafRepr: Measured<M>
    , Node: Measured<M>
{
    /// Metric-to-byte conversion is not yet supported at the `Value` level;
    /// callers go through `Node`/`LeafRepr` instead. The parameter is
    /// underscored to silence the unused-variable warning.
    fn to_byte_index(&self, _index: M) -> Option<usize> {
        unimplemented!()
    }
    /// The measure of a value: a leaf's own measure, or the sum of both
    /// children's measures for a branch.
    fn measure(&self) -> M {
        match *self {
            Leaf(ref r) => r.measure()
          , Branch { ref left, ref right } =>
                left.measure() + right.measure()
        }
    }
    /// The weight of a value: a leaf's own weight, or — for a branch — the
    /// full measure of the *left* subtree only (standard rope weight).
    fn measure_weight(&self) -> M {
        match *self {
            Leaf(ref r) => r.measure_weight()
            // only the left child matters here; binding `right` as before
            // produced an unused-variable warning.
          , Branch { ref left, .. } =>
                left.measure()
        }
    }
}
/// Wrapping a bare `Value` in a `Node` starts with all metric caches unset.
impl convert::Into<Node> for Value {
#[inline] fn into(self) -> Node {
Node::new(self)
}
}
/// The default value is an empty leaf.
impl Default for Value {
fn default() -> Self {
Leaf(LeafRepr::default())
}
}
/// Compact `Debug` rendering: `Leaf(<repr>)` or `Branch(<left>, <right>)`,
/// recursing into the children, instead of the wordier derived output.
impl fmt::Debug for Value {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Leaf(ref r) => write!(f, "Leaf({:?})", r)
, Branch { ref left, ref right } => write!( f, "Branch({:?}, {:?})"
, left
, right)
}
}
}
|
use super::*;
use std::str::FromStr;
use std::ops::Deref;
use lsp::request::Request;
use lsp::notification::Notification;
use vim::*;
/// The complete editor-facing API of the language client: state accessors,
/// RPC plumbing, LSP request/notification handlers, and client extensions.
pub trait ILanguageClient {
    /// Run `f` with shared read access to the client state.
    fn get<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce(&State) -> Result<T>;
    /// Run `f` with exclusive mutable access to the client state.
    fn update<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce(&mut State) -> Result<T>;
    /// Read and dispatch messages from `input` until EOF or server death.
    fn loop_message<T: BufRead>(&self, input: T, languageId: Option<String>) -> Result<()>;
    /// Dispatch one raw JSON-RPC message (response, request or notification).
    fn handle_message(&self, languageId: Option<String>, message: String) -> Result<()>;
    fn write(&self, languageId: Option<&str>, message: &str) -> Result<()>;
    fn output(&self, languageId: Option<&str>, id: Id, result: Result<Value>) -> Result<()>;
    fn call<P: Serialize>(&self, languageId: Option<&str>, method: &str, params: P) -> Result<Value>;
    fn notify<P: Serialize>(&self, languageId: Option<&str>, method: &str, params: P) -> Result<()>;
    // Utils.
    fn gather_args<E: VimExp + std::fmt::Debug, T: DeserializeOwned>(
        &self,
        exps: &[E],
        params: &Option<Params>,
    ) -> Result<T>;
    fn sync_settings(&self) -> Result<()>;
    fn define_signs(&self) -> Result<()>;
    fn apply_WorkspaceEdit(&self, edit: &WorkspaceEdit, params: &Option<Params>) -> Result<()>;
    fn apply_TextEdits<P: AsRef<Path>>(&self, path: P, edits: &[TextEdit]) -> Result<()>;
    fn display_diagnostics(&self, filename: &str, diagnostics: &[Diagnostic]) -> Result<()>;
    fn display_locations(&self, locations: &[Location], languageId: &str) -> Result<()>;
    fn registerCMSource(&self, languageId: &str, result: &Value) -> Result<()>;
    fn get_line<P: AsRef<Path>>(&self, path: P, line: u64) -> Result<String>;
    fn try_handle_command_by_client(&self, cmd: &Command) -> Result<bool>;
    fn cleanup(&self, languageId: &str) -> Result<()>;
    // Standard LSP requests/notifications.
    fn initialize(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_hover(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_definition(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_rename(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_documentSymbol(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_codeAction(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_completion(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_signatureHelp(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_references(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_formatting(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_rangeFormatting(&self, params: &Option<Params>) -> Result<Value>;
    fn completionItem_resolve(&self, params: &Option<Params>) -> Result<Value>;
    fn workspace_symbol(&self, params: &Option<Params>) -> Result<Value>;
    fn workspace_executeCommand(&self, params: &Option<Params>) -> Result<Value>;
    fn workspace_applyEdit(&self, params: &Option<Params>) -> Result<Value>;
    fn rustDocument_implementations(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_didOpen(&self, params: &Option<Params>) -> Result<()>;
    fn textDocument_didChange(&self, params: &Option<Params>) -> Result<()>;
    fn textDocument_didSave(&self, params: &Option<Params>) -> Result<()>;
    fn textDocument_didClose(&self, params: &Option<Params>) -> Result<()>;
    fn textDocument_publishDiagnostics(&self, params: &Option<Params>) -> Result<()>;
    fn window_logMessage(&self, params: &Option<Params>) -> Result<()>;
    fn exit(&self, params: &Option<Params>) -> Result<()>;
    // Extensions.
    // (The two methods below previously used anonymous `&Option<Params>`
    // parameters — a deprecated form; they are now named like every other
    // declaration. Trait parameter names do not affect implementors.)
    fn languageClient_getState(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_isAlive(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_startServer(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_registerServerCommands(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_setLoggingLevel(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_omniComplete(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_handleBufReadPost(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_handleTextChanged(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_handleBufWritePost(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_handleBufDelete(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_handleCursorMoved(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_FZFSinkLocation(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_FZFSinkCommand(&self, params: &Option<Params>) -> Result<()>;
    fn NCM_refresh(&self, params: &Option<Params>) -> Result<()>;
    // Extensions by languge servers.
    fn language_status(&self, params: &Option<Params>) -> Result<()>;
    fn rust_handleBeginBuild(&self, params: &Option<Params>) -> Result<()>;
    fn rust_handleDiagnosticsBegin(&self, params: &Option<Params>) -> Result<()>;
    fn rust_handleDiagnosticsEnd(&self, params: &Option<Params>) -> Result<()>;
    fn cquery_handleProgress(&self, params: &Option<Params>) -> Result<()>;
}
impl ILanguageClient for Arc<Mutex<State>> {
/// Runs `f` with shared access to the locked state.
///
/// The `PoisonError` from a failed lock is not `Send`, so it is replaced
/// by a plain message; `map_err` states that directly instead of the old
/// `or_else(|_| Err(..))` detour.
fn get<F, T>(&self, f: F) -> Result<T>
where
    F: FnOnce(&State) -> Result<T>,
{
    let state = self.lock()
        .map_err(|_| err_msg("Failed to lock state"))?;
    f(&state)
}
/// Runs `f` with exclusive, mutable access to the shared state.
///
/// When debug logging is enabled, the state is snapshotted (via a JSON
/// round-trip) before and after `f`, and every changed field is logged as
/// `key: old ==> new`.
fn update<F, T>(&self, f: F) -> Result<T>
where
F: FnOnce(&mut State) -> Result<T>,
{
use log::LogLevel;
let mut state = self.lock()
.or_else(|_| Err(err_msg("Failed to lock state")))?;
// Snapshot only when the debug level is active; otherwise use a cheap
// placeholder that diffs as "no change".
let before = if log_enabled!(LogLevel::Debug) {
let s = serde_json::to_string(state.deref())?;
serde_json::from_str(&s)?
} else {
Value::default()
};
let result = f(&mut state);
let after = if log_enabled!(LogLevel::Debug) {
let s = serde_json::to_string(state.deref())?;
serde_json::from_str(&s)?
} else {
Value::default()
};
for (k, (v1, v2)) in diff_value(&before, &after, "state") {
debug!("{}: {} ==> {}", k, v1, v2);
}
result
}
/// Reads messages from `input` until EOF (or server death), dispatching
/// each one to `handle_message` on a freshly spawned handler thread.
///
/// With a `languageId`, `input` is a language server speaking LSP framing:
/// a `Content-Length` header, a blank line, then the payload. More than
/// five consecutive empty lines is treated as the server having exited.
/// Without a `languageId`, `input` is the editor and each line is one
/// message.
fn loop_message<T: BufRead>(&self, input: T, languageId: Option<String>) -> Result<()> {
// Count how many consequent empty lines.
let mut count_empty_lines = 0;
let mut input = input;
// Carries the value of the most recent Content-Length header into the
// blank-line branch that reads the payload.
let mut content_length = 0;
loop {
let mut message = String::new();
let mut line = String::new();
if let Some(languageId) = languageId.clone() {
input.read_line(&mut line)?;
line = line.strip();
if line.is_empty() {
count_empty_lines += 1;
// A dead server yields an endless stream of empty reads; after
// five of them, clean up, surface stderr, and abort the loop.
if count_empty_lines > 5 {
if let Err(err) = self.cleanup(&languageId) {
error!("Error when cleanup: {:?}", err);
}
let mut message = format!("Language server ({}) exited unexpectedly!", languageId);
match get_log_server() {
Ok(log_server) => {
message += "\n\nlanguage server stderr:\n";
message += &log_server;
}
Err(err) => error!("Error when get_log_server: {:?}", err),
}
if let Err(err) = self.echoerr(&message) {
error!("Error in echoerr: {:?}", err);
};
bail!("{}", message);
}
// A (single) blank line ends the headers: read exactly the
// announced number of payload bytes.
let mut buf = vec![0; content_length];
input.read_exact(buf.as_mut_slice())?;
message = String::from_utf8(buf)?;
} else {
count_empty_lines = 0;
// Other headers are ignored; only Content-Length matters.
if !line.starts_with("Content-Length") {
continue;
}
let tokens: Vec<&str> = line.splitn(2, ':').collect();
let len = tokens
.get(1)
.ok_or_else(|| format_err!("Failed to get length! tokens: {:?}", tokens))?
.trim();
content_length = usize::from_str(len)?;
}
} else if input.read_line(&mut message)? == 0 {
break;
}
message = message.strip();
if message.is_empty() {
continue;
}
info!("<= {}", message);
// Handle each message on its own named thread so a slow handler does
// not block the read loop.
let state = Arc::clone(self);
let languageId_clone = languageId.clone();
let spawn_result = std::thread::Builder::new()
.name(format!(
"Handler-{}",
languageId.clone().unwrap_or_else(|| "main".to_owned())
))
.spawn(move || {
if let Err(err) = state.handle_message(languageId_clone, message.clone()) {
// LCError values are expected control-flow; anything else is a
// genuine failure worth logging.
if err.downcast_ref::<LCError>().is_none() {
error!(
"Error handling message. Message: {}. Error: {:?}",
message, err
);
}
}
});
if let Err(err) = spawn_result {
error!("Failed to spawn handler: {:?}", err);
}
}
Ok(())
}
/// Handle an incoming message.
///
/// A message is either (a) a response to an earlier `call` — routed to the
/// channel sender that `call` registered in `state.txs` — or (b) a request
/// or notification, dispatched by method name to the matching handler.
/// Requests additionally have their result written back via `output`.
fn handle_message(&self, languageId: Option<String>, message: String) -> Result<()> {
// Responses parse as `Output`; forward the result to the waiting caller.
if let Ok(output) = serde_json::from_str::<Output>(&message) {
let tx = self.update(|state| {
state
.txs
.remove(&output.id().to_int()?)
.ok_or_else(|| format_err!("Failed to get channel sender! id: {:?}", output.id()))
})?;
let result = match output {
Output::Success(success) => Ok(success.result),
Output::Failure(failure) => Err(format_err!("{}", failure.error.message)),
};
tx.send(result)?;
return Ok(());
}
// FIXME
let message = message.replace(r#","meta":{}"#, "");
let call = serde_json::from_str(&message)?;
match call {
Call::MethodCall(method_call) => {
// Requests: dispatch and send the result (or error) back by id.
let result: Result<Value> = match method_call.method.as_str() {
lsp::request::HoverRequest::METHOD => self.textDocument_hover(&method_call.params),
lsp::request::GotoDefinition::METHOD => self.textDocument_definition(&method_call.params),
lsp::request::Rename::METHOD => self.textDocument_rename(&method_call.params),
lsp::request::DocumentSymbol::METHOD => self.textDocument_documentSymbol(&method_call.params),
lsp::request::WorkspaceSymbol::METHOD => self.workspace_symbol(&method_call.params),
lsp::request::CodeActionRequest::METHOD => self.textDocument_codeAction(&method_call.params),
lsp::request::Completion::METHOD => self.textDocument_completion(&method_call.params),
lsp::request::SignatureHelpRequest::METHOD => self.textDocument_signatureHelp(&method_call.params),
lsp::request::References::METHOD => self.textDocument_references(&method_call.params),
lsp::request::Formatting::METHOD => self.textDocument_formatting(&method_call.params),
lsp::request::RangeFormatting::METHOD => self.textDocument_rangeFormatting(&method_call.params),
lsp::request::ResolveCompletionItem::METHOD => self.completionItem_resolve(&method_call.params),
lsp::request::ExecuteCommand::METHOD => self.workspace_executeCommand(&method_call.params),
lsp::request::ApplyWorkspaceEdit::METHOD => self.workspace_applyEdit(&method_call.params),
REQUEST__RustImplementations => self.rustDocument_implementations(&method_call.params),
// Extensions.
REQUEST__GetState => self.languageClient_getState(&method_call.params),
REQUEST__IsAlive => self.languageClient_isAlive(&method_call.params),
REQUEST__StartServer => self.languageClient_startServer(&method_call.params),
REQUEST__RegisterServerCommands => self.languageClient_registerServerCommands(&method_call.params),
REQUEST__SetLoggingLevel => self.languageClient_setLoggingLevel(&method_call.params),
REQUEST__OmniComplete => self.languageClient_omniComplete(&method_call.params),
_ => Err(format_err!("Unknown method call: {}", method_call.method)),
};
if let Err(err) = result.as_ref() {
// LCError is expected control-flow; only log other failures.
if err.downcast_ref::<LCError>().is_none() {
error!(
"Error handling message. Message: {}. Error: {:?}",
message, result
);
}
}
self.output(
languageId.as_ref().map(|s| s.as_str()),
method_call.id,
result,
)?
}
Call::Notification(notification) => {
// Notifications: dispatch only, nothing is written back.
match notification.method.as_str() {
lsp::notification::DidOpenTextDocument::METHOD => self.textDocument_didOpen(&notification.params)?,
lsp::notification::DidChangeTextDocument::METHOD => {
self.textDocument_didChange(&notification.params)?
}
lsp::notification::DidSaveTextDocument::METHOD => self.textDocument_didSave(&notification.params)?,
lsp::notification::DidCloseTextDocument::METHOD => {
self.textDocument_didClose(&notification.params)?
}
lsp::notification::PublishDiagnostics::METHOD => {
self.textDocument_publishDiagnostics(&notification.params)?
}
lsp::notification::LogMessage::METHOD => self.window_logMessage(&notification.params)?,
lsp::notification::Exit::METHOD => self.exit(&notification.params)?,
// Extensions.
NOTIFICATION__HandleBufReadPost => self.languageClient_handleBufReadPost(&notification.params)?,
NOTIFICATION__HandleTextChanged => self.languageClient_handleTextChanged(&notification.params)?,
NOTIFICATION__HandleBufWritePost => self.languageClient_handleBufWritePost(&notification.params)?,
NOTIFICATION__HandleBufDelete => self.languageClient_handleBufDelete(&notification.params)?,
NOTIFICATION__HandleCursorMoved => self.languageClient_handleCursorMoved(&notification.params)?,
NOTIFICATION__FZFSinkLocation => self.languageClient_FZFSinkLocation(&notification.params)?,
NOTIFICATION__FZFSinkCommand => self.languageClient_FZFSinkCommand(&notification.params)?,
NOTIFICATION__NCMRefresh => self.NCM_refresh(&notification.params)?,
// Extensions by language servers.
NOTIFICATION__LanguageStatus => self.language_status(&notification.params)?,
NOTIFICATION__RustBeginBuild => self.rust_handleBeginBuild(&notification.params)?,
NOTIFICATION__RustDiagnosticsBegin => self.rust_handleDiagnosticsBegin(&notification.params)?,
NOTIFICATION__RustDiagnosticsEnd => self.rust_handleDiagnosticsEnd(&notification.params)?,
NOTIFICATION__CqueryProgress => self.cquery_handleProgress(&notification.params)?,
_ => warn!("Unknown notification: {:?}", notification.method),
}
}
Call::Invalid(id) => bail!("Invalid message of id: {:?}", id),
}
Ok(())
}
/// Send message to RPC server.
///
/// With a `languageId` the message goes to that server's writer using LSP
/// framing (`Content-Length` header, CRLF CRLF, payload). Without one, the
/// peer is the editor on stdout, which accepts plain `\n\n` framing.
fn write(&self, languageId: Option<&str>, message: &str) -> Result<()> {
    if let Some(languageId) = languageId {
        self.update(|state| {
            let writer = state
                .writers
                .get_mut(languageId)
                // ok_or_else: build the error (and its owned String) only
                // on the failure path instead of eagerly on every write.
                .ok_or_else(|| LCError::NoLanguageServer {
                    languageId: languageId.to_owned(),
                })?;
            write!(
                writer,
                "Content-Length: {}\r\n\r\n{}",
                message.len(),
                message
            )?;
            // Flush so the server sees the message immediately.
            Ok(writer.flush()?)
        })?;
    } else {
        println!("Content-Length: {}\n\n{}", message.len(), message);
    }
    Ok(())
}
/// Write an RPC call output.
///
/// Wraps `result` in a JSON-RPC 2.0 success or failure response carrying
/// `id`, then sends it through `write`.
fn output(&self, languageId: Option<&str>, id: Id, result: Result<Value>) -> Result<()> {
let response = match result {
Ok(ok) => Output::Success(Success {
jsonrpc: Some(Version::V2),
id,
result: ok,
}),
Err(err) => Output::Failure(Failure {
jsonrpc: Some(Version::V2),
id,
error: err.to_rpc_error(),
}),
};
let message = serde_json::to_string(&response)?;
info!("=> {}", message);
self.write(languageId, &message)?;
Ok(())
}
/// RPC method call.
///
/// Allocates a fresh request id, registers a channel sender in `state.txs`
/// (consumed by `handle_message` when the response arrives), sends the
/// request, and blocks for the reply with a five-minute timeout.
fn call<P: Serialize>(&self, languageId: Option<&str>, method: &str, params: P) -> Result<Value> {
let id = self.update(|state| {
state.id += 1;
Ok(state.id)
})?;
let method_call = MethodCall {
jsonrpc: Some(Version::V2),
id: Id::Num(id),
method: method.into(),
params: Some(params.to_params()?),
};
let (tx, cx) = channel();
self.update(|state| {
state.txs.insert(id, tx);
Ok(())
})?;
let message = serde_json::to_string(&method_call)?;
info!("=> {}", message);
self.write(languageId, &message)?;
cx.recv_timeout(std::time::Duration::from_secs(60 * 5))?
}
/// RPC notification.
///
/// Fire-and-forget: serializes a JSON-RPC notification (no id, so no
/// response is expected) and writes it out.
fn notify<P: Serialize>(&self, languageId: Option<&str>, method: &str, params: P) -> Result<()> {
let notification = rpc::Notification {
jsonrpc: Some(Version::V2),
method: method.to_owned(),
params: Some(params.to_params()?),
};
let message = serde_json::to_string(&notification)?;
info!("=> {}", message);
self.write(languageId, &message)?;
Ok(())
}
/// Resolves the values named by `exps`: values already present in the RPC
/// params (`map`) are used as-is, the rest are fetched from vim in one
/// batched `eval`, and the combined, `exps`-ordered list is deserialized
/// into `T`.
///
/// NOTE(review): the trait declares this parameter as `params`; here it is
/// named `map`.
fn gather_args<E: VimExp + std::fmt::Debug, T: DeserializeOwned>(
&self,
exps: &[E],
map: &Option<Params>,
) -> Result<T> {
let mut map = match *map {
None | Some(Params::None) => serde_json::map::Map::new(),
Some(Params::Array(_)) => bail!("Params should be dict!"),
Some(Params::Map(ref map)) => map.clone(),
};
// Collect the keys whose values are missing, keeping keys and vim
// expressions index-aligned for the zip below.
let mut keys_request = vec![];
let mut exps_request = vec![];
for e in exps {
let k = e.to_key();
if !map.contains_key(&k) {
keys_request.push(k);
exps_request.push(e.to_exp());
}
}
let values_request: Vec<Value> = if keys_request.is_empty() {
vec![]
} else {
info!(
"Some arguments are not available. Requesting from vim. Keys: {:?}. Exps: {:?}",
keys_request, exps_request,
);
self.eval(&exps_request[..])?
};
for (k, v) in keys_request.into_iter().zip(values_request.into_iter()) {
map.insert(k, v);
}
// Re-emit the values in the order of `exps`, which is the order the
// target tuple/struct `T` expects.
let mut result = vec![];
for e in exps {
let k = e.to_key();
result.push(map.remove(&k)
.ok_or_else(|| format_err!("Failed to get value! k: {}", k))?);
}
info!("gather_args: {:?} = {:?}", exps, result);
Ok(serde_json::from_value(Value::Array(result))?)
}
/// Pulls all `g:LanguageClient_*` settings from vim in two batched `eval`
/// calls, normalizes them (booleans, enums, logging level), and stores the
/// result in the shared state.
fn sync_settings(&self) -> Result<()> {
let (autoStart, serverCommands, mut selectionUI, trace, settingsPath, loadSettings, loggingLevel, rootMarkers): (
u64,
HashMap<String, Vec<String>>,
String,
String,
String,
u64,
String,
Option<RootMarkers>,
) = self.eval(
&[
"!!get(g:, 'LanguageClient_autoStart', 1)",
"get(g:, 'LanguageClient_serverCommands', {})",
"get(g:, 'LanguageClient_selectionUI', '')",
"get(g:, 'LanguageClient_trace', 'Off')",
"get(g:, 'LanguageClient_settingsPath', '.vim/settings.json')",
"!!get(g:, 'LanguageClient_loadSettings', 1)",
"get(g:, 'LanguageClient_loggingLevel', 'WARN')",
"get(g:, 'LanguageClient_rootMarkers', v:null)",
][..],
)?;
// vimscript use 1 for true, 0 for false.
let autoStart = autoStart == 1;
let loadSettings = loadSettings == 1;
let trace = match trace.to_uppercase().as_str() {
"OFF" => TraceOption::Off,
"MESSAGES" => TraceOption::Messages,
"VERBOSE" => TraceOption::Verbose,
_ => bail!("Unknown trace option: {:?}", trace),
};
// No explicit selection UI: fall back to FZF when the fzf plugin is
// loaded, otherwise the location list (via the match below).
if selectionUI == "" {
let loaded_fzf: u64 = self.eval("get(g:, 'loaded_fzf')")?;
if loaded_fzf == 1 {
selectionUI = "FZF".into();
}
}
let selectionUI = match selectionUI.to_uppercase().as_str() {
"FZF" => SelectionUI::FZF,
"" | "LOCATIONLIST" | "LOCATION-LIST" => SelectionUI::LocationList,
_ => bail!("Unknown selectionUI option: {:?}", selectionUI),
};
let logger = LOGGER
.deref()
.as_ref()
.or_else(|_| Err(err_msg("No logger")))?;
logger::set_logging_level(logger, &loggingLevel)?;
// Second batch: diagnostics-related settings.
let (diagnosticsEnable, diagnosticsList, diagnosticsDisplay, windowLogMessageLevel): (
u64,
DiagnosticsList,
Value,
String,
) = self.eval(
&[
"!!get(g:, 'LanguageClient_diagnosticsEnable', v:true)",
"get(g:, 'LanguageClient_diagnosticsList', 'Quickfix')",
"get(g:, 'LanguageClient_diagnosticsDisplay', {})",
"get(g:, 'LanguageClient_windowLogMessageLevel', 'Warning')",
][..],
)?;
let diagnosticsEnable = diagnosticsEnable == 1;
let windowLogMessageLevel = match windowLogMessageLevel.to_uppercase().as_str() {
"ERROR" => MessageType::Error,
"WARNING" => MessageType::Warning,
"INFO" => MessageType::Info,
"LOG" => MessageType::Log,
_ => bail!("Unknown windowLogMessageLevel: {}", windowLogMessageLevel),
};
self.update(|state| {
state.autoStart = autoStart;
state.serverCommands.merge(serverCommands);
state.selectionUI = selectionUI;
state.trace = trace;
state.diagnosticsEnable = diagnosticsEnable;
state.diagnosticsList = diagnosticsList;
// User-provided display settings are overlaid on the defaults via a
// JSON round-trip + combine, rather than replacing them wholesale.
state.diagnosticsDisplay =
serde_json::from_value(serde_json::to_value(&state.diagnosticsDisplay)?.combine(diagnosticsDisplay))?;
state.windowLogMessageLevel = windowLogMessageLevel;
state.settingsPath = settingsPath;
state.loadSettings = loadSettings;
state.rootMarkers = rootMarkers;
Ok(())
})?;
Ok(())
}
/// Defines one vim sign (`sign define LanguageClient<name> ...`) per entry
/// in `diagnosticsDisplay`, in a single `:execute` chain.
fn define_signs(&self) -> Result<()> {
    // Begin/End markers (the old code logged the identical "Define signs"
    // message twice, which made the log ambiguous).
    info!("Begin define signs");
    let cmd = self.get(|state| {
        // "echo" is a no-op seed so every definition can be appended
        // uniformly with "| execute ...".
        let mut cmd = "echo".to_owned();
        for entry in state.diagnosticsDisplay.values() {
            cmd += &format!(
                " | execute 'sign define LanguageClient{} text={} texthl={}'",
                entry.name, entry.signText, entry.signTexthl,
            );
        }
        Ok(cmd)
    })?;
    self.command(&cmd)?;
    info!("End define signs");
    Ok(())
}
/// Applies a `WorkspaceEdit` — both its `document_changes` and `changes`
/// forms — file by file, then returns the cursor to the position captured
/// from `params` (editing moves it).
fn apply_WorkspaceEdit(&self, edit: &WorkspaceEdit, params: &Option<Params>) -> Result<()> {
debug!(
"Begin apply WorkspaceEdit: {:?}. Params: {:?}",
edit, params
);
// Capture the current position up front, before any buffer is touched.
let (filename, line, character): (String, u64, u64) =
self.gather_args(&[VimVar::Filename, VimVar::Line, VimVar::Character], params)?;
if let Some(ref changes) = edit.document_changes {
for e in changes {
self.apply_TextEdits(&e.text_document.uri.filepath()?, &e.edits)?;
}
}
if let Some(ref changes) = edit.changes {
for (uri, edits) in changes {
self.apply_TextEdits(&uri.filepath()?, edits)?;
}
}
debug!("End apply WorkspaceEdit");
self.goto_location(&Some("buffer".to_string()), &filename, line, character)?;
Ok(())
}
/// Applies `edits` to the buffer at `path`: jumps there, rewrites the
/// buffer lines via the free `apply_TextEdits` helper, and trims any
/// leftover tail lines when the buffer shrank.
fn apply_TextEdits<P: AsRef<Path>>(&self, path: P, edits: &[TextEdit]) -> Result<()> {
    use std::cmp::Reverse;

    debug!("Begin apply TextEdits: {:?}", edits);
    // Apply bottom-up so an earlier edit cannot shift the positions of the
    // edits that follow. One stable descending sort replaces the previous
    // reverse/sort/reverse dance; ties keep their original relative order
    // either way.
    let mut edits = edits.to_vec();
    edits.sort_by_key(|edit| Reverse((edit.range.start.line, edit.range.start.character)));
    self.goto_location(&None, &path, 0, 0)?;
    let mut lines: Vec<String> = self.getbufline(&path)?;
    let lines_len = lines.len();
    // Shadows this method: the free function of the same name does the
    // actual line-level edit application.
    lines = apply_TextEdits(&lines, &edits)?;
    // With 'fixendofline' set, vim re-appends the final newline itself, so
    // a trailing empty line would otherwise be doubled. `last()` also
    // avoids the old `lines[lines.len() - 1]` panic on an empty buffer.
    let fixendofline: u64 = self.eval("&fixendofline")?;
    if fixendofline == 1 && lines.last().map_or(false, |l| l.is_empty()) {
        lines.pop();
    }
    self.notify(None, "setline", json!([1, lines]))?;
    // Delete the now-stale tail when the edit shrank the buffer.
    if lines.len() < lines_len {
        self.command(&format!("{},{}d", lines.len() + 1, lines_len))?;
    }
    debug!("End apply TextEdits");
    Ok(())
}
/// Publishes `diagnostics` for `filename` into every UI surface: per-line
/// echo messages, gutter signs, the quickfix/location list, and — on
/// neovim only — in-buffer highlights.
fn display_diagnostics(&self, filename: &str, diagnostics: &[Diagnostic]) -> Result<()> {
// Line diagnostics.
// Drop this file's stale entries before inserting the fresh ones.
self.update(|state| {
state
.line_diagnostics
.retain(|&(ref f, _), _| f != filename);
Ok(())
})?;
let mut line_diagnostics = HashMap::new();
for entry in diagnostics {
let line = entry.range.start.line;
// Message format: "[Severity][code]message", each prefix optional.
let mut msg = String::new();
if let Some(severity) = entry.severity {
msg += &format!("[{:?}]", severity);
}
if let Some(ref code) = entry.code {
let s = code.to_string();
if !s.is_empty() {
msg += &format!("[{}]", s);
}
}
msg += &entry.message;
line_diagnostics.insert((filename.to_owned(), line), msg);
}
self.update(|state| {
state.line_diagnostics.merge(line_diagnostics);
Ok(())
})?;
// Signs.
// The sign text comes from the cached document content, one line each.
let texts = self.get(|state| {
let text_document = state
.text_documents
.get(filename)
.ok_or_else(|| format_err!("TextDocumentItem not found! filename: {}", filename))?;
Ok(text_document.text.clone())
})?;
let texts: Vec<&str> = texts.split('\n').collect();
let mut signs: Vec<_> = diagnostics
.iter()
.map(|dn| {
let line = dn.range.start.line;
let text = texts
.get(line as usize)
.map(|l| l.to_string())
.unwrap_or_default();
let severity = dn.severity.unwrap_or(DiagnosticSeverity::Information);
// Diagnostic lines are 0-based; vim sign lines are 1-based.
Sign::new(line + 1, text, severity)
})
.collect();
signs.sort_unstable();
// Diff against the previously placed signs so only the changed ones
// are (un)placed.
let cmd = self.update(|state| {
let signs_prev = state.signs.remove(filename).unwrap_or_default();
let (signs_next, cmd) = get_command_update_signs(&signs_prev, &signs, filename);
state.signs.insert(filename.to_string(), signs_next);
Ok(cmd)
})?;
info!("Command to update signs: {}", cmd);
self.command(&cmd)?;
// Quickfix.
let qflist: Vec<_> = diagnostics
.iter()
.map(|dn| QuickfixEntry {
filename: filename.to_owned(),
lnum: dn.range.start.line + 1,
col: Some(dn.range.start.character + 1),
nr: dn.code.clone().map(|ns| ns.to_string()),
text: Some(dn.message.to_owned()),
typee: dn.severity.map(|sev| sev.to_quickfix_entry_type()),
})
.collect();
let diagnosticsList = self.get(|state| Ok(state.diagnosticsList.clone()))?;
match diagnosticsList {
DiagnosticsList::Quickfix => {
self.call(None, "setqflist", [qflist])?;
}
DiagnosticsList::Location => {
self.call(None, "setloclist", json!([0, qflist]))?;
}
};
// Everything below needs the nvim highlight API; plain vim stops here.
let is_nvim: u64 = self.eval("has('nvim')")?;
if is_nvim != 1 {
return Ok(());
}
// Lazily allocate one highlight source id and cache it in the state.
let mut source: Option<u64> = self.get(|state| Ok(state.highlight_source))?;
if source.is_none() {
// A dummy nvim_buf_add_highlight call with src_id 0 makes nvim
// allocate and return a fresh source id.
let exp = format!(
"nvim_buf_add_highlight({}, {}, {}, {}, {}, {})",
0, 0, "''", 1, 1, 1
);
source = Some(self.eval(exp)?);
self.update(|state| {
state.highlight_source = source;
Ok(())
})?;
}
let source = source.ok_or_else(|| err_msg("Empty highlight source id"))?;
let diagnosticsDisplay = self.get(|state| Ok(state.diagnosticsDisplay.clone()))?;
// Highlight.
// TODO: Optimize.
self.call(None, "nvim_buf_clear_highlight", json!([0, source, 1, -1]))?;
for dn in diagnostics.iter() {
let severity = dn.severity.unwrap_or(DiagnosticSeverity::Information);
let hl_group = diagnosticsDisplay
.get(&severity.to_int()?)
.ok_or_else(|| err_msg("Failed to get display"))?
.texthl
.clone();
self.notify(
None,
"nvim_buf_add_highlight",
json!([
0,
source,
hl_group,
dn.range.start.line,
dn.range.start.character,
dn.range.end.character,
]),
)?;
}
Ok(())
}
/// Present a list of locations through FZF or the location list, depending
/// on the configured selection UI. `_languageId` is currently unused.
fn display_locations(&self, locations: &[Location], _languageId: &str) -> Result<()> {
    match self.get(|state| Ok(state.selectionUI.clone()))? {
        SelectionUI::FZF => {
            let cwd: String = self.eval("getcwd()")?;
            let source: Result<Vec<_>> = locations
                .iter()
                .map(|loc| {
                    let filename = loc.uri.filepath()?;
                    let start = loc.range.start;
                    let text = self.get_line(&filename, start.line).unwrap_or_default();
                    // Prefer a path relative to cwd; fall back to absolute.
                    let relpath = diff_paths(&filename, Path::new(&cwd)).unwrap_or(filename);
                    // "file:line:col:\ttext" is the format the FZF sink
                    // handler parses back.
                    Ok(format!(
                        "{}:{}:{}:\t{}",
                        relpath.to_str().unwrap_or_default(),
                        start.line + 1,
                        start.character + 1,
                        text
                    ))
                })
                .collect();
            let source = source?;
            self.notify(
                None,
                "s:FZF",
                json!([source, format!("s:{}", NOTIFICATION__FZFSinkLocation)]),
            )?;
        }
        SelectionUI::LocationList => {
            let loclist: Result<Vec<_>> = locations
                .iter()
                .map(|loc| {
                    let filename = loc.uri.filepath()?;
                    let start = loc.range.start;
                    let text = self.get_line(&filename, start.line).unwrap_or_default();
                    // 1-based positions for Vim's location list.
                    Ok(json!({
                        "filename": filename,
                        "lnum": start.line + 1,
                        "col": start.character + 1,
                        "text": text,
                    }))
                })
                .collect();
            let loclist = loclist?;
            self.notify(None, "setloclist", json!([0, loclist]))?;
            self.echo("Location list updated.")?;
        }
    }
    Ok(())
}
/// Debugging aid: serialize the entire client state to a JSON string and
/// return it as a JSON-RPC string value.
fn languageClient_getState(&self, _params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__GetState);
    let serialized = self.get(|state| Ok(serde_json::to_string(state)?))?;
    let response = Value::String(serialized);
    info!("End {}", REQUEST__GetState);
    Ok(response)
}
/// Report whether a language server is currently running for the buffer's
/// language (i.e. a writer to its stdin is registered).
fn languageClient_isAlive(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__IsAlive);
    let (languageId,): (String,) = self.gather_args(&[VimVar::LanguageId], params)?;
    let alive = self.get(|state| Ok(state.writers.contains_key(&languageId)))?;
    info!("End {}", REQUEST__IsAlive);
    Ok(Value::Bool(alive))
}
/// Spawn the language server for the current buffer's language, wire up its
/// stdio, start the reader thread, and run the LSP initialize handshake
/// followed by didOpen/didChange for the current buffer.
fn languageClient_startServer(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__StartServer);
    // Merge user-supplied command-line arguments into the params before
    // gathering the standard Vim variables.
    let (cmdargs,): (Vec<String>,) = self.gather_args(&[("cmdargs", "[]")], params)?;
    let cmdparams = vim_cmd_args_to_value(&cmdargs)?;
    let params = &Some(params.clone().to_value().combine(cmdparams).to_params()?);
    let (buftype, languageId, filename): (String, String, String) = self.gather_args(
        &[VimVar::Buftype, VimVar::LanguageId, VimVar::Filename],
        params,
    )?;
    // Only regular, named buffers can host a language server.
    if !buftype.is_empty() || filename.is_empty() {
        return Ok(Value::Null);
    }
    // One server instance per language.
    if self.get(|state| Ok(state.writers.contains_key(&languageId)))? {
        bail!(
            "Language client has already started for language {}.",
            &languageId
        );
    }
    self.sync_settings()?;
    let command = self.get(|state| {
        state
            .serverCommands
            .get(&languageId)
            .cloned()
            .ok_or_else(|| {
                format_err!(
                    "No language server command found for type: {}.",
                    &languageId
                )
            })
    })?;
    // NOTE(review): env::home_dir is deprecated since Rust 1.29; consider
    // the `dirs` crate if the toolchain moves forward.
    let home = env::home_dir().ok_or_else(|| err_msg("Failed to get home dir"))?;
    let home = home.to_str()
        .ok_or_else(|| err_msg("Failed to convert PathBuf to str"))?;
    // Expand a leading '~' in each command word to the home directory.
    let command: Vec<_> = command
        .into_iter()
        .map(|cmd| {
            if cmd.starts_with('~') {
                cmd.replacen('~', home, 1)
            } else {
                cmd
            }
        })
        .collect();
    // Server stderr is appended to a dedicated log file.
    let stderr = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&get_logpath_server())?;
    let process = std::process::Command::new(command.get(0).ok_or_else(|| err_msg("Empty command!"))?)
        .args(&command[1..])
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(stderr)
        .spawn()?;
    let child_id = process.id();
    let reader = BufReader::new(process
        .stdout
        .ok_or_else(|| err_msg("Failed to get subprocess stdout"))?);
    let writer = BufWriter::new(process
        .stdin
        .ok_or_else(|| err_msg("Failed to get subprocess stdin"))?);
    self.update(|state| {
        state.child_ids.insert(languageId.clone(), child_id);
        state.writers.insert(languageId.clone(), writer);
        Ok(())
    })?;
    // Dedicated thread pumping messages from the server's stdout.
    let state = Arc::clone(self);
    let languageId_clone = languageId.clone();
    let thread_name = format!("RPC-{}", languageId);
    std::thread::Builder::new()
        .name(thread_name.clone())
        .spawn(move || {
            if let Err(err) = state.loop_message(reader, Some(languageId_clone)) {
                error!("{} thread error: {}", thread_name, err);
            }
        })?;
    info!("End {}", REQUEST__StartServer);
    // Define gutter signs once, when the first server comes up.
    if self.get(|state| Ok(state.writers.len()))? == 1 {
        self.define_signs()?;
    }
    // LSP handshake, then sync the current buffer to the new server.
    self.initialize(params)?;
    self.textDocument_didOpen(params)?;
    self.textDocument_didChange(params)?;
    // Fire the user autocmd only if someone actually listens for it.
    if self.eval::<_, u64>("exists('#User#LanguageClientStarted')")? == 1 {
        self.command("doautocmd User LanguageClientStarted")?;
    }
    Ok(Value::Null)
}
// TODO: verify.
/// Merge user-supplied server command definitions into state and mirror the
/// merged result back into `g:LanguageClient_serverCommands` on the Vim side.
fn languageClient_registerServerCommands(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__RegisterServerCommands);
    let params = params.clone().ok_or_else(|| err_msg("Empty params!"))?;
    // Only a map of languageId -> command is a valid payload.
    let map = match params {
        Params::Map(map) => Value::Object(map),
        _ => bail!("Unexpected params type!"),
    };
    let map = serde_json::from_value(map)?;
    self.update(|state| Ok(state.serverCommands.merge(map)))?;
    // Keep the Vim global in sync so other plugin code sees the merge.
    let exp = format!(
        "let g:LanguageClient_serverCommands={}",
        serde_json::to_string(&self.get(|state| Ok(state.serverCommands.clone()))?)?
    );
    self.command(&exp)?;
    info!("End {}", REQUEST__RegisterServerCommands);
    Ok(Value::Null)
}
/// Change the plugin's logging level at runtime.
fn languageClient_setLoggingLevel(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__SetLoggingLevel);
    let (loggingLevel,): (String,) = self.gather_args(&["loggingLevel"], params)?;
    // Idiom fix: `.or_else(|_| Err(e))` is exactly `.map_err(|_| e)`.
    let logger = LOGGER
        .deref()
        .as_ref()
        .map_err(|_| err_msg("No logger"))?;
    logger::set_logging_level(logger, &loggingLevel)?;
    info!("End {}", REQUEST__SetLoggingLevel);
    Ok(Value::Null)
}
/// BufReadPost autocmd handler: attach a newly-opened buffer to an already
/// running server, or auto-start one if configured.
fn languageClient_handleBufReadPost(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__HandleBufReadPost);
    let (buftype, languageId, filename): (String, String, String) = self.gather_args(
        &[VimVar::Buftype, VimVar::LanguageId, VimVar::Filename],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() || filename.is_empty() {
        return Ok(());
    }
    // File opened before.
    if self.get(|state| Ok(state.text_documents.contains_key(&filename)))? {
        return Ok(());
    }
    if self.get(|state| Ok(state.writers.contains_key(&languageId)))? {
        // Language server is running but file is not within project root.
        let is_in_root = self.get(|state| {
            let root = state
                .roots
                .get(&languageId)
                .ok_or_else(|| format_err!("Failed to get root! languageId: {}", languageId))?;
            Ok(filename.starts_with(root))
        })?;
        if !is_in_root {
            return Ok(());
        }
        self.textDocument_didOpen(params)?;
        // Re-display diagnostics already received for this file; the cache
        // is keyed by canonicalized path. Missing entry -> empty list.
        let diagnostics = self.get(|state| {
            state
                .diagnostics
                .get(&filename.canonicalize())
                .cloned()
                .ok_or_else(|| format_err!("No diagnostics! filename: {}", filename))
        }).unwrap_or_default();
        self.display_diagnostics(&filename, &diagnostics)?;
        self.languageClient_handleCursorMoved(params)?;
    } else {
        // No server yet: honor g:LanguageClient_autoStart (defaults to on).
        let autoStart: i32 = self.eval("!!get(g:, 'LanguageClient_autoStart', v:true)")?;
        if autoStart == 1 {
            // Start failure is logged, not propagated, so the buffer still
            // loads normally.
            if let Err(err) = self.languageClient_startServer(params) {
                warn!("{}", err);
            }
        }
    }
    info!("End {}", NOTIFICATION__HandleBufReadPost);
    Ok(())
}
/// TextChanged/TextChangedI autocmd handler; simply forwards to didChange.
fn languageClient_handleTextChanged(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__HandleTextChanged);
    self.textDocument_didChange(params)?;
    info!("End {}", NOTIFICATION__HandleTextChanged);
    Ok(())
}
/// BufWritePost autocmd handler; simply forwards to didSave.
fn languageClient_handleBufWritePost(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__HandleBufWritePost);
    self.textDocument_didSave(params)?;
    info!("End {}", NOTIFICATION__HandleBufWritePost);
    Ok(())
}
/// BufDelete autocmd handler: purge all per-buffer state for the deleted
/// buffer, then send `textDocument/didClose` for it.
fn languageClient_handleBufDelete(&self, params: &Option<Params>) -> Result<()> {
    // Fixed: the Begin/End log lines previously used
    // NOTIFICATION__HandleBufWritePost (copy-paste from the write handler).
    info!("Begin {}", NOTIFICATION__HandleBufDelete);
    let (filename,): (String,) = self.gather_args(&[VimVar::Filename], params)?;
    // Drop every piece of cached state keyed by this file.
    self.update(|state| {
        state.text_documents.retain(|f, _| f != &filename);
        // NOTE(review): `diagnostics` entries are inserted with canonicalized
        // paths (see textDocument_publishDiagnostics) but compared here
        // against the raw filename — confirm the two agree.
        state.diagnostics.retain(|f, _| f != &filename);
        state.line_diagnostics.retain(|fl, _| fl.0 != filename);
        state.signs.retain(|f, _| f != &filename);
        Ok(())
    })?;
    self.textDocument_didClose(params)?;
    info!("End {}", NOTIFICATION__HandleBufDelete);
    Ok(())
}
/// CursorMoved autocmd handler: echo the diagnostic message (if any) for the
/// line the cursor landed on, avoiding redundant state updates and redraws.
fn languageClient_handleCursorMoved(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__HandleCursorMoved);
    let (buftype, filename, line): (String, String, u64) =
        self.gather_args(&[VimVar::Buftype, VimVar::Filename, VimVar::Line], params)?;
    // Nothing to do for special buffers or when the line did not change.
    if !buftype.is_empty() || line == self.get(|state| Ok(state.last_cursor_line))? {
        return Ok(());
    }
    self.update(|state| {
        state.last_cursor_line = line;
        Ok(())
    })?;
    // A missing entry simply means no diagnostic on this line -> empty
    // message (which clears any previous echo).
    let message = self.get(|state| {
        state
            .line_diagnostics
            .get(&(filename.clone(), line))
            .cloned()
            .ok_or_else(|| {
                format_err!(
                    "Line diagnostic message not found! filename: {}, line: {}",
                    filename,
                    line
                )
            })
    }).unwrap_or_default();
    // Skip the echo when the message is unchanged (prevents flicker).
    if message == self.get(|state| Ok(state.last_line_diagnostic.clone()))? {
        return Ok(());
    }
    self.update(|state| {
        state.last_line_diagnostic = message.clone();
        Ok(())
    })?;
    self.echo_ellipsis(&message)?;
    info!("End {}", NOTIFICATION__HandleCursorMoved);
    Ok(())
}
/// Run the LSP `initialize` request: determine the project root, load
/// optional per-project settings, advertise client capabilities, and record
/// the server's reply for later feature checks.
fn initialize(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::Initialize::METHOD);
    let (languageId, filename): (String, String) =
        self.gather_args(&[VimVar::LanguageId, VimVar::Filename], params)?;
    let (rootPath, has_snippet_support): (Option<String>, u64) = self.gather_args(
        &[
            ("rootPath", "v:null"),
            ("hasSnippetSupport", "s:hasSnippetSupport()"),
        ],
        params,
    )?;
    // Explicit rootPath wins; otherwise walk up from the file looking for
    // the configured root markers.
    let root = match rootPath {
        Some(r) => r,
        _ => {
            let rootMarkers = self.get(|state| Ok(state.rootMarkers.clone()))?;
            get_rootPath(Path::new(&filename), &languageId, &rootMarkers)?
                .to_str()
                .ok_or_else(|| err_msg("Failed to convert &Path to &str"))?
                .to_owned()
        }
    };
    info!("Project root: {}", root);
    let has_snippet_support = has_snippet_support > 0;
    self.update(|state| Ok(state.roots.insert(languageId.clone(), root.clone())))?;
    // The per-project settings file is optional; any failure (disabled,
    // missing file, bad JSON) degrades to `{}`.
    let settings = || -> Result<Value> {
        if !self.get(|state| Ok(state.loadSettings))? {
            return Ok(json!({}));
        }
        let mut f = File::open(Path::new(&root).join(self.get(|state| Ok(state.settingsPath.clone()))?))?;
        let mut buffer = String::new();
        f.read_to_string(&mut buffer)?;
        Ok(serde_json::from_str(&buffer)?)
    }()
    .unwrap_or_else(|_| json!({}));
    debug!("Project settings: {}", serde_json::to_string(&settings)?);
    let initialization_options = Some(settings["initializationOptions"].clone());
    debug!(
        "Project settings.initializationOptions: {}",
        serde_json::to_string(&initialization_options)?
    );
    let result = self.call(
        Some(&languageId),
        lsp::request::Initialize::METHOD,
        InitializeParams {
            // SAFETY: getpid has no preconditions and cannot fail.
            process_id: Some(unsafe { libc::getpid() } as u64),
            root_path: Some(root.clone()),
            root_uri: Some(root.to_url()?),
            initialization_options,
            // Only completion (with optional snippet support) is advertised;
            // everything else is left at the protocol defaults.
            capabilities: ClientCapabilities {
                workspace: None,
                text_document: Some(TextDocumentClientCapabilities {
                    synchronization: None,
                    completion: Some(CompletionCapability {
                        dynamic_registration: None,
                        completion_item: Some(CompletionItemCapability {
                            snippet_support: Some(has_snippet_support),
                            commit_characters_support: None,
                            documentation_format: None,
                        }),
                    }),
                    hover: None,
                    signature_help: None,
                    references: None,
                    document_highlight: None,
                    document_symbol: None,
                    formatting: None,
                    range_formatting: None,
                    on_type_formatting: None,
                    definition: None,
                    code_action: None,
                    code_lens: None,
                    document_link: None,
                    rename: None,
                }),
                experimental: None,
            },
            trace: TraceOption::default(),
        },
    )?;
    // Remember the server's advertised capabilities.
    self.update(|state| {
        state
            .capabilities
            .insert(languageId.clone(), result.clone());
        Ok(())
    })?;
    info!("End {}", lsp::request::Initialize::METHOD);
    // NCM registration is best-effort; failure is reported but not fatal.
    if let Err(e) = self.registerCMSource(&languageId, &result) {
        let message = "LanguageClient: failed to register as NCM source!";
        debug!("{}: {:?}", message, e);
        self.echoerr(message)?;
    }
    Ok(result)
}
/// Register this client as a completion source with NCM
/// (nvim-completion-manager), if NCM is loaded and the server advertises
/// completion support.
fn registerCMSource(&self, languageId: &str, result: &Value) -> Result<()> {
    info!("Begin register NCM source");
    // NCM presence is detected via its matcher variable.
    let exists_CMRegister: u64 = self.eval("exists('g:cm_matcher')")?;
    if exists_CMRegister == 0 {
        return Ok(());
    }
    let result: InitializeResult = serde_json::from_value(result.clone())?;
    // Nothing to register unless the server supports completion.
    let provider = match result.capabilities.completion_provider {
        Some(provider) => provider,
        None => return Ok(()),
    };
    // Escape each trigger character so NCM treats it literally in its
    // refresh patterns.
    let trigger_patterns: Vec<_> = provider
        .trigger_characters
        .iter()
        .map(|c| regex::escape(c))
        .collect();
    self.notify(
        None,
        "cm#register_source",
        json!([{
            "name": format!("LanguageClient_{}", languageId),
            "priority": 9,
            "scopes": [languageId],
            "cm_refresh_patterns": trigger_patterns,
            "abbreviation": "LC",
            "cm_refresh": NOTIFICATION__NCMRefresh,
        }]),
    )?;
    info!("End register NCM source");
    Ok(())
}
/// Fetch one (0-based) line of `path`, preferring live buffer content and
/// falling back to reading the file from disk.
fn get_line<P: AsRef<Path>>(&self, path: P, line: u64) -> Result<String> {
    // getbufline is 1-based, hence line + 1.
    let value = self.call(
        None,
        "getbufline",
        json!([path.as_ref().to_str().unwrap_or_default(), line + 1]),
    )?;
    let mut texts: Vec<String> = serde_json::from_value(value)?;
    let mut text = texts.pop().unwrap_or_default();
    // Empty result means the file is not loaded in any buffer.
    // NOTE(review): a genuinely empty buffer line also triggers this disk
    // fallback — confirm that is acceptable.
    if text.is_empty() {
        let reader = BufReader::new(File::open(path)?);
        text = reader
            .lines()
            .nth(line.to_usize()?)
            .ok_or_else(|| format_err!("Failed to get line! line: {}", line))??;
    }
    Ok(text.strip())
}
/// NCM refresh callback: run completion at the NCM-provided position and
/// feed the matches back to NCM via `cm#complete`.
fn NCM_refresh(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__NCMRefresh);
    let params = match *params {
        None | Some(Params::None) => bail!("Empty params!"),
        Some(Params::Map(_)) => bail!("Expecting array. Got dict."),
        Some(Params::Array(ref arr)) => Value::Array(arr.clone()),
    };
    let (info, ctx): (NCMInfo, NCMContext) = serde_json::from_value(params)?;
    // Nothing typed yet -> nothing to complete.
    if ctx.typed.is_empty() {
        return Ok(());
    }
    // NCM positions are 1-based; LSP positions are 0-based.
    let result = self.textDocument_completion(&Some(json!({
        "line": ctx.lnum - 1,
        "character": ctx.col - 1,
    }).to_params()?))?;
    // A null response is treated as an empty completion list.
    let result: Option<CompletionResponse> = serde_json::from_value(result)?;
    let result = result.unwrap_or_else(|| CompletionResponse::Array(vec![]));
    let is_incomplete = match result {
        CompletionResponse::Array(_) => false,
        CompletionResponse::List(ref list) => list.is_incomplete,
    };
    let matches: Vec<VimCompleteItem> = match result {
        CompletionResponse::Array(arr) => arr,
        CompletionResponse::List(list) => list.items,
    }.into_iter()
        .map(|lspitem| lspitem.into())
        .collect();
    self.notify(
        None,
        "cm#complete",
        json!([info.name, ctx, ctx.startcol, matches, is_incomplete]),
    )?;
    info!("End {}", NOTIFICATION__NCMRefresh);
    Ok(())
}
/// Vim omnifunc backend: run `textDocument/completion` for the cursor and
/// convert the response into a list of Vim completion items.
fn languageClient_omniComplete(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__OmniComplete);
    let raw = self.textDocument_completion(params)?;
    let response: Option<CompletionResponse> = serde_json::from_value(raw)?;
    // A null response is treated as an empty completion list.
    let items = match response {
        Some(CompletionResponse::Array(items)) => items,
        Some(CompletionResponse::List(list)) => list.items,
        None => vec![],
    };
    let matches: Vec<VimCompleteItem> = items.into_iter().map(Into::into).collect();
    info!("End {}", REQUEST__OmniComplete);
    Ok(serde_json::to_value(matches)?)
}
/// `textDocument/references`: list all references to the symbol under the
/// cursor (declaration included) and, when `handle` is set, display them.
fn textDocument_references(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::References::METHOD);
    let (buftype, languageId, filename, line, character, handle): (String, String, String, u64, u64, bool) =
        self.gather_args(
            &[
                VimVar::Buftype,
                VimVar::LanguageId,
                VimVar::Filename,
                VimVar::Line,
                VimVar::Character,
                VimVar::Handle,
            ],
            params,
        )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    let result = self.call(
        Some(&languageId),
        lsp::request::References::METHOD,
        ReferenceParams {
            text_document: TextDocumentIdentifier {
                uri: filename.to_url()?,
            },
            position: Position { line, character },
            context: ReferenceContext {
                include_declaration: true,
            },
        },
    )?;
    // handle == false means the caller only wants the raw LSP result.
    if !handle {
        return Ok(result);
    }
    let locations: Vec<Location> = serde_json::from_value(result.clone())?;
    self.display_locations(&locations, &languageId)?;
    info!("End {}", lsp::request::References::METHOD);
    Ok(result)
}
/// `textDocument/formatting`: format the whole buffer using the buffer's
/// 'tabstop'/'expandtab' options, then apply the returned edits.
fn textDocument_formatting(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::Formatting::METHOD);
    let (buftype, languageId, filename, handle): (String, String, String, bool) = self.gather_args(
        &[
            VimVar::Buftype,
            VimVar::LanguageId,
            VimVar::Filename,
            VimVar::Handle,
        ],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    // Mirror the user's indentation options into the LSP request.
    let (tab_size, insert_spaces): (u64, u64) = self.eval(&["&tabstop", "&expandtab"][..])?;
    let insert_spaces = insert_spaces == 1;
    let result = self.call(
        Some(&languageId),
        lsp::request::Formatting::METHOD,
        DocumentFormattingParams {
            text_document: TextDocumentIdentifier {
                uri: filename.to_url()?,
            },
            options: FormattingOptions {
                tab_size,
                insert_spaces,
                properties: HashMap::new(),
            },
        },
    )?;
    if !handle {
        return Ok(result);
    }
    // Wrap the edits in a single-file WorkspaceEdit so the generic applier
    // can be reused.
    let text_edits: Option<Vec<TextEdit>> = serde_json::from_value(result.clone())?;
    let text_edits = text_edits.unwrap_or_default();
    let edit = lsp::WorkspaceEdit {
        changes: Some(hashmap!{filename.to_url()? => text_edits}),
        document_changes: None,
    };
    self.apply_WorkspaceEdit(&edit, params)?;
    info!("End {}", lsp::request::Formatting::METHOD);
    Ok(result)
}
/// `textDocument/rangeFormatting`: format the `v:lnum`..`v:lnum + v:count`
/// range (as set by Vim's 'formatexpr' mechanism) and apply the edits.
fn textDocument_rangeFormatting(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::RangeFormatting::METHOD);
    let (buftype, languageId, filename, handle): (String, String, String, bool) = self.gather_args(
        &[
            VimVar::Buftype,
            VimVar::LanguageId,
            VimVar::Filename,
            VimVar::Handle,
        ],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    // Range bounds come from Vim's formatexpr variables, converted to
    // 0-based LSP positions; the end character is the last line's length - 1.
    let (tab_size, insert_spaces, start_line, end_line, end_character): (u64, u64, u64, u64, u64) = self.eval(
        &[
            "&tabstop",
            "&expandtab",
            "v:lnum - 1",
            "v:lnum - 1 + v:count",
            "len(getline(v:lnum + v:count)) - 1",
        ][..],
    )?;
    let insert_spaces = insert_spaces == 1;
    let result = self.call(
        Some(&languageId),
        lsp::request::RangeFormatting::METHOD,
        DocumentRangeFormattingParams {
            text_document: TextDocumentIdentifier {
                uri: filename.to_url()?,
            },
            options: FormattingOptions {
                tab_size,
                insert_spaces,
                properties: HashMap::new(),
            },
            range: Range {
                start: Position {
                    line: start_line,
                    character: 0,
                },
                end: Position {
                    line: end_line,
                    character: end_character,
                },
            },
        },
    )?;
    if !handle {
        return Ok(result);
    }
    // Wrap the edits in a single-file WorkspaceEdit for the generic applier.
    let text_edits: Option<Vec<TextEdit>> = serde_json::from_value(result.clone())?;
    let text_edits = text_edits.unwrap_or_default();
    let edit = lsp::WorkspaceEdit {
        changes: Some(hashmap!{filename.to_url()? => text_edits}),
        document_changes: None,
    };
    self.apply_WorkspaceEdit(&edit, params)?;
    info!("End {}", lsp::request::RangeFormatting::METHOD);
    Ok(result)
}
/// `completionItem/resolve`: ask the server to fill in extra detail for a
/// completion item. The resolved result is not yet integrated into the
/// completion flow; it is only echoed as a warning.
fn completionItem_resolve(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::ResolveCompletionItem::METHOD);
    let (buftype, languageId, handle): (String, String, bool) = self.gather_args(
        &[VimVar::Buftype, VimVar::LanguageId, VimVar::Handle],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    let (completion_item,): (CompletionItem,) = self.gather_args(&["completionItem"], params)?;
    let result = self.call(
        Some(&languageId),
        lsp::request::ResolveCompletionItem::METHOD,
        completion_item,
    )?;
    if !handle {
        return Ok(result);
    }
    // TODO: proper integration.
    // Fixed typo in the user-visible message: "comletionItem" -> "completionItem".
    let msg = format!("completionItem/resolve result not handled: {:?}", result);
    warn!("{}", msg);
    self.echowarn(&msg)?;
    info!("End {}", lsp::request::ResolveCompletionItem::METHOD);
    Ok(Value::Null)
}
/// Send `textDocument/didOpen` for the current buffer and cache its content
/// (as version 0) for later change tracking.
fn textDocument_didOpen(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::DidOpenTextDocument::METHOD);
    let (buftype, languageId, filename, text): (String, String, String, Vec<String>) = self.gather_args(
        &[
            VimVar::Buftype,
            VimVar::LanguageId,
            VimVar::Filename,
            VimVar::Text,
        ],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(());
    }
    let text_document = TextDocumentItem {
        uri: filename.to_url()?,
        language_id: languageId.clone(),
        version: 0,
        text: text.join("\n"),
    };
    self.update(|state| {
        Ok(state
            .text_documents
            .insert(filename.clone(), text_document.clone()))
    })?;
    self.notify(
        Some(&languageId),
        lsp::notification::DidOpenTextDocument::METHOD,
        DidOpenTextDocumentParams { text_document },
    )?;
    // Hook up omni-completion for this buffer.
    self.command("setlocal omnifunc=LanguageClient#complete")?;
    info!("End {}", lsp::notification::DidOpenTextDocument::METHOD);
    Ok(())
}
/// Send `textDocument/didChange` with the full new buffer content, bumping
/// the cached document version. Falls back to didOpen for unknown files and
/// skips the notification entirely when the text is unchanged.
fn textDocument_didChange(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::DidChangeTextDocument::METHOD);
    let (buftype, languageId, filename, text): (String, String, String, Vec<String>) = self.gather_args(
        &[
            VimVar::Buftype,
            VimVar::LanguageId,
            VimVar::Filename,
            VimVar::Text,
        ],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(());
    }
    if !self.get(|state| Ok(state.text_documents.contains_key(&filename)))? {
        warn!("Not opened yet. Switching to didOpen.");
        return self.textDocument_didOpen(params);
    }
    let text = text.join("\n");
    // Compare against the cached copy to suppress no-op notifications.
    let text_state = self.get(|state| {
        state
            .text_documents
            .get(&filename)
            .ok_or_else(|| format_err!("TextDocumentItem not found! filename: {}", filename))
            .map(|doc| doc.text.clone())
    }).unwrap_or_default();
    if text == text_state {
        info!("Texts equal. Skipping didChange.");
        return Ok(());
    }
    let version = self.update(|state| {
        let document = state
            .text_documents
            .get_mut(&filename)
            .ok_or_else(|| format_err!("Failed to get TextDocumentItem! filename: {}", filename))?;
        let version = document.version + 1;
        document.version = version;
        document.text = text.clone();
        Ok(version)
    })?;
    // Full-document sync: a single change event replacing the whole content.
    self.notify(
        Some(&languageId),
        lsp::notification::DidChangeTextDocument::METHOD,
        DidChangeTextDocumentParams {
            text_document: VersionedTextDocumentIdentifier {
                uri: filename.to_url()?,
                version: Some(version),
            },
            content_changes: vec![
                TextDocumentContentChangeEvent {
                    range: None,
                    range_length: None,
                    text,
                },
            ],
        },
    )?;
    info!("End {}", lsp::notification::DidChangeTextDocument::METHOD);
    Ok(())
}
/// Send `textDocument/didSave` for the current buffer (no text payload).
fn textDocument_didSave(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::DidSaveTextDocument::METHOD);
    let (buftype, languageId, filename): (String, String, String) = self.gather_args(
        &[VimVar::Buftype, VimVar::LanguageId, VimVar::Filename],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(());
    }
    self.notify(
        Some(&languageId),
        lsp::notification::DidSaveTextDocument::METHOD,
        DidSaveTextDocumentParams {
            text_document: TextDocumentIdentifier {
                uri: filename.to_url()?,
            },
        },
    )?;
    info!("End {}", lsp::notification::DidSaveTextDocument::METHOD);
    Ok(())
}
/// Send `textDocument/didClose` for the current buffer.
fn textDocument_didClose(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::DidCloseTextDocument::METHOD);
    let (buftype, languageId, filename): (String, String, String) = self.gather_args(
        &[VimVar::Buftype, VimVar::LanguageId, VimVar::Filename],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(());
    }
    self.notify(
        Some(&languageId),
        lsp::notification::DidCloseTextDocument::METHOD,
        DidCloseTextDocumentParams {
            text_document: TextDocumentIdentifier {
                uri: filename.to_url()?,
            },
        },
    )?;
    info!("End {}", lsp::notification::DidCloseTextDocument::METHOD);
    Ok(())
}
/// Handle `textDocument/publishDiagnostics`: cache the diagnostics keyed by
/// canonicalized path and, when they belong to the current buffer, refresh
/// the diagnostics display immediately.
fn textDocument_publishDiagnostics(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::PublishDiagnostics::METHOD);
    let params: PublishDiagnosticsParams = serde_json::from_value(params.clone().to_value())?;
    if !self.get(|state| Ok(state.diagnosticsEnable))? {
        return Ok(());
    }
    let mut filename = params
        .uri
        .filepath()?
        .to_str()
        .ok_or_else(|| err_msg("Failed to convert PathBuf to str"))?
        .to_owned();
    // Workaround bug: remove first '/' in case of '/C:/blabla'.
    // (Windows drive paths come back from the URI with a bogus leading
    // slash.) Idiom fix: starts_with('/') replaces chars().nth(0) check.
    if filename.starts_with('/') && filename.chars().nth(2) == Some(':') {
        filename.remove(0);
    }
    // Unify name to avoid mismatch due to case insensitivity.
    let filename = filename.canonicalize();
    self.update(|state| {
        state
            .diagnostics
            .insert(filename.clone(), params.diagnostics.clone());
        Ok(())
    })?;
    info!("End {}", lsp::notification::PublishDiagnostics::METHOD);
    // Only refresh the UI when the diagnostics target the active buffer.
    let current_filename: String = self.eval(VimVar::Filename)?;
    if filename != current_filename.canonicalize() {
        return Ok(());
    }
    self.display_diagnostics(&current_filename, &params.diagnostics)?;
    self.languageClient_handleCursorMoved(&None)?;
    // Fire the user autocmd only if someone listens for it.
    if self.eval::<_, u64>("exists('#User#LanguageClientDiagnosticsChanged')")? == 1 {
        self.command("doautocmd User LanguageClientDiagnosticsChanged")?;
    }
    Ok(())
}
/// Handle `window/logMessage`: echo the message unless its level is below
/// the configured threshold.
fn window_logMessage(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::LogMessage::METHOD);
    let params: LogMessageParams = serde_json::from_value(params.clone().to_value())?;
    let threshold = self.get(|state| state.windowLogMessageLevel.to_int())?;
    // Higher numeric value means *less* severe in LSP MessageType, so skip
    // anything numerically above the threshold.
    if params.typ.to_int()? > threshold {
        return Ok(());
    }
    let msg = format!("[{:?}] {}", params.typ, params.message);
    self.echomsg(&msg)?;
    info!("End {}", lsp::notification::LogMessage::METHOD);
    Ok(())
}
/// `textDocument/hover`: show hover information for the cursor position.
/// NOTE(review): unlike the sibling handlers this one has no
/// buftype/languageId guard — confirm whether that is intentional.
fn textDocument_hover(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::HoverRequest::METHOD);
    let (languageId, filename, line, character, handle): (String, String, u64, u64, bool) = self.gather_args(
        &[
            VimVar::LanguageId,
            VimVar::Filename,
            VimVar::Line,
            VimVar::Character,
            VimVar::Handle,
        ],
        params,
    )?;
    let result = self.call(
        Some(&languageId),
        lsp::request::HoverRequest::METHOD,
        TextDocumentPositionParams {
            text_document: TextDocumentIdentifier {
                uri: filename.to_url()?,
            },
            position: Position { line, character },
        },
    )?;
    // handle == false means the caller only wants the raw LSP result.
    if !handle {
        return Ok(result);
    }
    let hover: Hover = serde_json::from_value(result.clone())?;
    let message = hover.to_string();
    self.echo(&message)?;
    info!("End {}", lsp::request::HoverRequest::METHOD);
    Ok(result)
}
/// `textDocument/definition`: jump straight to a single result, or present a
/// chooser when the server returns multiple locations.
fn textDocument_definition(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::GotoDefinition::METHOD);
    let (buftype, languageId, filename, line, character, goto_cmd, handle): (
        String,
        String,
        String,
        u64,
        u64,
        Option<String>,
        bool,
    ) = self.gather_args(
        &[
            VimVar::Buftype,
            VimVar::LanguageId,
            VimVar::Filename,
            VimVar::Line,
            VimVar::Character,
            VimVar::GotoCmd,
            VimVar::Handle,
        ],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    let result = self.call(
        Some(&languageId),
        lsp::request::GotoDefinition::METHOD,
        TextDocumentPositionParams {
            text_document: TextDocumentIdentifier {
                uri: filename.to_url()?,
            },
            position: Position { line, character },
        },
    )?;
    // handle == false means the caller only wants the raw LSP result.
    if !handle {
        return Ok(result);
    }
    let response: GotoDefinitionResponse = serde_json::from_value(result.clone())?;
    match response {
        GotoDefinitionResponse::None => {
            self.echowarn("Not found!")?;
            return Ok(Value::Null);
        }
        GotoDefinitionResponse::Scalar(loc) => {
            self.goto_location(
                &goto_cmd,
                loc.uri.filepath()?.to_str().unwrap_or_default(),
                loc.range.start.line,
                loc.range.start.character,
            )?;
        }
        // Single-element arrays jump directly; anything larger goes through
        // the location chooser UI.
        GotoDefinitionResponse::Array(arr) => match arr.len() {
            0 => self.echowarn("Not found!")?,
            1 => {
                let loc = arr.get(0).ok_or_else(|| err_msg("Not found!"))?;
                self.goto_location(
                    &goto_cmd,
                    loc.uri.filepath()?.to_str().unwrap_or_default(),
                    loc.range.start.line,
                    loc.range.start.character,
                )?;
            }
            _ => self.display_locations(&arr, &languageId)?,
        },
    };
    info!("End {}", lsp::request::GotoDefinition::METHOD);
    Ok(result)
}
/// `textDocument/rename`: rename the symbol under the cursor, prompting for
/// a new name (pre-filled with the current word) when none was supplied, and
/// apply the resulting workspace edit.
fn textDocument_rename(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::Rename::METHOD);
    let (buftype, languageId, filename, line, character, cword, new_name, handle): (
        String,
        String,
        String,
        u64,
        u64,
        String,
        Option<String>,
        bool,
    ) = self.gather_args(
        &[
            VimVar::Buftype,
            VimVar::LanguageId,
            VimVar::Filename,
            VimVar::Line,
            VimVar::Character,
            VimVar::Cword,
            VimVar::NewName,
            VimVar::Handle,
        ],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    // No name given: prompt interactively, defaulting to the current word.
    let mut new_name = new_name.unwrap_or_default();
    if new_name.is_empty() {
        let value = self.call(None, "s:getInput", ["Rename to: ".to_owned(), cword])?;
        new_name = serde_json::from_value(value)?;
    }
    // Still empty (prompt cancelled) -> abort quietly.
    if new_name.is_empty() {
        return Ok(Value::Null);
    }
    let result = self.call(
        Some(&languageId),
        lsp::request::Rename::METHOD,
        RenameParams {
            text_document: TextDocumentIdentifier {
                uri: filename.to_url()?,
            },
            position: Position { line, character },
            new_name,
        },
    )?;
    if !handle || result == Value::Null {
        return Ok(result);
    }
    let edit: WorkspaceEdit = serde_json::from_value(result.clone())?;
    self.apply_WorkspaceEdit(&edit, params)?;
    info!("End {}", lsp::request::Rename::METHOD);
    Ok(result)
}
/// `textDocument/documentSymbol`: list the current buffer's symbols in FZF
/// or the location list.
/// NOTE(review): only buftype is guarded here (not languageId), unlike the
/// sibling handlers — confirm whether that is intentional.
fn textDocument_documentSymbol(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::DocumentSymbol::METHOD);
    let (buftype, languageId, filename, handle): (String, String, String, bool) = self.gather_args(
        &[
            VimVar::Buftype,
            VimVar::LanguageId,
            VimVar::Filename,
            VimVar::Handle,
        ],
        params,
    )?;
    if !buftype.is_empty() {
        return Ok(Value::Null);
    }
    let result = self.call(
        Some(&languageId),
        lsp::request::DocumentSymbol::METHOD,
        DocumentSymbolParams {
            text_document: TextDocumentIdentifier {
                uri: filename.to_url()?,
            },
        },
    )?;
    if !handle {
        return Ok(result);
    }
    let symbols: Vec<SymbolInformation> = serde_json::from_value(result.clone())?;
    match self.get(|state| Ok(state.selectionUI.clone()))? {
        SelectionUI::FZF => {
            // "line:col:\tname" is parsed back by the FZF sink handler.
            let source: Vec<_> = symbols
                .iter()
                .map(|sym| {
                    let start = sym.location.range.start;
                    format!("{}:{}:\t{}", start.line + 1, start.character + 1, sym.name)
                })
                .collect();
            self.notify(
                None,
                "s:FZF",
                json!([source, format!("s:{}", NOTIFICATION__FZFSinkLocation)]),
            )?;
        }
        SelectionUI::LocationList => {
            let loclist: Vec<_> = symbols
                .iter()
                .map(|sym| {
                    let start = sym.location.range.start;
                    // 1-based positions for Vim's location list.
                    json!({
                        "filename": filename,
                        "lnum": start.line + 1,
                        "col": start.character + 1,
                        "text": sym.name,
                    })
                })
                .collect();
            self.notify(None, "setloclist", json!([0, loclist]))?;
            self.echo("Document symbols populated to location list.")?;
        }
    }
    info!("End {}", lsp::request::DocumentSymbol::METHOD);
    Ok(result)
}
/// `workspace/symbol`: query symbols across the whole workspace and show
/// them in FZF or the location list.
fn workspace_symbol(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::WorkspaceSymbol::METHOD);
    let (buftype, languageId, handle): (String, String, bool) = self.gather_args(
        &[VimVar::Buftype, VimVar::LanguageId, VimVar::Handle],
        params,
    )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    // The query defaults to the empty string (i.e. "all symbols").
    let (query,): (String,) = self.gather_args(&[("query", "")], params)?;
    let result = self.call(
        Some(&languageId),
        lsp::request::WorkspaceSymbol::METHOD,
        WorkspaceSymbolParams { query },
    )?;
    if !handle {
        return Ok(result);
    }
    let symbols: Vec<SymbolInformation> = serde_json::from_value(result.clone())?;
    match self.get(|state| Ok(state.selectionUI.clone()))? {
        SelectionUI::FZF => {
            let cwd: String = self.eval("getcwd()")?;
            let source: Result<Vec<_>> = symbols
                .iter()
                .map(|sym| {
                    let filename = sym.location.uri.filepath()?;
                    // Prefer a path relative to cwd; fall back to absolute.
                    let relpath = diff_paths(&filename, Path::new(&cwd)).unwrap_or(filename);
                    let start = sym.location.range.start;
                    Ok(format!(
                        "{}:{}:{}:\t{}",
                        relpath.to_str().unwrap_or_default(),
                        start.line + 1,
                        start.character + 1,
                        sym.name
                    ))
                })
                .collect();
            let source = source?;
            self.notify(
                None,
                "s:FZF",
                json!([source, format!("s:{}", NOTIFICATION__FZFSinkLocation)]),
            )?;
        }
        SelectionUI::LocationList => {
            let loclist: Vec<_> = symbols
                .iter()
                .map(|sym| {
                    let start = sym.location.range.start;
                    // NOTE(review): this branch uses uri.to_file_path() while
                    // the FZF branch uses uri.filepath() — confirm both
                    // serialize to the same filename representation.
                    json!({
                        "filename": sym.location.uri.to_file_path(),
                        "lnum": start.line + 1,
                        "col": start.character + 1,
                        "text": sym.name,
                    })
                })
                .collect();
            self.notify(None, "setloclist", json!([0, loclist]))?;
            self.echo("Workspace symbols populated to location list.")?;
        }
    }
    info!("End {}", lsp::request::WorkspaceSymbol::METHOD);
    Ok(result)
}
/// FZF sink for location entries (document/workspace symbols).
///
/// Entries look like `[relpath:]line:col:\ttext`; a 4-token entry
/// carries its own path (workspace symbols), a 3-token entry refers to
/// the current file (document symbols).
fn languageClient_FZFSinkLocation(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__FZFSinkLocation);
    let params = match *params {
        None | Some(Params::None) | Some(Params::Map(_)) => {
            bail!("Expecting array params!");
        }
        Some(Params::Array(ref arr)) => Value::Array(arr.clone()),
    };
    let lines: Vec<String> = serde_json::from_value(params)?;
    if lines.is_empty() {
        // Fix: previously `err_msg("No selection!")` constructed an error
        // and discarded it, so execution fell through. Abort for real.
        bail!("No selection!");
    }
    let mut tokens: Vec<&str> = lines
        .get(0)
        .ok_or_else(|| format_err!("Failed to get line! lines: {:?}", lines))?
        .split(':')
        .collect();
    // Reverse so `pop()` yields the tokens front-to-back.
    tokens.reverse();
    let filename: String = if tokens.len() > 3 {
        // Workspace-symbol entry: first token is a path relative to the
        // project root.
        let relpath = tokens
            .pop()
            .ok_or_else(|| format_err!("Failed to get file path! tokens: {:?}", tokens))?
            .to_owned();
        let languageId: String = self.eval(VimVar::LanguageId)?;
        let root = self.get(|state| {
            state
                .roots
                .get(&languageId)
                .cloned()
                .ok_or_else(|| format_err!("Failed to get root! languageId: {}", languageId))
        })?;
        Path::new(&root)
            .join(relpath)
            .to_str()
            .ok_or_else(|| err_msg("Failed to convert PathBuf to str"))?
            .to_owned()
    } else {
        // Document-symbol entry: the location is in the current file.
        self.eval(VimVar::Filename)?
    };
    // FZF entries are 1-based; LSP positions are 0-based.
    let line = tokens
        .pop()
        .ok_or_else(|| format_err!("Failed to get line! tokens: {:?}", tokens))?
        .to_int()? - 1;
    let character = tokens
        .pop()
        .ok_or_else(|| format_err!("Failed to get character! tokens: {:?}", tokens))?
        .to_int()? - 1;
    self.goto_location(&None, &filename, line, character)?;
    info!("End {}", NOTIFICATION__FZFSinkLocation);
    Ok(())
}
/// FZF sink for code-action commands: look up the stashed command
/// matching the FZF selection and execute it (client-side when
/// possible, otherwise via `workspace/executeCommand`).
fn languageClient_FZFSinkCommand(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__FZFSinkCommand);
    let (selection,): (String,) = self.gather_args(&["selection"], params)?;
    // The FZF entry was rendered as "<command>: <title>".
    let tokens: Vec<&str> = selection.split(": ").collect();
    let command = *tokens
        .get(0)
        .ok_or_else(|| format_err!("Failed to get command! tokens: {:?}", tokens))?;
    let title = *tokens
        .get(1)
        .ok_or_else(|| format_err!("Failed to get title! tokens: {:?}", tokens))?;
    let entry = self.get(|state| {
        state
            .stashed_codeAction_commands
            .iter()
            .find(|e| e.command == command && e.title == title)
            .cloned()
            .ok_or_else(|| err_msg("No stashed command found!"))
    })?;
    if self.try_handle_command_by_client(&entry)? {
        return Ok(());
    }
    let exec_params = json!({
        "command": entry.command,
        "arguments": entry.arguments,
    }).to_params()?;
    self.workspace_executeCommand(&Some(exec_params))?;
    // The selection has been consumed; drop the stash.
    self.update(|state| {
        state.stashed_codeAction_commands = vec![];
        Ok(())
    })?;
    info!("End {}", NOTIFICATION__FZFSinkCommand);
    Ok(())
}
/// Attempt to execute a code-action command on the client side.
///
/// Returns `Ok(true)` when the command is one the client handles
/// itself, `Ok(false)` when it must be forwarded to the server.
fn try_handle_command_by_client(&self, cmd: &Command) -> Result<bool> {
    if !CommandsClient.contains(&cmd.command.as_str()) {
        return Ok(false);
    }
    match cmd.command.as_str() {
        "java.apply.workspaceEdit" => {
            // Each argument is itself a serialized WorkspaceEdit.
            if let Some(ref edits) = cmd.arguments {
                for edit in edits {
                    let edit: WorkspaceEdit = serde_json::from_value(edit.clone())?;
                    self.apply_WorkspaceEdit(&edit, &None)?;
                }
            }
            Ok(true)
        }
        _ => bail!("Not implemented: {}", cmd.command),
    }
}
/// Handler for the LSP `textDocument/codeAction` request.
///
/// Collects the diagnostics overlapping the cursor position, asks the
/// server for the available code actions, stashes the returned commands
/// in state, and offers them through FZF.
fn textDocument_codeAction(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::CodeActionRequest::METHOD);
let (buftype, languageId, filename, line, character, handle): (String, String, String, u64, u64, bool) =
self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Line,
VimVar::Character,
VimVar::Handle,
],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(Value::Null);
}
// Diagnostics whose range contains the cursor position.
// NOTE(review): line and character are compared independently, so a
// multi-line range may match positions outside it — confirm before
// tightening.
let diagnostics: Vec<_> = self.get(|state| {
Ok(state
.diagnostics
.get(&filename)
.ok_or_else(|| err_msg("No diagnostics found!"))?
.iter()
.filter(|dn| {
let start = dn.range.start;
let end = dn.range.end;
start.line <= line && start.character <= character && end.line >= line && end.character >= character
})
.cloned()
.collect())
})?;
let result = self.call(
Some(&languageId),
lsp::request::CodeActionRequest::METHOD,
CodeActionParams {
text_document: TextDocumentIdentifier {
uri: filename.to_url()?,
},
//TODO: is this correct?
range: diagnostics
.get(0)
.ok_or_else(|| err_msg("No diagnostics found!"))?
.range,
context: CodeActionContext { diagnostics },
},
)?;
if !handle {
return Ok(result);
}
let commands: Vec<Command> = serde_json::from_value(result.clone())?;
// Render each entry as "<command>: <title>" for the FZF sink.
let source: Vec<_> = commands
.iter()
.map(|cmd| format!("{}: {}", cmd.command, cmd.title))
.collect();
// Stash the commands so languageClient_FZFSinkCommand can look the
// selection up later.
self.update(|state| {
state.stashed_codeAction_commands = commands;
Ok(())
})?;
self.notify(
None,
"s:FZF",
json!([source, format!("s:{}", NOTIFICATION__FZFSinkCommand)]),
)?;
info!("End {}", lsp::request::CodeActionRequest::METHOD);
Ok(result)
}
/// Handler for the LSP `textDocument/completion` request; returns the
/// server's raw completion response.
fn textDocument_completion(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::Completion::METHOD);
    let (buftype, languageId, filename, line, character, handle): (String, String, String, u64, u64, bool) =
        self.gather_args(
            &[
                VimVar::Buftype,
                VimVar::LanguageId,
                VimVar::Filename,
                VimVar::Line,
                VimVar::Character,
                VimVar::Handle,
            ],
            params,
        )?;
    // Bail out for special buffers or unknown filetypes.
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    let position_params = TextDocumentPositionParams {
        text_document: TextDocumentIdentifier {
            uri: filename.to_url()?,
        },
        position: Position { line, character },
    };
    let result = self.call(
        Some(&languageId),
        lsp::request::Completion::METHOD,
        position_params,
    )?;
    if !handle {
        return Ok(result);
    }
    info!("End {}", lsp::request::Completion::METHOD);
    Ok(result)
}
/// Handler for the LSP `textDocument/signatureHelp` request.
///
/// Echoes the active signature in vim, highlighting the active
/// parameter with `WarningMsg` when it can be located inside the label.
fn textDocument_signatureHelp(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::SignatureHelpRequest::METHOD);
let (buftype, languageId, filename, line, character, handle): (String, String, String, u64, u64, bool) =
self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Line,
VimVar::Character,
VimVar::Handle,
],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(Value::Null);
}
let result = self.call(
Some(&languageId),
lsp::request::SignatureHelpRequest::METHOD,
TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: filename.to_url()?,
},
position: Position { line, character },
},
)?;
// Servers may legally answer null; pass that straight through.
if !handle || result == Value::Null {
return Ok(result);
}
let help: SignatureHelp = serde_json::from_value(result)?;
if help.signatures.is_empty() {
return Ok(Value::Null);
}
// active_signature/active_parameter default to index 0 when the
// server omits them.
let active_signature = help.signatures
.get(help.active_signature.unwrap_or(0).to_usize()?)
.ok_or_else(|| err_msg("Failed to get active signature"))?;
let active_parameter: Option<&ParameterInformation>;
if let Some(ref parameters) = active_signature.parameters {
active_parameter = parameters.get(help.active_parameter.unwrap_or(0).to_usize()?);
} else {
active_parameter = None;
}
if let Some(active_parameter) = active_parameter {
let mut cmd = "echo".to_owned();
// Splitting the signature label on the parameter text yields
// exactly two chunks when the parameter occurs exactly once.
let chunks: Vec<&str> = active_signature
.label
.split(&active_parameter.label)
.collect();
if chunks.len() == 2 {
let begin = chunks.get(0).cloned().unwrap_or_default();
let end = chunks.get(1).cloned().unwrap_or_default();
cmd += &format!(
" | echon '{}' | echohl WarningMsg | echon '{}' | echohl None | echon '{}'",
begin, active_parameter.label, end
);
} else {
// Active parameter is not part of signature.
cmd += &format!(" | echo '{}'", active_signature.label);
}
self.command(&cmd)?;
} else {
self.echo(&active_signature.label)?;
}
info!("End {}", lsp::request::SignatureHelpRequest::METHOD);
Ok(Value::Null)
}
/// Handler for the LSP `workspace/executeCommand` request: forward the
/// command and its arguments to the language server.
fn workspace_executeCommand(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::ExecuteCommand::METHOD);
    let (languageId,): (String,) = self.gather_args(&[VimVar::LanguageId], params)?;
    let (command, arguments): (String, Vec<Value>) = self.gather_args(&["command", "arguments"], params)?;
    let exec_params = ExecuteCommandParams { command, arguments };
    let result = self.call(
        Some(&languageId),
        lsp::request::ExecuteCommand::METHOD,
        exec_params,
    )?;
    info!("End {}", lsp::request::ExecuteCommand::METHOD);
    Ok(result)
}
/// Handler for the server-initiated `workspace/applyEdit` request:
/// apply the edit locally and report success back to the server.
fn workspace_applyEdit(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::ApplyWorkspaceEdit::METHOD);
    let params: ApplyWorkspaceEditParams = serde_json::from_value(params.clone().to_value())?;
    self.apply_WorkspaceEdit(&params.edit, &None)?;
    info!("End {}", lsp::request::ApplyWorkspaceEdit::METHOD);
    let response = ApplyWorkspaceEditResponse { applied: true };
    Ok(serde_json::to_value(response)?)
}
/// Handler for RLS's non-standard `rustDocument/implementations`
/// request; displays the resulting locations like any other list.
fn rustDocument_implementations(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__RustImplementations);
    let (buftype, languageId, filename, line, character, handle): (String, String, String, u64, u64, bool) =
        self.gather_args(
            &[
                VimVar::Buftype,
                VimVar::LanguageId,
                VimVar::Filename,
                VimVar::Line,
                VimVar::Character,
                VimVar::Handle,
            ],
            params,
        )?;
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    let position_params = TextDocumentPositionParams {
        text_document: TextDocumentIdentifier {
            uri: filename.to_url()?,
        },
        position: Position { line, character },
    };
    let result = self.call(Some(&languageId), REQUEST__RustImplementations, position_params)?;
    if !handle {
        return Ok(result);
    }
    let locations: Vec<Location> = serde_json::from_value(result.clone())?;
    self.display_locations(&locations, &languageId)?;
    info!("End {}", REQUEST__RustImplementations);
    Ok(result)
}
/// Handler for the LSP `exit` notification: tell the server to exit,
/// then drop all client-side state associated with it.
fn exit(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::Exit::METHOD);
    let (languageId,): (String,) = self.gather_args(&[VimVar::LanguageId], params)?;
    self.notify(Some(&languageId), lsp::notification::Exit::METHOD, Value::Null)?;
    self.cleanup(&languageId)?;
    info!("End {}", lsp::notification::Exit::METHOD);
    Ok(())
}
/// Drop all client-side state for `languageId` after its server exits:
/// forget the child process and writer, clear documents, diagnostics,
/// signs and highlights under the project root, and reset the status
/// variables in vim.
fn cleanup(&self, languageId: &str) -> Result<()> {
self.update(|state| {
state.child_ids.remove(languageId);
state.last_cursor_line = 0;
Ok(())
})?;
let signsmap = self.update(|state| {
state.writers.remove(languageId);
let root = state
.roots
.remove(languageId)
.ok_or_else(|| format_err!("No project root found! languageId: {}", languageId))?;
// Everything keyed by a path under the project root belongs to
// this server and is discarded.
state.text_documents.retain(|f, _| !f.starts_with(&root));
state.diagnostics.retain(|f, _| !f.starts_with(&root));
// Collect the removed signs so they can be unplaced in vim below.
let mut signsmap = HashMap::new();
state.signs.retain(|f, s| {
if f.starts_with(&root) {
signsmap.insert(f.clone(), s.clone());
false
} else {
true
}
});
state
.line_diagnostics
.retain(|fl, _| !fl.0.starts_with(&root));
Ok(signsmap)
})?;
// Unplace the signs for every affected file.
for (filename, signs) in signsmap {
let (_, cmd) = get_command_update_signs(&signs, &[], &filename);
self.command(&cmd)?;
}
let hlsource = self.update(|state| {
state
.highlight_source
.ok_or_else(|| err_msg("No highlight source"))
});
// Highlight clearing is neovim-only; skip silently when there is no
// highlight source.
if let Ok(hlsource) = hlsource {
self.call(
None,
"nvim_buf_clear_highlight",
json!([0, hlsource, 1, -1]),
)?;
}
if self.eval::<_, u64>("exists('#User#LanguageClientStopped')")? == 1 {
self.command("doautocmd User LanguageClientStopped")?;
}
self.command(&format!("let {}=0", VIM__ServerStatus))?;
self.command(&format!("let {}=''", VIM__ServerStatusMessage))?;
Ok(())
}
/// Handler for the non-standard `language/status` notification (sent
/// e.g. by eclipse.jdt.ls); echoes the status message in vim.
fn language_status(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__LanguageStatus);
    let params: LanguageStatusParams = serde_json::from_value(params.clone().to_value())?;
    self.echomsg(&format!("{} {}", params.typee, params.message))?;
    info!("End {}", NOTIFICATION__LanguageStatus);
    Ok(())
}
/// RLS `rustDocument/beginBuild` notification: mark the server busy in
/// the status variables.
fn rust_handleBeginBuild(&self, _params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__RustBeginBuild);
    let cmd = format!(
        "let {}=1 | let {}='Rust: build begin'",
        VIM__ServerStatus, VIM__ServerStatusMessage
    );
    self.command(&cmd)?;
    info!("End {}", NOTIFICATION__RustBeginBuild);
    Ok(())
}
/// RLS `rustDocument/diagnosticsBegin` notification: mark the server
/// busy in the status variables.
fn rust_handleDiagnosticsBegin(&self, _params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__RustDiagnosticsBegin);
    let cmd = format!(
        "let {}=1 | let {}='Rust: diagnostics begin'",
        VIM__ServerStatus, VIM__ServerStatusMessage
    );
    self.command(&cmd)?;
    info!("End {}", NOTIFICATION__RustDiagnosticsBegin);
    Ok(())
}
/// RLS `rustDocument/diagnosticsEnd` notification: mark the server
/// idle in the status variables.
fn rust_handleDiagnosticsEnd(&self, _params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__RustDiagnosticsEnd);
    let cmd = format!(
        "let {}=0 | let {}='Rust: diagnostics end'",
        VIM__ServerStatus, VIM__ServerStatusMessage
    );
    self.command(&cmd)?;
    info!("End {}", NOTIFICATION__RustDiagnosticsEnd);
    Ok(())
}
/// cquery `$cquery/progress` notification: reflect indexing activity
/// in the server-status vim variables.
fn cquery_handleProgress(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__CqueryProgress);
    let params: CqueryProgressParams = serde_json::from_value(params.clone().to_value())?;
    // Outstanding jobs across all of cquery's pipeline stages.
    let total = params.indexRequestCount + params.doIdMapCount + params.loadPreviousIndexCount
        + params.onIdMappedCount + params.onIndexedCount;
    let cmd = if total == 0 {
        format!(
            "let {}=0 | let {}='cquery: idle'",
            VIM__ServerStatus, VIM__ServerStatusMessage
        )
    } else {
        format!(
            "let {}=1 | let {}='cquery: indexing ({} jobs)'",
            VIM__ServerStatus, VIM__ServerStatusMessage, params.indexRequestCount
        )
    };
    self.command(&cmd)?;
    info!("End {}", NOTIFICATION__CqueryProgress);
    Ok(())
}
}
// Hover response could be null. Close #292.
use super::*;
use std::str::FromStr;
use std::ops::Deref;
use lsp::request::Request;
use lsp::notification::Notification;
use vim::*;
/// Core client interface, implemented on the shared `Arc<Mutex<State>>`.
///
/// Groups state access, the RPC transport, the standard LSP handlers,
/// and this plugin's own extension methods.
pub trait ILanguageClient {
    fn get<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce(&State) -> Result<T>;
    fn update<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce(&mut State) -> Result<T>;
    fn loop_message<T: BufRead>(&self, input: T, languageId: Option<String>) -> Result<()>;
    fn handle_message(&self, languageId: Option<String>, message: String) -> Result<()>;
    fn write(&self, languageId: Option<&str>, message: &str) -> Result<()>;
    fn output(&self, languageId: Option<&str>, id: Id, result: Result<Value>) -> Result<()>;
    fn call<P: Serialize>(&self, languageId: Option<&str>, method: &str, params: P) -> Result<Value>;
    fn notify<P: Serialize>(&self, languageId: Option<&str>, method: &str, params: P) -> Result<()>;

    // Utils.
    fn gather_args<E: VimExp + std::fmt::Debug, T: DeserializeOwned>(
        &self,
        exps: &[E],
        params: &Option<Params>,
    ) -> Result<T>;
    fn sync_settings(&self) -> Result<()>;
    fn define_signs(&self) -> Result<()>;
    fn apply_WorkspaceEdit(&self, edit: &WorkspaceEdit, params: &Option<Params>) -> Result<()>;
    fn apply_TextEdits<P: AsRef<Path>>(&self, path: P, edits: &[TextEdit]) -> Result<()>;
    fn display_diagnostics(&self, filename: &str, diagnostics: &[Diagnostic]) -> Result<()>;
    fn display_locations(&self, locations: &[Location], languageId: &str) -> Result<()>;
    fn registerCMSource(&self, languageId: &str, result: &Value) -> Result<()>;
    fn get_line<P: AsRef<Path>>(&self, path: P, line: u64) -> Result<String>;
    fn try_handle_command_by_client(&self, cmd: &Command) -> Result<bool>;
    fn cleanup(&self, languageId: &str) -> Result<()>;

    fn initialize(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_hover(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_definition(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_rename(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_documentSymbol(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_codeAction(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_completion(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_signatureHelp(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_references(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_formatting(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_rangeFormatting(&self, params: &Option<Params>) -> Result<Value>;
    fn completionItem_resolve(&self, params: &Option<Params>) -> Result<Value>;
    fn workspace_symbol(&self, params: &Option<Params>) -> Result<Value>;
    fn workspace_executeCommand(&self, params: &Option<Params>) -> Result<Value>;
    fn workspace_applyEdit(&self, params: &Option<Params>) -> Result<Value>;
    fn rustDocument_implementations(&self, params: &Option<Params>) -> Result<Value>;
    fn textDocument_didOpen(&self, params: &Option<Params>) -> Result<()>;
    fn textDocument_didChange(&self, params: &Option<Params>) -> Result<()>;
    fn textDocument_didSave(&self, params: &Option<Params>) -> Result<()>;
    fn textDocument_didClose(&self, params: &Option<Params>) -> Result<()>;
    fn textDocument_publishDiagnostics(&self, params: &Option<Params>) -> Result<()>;
    fn window_logMessage(&self, params: &Option<Params>) -> Result<()>;
    fn exit(&self, params: &Option<Params>) -> Result<()>;

    // Extensions.
    // Named parameters instead of the deprecated anonymous trait
    // parameters, matching every other method of this trait.
    fn languageClient_getState(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_isAlive(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_startServer(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_registerServerCommands(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_setLoggingLevel(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_omniComplete(&self, params: &Option<Params>) -> Result<Value>;
    fn languageClient_handleBufReadPost(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_handleTextChanged(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_handleBufWritePost(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_handleBufDelete(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_handleCursorMoved(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_FZFSinkLocation(&self, params: &Option<Params>) -> Result<()>;
    fn languageClient_FZFSinkCommand(&self, params: &Option<Params>) -> Result<()>;
    fn NCM_refresh(&self, params: &Option<Params>) -> Result<()>;

    // Extensions by language servers.
    fn language_status(&self, params: &Option<Params>) -> Result<()>;
    fn rust_handleBeginBuild(&self, params: &Option<Params>) -> Result<()>;
    fn rust_handleDiagnosticsBegin(&self, params: &Option<Params>) -> Result<()>;
    fn rust_handleDiagnosticsEnd(&self, params: &Option<Params>) -> Result<()>;
    fn cquery_handleProgress(&self, params: &Option<Params>) -> Result<()>;
}
impl ILanguageClient for Arc<Mutex<State>> {
/// Run a closure with shared (read) access to the client state.
///
/// Fails when the state mutex is poisoned.
fn get<F, T>(&self, f: F) -> Result<T>
where
    F: FnOnce(&State) -> Result<T>,
{
    // `map_err` instead of `or_else(|_| Err(..))`: same behavior, the
    // idiomatic form. The poison error is replaced with a plain message
    // because it is not convertible to the crate's error type.
    let state = self.lock().map_err(|_| err_msg("Failed to lock state"))?;
    f(&state)
}
/// Run a closure with exclusive (write) access to the client state.
///
/// When debug logging is enabled the state is serialized before and
/// after the closure and every changed field is logged.
fn update<F, T>(&self, f: F) -> Result<T>
where
F: FnOnce(&mut State) -> Result<T>,
{
use log::LogLevel;
let mut state = self.lock()
.or_else(|_| Err(err_msg("Failed to lock state")))?;
// Snapshot only when it will actually be logged; serializing the
// whole state is expensive.
let before = if log_enabled!(LogLevel::Debug) {
let s = serde_json::to_string(state.deref())?;
serde_json::from_str(&s)?
} else {
Value::default()
};
let result = f(&mut state);
let after = if log_enabled!(LogLevel::Debug) {
let s = serde_json::to_string(state.deref())?;
serde_json::from_str(&s)?
} else {
Value::default()
};
for (k, (v1, v2)) in diff_value(&before, &after, "state") {
debug!("{}: {} ==> {}", k, v1, v2);
}
result
}
/// Main read loop for one input channel.
///
/// With a `languageId` the input is a language server's stdout, parsed
/// as LSP-framed messages (`Content-Length` header, blank separator
/// line, then the payload); without one the input is the vim channel,
/// one JSON message per line. Each complete message is handed off to a
/// fresh handler thread so slow handlers cannot block reading.
fn loop_message<T: BufRead>(&self, input: T, languageId: Option<String>) -> Result<()> {
// Count how many consequent empty lines.
let mut count_empty_lines = 0;
let mut input = input;
let mut content_length = 0;
loop {
let mut message = String::new();
let mut line = String::new();
if let Some(languageId) = languageId.clone() {
input.read_line(&mut line)?;
line = line.strip();
if line.is_empty() {
// Blank line: either the header/body separator, or noise
// from a server process that has died (EOF reads as "").
count_empty_lines += 1;
if count_empty_lines > 5 {
if let Err(err) = self.cleanup(&languageId) {
error!("Error when cleanup: {:?}", err);
}
let mut message = format!("Language server ({}) exited unexpectedly!", languageId);
match get_log_server() {
Ok(log_server) => {
message += "\n\nlanguage server stderr:\n";
message += &log_server;
}
Err(err) => error!("Error when get_log_server: {:?}", err),
}
if let Err(err) = self.echoerr(&message) {
error!("Error in echoerr: {:?}", err);
};
bail!("{}", message);
}
// Header section ended: read exactly `content_length`
// bytes of payload.
let mut buf = vec![0; content_length];
input.read_exact(buf.as_mut_slice())?;
message = String::from_utf8(buf)?;
} else {
count_empty_lines = 0;
if !line.starts_with("Content-Length") {
continue;
}
// Parse "Content-Length: <n>"; the value sets how many
// bytes to read after the blank separator line.
let tokens: Vec<&str> = line.splitn(2, ':').collect();
let len = tokens
.get(1)
.ok_or_else(|| format_err!("Failed to get length! tokens: {:?}", tokens))?
.trim();
content_length = usize::from_str(len)?;
}
} else if input.read_line(&mut message)? == 0 {
// EOF on the vim channel.
break;
}
message = message.strip();
if message.is_empty() {
continue;
}
info!("<= {}", message);
let state = Arc::clone(self);
let languageId_clone = languageId.clone();
let spawn_result = std::thread::Builder::new()
.name(format!(
"Handler-{}",
languageId.clone().unwrap_or_else(|| "main".to_owned())
))
.spawn(move || {
if let Err(err) = state.handle_message(languageId_clone, message.clone()) {
// LCError values are expected control-flow failures and
// are not logged as errors.
if err.downcast_ref::<LCError>().is_none() {
error!(
"Error handling message. Message: {}. Error: {:?}",
message, err
);
}
}
});
if let Err(err) = spawn_result {
error!("Failed to spawn handler: {:?}", err);
}
}
Ok(())
}
/// Handle an incoming message.
///
/// A message is first tried as a response (`Output`) to one of our own
/// requests and routed to the channel registered by `call`; otherwise
/// it is decoded as a method call or notification and dispatched by
/// method name.
fn handle_message(&self, languageId: Option<String>, message: String) -> Result<()> {
if let Ok(output) = serde_json::from_str::<Output>(&message) {
// Response to one of our requests: wake the waiting caller.
let tx = self.update(|state| {
state
.txs
.remove(&output.id().to_int()?)
.ok_or_else(|| format_err!("Failed to get channel sender! id: {:?}", output.id()))
})?;
let result = match output {
Output::Success(success) => Ok(success.result),
Output::Failure(failure) => Err(format_err!("{}", failure.error.message)),
};
tx.send(result)?;
return Ok(());
}
// FIXME
let message = message.replace(r#","meta":{}"#, "");
let call = serde_json::from_str(&message)?;
match call {
Call::MethodCall(method_call) => {
let result: Result<Value> = match method_call.method.as_str() {
lsp::request::HoverRequest::METHOD => self.textDocument_hover(&method_call.params),
lsp::request::GotoDefinition::METHOD => self.textDocument_definition(&method_call.params),
lsp::request::Rename::METHOD => self.textDocument_rename(&method_call.params),
lsp::request::DocumentSymbol::METHOD => self.textDocument_documentSymbol(&method_call.params),
lsp::request::WorkspaceSymbol::METHOD => self.workspace_symbol(&method_call.params),
lsp::request::CodeActionRequest::METHOD => self.textDocument_codeAction(&method_call.params),
lsp::request::Completion::METHOD => self.textDocument_completion(&method_call.params),
lsp::request::SignatureHelpRequest::METHOD => self.textDocument_signatureHelp(&method_call.params),
lsp::request::References::METHOD => self.textDocument_references(&method_call.params),
lsp::request::Formatting::METHOD => self.textDocument_formatting(&method_call.params),
lsp::request::RangeFormatting::METHOD => self.textDocument_rangeFormatting(&method_call.params),
lsp::request::ResolveCompletionItem::METHOD => self.completionItem_resolve(&method_call.params),
lsp::request::ExecuteCommand::METHOD => self.workspace_executeCommand(&method_call.params),
lsp::request::ApplyWorkspaceEdit::METHOD => self.workspace_applyEdit(&method_call.params),
REQUEST__RustImplementations => self.rustDocument_implementations(&method_call.params),
// Extensions.
REQUEST__GetState => self.languageClient_getState(&method_call.params),
REQUEST__IsAlive => self.languageClient_isAlive(&method_call.params),
REQUEST__StartServer => self.languageClient_startServer(&method_call.params),
REQUEST__RegisterServerCommands => self.languageClient_registerServerCommands(&method_call.params),
REQUEST__SetLoggingLevel => self.languageClient_setLoggingLevel(&method_call.params),
REQUEST__OmniComplete => self.languageClient_omniComplete(&method_call.params),
_ => Err(format_err!("Unknown method call: {}", method_call.method)),
};
// LCError values are expected control-flow failures; only log
// unexpected errors.
if let Err(err) = result.as_ref() {
if err.downcast_ref::<LCError>().is_none() {
error!(
"Error handling message. Message: {}. Error: {:?}",
message, result
);
}
}
// Method calls always get a response, even on failure.
self.output(
languageId.as_ref().map(|s| s.as_str()),
method_call.id,
result,
)?
}
Call::Notification(notification) => {
match notification.method.as_str() {
lsp::notification::DidOpenTextDocument::METHOD => self.textDocument_didOpen(&notification.params)?,
lsp::notification::DidChangeTextDocument::METHOD => {
self.textDocument_didChange(&notification.params)?
}
lsp::notification::DidSaveTextDocument::METHOD => self.textDocument_didSave(&notification.params)?,
lsp::notification::DidCloseTextDocument::METHOD => {
self.textDocument_didClose(&notification.params)?
}
lsp::notification::PublishDiagnostics::METHOD => {
self.textDocument_publishDiagnostics(&notification.params)?
}
lsp::notification::LogMessage::METHOD => self.window_logMessage(&notification.params)?,
lsp::notification::Exit::METHOD => self.exit(&notification.params)?,
// Extensions.
NOTIFICATION__HandleBufReadPost => self.languageClient_handleBufReadPost(&notification.params)?,
NOTIFICATION__HandleTextChanged => self.languageClient_handleTextChanged(&notification.params)?,
NOTIFICATION__HandleBufWritePost => self.languageClient_handleBufWritePost(&notification.params)?,
NOTIFICATION__HandleBufDelete => self.languageClient_handleBufDelete(&notification.params)?,
NOTIFICATION__HandleCursorMoved => self.languageClient_handleCursorMoved(&notification.params)?,
NOTIFICATION__FZFSinkLocation => self.languageClient_FZFSinkLocation(&notification.params)?,
NOTIFICATION__FZFSinkCommand => self.languageClient_FZFSinkCommand(&notification.params)?,
NOTIFICATION__NCMRefresh => self.NCM_refresh(&notification.params)?,
// Extensions by language servers.
NOTIFICATION__LanguageStatus => self.language_status(&notification.params)?,
NOTIFICATION__RustBeginBuild => self.rust_handleBeginBuild(&notification.params)?,
NOTIFICATION__RustDiagnosticsBegin => self.rust_handleDiagnosticsBegin(&notification.params)?,
NOTIFICATION__RustDiagnosticsEnd => self.rust_handleDiagnosticsEnd(&notification.params)?,
NOTIFICATION__CqueryProgress => self.cquery_handleProgress(&notification.params)?,
_ => warn!("Unknown notification: {:?}", notification.method),
}
}
Call::Invalid(id) => bail!("Invalid message of id: {:?}", id),
}
Ok(())
}
/// Send message to RPC server.
///
/// With a `languageId` the message is framed with an LSP
/// `Content-Length` header and written to that server's stdin;
/// otherwise it is written to stdout for the vim channel.
fn write(&self, languageId: Option<&str>, message: &str) -> Result<()> {
    if let Some(languageId) = languageId {
        self.update(|state| {
            let writer = state
                .writers
                .get_mut(languageId)
                // `ok_or_else`: only build the error (and its owned
                // String) on the failure path, not on every call.
                .ok_or_else(|| LCError::NoLanguageServer {
                    languageId: languageId.to_owned(),
                })?;
            write!(
                writer,
                "Content-Length: {}\r\n\r\n{}",
                message.len(),
                message
            )?;
            Ok(writer.flush()?)
        })?;
    } else {
        println!("Content-Length: {}\n\n{}", message.len(), message);
    }
    Ok(())
}
/// Write an RPC call output.
fn output(&self, languageId: Option<&str>, id: Id, result: Result<Value>) -> Result<()> {
let response = match result {
Ok(ok) => Output::Success(Success {
jsonrpc: Some(Version::V2),
id,
result: ok,
}),
Err(err) => Output::Failure(Failure {
jsonrpc: Some(Version::V2),
id,
error: err.to_rpc_error(),
}),
};
let message = serde_json::to_string(&response)?;
info!("=> {}", message);
self.write(languageId, &message)?;
Ok(())
}
/// RPC method call.
///
/// Allocates a fresh request id, registers a channel receiver for the
/// response, sends the request, and blocks until the reply arrives
/// (up to five minutes).
fn call<P: Serialize>(&self, languageId: Option<&str>, method: &str, params: P) -> Result<Value> {
    let id = self.update(|state| {
        state.id += 1;
        Ok(state.id)
    })?;
    let method_call = MethodCall {
        jsonrpc: Some(Version::V2),
        id: Id::Num(id),
        method: method.into(),
        params: Some(params.to_params()?),
    };
    // handle_message looks this sender up by request id when the
    // response comes in.
    let (tx, rx) = channel();
    self.update(|state| {
        state.txs.insert(id, tx);
        Ok(())
    })?;
    let message = serde_json::to_string(&method_call)?;
    info!("=> {}", message);
    self.write(languageId, &message)?;
    rx.recv_timeout(std::time::Duration::from_secs(60 * 5))?
}
/// RPC notification.
///
/// Like `call`, but fire-and-forget: no id, no response expected.
fn notify<P: Serialize>(&self, languageId: Option<&str>, method: &str, params: P) -> Result<()> {
    let notification = rpc::Notification {
        jsonrpc: Some(Version::V2),
        method: method.to_owned(),
        params: Some(params.to_params()?),
    };
    let message = serde_json::to_string(&notification)?;
    info!("=> {}", message);
    self.write(languageId, &message)
}
/// Assemble the arguments named by `exps` into a deserialized tuple.
///
/// Values already present in `map` (the RPC params) are used as-is;
/// the rest are fetched from vim in a single `eval` round-trip.
fn gather_args<E: VimExp + std::fmt::Debug, T: DeserializeOwned>(
&self,
exps: &[E],
map: &Option<Params>,
) -> Result<T> {
let mut map = match *map {
None | Some(Params::None) => serde_json::map::Map::new(),
Some(Params::Array(_)) => bail!("Params should be dict!"),
Some(Params::Map(ref map)) => map.clone(),
};
// Collect the keys the caller did not supply.
let mut keys_request = vec![];
let mut exps_request = vec![];
for e in exps {
let k = e.to_key();
if !map.contains_key(&k) {
keys_request.push(k);
exps_request.push(e.to_exp());
}
}
let values_request: Vec<Value> = if keys_request.is_empty() {
vec![]
} else {
info!(
"Some arguments are not available. Requesting from vim. Keys: {:?}. Exps: {:?}",
keys_request, exps_request,
);
self.eval(&exps_request[..])?
};
for (k, v) in keys_request.into_iter().zip(values_request.into_iter()) {
map.insert(k, v);
}
// Re-emit the values in the order the caller asked for so the array
// deserializes into the expected tuple.
let mut result = vec![];
for e in exps {
let k = e.to_key();
result.push(map.remove(&k)
.ok_or_else(|| format_err!("Failed to get value! k: {}", k))?);
}
info!("gather_args: {:?} = {:?}", exps, result);
Ok(serde_json::from_value(Value::Array(result))?)
}
/// Pull user configuration from vim `g:LanguageClient_*` variables into
/// the client `State`, validating enum-like options along the way.
fn sync_settings(&self) -> Result<()> {
let (autoStart, serverCommands, mut selectionUI, trace, settingsPath, loadSettings, loggingLevel, rootMarkers): (
u64,
HashMap<String, Vec<String>>,
String,
String,
String,
u64,
String,
Option<RootMarkers>,
) = self.eval(
&[
"!!get(g:, 'LanguageClient_autoStart', 1)",
"get(g:, 'LanguageClient_serverCommands', {})",
"get(g:, 'LanguageClient_selectionUI', '')",
"get(g:, 'LanguageClient_trace', 'Off')",
"get(g:, 'LanguageClient_settingsPath', '.vim/settings.json')",
"!!get(g:, 'LanguageClient_loadSettings', 1)",
"get(g:, 'LanguageClient_loggingLevel', 'WARN')",
"get(g:, 'LanguageClient_rootMarkers', v:null)",
][..],
)?;
// vimscript use 1 for true, 0 for false.
let autoStart = autoStart == 1;
let loadSettings = loadSettings == 1;
let trace = match trace.to_uppercase().as_str() {
"OFF" => TraceOption::Off,
"MESSAGES" => TraceOption::Messages,
"VERBOSE" => TraceOption::Verbose,
_ => bail!("Unknown trace option: {:?}", trace),
};
// No explicit choice: default to FZF when the user has it loaded.
if selectionUI == "" {
let loaded_fzf: u64 = self.eval("get(g:, 'loaded_fzf')")?;
if loaded_fzf == 1 {
selectionUI = "FZF".into();
}
}
let selectionUI = match selectionUI.to_uppercase().as_str() {
"FZF" => SelectionUI::FZF,
"" | "LOCATIONLIST" | "LOCATION-LIST" => SelectionUI::LocationList,
_ => bail!("Unknown selectionUI option: {:?}", selectionUI),
};
let logger = LOGGER
.deref()
.as_ref()
.or_else(|_| Err(err_msg("No logger")))?;
logger::set_logging_level(logger, &loggingLevel)?;
let (diagnosticsEnable, diagnosticsList, diagnosticsDisplay, windowLogMessageLevel): (
u64,
DiagnosticsList,
Value,
String,
) = self.eval(
&[
"!!get(g:, 'LanguageClient_diagnosticsEnable', v:true)",
"get(g:, 'LanguageClient_diagnosticsList', 'Quickfix')",
"get(g:, 'LanguageClient_diagnosticsDisplay', {})",
"get(g:, 'LanguageClient_windowLogMessageLevel', 'Warning')",
][..],
)?;
let diagnosticsEnable = diagnosticsEnable == 1;
let windowLogMessageLevel = match windowLogMessageLevel.to_uppercase().as_str() {
"ERROR" => MessageType::Error,
"WARNING" => MessageType::Warning,
"INFO" => MessageType::Info,
"LOG" => MessageType::Log,
_ => bail!("Unknown windowLogMessageLevel: {}", windowLogMessageLevel),
};
self.update(|state| {
state.autoStart = autoStart;
state.serverCommands.merge(serverCommands);
state.selectionUI = selectionUI;
state.trace = trace;
state.diagnosticsEnable = diagnosticsEnable;
state.diagnosticsList = diagnosticsList;
// User-supplied display settings override the defaults key-wise.
state.diagnosticsDisplay =
serde_json::from_value(serde_json::to_value(&state.diagnosticsDisplay)?.combine(diagnosticsDisplay))?;
state.windowLogMessageLevel = windowLogMessageLevel;
state.settingsPath = settingsPath;
state.loadSettings = loadSettings;
state.rootMarkers = rootMarkers;
Ok(())
})?;
Ok(())
}
/// Emit `:sign define` commands for every configured diagnostics-display
/// entry, so gutter signs exist before any diagnostics are published.
fn define_signs(&self) -> Result<()> {
    info!("Define signs");
    let cmd = self.get(|state| {
        // Start with a no-op `echo` so every definition can be chained
        // uniformly with `| execute ...`.
        let mut parts = vec!["echo".to_owned()];
        for entry in state.diagnosticsDisplay.values() {
            parts.push(format!(
                "execute 'sign define LanguageClient{} text={} texthl={}'",
                entry.name, entry.signText, entry.signTexthl,
            ));
        }
        Ok(parts.join(" | "))
    })?;
    self.command(&cmd)?;
    info!("Define signs");
    Ok(())
}
/// Apply a server-sent `WorkspaceEdit` to the corresponding buffers, then
/// restore the cursor to the position it had before the edits.
///
/// Handles both the newer `document_changes` form and the legacy `changes`
/// map; a server may populate either (or both) per the LSP spec.
fn apply_WorkspaceEdit(&self, edit: &WorkspaceEdit, params: &Option<Params>) -> Result<()> {
debug!(
"Begin apply WorkspaceEdit: {:?}. Params: {:?}",
edit, params
);
// Capture the current cursor location first, so we can jump back after
// the edits have moved buffers/windows around.
let (filename, line, character): (String, u64, u64) =
self.gather_args(&[VimVar::Filename, VimVar::Line, VimVar::Character], params)?;
if let Some(ref changes) = edit.document_changes {
for e in changes {
self.apply_TextEdits(&e.text_document.uri.filepath()?, &e.edits)?;
}
}
if let Some(ref changes) = edit.changes {
for (uri, edits) in changes {
self.apply_TextEdits(&uri.filepath()?, edits)?;
}
}
debug!("End apply WorkspaceEdit");
// "buffer" jump: reuse an existing window/buffer rather than splitting.
self.goto_location(&Some("buffer".to_string()), &filename, line, character)?;
Ok(())
}
/// Apply a list of LSP `TextEdit`s to the buffer backing `path`.
///
/// Edits are applied bottom-up (descending start position) so that applying
/// one edit does not invalidate the ranges of the edits that follow.
fn apply_TextEdits<P: AsRef<Path>>(&self, path: P, edits: &[TextEdit]) -> Result<()> {
    debug!("Begin apply TextEdits: {:?}", edits);
    // reverse + stable ascending sort + reverse == stable *descending* sort:
    // edits sharing a start position keep their original relative order.
    let mut edits = edits.to_vec();
    edits.reverse();
    edits.sort_by_key(|edit| (edit.range.start.line, edit.range.start.character));
    edits.reverse();
    self.goto_location(&None, &path, 0, 0)?;
    let mut lines: Vec<String> = self.getbufline(&path)?;
    let lines_len = lines.len();
    // Delegates to the free function of the same name for the pure text math.
    lines = apply_TextEdits(&lines, &edits)?;
    let fixendofline: u64 = self.eval("&fixendofline")?;
    // Drop a trailing empty line when 'fixendofline' is set. Use `last()`
    // instead of indexing `lines[lines.len() - 1]`, which panics if the
    // edits produced an empty buffer.
    if fixendofline == 1 && lines.last().map_or(false, |l| l.is_empty()) {
        lines.pop();
    }
    self.notify(None, "setline", json!([1, lines]))?;
    if lines.len() < lines_len {
        // setline() does not remove lines, so delete the leftover tail.
        self.command(&format!("{},{}d", lines.len() + 1, lines_len))?;
    }
    debug!("End apply TextEdits");
    Ok(())
}
/// Render diagnostics for `filename` in Vim/Neovim: per-line echo messages,
/// gutter signs, the quickfix/location list, and (Neovim only) buffer
/// highlights.
fn display_diagnostics(&self, filename: &str, diagnostics: &[Diagnostic]) -> Result<()> {
// Line diagnostics.
// Drop any previously cached per-line messages for this file before
// rebuilding them from the fresh diagnostics.
self.update(|state| {
state
.line_diagnostics
.retain(|&(ref f, _), _| f != filename);
Ok(())
})?;
let mut line_diagnostics = HashMap::new();
for entry in diagnostics {
let line = entry.range.start.line;
// Message format: "[Severity][code]text"; severity and code are
// optional and omitted when absent/empty.
let mut msg = String::new();
if let Some(severity) = entry.severity {
msg += &format!("[{:?}]", severity);
}
if let Some(ref code) = entry.code {
let s = code.to_string();
if !s.is_empty() {
msg += &format!("[{}]", s);
}
}
msg += &entry.message;
// NOTE: later diagnostics on the same line overwrite earlier ones.
line_diagnostics.insert((filename.to_owned(), line), msg);
}
self.update(|state| {
state.line_diagnostics.merge(line_diagnostics);
Ok(())
})?;
// Signs.
// The sign text is the source line under the diagnostic, fetched from
// the cached TextDocumentItem (fails if the file was never opened).
let texts = self.get(|state| {
let text_document = state
.text_documents
.get(filename)
.ok_or_else(|| format_err!("TextDocumentItem not found! filename: {}", filename))?;
Ok(text_document.text.clone())
})?;
let texts: Vec<&str> = texts.split('\n').collect();
let mut signs: Vec<_> = diagnostics
.iter()
.map(|dn| {
let line = dn.range.start.line;
let text = texts
.get(line as usize)
.map(|l| l.to_string())
.unwrap_or_default();
let severity = dn.severity.unwrap_or(DiagnosticSeverity::Information);
// Diagnostic lines are 0-based; Vim signs are 1-based.
Sign::new(line + 1, text, severity)
})
.collect();
signs.sort_unstable();
// Diff against the previously placed signs and emit only the
// place/unplace commands actually needed.
let cmd = self.update(|state| {
let signs_prev = state.signs.remove(filename).unwrap_or_default();
let (signs_next, cmd) = get_command_update_signs(&signs_prev, &signs, filename);
state.signs.insert(filename.to_string(), signs_next);
Ok(cmd)
})?;
info!("Command to update signs: {}", cmd);
self.command(&cmd)?;
// Quickfix.
let qflist: Vec<_> = diagnostics
.iter()
.map(|dn| QuickfixEntry {
filename: filename.to_owned(),
lnum: dn.range.start.line + 1,
col: Some(dn.range.start.character + 1),
nr: dn.code.clone().map(|ns| ns.to_string()),
text: Some(dn.message.to_owned()),
typee: dn.severity.map(|sev| sev.to_quickfix_entry_type()),
})
.collect();
// User-configurable: quickfix list vs. window-local location list.
let diagnosticsList = self.get(|state| Ok(state.diagnosticsList.clone()))?;
match diagnosticsList {
DiagnosticsList::Quickfix => {
self.call(None, "setqflist", [qflist])?;
}
DiagnosticsList::Location => {
self.call(None, "setloclist", json!([0, qflist]))?;
}
};
// Highlights below use nvim_buf_* APIs, so bail out on plain Vim.
let is_nvim: u64 = self.eval("has('nvim')")?;
if is_nvim != 1 {
return Ok(());
}
// Lazily allocate a highlight source id on first use: passing 0 as the
// src_id makes nvim_buf_add_highlight return a fresh id, which is
// cached in state for subsequent calls.
let mut source: Option<u64> = self.get(|state| Ok(state.highlight_source))?;
if source.is_none() {
let exp = format!(
"nvim_buf_add_highlight({}, {}, {}, {}, {}, {})",
0, 0, "''", 1, 1, 1
);
source = Some(self.eval(exp)?);
self.update(|state| {
state.highlight_source = source;
Ok(())
})?;
}
let source = source.ok_or_else(|| err_msg("Empty highlight source id"))?;
let diagnosticsDisplay = self.get(|state| Ok(state.diagnosticsDisplay.clone()))?;
// Highlight.
// TODO: Optimize.
// Clear all previous highlights for this source, then re-add one
// highlight per diagnostic.
self.call(None, "nvim_buf_clear_highlight", json!([0, source, 1, -1]))?;
for dn in diagnostics.iter() {
let severity = dn.severity.unwrap_or(DiagnosticSeverity::Information);
let hl_group = diagnosticsDisplay
.get(&severity.to_int()?)
.ok_or_else(|| err_msg("Failed to get display"))?
.texthl
.clone();
self.notify(
None,
"nvim_buf_add_highlight",
json!([
0,
source,
hl_group,
dn.range.start.line,
dn.range.start.character,
dn.range.end.character,
]),
)?;
}
Ok(())
}
/// Present a list of LSP `Location`s to the user via the configured
/// selection UI (FZF picker or the location list).
fn display_locations(&self, locations: &[Location], _languageId: &str) -> Result<()> {
match self.get(|state| Ok(state.selectionUI.clone()))? {
SelectionUI::FZF => {
// FZF entries are "relpath:line:col:\ttext"; paths are shown
// relative to the current working directory when possible.
let cwd: String = self.eval("getcwd()")?;
let source: Result<Vec<_>> = locations
.iter()
.map(|loc| {
let filename = loc.uri.filepath()?;
let start = loc.range.start;
let text = self.get_line(&filename, start.line).unwrap_or_default();
let relpath = diff_paths(&filename, Path::new(&cwd)).unwrap_or(filename);
Ok(format!(
"{}:{}:{}:\t{}",
relpath.to_str().unwrap_or_default(),
start.line + 1,
start.character + 1,
text
))
})
.collect();
let source = source?;
// The FZF sink notification re-parses the selected entry to
// jump to the location.
self.notify(
None,
"s:FZF",
json!([source, format!("s:{}", NOTIFICATION__FZFSinkLocation)]),
)?;
}
SelectionUI::LocationList => {
let loclist: Result<Vec<_>> = locations
.iter()
.map(|loc| {
let filename = loc.uri.filepath()?;
let start = loc.range.start;
let text = self.get_line(&filename, start.line).unwrap_or_default();
// LSP positions are 0-based; Vim's are 1-based.
Ok(json!({
"filename": filename,
"lnum": start.line + 1,
"col": start.character + 1,
"text": text,
}))
})
.collect();
let loclist = loclist?;
self.notify(None, "setloclist", json!([0, loclist]))?;
self.echo("Location list updated.")?;
}
}
Ok(())
}
/// RPC handler: return the whole plugin state serialized to a JSON string.
fn languageClient_getState(&self, _params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__GetState);
    let serialized = self.get(|state| Ok(serde_json::to_string(state)?))?;
    info!("End {}", REQUEST__GetState);
    Ok(Value::String(serialized))
}
/// RPC handler: report whether a language server is running for the
/// buffer's language (i.e. a writer to its stdin exists).
fn languageClient_isAlive(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__IsAlive);
    let (languageId,): (String,) = self.gather_args(&[VimVar::LanguageId], params)?;
    let alive = self.get(|state| Ok(state.writers.contains_key(&languageId)))?;
    info!("End {}", REQUEST__IsAlive);
    Ok(Value::Bool(alive))
}
/// RPC handler: spawn the configured language-server process for the
/// current buffer's language, wire up its stdio, start the reader thread,
/// and run the LSP initialize/didOpen handshake.
fn languageClient_startServer(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", REQUEST__StartServer);
// Extra command-line arguments supplied from Vim are merged into params.
let (cmdargs,): (Vec<String>,) = self.gather_args(&[("cmdargs", "[]")], params)?;
let cmdparams = vim_cmd_args_to_value(&cmdargs)?;
let params = &Some(params.clone().to_value().combine(cmdparams).to_params()?);
let (buftype, languageId, filename): (String, String, String) = self.gather_args(
&[VimVar::Buftype, VimVar::LanguageId, VimVar::Filename],
params,
)?;
// Ignore special buffers (help, quickfix, ...) and unnamed buffers.
if !buftype.is_empty() || filename.is_empty() {
return Ok(Value::Null);
}
// One server per language: refuse to start a second instance.
if self.get(|state| Ok(state.writers.contains_key(&languageId)))? {
bail!(
"Language client has already started for language {}.",
&languageId
);
}
self.sync_settings()?;
let command = self.get(|state| {
state
.serverCommands
.get(&languageId)
.cloned()
.ok_or_else(|| {
format_err!(
"No language server command found for type: {}.",
&languageId
)
})
})?;
// NOTE(review): env::home_dir is deprecated since Rust 1.29 (can be
// wrong on Windows) — consider the `dirs` crate if one is available.
let home = env::home_dir().ok_or_else(|| err_msg("Failed to get home dir"))?;
let home = home.to_str()
.ok_or_else(|| err_msg("Failed to convert PathBuf to str"))?;
// Expand a leading '~' in each command word to the home directory.
let command: Vec<_> = command
.into_iter()
.map(|cmd| {
if cmd.starts_with('~') {
cmd.replacen('~', home, 1)
} else {
cmd
}
})
.collect();
// The server's stderr is appended to the server log file.
let stderr = std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(&get_logpath_server())?;
let process = std::process::Command::new(command.get(0).ok_or_else(|| err_msg("Empty command!"))?)
.args(&command[1..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(stderr)
.spawn()?;
let child_id = process.id();
let reader = BufReader::new(process
.stdout
.ok_or_else(|| err_msg("Failed to get subprocess stdout"))?);
let writer = BufWriter::new(process
.stdin
.ok_or_else(|| err_msg("Failed to get subprocess stdin"))?);
self.update(|state| {
state.child_ids.insert(languageId.clone(), child_id);
state.writers.insert(languageId.clone(), writer);
Ok(())
})?;
// Dedicated thread that pumps server->client messages for this language.
let state = Arc::clone(self);
let languageId_clone = languageId.clone();
let thread_name = format!("RPC-{}", languageId);
std::thread::Builder::new()
.name(thread_name.clone())
.spawn(move || {
if let Err(err) = state.loop_message(reader, Some(languageId_clone)) {
error!("{} thread error: {}", thread_name, err);
}
})?;
info!("End {}", REQUEST__StartServer);
// Define gutter signs once, when the first server comes up.
if self.get(|state| Ok(state.writers.len()))? == 1 {
self.define_signs()?;
}
// LSP handshake: initialize, then announce the current buffer.
self.initialize(params)?;
self.textDocument_didOpen(params)?;
self.textDocument_didChange(params)?;
// Fire the user autocmd only if someone has registered a handler for it.
if self.eval::<_, u64>("exists('#User#LanguageClientStarted')")? == 1 {
self.command("doautocmd User LanguageClientStarted")?;
}
Ok(Value::Null)
}
// TODO: verify.
/// RPC handler: merge a map of languageId -> server command into state and
/// mirror the merged result back into g:LanguageClient_serverCommands.
fn languageClient_registerServerCommands(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", REQUEST__RegisterServerCommands);
let params = params.clone().ok_or_else(|| err_msg("Empty params!"))?;
// Only a dict-shaped params payload is meaningful here.
let map = match params {
Params::Map(map) => Value::Object(map),
_ => bail!("Unexpected params type!"),
};
let map = serde_json::from_value(map)?;
self.update(|state| Ok(state.serverCommands.merge(map)))?;
// Write the merged commands back to the Vim global so Vimscript sees
// the same configuration as the Rust side.
let exp = format!(
"let g:LanguageClient_serverCommands={}",
serde_json::to_string(&self.get(|state| Ok(state.serverCommands.clone()))?)?
);
self.command(&exp)?;
info!("End {}", REQUEST__RegisterServerCommands);
Ok(Value::Null)
}
/// RPC handler: change the plugin's logging verbosity at runtime.
fn languageClient_setLoggingLevel(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__SetLoggingLevel);
    let (loggingLevel,): (String,) = self.gather_args(&["loggingLevel"], params)?;
    // The global logger is a lazily-initialized Result; surface a plain
    // error if initialization failed.
    let logger = LOGGER
        .deref()
        .as_ref()
        .map_err(|_| err_msg("No logger"))?;
    logger::set_logging_level(logger, &loggingLevel)?;
    info!("End {}", REQUEST__SetLoggingLevel);
    Ok(Value::Null)
}
/// Notification handler for Vim's BufReadPost: attach a newly read buffer
/// to its language server, starting the server first if `autoStart` is on.
fn languageClient_handleBufReadPost(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__HandleBufReadPost);
    let (buftype, languageId, filename): (String, String, String) = self.gather_args(
        &[VimVar::Buftype, VimVar::LanguageId, VimVar::Filename],
        params,
    )?;
    // Ignore special/unnamed buffers and buffers without a filetype.
    if !buftype.is_empty() || languageId.is_empty() || filename.is_empty() {
        return Ok(());
    }
    // File opened before.
    if self.get(|state| Ok(state.text_documents.contains_key(&filename)))? {
        return Ok(());
    }
    if self.get(|state| Ok(state.writers.contains_key(&languageId)))? {
        // Language server is running but file is not within project root.
        let is_in_root = self.get(|state| {
            let root = state
                .roots
                .get(&languageId)
                .ok_or_else(|| format_err!("Failed to get root! languageId: {}", languageId))?;
            Ok(filename.starts_with(root))
        })?;
        if !is_in_root {
            return Ok(());
        }
        self.textDocument_didOpen(params)?;
        // Re-render any diagnostics already received for this file
        // (diagnostics are keyed by canonicalized path).
        let diagnostics = self.get(|state| {
            state
                .diagnostics
                .get(&filename.canonicalize())
                .cloned()
                .ok_or_else(|| format_err!("No diagnostics! filename: {}", filename))
        }).unwrap_or_default();
        self.display_diagnostics(&filename, &diagnostics)?;
        self.languageClient_handleCursorMoved(params)?;
    } else {
        // No server yet: start one if autoStart is enabled. `!!` in the
        // eval normalizes the setting to 0/1; use u64 like every other
        // boolean eval in this file.
        let autoStart: u64 = self.eval("!!get(g:, 'LanguageClient_autoStart', v:true)")?;
        if autoStart == 1 {
            // Startup failure is non-fatal here; just warn.
            if let Err(err) = self.languageClient_startServer(params) {
                warn!("{}", err);
            }
        }
    }
    info!("End {}", NOTIFICATION__HandleBufReadPost);
    Ok(())
}
/// Notification handler for Vim's TextChanged/TextChangedI: forward the
/// buffer content to the server as a textDocument/didChange.
fn languageClient_handleTextChanged(&self, params: &Option<Params>) -> Result<()> {
info!("Begin {}", NOTIFICATION__HandleTextChanged);
self.textDocument_didChange(params)?;
info!("End {}", NOTIFICATION__HandleTextChanged);
Ok(())
}
/// Notification handler for Vim's BufWritePost: tell the server the file
/// was saved via textDocument/didSave.
fn languageClient_handleBufWritePost(&self, params: &Option<Params>) -> Result<()> {
info!("Begin {}", NOTIFICATION__HandleBufWritePost);
self.textDocument_didSave(params)?;
info!("End {}", NOTIFICATION__HandleBufWritePost);
Ok(())
}
fn languageClient_handleBufDelete(&self, params: &Option<Params>) -> Result<()> {
info!("Begin {}", NOTIFICATION__HandleBufWritePost);
let (filename,): (String,) = self.gather_args(&[VimVar::Filename], params)?;
self.update(|state| {
state.text_documents.retain(|f, _| f != &filename);
state.diagnostics.retain(|f, _| f != &filename);
state.line_diagnostics.retain(|fl, _| fl.0 != filename);
state.signs.retain(|f, _| f != &filename);
Ok(())
})?;
self.textDocument_didClose(params)?;
info!("End {}", NOTIFICATION__HandleBufWritePost);
Ok(())
}
/// Notification handler for Vim's CursorMoved: echo the diagnostic message
/// (if any) for the line the cursor landed on.
fn languageClient_handleCursorMoved(&self, params: &Option<Params>) -> Result<()> {
info!("Begin {}", NOTIFICATION__HandleCursorMoved);
let (buftype, filename, line): (String, String, u64) =
self.gather_args(&[VimVar::Buftype, VimVar::Filename, VimVar::Line], params)?;
// Skip special buffers, and debounce: do nothing if the cursor is still
// on the same line as last time.
if !buftype.is_empty() || line == self.get(|state| Ok(state.last_cursor_line))? {
return Ok(());
}
self.update(|state| {
state.last_cursor_line = line;
Ok(())
})?;
// No diagnostic on this line => empty message (clears the echo area
// via the comparison below).
let message = self.get(|state| {
state
.line_diagnostics
.get(&(filename.clone(), line))
.cloned()
.ok_or_else(|| {
format_err!(
"Line diagnostic message not found! filename: {}, line: {}",
filename,
line
)
})
}).unwrap_or_default();
// Avoid re-echoing the identical message on every cursor move.
if message == self.get(|state| Ok(state.last_line_diagnostic.clone()))? {
return Ok(());
}
self.update(|state| {
state.last_line_diagnostic = message.clone();
Ok(())
})?;
// Truncate to the command line width to avoid hit-enter prompts.
self.echo_ellipsis(&message)?;
info!("End {}", NOTIFICATION__HandleCursorMoved);
Ok(())
}
/// Send the LSP `initialize` request for the buffer's language: determine
/// the project root, load optional project settings, advertise client
/// capabilities, and cache the server's reported capabilities.
fn initialize(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::Initialize::METHOD);
let (languageId, filename): (String, String) =
self.gather_args(&[VimVar::LanguageId, VimVar::Filename], params)?;
let (rootPath, has_snippet_support): (Option<String>, u64) = self.gather_args(
&[
("rootPath", "v:null"),
("hasSnippetSupport", "s:hasSnippetSupport()"),
],
params,
)?;
// Root resolution: explicit rootPath wins; otherwise walk up from the
// file looking for the configured root markers.
let root = match rootPath {
Some(r) => r,
_ => {
let rootMarkers = self.get(|state| Ok(state.rootMarkers.clone()))?;
get_rootPath(Path::new(&filename), &languageId, &rootMarkers)?
.to_str()
.ok_or_else(|| err_msg("Failed to convert &Path to &str"))?
.to_owned()
}
};
info!("Project root: {}", root);
let has_snippet_support = has_snippet_support > 0;
self.update(|state| Ok(state.roots.insert(languageId.clone(), root.clone())))?;
// Best-effort read of the project settings file (settingsPath relative
// to the root); any failure (disabled, missing, unparsable) falls back
// to an empty object.
let settings = || -> Result<Value> {
if !self.get(|state| Ok(state.loadSettings))? {
return Ok(json!({}));
}
let mut f = File::open(Path::new(&root).join(self.get(|state| Ok(state.settingsPath.clone()))?))?;
let mut buffer = String::new();
f.read_to_string(&mut buffer)?;
Ok(serde_json::from_str(&buffer)?)
}()
.unwrap_or_else(|_| json!({}));
debug!("Project settings: {}", serde_json::to_string(&settings)?);
// Missing key yields Value::Null, which serializes as JSON null.
let initialization_options = Some(settings["initializationOptions"].clone());
debug!(
"Project settings.initializationOptions: {}",
serde_json::to_string(&initialization_options)?
);
let result = self.call(
Some(&languageId),
lsp::request::Initialize::METHOD,
InitializeParams {
process_id: Some(unsafe { libc::getpid() } as u64),
root_path: Some(root.clone()),
root_uri: Some(root.to_url()?),
initialization_options,
// Advertise only completion-item snippet support; everything
// else is left at server defaults (None).
capabilities: ClientCapabilities {
workspace: None,
text_document: Some(TextDocumentClientCapabilities {
synchronization: None,
completion: Some(CompletionCapability {
dynamic_registration: None,
completion_item: Some(CompletionItemCapability {
snippet_support: Some(has_snippet_support),
commit_characters_support: None,
documentation_format: None,
}),
}),
hover: None,
signature_help: None,
references: None,
document_highlight: None,
document_symbol: None,
formatting: None,
range_formatting: None,
on_type_formatting: None,
definition: None,
code_action: None,
code_lens: None,
document_link: None,
rename: None,
}),
experimental: None,
},
trace: TraceOption::default(),
},
)?;
// Cache the raw InitializeResult per language for later capability checks.
self.update(|state| {
state
.capabilities
.insert(languageId.clone(), result.clone());
Ok(())
})?;
info!("End {}", lsp::request::Initialize::METHOD);
// NCM registration is optional; failure is reported but not fatal.
if let Err(e) = self.registerCMSource(&languageId, &result) {
let message = "LanguageClient: failed to register as NCM source!";
debug!("{}: {:?}", message, e);
self.echoerr(message)?;
}
Ok(result)
}
/// Register this client as a completion source with NCM
/// (nvim-completion-manager), if NCM is installed and the server offers
/// completion.
fn registerCMSource(&self, languageId: &str, result: &Value) -> Result<()> {
info!("Begin register NCM source");
// Detect NCM by the presence of its g:cm_matcher global.
let exists_CMRegister: u64 = self.eval("exists('g:cm_matcher')")?;
if exists_CMRegister == 0 {
return Ok(());
}
let result: InitializeResult = serde_json::from_value(result.clone())?;
// Nothing to register if the server does not provide completion.
if result.capabilities.completion_provider.is_none() {
return Ok(());
}
// Escape each trigger character for use in NCM's refresh regexes.
let trigger_patterns = result
.capabilities
.completion_provider
.map(|opt| {
let strings: Vec<_> = opt.trigger_characters
.iter()
.map(|c| regex::escape(c))
.collect();
strings
})
.unwrap_or_default();
self.notify(
None,
"cm#register_source",
json!([{
"name": format!("LanguageClient_{}", languageId),
"priority": 9,
"scopes": [languageId],
"cm_refresh_patterns": trigger_patterns,
"abbreviation": "LC",
"cm_refresh": NOTIFICATION__NCMRefresh,
}]),
)?;
info!("End register NCM source");
Ok(())
}
/// Fetch the text of a (0-based) line, preferring the live buffer content
/// and falling back to reading the file from disk when the buffer is not
/// loaded. The returned text is stripped of surrounding whitespace.
fn get_line<P: AsRef<Path>>(&self, path: P, line: u64) -> Result<String> {
// getbufline is 1-based, hence line + 1.
let value = self.call(
None,
"getbufline",
json!([path.as_ref().to_str().unwrap_or_default(), line + 1]),
)?;
let mut texts: Vec<String> = serde_json::from_value(value)?;
let mut text = texts.pop().unwrap_or_default();
// Empty result means the buffer isn't loaded (or the line is blank);
// fall back to the file on disk.
// NOTE(review): a genuinely blank buffer line also triggers the disk
// read — harmless, but redundant I/O.
if text.is_empty() {
let reader = BufReader::new(File::open(path)?);
text = reader
.lines()
// Double `?`: nth() gives Option<io::Result<String>>.
.nth(line.to_usize()?)
.ok_or_else(|| format_err!("Failed to get line! line: {}", line))??;
}
Ok(text.strip())
}
/// NCM refresh callback: run an LSP completion at the NCM-reported cursor
/// position and feed the matches back via cm#complete.
fn NCM_refresh(&self, params: &Option<Params>) -> Result<()> {
info!("Begin {}", NOTIFICATION__NCMRefresh);
// NCM sends positional params: [info, context].
let params = match *params {
None | Some(Params::None) => bail!("Empty params!"),
Some(Params::Map(_)) => bail!("Expecting array. Got dict."),
Some(Params::Array(ref arr)) => Value::Array(arr.clone()),
};
let (info, ctx): (NCMInfo, NCMContext) = serde_json::from_value(params)?;
// Nothing typed yet => nothing to complete.
if ctx.typed.is_empty() {
return Ok(());
}
// NCM positions are 1-based; LSP positions are 0-based.
let result = self.textDocument_completion(&Some(json!({
"line": ctx.lnum - 1,
"character": ctx.col - 1,
}).to_params()?))?;
let result: Option<CompletionResponse> = serde_json::from_value(result)?;
let result = result.unwrap_or_else(|| CompletionResponse::Array(vec![]));
// A List response may be marked incomplete, telling NCM to re-query.
let is_incomplete = match result {
CompletionResponse::Array(_) => false,
CompletionResponse::List(ref list) => list.is_incomplete,
};
let matches: Vec<VimCompleteItem> = match result {
CompletionResponse::Array(arr) => arr,
CompletionResponse::List(list) => list.items,
}.into_iter()
.map(|lspitem| lspitem.into())
.collect();
self.notify(
None,
"cm#complete",
json!([info.name, ctx, ctx.startcol, matches, is_incomplete]),
)?;
info!("End {}", NOTIFICATION__NCMRefresh);
Ok(())
}
/// RPC handler backing the omnifunc: run an LSP completion and convert the
/// response into Vim complete-items.
fn languageClient_omniComplete(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__OmniComplete);
    let result = self.textDocument_completion(params)?;
    let response: Option<CompletionResponse> = serde_json::from_value(result)?;
    // Both response shapes (bare array / CompletionList) reduce to a Vec
    // of items; a null response is treated as no completions.
    let items = match response.unwrap_or_else(|| CompletionResponse::Array(vec![])) {
        CompletionResponse::Array(arr) => arr,
        CompletionResponse::List(list) => list.items,
    };
    let matches: Vec<VimCompleteItem> = items.into_iter().map(Into::into).collect();
    info!("End {}", REQUEST__OmniComplete);
    Ok(serde_json::to_value(matches)?)
}
/// Request textDocument/references for the cursor position; when `handle`
/// is set, also present the resulting locations to the user.
fn textDocument_references(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::References::METHOD);
let (buftype, languageId, filename, line, character, handle): (String, String, String, u64, u64, bool) =
self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Line,
VimVar::Character,
VimVar::Handle,
],
params,
)?;
// Skip special buffers and buffers without a filetype.
if !buftype.is_empty() || languageId.is_empty() {
return Ok(Value::Null);
}
let result = self.call(
Some(&languageId),
lsp::request::References::METHOD,
ReferenceParams {
text_document: TextDocumentIdentifier {
uri: filename.to_url()?,
},
position: Position { line, character },
// Also include the symbol's declaration among the references.
context: ReferenceContext {
include_declaration: true,
},
},
)?;
// `handle == false` means the caller wants the raw result only.
if !handle {
return Ok(result);
}
let locations: Vec<Location> = serde_json::from_value(result.clone())?;
self.display_locations(&locations, &languageId)?;
info!("End {}", lsp::request::References::METHOD);
Ok(result)
}
/// Request whole-document formatting; when `handle` is set, apply the
/// returned edits to the buffer as a WorkspaceEdit.
fn textDocument_formatting(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::Formatting::METHOD);
let (buftype, languageId, filename, handle): (String, String, String, bool) = self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Handle,
],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(Value::Null);
}
// Formatting options mirror the buffer's 'tabstop' and 'expandtab'.
let (tab_size, insert_spaces): (u64, u64) = self.eval(&["&tabstop", "&expandtab"][..])?;
let insert_spaces = insert_spaces == 1;
let result = self.call(
Some(&languageId),
lsp::request::Formatting::METHOD,
DocumentFormattingParams {
text_document: TextDocumentIdentifier {
uri: filename.to_url()?,
},
options: FormattingOptions {
tab_size,
insert_spaces,
properties: HashMap::new(),
},
},
)?;
if !handle {
return Ok(result);
}
// Null result deserializes to None; treat as "no edits".
let text_edits: Option<Vec<TextEdit>> = serde_json::from_value(result.clone())?;
let text_edits = text_edits.unwrap_or_default();
// Wrap the edits in a single-file WorkspaceEdit so the shared
// apply_WorkspaceEdit path (incl. cursor restore) handles them.
let edit = lsp::WorkspaceEdit {
changes: Some(hashmap!{filename.to_url()? => text_edits}),
document_changes: None,
};
self.apply_WorkspaceEdit(&edit, params)?;
info!("End {}", lsp::request::Formatting::METHOD);
Ok(result)
}
/// Request range formatting for the operator/linewise range described by
/// v:lnum/v:count (i.e. when invoked via 'formatexpr').
fn textDocument_rangeFormatting(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::RangeFormatting::METHOD);
let (buftype, languageId, filename, handle): (String, String, String, bool) = self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Handle,
],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(Value::Null);
}
// Derive the LSP range from Vim's formatexpr variables:
// v:lnum is the (1-based) first line, v:count the number of lines.
let (tab_size, insert_spaces, start_line, end_line, end_character): (u64, u64, u64, u64, u64) = self.eval(
&[
"&tabstop",
"&expandtab",
"v:lnum - 1",
"v:lnum - 1 + v:count",
"len(getline(v:lnum + v:count)) - 1",
][..],
)?;
let insert_spaces = insert_spaces == 1;
let result = self.call(
Some(&languageId),
lsp::request::RangeFormatting::METHOD,
DocumentRangeFormattingParams {
text_document: TextDocumentIdentifier {
uri: filename.to_url()?,
},
options: FormattingOptions {
tab_size,
insert_spaces,
properties: HashMap::new(),
},
range: Range {
start: Position {
line: start_line,
character: 0,
},
end: Position {
line: end_line,
character: end_character,
},
},
},
)?;
if !handle {
return Ok(result);
}
let text_edits: Option<Vec<TextEdit>> = serde_json::from_value(result.clone())?;
let text_edits = text_edits.unwrap_or_default();
// Apply via the shared WorkspaceEdit path (same as full formatting).
let edit = lsp::WorkspaceEdit {
changes: Some(hashmap!{filename.to_url()? => text_edits}),
document_changes: None,
};
self.apply_WorkspaceEdit(&edit, params)?;
info!("End {}", lsp::request::RangeFormatting::METHOD);
Ok(result)
}
/// Request completionItem/resolve for a completion item. The resolved
/// result is not yet integrated anywhere; when `handle` is set it is only
/// surfaced as a warning.
fn completionItem_resolve(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::ResolveCompletionItem::METHOD);
    let (buftype, languageId, handle): (String, String, bool) = self.gather_args(
        &[VimVar::Buftype, VimVar::LanguageId, VimVar::Handle],
        params,
    )?;
    // Skip special buffers and buffers without a filetype.
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    let (completion_item,): (CompletionItem,) = self.gather_args(&["completionItem"], params)?;
    let result = self.call(
        Some(&languageId),
        lsp::request::ResolveCompletionItem::METHOD,
        completion_item,
    )?;
    if !handle {
        return Ok(result);
    }
    // TODO: proper integration.
    // (Fixed typo: "comletionItem" -> "completionItem".)
    let msg = format!("completionItem/resolve result not handled: {:?}", result);
    warn!("{}", msg);
    self.echowarn(&msg)?;
    info!("End {}", lsp::request::ResolveCompletionItem::METHOD);
    Ok(Value::Null)
}
/// Send textDocument/didOpen for the current buffer, cache its
/// TextDocumentItem, and install the omnifunc.
fn textDocument_didOpen(&self, params: &Option<Params>) -> Result<()> {
info!("Begin {}", lsp::notification::DidOpenTextDocument::METHOD);
let (buftype, languageId, filename, text): (String, String, String, Vec<String>) = self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Text,
],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(());
}
// Version starts at 0 and is bumped by each didChange.
let text_document = TextDocumentItem {
uri: filename.to_url()?,
language_id: languageId.clone(),
version: 0,
text: text.join("\n"),
};
// Cache the document so later didChange calls can diff/version it.
self.update(|state| {
Ok(state
.text_documents
.insert(filename.clone(), text_document.clone()))
})?;
self.notify(
Some(&languageId),
lsp::notification::DidOpenTextDocument::METHOD,
DidOpenTextDocumentParams { text_document },
)?;
self.command("setlocal omnifunc=LanguageClient#complete")?;
info!("End {}", lsp::notification::DidOpenTextDocument::METHOD);
Ok(())
}
/// Send textDocument/didChange with full-document sync: skips when the
/// text is unchanged, bumps the cached document version, and ships the
/// whole buffer as a single content change.
fn textDocument_didChange(&self, params: &Option<Params>) -> Result<()> {
info!("Begin {}", lsp::notification::DidChangeTextDocument::METHOD);
let (buftype, languageId, filename, text): (String, String, String, Vec<String>) = self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Text,
],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(());
}
// didChange before didOpen would violate the protocol; recover by
// opening the document instead.
if !self.get(|state| Ok(state.text_documents.contains_key(&filename)))? {
warn!("Not opened yet. Switching to didOpen.");
return self.textDocument_didOpen(params);
}
let text = text.join("\n");
let text_state = self.get(|state| {
state
.text_documents
.get(&filename)
.ok_or_else(|| format_err!("TextDocumentItem not found! filename: {}", filename))
.map(|doc| doc.text.clone())
}).unwrap_or_default();
// Full-text comparison against the cached copy to suppress no-op
// notifications.
if text == text_state {
info!("Texts equal. Skipping didChange.");
return Ok(());
}
// Bump the version and store the new text atomically under the lock.
let version = self.update(|state| {
let document = state
.text_documents
.get_mut(&filename)
.ok_or_else(|| format_err!("Failed to get TextDocumentItem! filename: {}", filename))?;
let version = document.version + 1;
document.version = version;
document.text = text.clone();
Ok(version)
})?;
self.notify(
Some(&languageId),
lsp::notification::DidChangeTextDocument::METHOD,
DidChangeTextDocumentParams {
text_document: VersionedTextDocumentIdentifier {
uri: filename.to_url()?,
version: Some(version),
},
// range: None => full document replacement (sync kind Full).
content_changes: vec![
TextDocumentContentChangeEvent {
range: None,
range_length: None,
text,
},
],
},
)?;
info!("End {}", lsp::notification::DidChangeTextDocument::METHOD);
Ok(())
}
/// Send textDocument/didSave for the current buffer.
fn textDocument_didSave(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::DidSaveTextDocument::METHOD);
    let (buftype, languageId, filename): (String, String, String) = self.gather_args(
        &[VimVar::Buftype, VimVar::LanguageId, VimVar::Filename],
        params,
    )?;
    // Skip special buffers and buffers without a filetype.
    if !(buftype.is_empty() && !languageId.is_empty()) {
        return Ok(());
    }
    let text_document = TextDocumentIdentifier {
        uri: filename.to_url()?,
    };
    self.notify(
        Some(&languageId),
        lsp::notification::DidSaveTextDocument::METHOD,
        DidSaveTextDocumentParams { text_document },
    )?;
    info!("End {}", lsp::notification::DidSaveTextDocument::METHOD);
    Ok(())
}
/// Send textDocument/didClose for the current buffer.
fn textDocument_didClose(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::DidCloseTextDocument::METHOD);
    let (buftype, languageId, filename): (String, String, String) = self.gather_args(
        &[VimVar::Buftype, VimVar::LanguageId, VimVar::Filename],
        params,
    )?;
    // Skip special buffers and buffers without a filetype.
    if !(buftype.is_empty() && !languageId.is_empty()) {
        return Ok(());
    }
    let text_document = TextDocumentIdentifier {
        uri: filename.to_url()?,
    };
    self.notify(
        Some(&languageId),
        lsp::notification::DidCloseTextDocument::METHOD,
        DidCloseTextDocumentParams { text_document },
    )?;
    info!("End {}", lsp::notification::DidCloseTextDocument::METHOD);
    Ok(())
}
/// Handle server-pushed textDocument/publishDiagnostics: cache the
/// diagnostics per (canonicalized) file and, if the file is the one
/// currently displayed, render them immediately.
fn textDocument_publishDiagnostics(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::PublishDiagnostics::METHOD);
    let params: PublishDiagnosticsParams = serde_json::from_value(params.clone().to_value())?;
    if !self.get(|state| Ok(state.diagnosticsEnable))? {
        return Ok(());
    }
    let mut filename = params
        .uri
        .filepath()?
        .to_str()
        .ok_or_else(|| err_msg("Failed to convert PathBuf to str"))?
        .to_owned();
    // Workaround bug: remove first '/' in case of '/C:/blabla'.
    if filename.chars().nth(0) == Some('/') && filename.chars().nth(2) == Some(':') {
        filename.remove(0);
    }
    // Unify name to avoid mismatch due to case insensitivity.
    let filename = filename.canonicalize();
    self.update(|state| {
        state
            .diagnostics
            .insert(filename.clone(), params.diagnostics.clone());
        Ok(())
    })?;
    info!("End {}", lsp::notification::PublishDiagnostics::METHOD);
    // Only render when the diagnostics belong to the current buffer.
    let current_filename: String = self.eval(VimVar::Filename)?;
    if filename != current_filename.canonicalize() {
        return Ok(());
    }
    // (Repaired mojibake: this line had been corrupted into
    // `¤t_filename`/`¶ms` by HTML-entity decoding.)
    self.display_diagnostics(&current_filename, &params.diagnostics)?;
    self.languageClient_handleCursorMoved(&None)?;
    // Fire the user autocmd only if a handler is registered for it.
    if self.eval::<_, u64>("exists('#User#LanguageClientDiagnosticsChanged')")? == 1 {
        self.command("doautocmd User LanguageClientDiagnosticsChanged")?;
    }
    Ok(())
}
/// Handle server-pushed window/logMessage: echo messages at or above the
/// configured severity threshold.
fn window_logMessage(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::LogMessage::METHOD);
    let params: LogMessageParams = serde_json::from_value(params.clone().to_value())?;
    let threshold = self.get(|state| state.windowLogMessageLevel.to_int())?;
    // In LSP MessageType, larger numeric values mean lower severity, so a
    // value above the threshold is too verbose to show.
    if params.typ.to_int()? > threshold {
        return Ok(());
    }
    self.echomsg(&format!("[{:?}] {}", params.typ, params.message))?;
    info!("End {}", lsp::notification::LogMessage::METHOD);
    Ok(())
}
/// Request textDocument/hover for the cursor position; when `handle` is
/// set, echo the hover contents.
fn textDocument_hover(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::HoverRequest::METHOD);
// NOTE(review): unlike the sibling requests, this one has no
// buftype/languageId guard — presumably intentional, but verify.
let (languageId, filename, line, character, handle): (String, String, u64, u64, bool) = self.gather_args(
&[
VimVar::LanguageId,
VimVar::Filename,
VimVar::Line,
VimVar::Character,
VimVar::Handle,
],
params,
)?;
let result = self.call(
Some(&languageId),
lsp::request::HoverRequest::METHOD,
TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: filename.to_url()?,
},
position: Position { line, character },
},
)?;
if !handle {
return Ok(result);
}
// Null result => no hover info; nothing is echoed.
let hover: Option<Hover> = serde_json::from_value(result.clone())?;
if let Some(hover) = hover {
let message = hover.to_string();
self.echo(&message)?;
}
info!("End {}", lsp::request::HoverRequest::METHOD);
Ok(result)
}
/// Request textDocument/definition for the cursor position; when `handle`
/// is set, jump to a single result or list multiple results.
fn textDocument_definition(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::GotoDefinition::METHOD);
let (buftype, languageId, filename, line, character, goto_cmd, handle): (
String,
String,
String,
u64,
u64,
Option<String>,
bool,
) = self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Line,
VimVar::Character,
VimVar::GotoCmd,
VimVar::Handle,
],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(Value::Null);
}
let result = self.call(
Some(&languageId),
lsp::request::GotoDefinition::METHOD,
TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: filename.to_url()?,
},
position: Position { line, character },
},
)?;
if !handle {
return Ok(result);
}
// Servers may return null, a single Location, or an array of them.
let response: GotoDefinitionResponse = serde_json::from_value(result.clone())?;
match response {
GotoDefinitionResponse::None => {
self.echowarn("Not found!")?;
return Ok(Value::Null);
}
GotoDefinitionResponse::Scalar(loc) => {
// goto_cmd lets the user choose edit/split/tabedit etc.
self.goto_location(
&goto_cmd,
loc.uri.filepath()?.to_str().unwrap_or_default(),
loc.range.start.line,
loc.range.start.character,
)?;
}
GotoDefinitionResponse::Array(arr) => match arr.len() {
0 => self.echowarn("Not found!")?,
// A single-element array behaves like a scalar: jump directly.
1 => {
let loc = arr.get(0).ok_or_else(|| err_msg("Not found!"))?;
self.goto_location(
&goto_cmd,
loc.uri.filepath()?.to_str().unwrap_or_default(),
loc.range.start.line,
loc.range.start.character,
)?;
}
// Multiple candidates: hand off to the selection UI.
_ => self.display_locations(&arr, &languageId)?,
},
};
info!("End {}", lsp::request::GotoDefinition::METHOD);
Ok(result)
}
/// Request textDocument/rename for the symbol under the cursor, prompting
/// for the new name if none was supplied, and apply the resulting
/// WorkspaceEdit when `handle` is set.
fn textDocument_rename(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::Rename::METHOD);
let (buftype, languageId, filename, line, character, cword, new_name, handle): (
String,
String,
String,
u64,
u64,
String,
Option<String>,
bool,
) = self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Line,
VimVar::Character,
VimVar::Cword,
VimVar::NewName,
VimVar::Handle,
],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(Value::Null);
}
// No name given: prompt interactively, pre-filled with the word under
// the cursor.
let mut new_name = new_name.unwrap_or_default();
if new_name.is_empty() {
let value = self.call(None, "s:getInput", ["Rename to: ".to_owned(), cword])?;
new_name = serde_json::from_value(value)?;
}
// Empty answer (prompt cancelled) aborts the rename.
if new_name.is_empty() {
return Ok(Value::Null);
}
let result = self.call(
Some(&languageId),
lsp::request::Rename::METHOD,
RenameParams {
text_document: TextDocumentIdentifier {
uri: filename.to_url()?,
},
position: Position { line, character },
new_name,
},
)?;
// Null result means the server could not perform the rename.
if !handle || result == Value::Null {
return Ok(result);
}
let edit: WorkspaceEdit = serde_json::from_value(result.clone())?;
self.apply_WorkspaceEdit(&edit, params)?;
info!("End {}", lsp::request::Rename::METHOD);
Ok(result)
}
/// Handle `textDocument/documentSymbol`: fetch all symbols of the current
/// document and present them via FZF or the location list.
///
/// Returns the server's raw response; `Value::Null` for special buffers or
/// buffers without a language id.
fn textDocument_documentSymbol(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::DocumentSymbol::METHOD);
    let (buftype, languageId, filename, handle): (String, String, String, bool) = self.gather_args(
        &[
            VimVar::Buftype,
            VimVar::LanguageId,
            VimVar::Filename,
            VimVar::Handle,
        ],
        params,
    )?;
    // Guard against special buffers AND a missing language id, matching the
    // other textDocument_* handlers (previously only `buftype` was checked,
    // letting requests through for buffers with no registered server).
    if !buftype.is_empty() || languageId.is_empty() {
        return Ok(Value::Null);
    }
    let result = self.call(
        Some(&languageId),
        lsp::request::DocumentSymbol::METHOD,
        DocumentSymbolParams {
            text_document: TextDocumentIdentifier {
                uri: filename.to_url()?,
            },
        },
    )?;
    if !handle {
        return Ok(result);
    }
    let symbols: Vec<SymbolInformation> = serde_json::from_value(result.clone())?;
    match self.get(|state| Ok(state.selectionUI.clone()))? {
        SelectionUI::FZF => {
            // One "line:col:\tname" entry per symbol; LSP positions are
            // 0-based, Vim's are 1-based, hence the +1.
            let source: Vec<_> = symbols
                .iter()
                .map(|sym| {
                    let start = sym.location.range.start;
                    format!("{}:{}:\t{}", start.line + 1, start.character + 1, sym.name)
                })
                .collect();
            self.notify(
                None,
                "s:FZF",
                json!([source, format!("s:{}", NOTIFICATION__FZFSinkLocation)]),
            )?;
        }
        SelectionUI::LocationList => {
            let loclist: Vec<_> = symbols
                .iter()
                .map(|sym| {
                    let start = sym.location.range.start;
                    json!({
                        "filename": filename,
                        "lnum": start.line + 1,
                        "col": start.character + 1,
                        "text": sym.name,
                    })
                })
                .collect();
            self.notify(None, "setloclist", json!([0, loclist]))?;
            self.echo("Document symbols populated to location list.")?;
        }
    }
    info!("End {}", lsp::request::DocumentSymbol::METHOD);
    Ok(result)
}
/// Handle `workspace/symbol`: query the server for symbols matching `query`
/// across the workspace and present them via FZF or the location list.
fn workspace_symbol(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::WorkspaceSymbol::METHOD);
let (buftype, languageId, handle): (String, String, bool) = self.gather_args(
&[VimVar::Buftype, VimVar::LanguageId, VimVar::Handle],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(Value::Null);
}
// "query" defaults to the empty string (i.e. match everything).
let (query,): (String,) = self.gather_args(&[("query", "")], params)?;
let result = self.call(
Some(&languageId),
lsp::request::WorkspaceSymbol::METHOD,
WorkspaceSymbolParams { query },
)?;
if !handle {
return Ok(result);
}
let symbols: Vec<SymbolInformation> = serde_json::from_value(result.clone())?;
match self.get(|state| Ok(state.selectionUI.clone()))? {
SelectionUI::FZF => {
// Paths are shown relative to the current working directory when
// possible; positions are converted to Vim's 1-based convention.
let cwd: String = self.eval("getcwd()")?;
let source: Result<Vec<_>> = symbols
.iter()
.map(|sym| {
let filename = sym.location.uri.filepath()?;
let relpath = diff_paths(&filename, Path::new(&cwd)).unwrap_or(filename);
let start = sym.location.range.start;
Ok(format!(
"{}:{}:{}:\t{}",
relpath.to_str().unwrap_or_default(),
start.line + 1,
start.character + 1,
sym.name
))
})
.collect();
let source = source?;
self.notify(
None,
"s:FZF",
json!([source, format!("s:{}", NOTIFICATION__FZFSinkLocation)]),
)?;
}
SelectionUI::LocationList => {
let loclist: Vec<_> = symbols
.iter()
.map(|sym| {
let start = sym.location.range.start;
json!({
"filename": sym.location.uri.to_file_path(),
"lnum": start.line + 1,
"col": start.character + 1,
"text": sym.name,
})
})
.collect();
self.notify(None, "setloclist", json!([0, loclist]))?;
self.echo("Workspace symbols populated to location list.")?;
}
}
info!("End {}", lsp::request::WorkspaceSymbol::METHOD);
Ok(result)
}
/// FZF sink for location selections: parse the chosen
/// `[path:]line:col:\ttext` entry and jump to it.
///
/// Bug fix: the empty-selection check previously called `err_msg(...)` and
/// discarded the resulting error value (a no-op); it now actually bails.
fn languageClient_FZFSinkLocation(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__FZFSinkLocation);
    let params = match *params {
        None | Some(Params::None) | Some(Params::Map(_)) => {
            bail!("Expecting array params!");
        }
        Some(Params::Array(ref arr)) => Value::Array(arr.clone()),
    };
    let lines: Vec<String> = serde_json::from_value(params)?;
    if lines.is_empty() {
        // Previously `err_msg("No selection!");` — constructed but never
        // returned, so empty selections fell through to the lookup below.
        bail!("No selection!");
    }
    // Reverse so `pop()` yields fields front-to-back: path?, line, col.
    let mut tokens: Vec<&str> = lines
        .get(0)
        .ok_or_else(|| format_err!("Failed to get line! lines: {:?}", lines))?
        .split(':')
        .collect();
    tokens.reverse();
    // More than three fields means the entry carries its own (root-relative)
    // path; otherwise the location is in the current file.
    let filename: String = if tokens.len() > 3 {
        let relpath = tokens
            .pop()
            .ok_or_else(|| format_err!("Failed to get file path! tokens: {:?}", tokens))?
            .to_owned();
        let languageId: String = self.eval(VimVar::LanguageId)?;
        let root = self.get(|state| {
            state
                .roots
                .get(&languageId)
                .cloned()
                .ok_or_else(|| format_err!("Failed to get root! languageId: {}", languageId))
        })?;
        Path::new(&root)
            .join(relpath)
            .to_str()
            .ok_or_else(|| err_msg("Failed to convert PathBuf to str"))?
            .to_owned()
    } else {
        self.eval(VimVar::Filename)?
    };
    // Displayed positions are 1-based; goto_location expects 0-based.
    let line = tokens
        .pop()
        .ok_or_else(|| format_err!("Failed to get line! tokens: {:?}", tokens))?
        .to_int()? - 1;
    let character = tokens
        .pop()
        .ok_or_else(|| format_err!("Failed to get character! tokens: {:?}", tokens))?
        .to_int()? - 1;
    self.goto_location(&None, &filename, line, character)?;
    info!("End {}", NOTIFICATION__FZFSinkLocation);
    Ok(())
}
/// FZF sink for code-action selections: parse the chosen "command: title"
/// entry, find the matching stashed code action, and execute it either on
/// the client or via `workspace/executeCommand`.
fn languageClient_FZFSinkCommand(&self, params: &Option<Params>) -> Result<()> {
info!("Begin {}", NOTIFICATION__FZFSinkCommand);
let (selection,): (String,) = self.gather_args(&["selection"], params)?;
// Entries were formatted as "{command}: {title}" by textDocument_codeAction.
let tokens: Vec<&str> = selection.split(": ").collect();
let command = tokens
.get(0)
.cloned()
.ok_or_else(|| format_err!("Failed to get command! tokens: {:?}", tokens))?;
let title = tokens
.get(1)
.cloned()
.ok_or_else(|| format_err!("Failed to get title! tokens: {:?}", tokens))?;
// Look up the full Command stashed when the actions were listed.
let entry = self.get(|state| {
state
.stashed_codeAction_commands
.iter()
.find(|e| e.command == command && e.title == title)
.cloned()
.ok_or_else(|| err_msg("No stashed command found!"))
})?;
// Some commands are handled client-side and never reach the server.
if self.try_handle_command_by_client(&entry)? {
return Ok(());
}
self.workspace_executeCommand(&Some(json!({
"command": entry.command,
"arguments": entry.arguments,
}).to_params()?))?;
// Clear the stash so stale actions can't be re-triggered.
self.update(|state| {
state.stashed_codeAction_commands = vec![];
Ok(())
})?;
info!("End {}", NOTIFICATION__FZFSinkCommand);
Ok(())
}
/// Try to execute `cmd` on the client side instead of forwarding it to the
/// language server. Returns `Ok(true)` iff the command is client-handled.
fn try_handle_command_by_client(&self, cmd: &Command) -> Result<bool> {
    if !CommandsClient.contains(&cmd.command.as_str()) {
        // Not a client-side command; the caller should forward it.
        return Ok(false);
    }
    match cmd.command.as_str() {
        "java.apply.workspaceEdit" => {
            // Each argument is a serialized WorkspaceEdit; apply them in order.
            if let Some(ref args) = cmd.arguments {
                for value in args {
                    let edit: WorkspaceEdit = serde_json::from_value(value.clone())?;
                    self.apply_WorkspaceEdit(&edit, &None)?;
                }
            }
            Ok(true)
        }
        _ => bail!("Not implemented: {}", cmd.command),
    }
}
/// Handle `textDocument/codeAction`: ask the server for code actions at the
/// cursor position, stash the returned commands, and offer them via FZF.
///
/// Requires at least one diagnostic covering the cursor; errors otherwise.
fn textDocument_codeAction(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::CodeActionRequest::METHOD);
let (buftype, languageId, filename, line, character, handle): (String, String, String, u64, u64, bool) =
self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Line,
VimVar::Character,
VimVar::Handle,
],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(Value::Null);
}
// Collect the stored diagnostics whose range contains the cursor.
// NOTE(review): the containment check compares line and character
// independently, not lexicographically — multi-line ranges whose start
// character is right of the cursor are excluded; confirm intended.
let diagnostics: Vec<_> = self.get(|state| {
Ok(state
.diagnostics
.get(&filename)
.ok_or_else(|| err_msg("No diagnostics found!"))?
.iter()
.filter(|dn| {
let start = dn.range.start;
let end = dn.range.end;
start.line <= line && start.character <= character && end.line >= line && end.character >= character
})
.cloned()
.collect())
})?;
let result = self.call(
Some(&languageId),
lsp::request::CodeActionRequest::METHOD,
CodeActionParams {
text_document: TextDocumentIdentifier {
uri: filename.to_url()?,
},
//TODO: is this correct?
range: diagnostics
.get(0)
.ok_or_else(|| err_msg("No diagnostics found!"))?
.range,
context: CodeActionContext { diagnostics },
},
)?;
if !handle {
return Ok(result);
}
// Stash the commands so the FZF sink can find them by "command: title".
let commands: Vec<Command> = serde_json::from_value(result.clone())?;
let source: Vec<_> = commands
.iter()
.map(|cmd| format!("{}: {}", cmd.command, cmd.title))
.collect();
self.update(|state| {
state.stashed_codeAction_commands = commands;
Ok(())
})?;
self.notify(
None,
"s:FZF",
json!([source, format!("s:{}", NOTIFICATION__FZFSinkCommand)]),
)?;
info!("End {}", lsp::request::CodeActionRequest::METHOD);
Ok(result)
}
/// Handle `textDocument/completion`: forward the cursor position to the
/// language server and hand back its raw completion response.
fn textDocument_completion(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::Completion::METHOD);
    let (buf_type, language_id, file_name, line, character, handle): (String, String, String, u64, u64, bool) =
        self.gather_args(
            &[
                VimVar::Buftype,
                VimVar::LanguageId,
                VimVar::Filename,
                VimVar::Line,
                VimVar::Character,
                VimVar::Handle,
            ],
            params,
        )?;
    // No completion in special buffers or buffers without a server.
    if !buf_type.is_empty() || language_id.is_empty() {
        return Ok(Value::Null);
    }
    let request = TextDocumentPositionParams {
        text_document: TextDocumentIdentifier {
            uri: file_name.to_url()?,
        },
        position: Position { line, character },
    };
    let result = self.call(Some(&language_id), lsp::request::Completion::METHOD, request)?;
    if !handle {
        return Ok(result);
    }
    info!("End {}", lsp::request::Completion::METHOD);
    Ok(result)
}
/// Handle `textDocument/signatureHelp`: echo the active signature, with the
/// active parameter highlighted when it can be located inside the label.
fn textDocument_signatureHelp(&self, params: &Option<Params>) -> Result<Value> {
info!("Begin {}", lsp::request::SignatureHelpRequest::METHOD);
let (buftype, languageId, filename, line, character, handle): (String, String, String, u64, u64, bool) =
self.gather_args(
&[
VimVar::Buftype,
VimVar::LanguageId,
VimVar::Filename,
VimVar::Line,
VimVar::Character,
VimVar::Handle,
],
params,
)?;
if !buftype.is_empty() || languageId.is_empty() {
return Ok(Value::Null);
}
let result = self.call(
Some(&languageId),
lsp::request::SignatureHelpRequest::METHOD,
TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: filename.to_url()?,
},
position: Position { line, character },
},
)?;
if !handle || result == Value::Null {
return Ok(result);
}
let help: SignatureHelp = serde_json::from_value(result)?;
if help.signatures.is_empty() {
return Ok(Value::Null);
}
// Missing indices default to the first signature / first parameter.
let active_signature = help.signatures
.get(help.active_signature.unwrap_or(0).to_usize()?)
.ok_or_else(|| err_msg("Failed to get active signature"))?;
let active_parameter: Option<&ParameterInformation>;
if let Some(ref parameters) = active_signature.parameters {
active_parameter = parameters.get(help.active_parameter.unwrap_or(0).to_usize()?);
} else {
active_parameter = None;
}
if let Some(active_parameter) = active_parameter {
// Split the signature label around the parameter label; exactly two
// chunks means the parameter occurs once and can be highlighted.
let mut cmd = "echo".to_owned();
let chunks: Vec<&str> = active_signature
.label
.split(&active_parameter.label)
.collect();
if chunks.len() == 2 {
let begin = chunks.get(0).cloned().unwrap_or_default();
let end = chunks.get(1).cloned().unwrap_or_default();
cmd += &format!(
" | echon '{}' | echohl WarningMsg | echon '{}' | echohl None | echon '{}'",
begin, active_parameter.label, end
);
} else {
// Active parameter is not part of signature.
cmd += &format!(" | echo '{}'", active_signature.label);
}
self.command(&cmd)?;
} else {
self.echo(&active_signature.label)?;
}
info!("End {}", lsp::request::SignatureHelpRequest::METHOD);
Ok(Value::Null)
}
/// Handle `workspace/executeCommand`: forward a command with its arguments
/// to the language server and return the server's response verbatim.
fn workspace_executeCommand(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::ExecuteCommand::METHOD);
    let (language_id,): (String,) = self.gather_args(&[VimVar::LanguageId], params)?;
    let (command, arguments): (String, Vec<Value>) =
        self.gather_args(&["command", "arguments"], params)?;
    let request = ExecuteCommandParams { command, arguments };
    let result = self.call(
        Some(&language_id),
        lsp::request::ExecuteCommand::METHOD,
        request,
    )?;
    info!("End {}", lsp::request::ExecuteCommand::METHOD);
    Ok(result)
}
/// Handle the server-initiated `workspace/applyEdit` request: apply the
/// contained WorkspaceEdit to the buffers and acknowledge with applied=true.
fn workspace_applyEdit(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", lsp::request::ApplyWorkspaceEdit::METHOD);
    let request: ApplyWorkspaceEditParams = serde_json::from_value(params.clone().to_value())?;
    self.apply_WorkspaceEdit(&request.edit, &None)?;
    info!("End {}", lsp::request::ApplyWorkspaceEdit::METHOD);
    let response = ApplyWorkspaceEditResponse { applied: true };
    Ok(serde_json::to_value(response)?)
}
/// Handle the RLS-specific `rustDocument/implementations` request: list the
/// implementations of the item under the cursor as locations.
fn rustDocument_implementations(&self, params: &Option<Params>) -> Result<Value> {
    info!("Begin {}", REQUEST__RustImplementations);
    let (buf_type, language_id, file_name, line, character, handle): (String, String, String, u64, u64, bool) =
        self.gather_args(
            &[
                VimVar::Buftype,
                VimVar::LanguageId,
                VimVar::Filename,
                VimVar::Line,
                VimVar::Character,
                VimVar::Handle,
            ],
            params,
        )?;
    if !buf_type.is_empty() || language_id.is_empty() {
        return Ok(Value::Null);
    }
    let request = TextDocumentPositionParams {
        text_document: TextDocumentIdentifier {
            uri: file_name.to_url()?,
        },
        position: Position { line, character },
    };
    let result = self.call(Some(&language_id), REQUEST__RustImplementations, request)?;
    if !handle {
        return Ok(result);
    }
    // Present the returned locations via the configured selection UI.
    let locations: Vec<Location> = serde_json::from_value(result.clone())?;
    self.display_locations(&locations, &language_id)?;
    info!("End {}", REQUEST__RustImplementations);
    Ok(result)
}
/// Send the LSP `exit` notification to the server for the current language
/// and then tear down all client-side state associated with it.
fn exit(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", lsp::notification::Exit::METHOD);
    let (language_id,): (String,) = self.gather_args(&[VimVar::LanguageId], params)?;
    self.notify(Some(&language_id), lsp::notification::Exit::METHOD, Value::Null)?;
    self.cleanup(&language_id)?;
    info!("End {}", lsp::notification::Exit::METHOD);
    Ok(())
}
/// Remove all state for `languageId`'s server: child process id, writer,
/// project root, and any per-file data (documents, diagnostics, signs,
/// line diagnostics) under that root. Also clears signs/highlights in the
/// editor and resets the server-status variables.
fn cleanup(&self, languageId: &str) -> Result<()> {
self.update(|state| {
state.child_ids.remove(languageId);
state.last_cursor_line = 0;
Ok(())
})?;
// Drop per-root state; collect the signs being removed so they can be
// cleared from the editor below.
let signsmap = self.update(|state| {
state.writers.remove(languageId);
let root = state
.roots
.remove(languageId)
.ok_or_else(|| format_err!("No project root found! languageId: {}", languageId))?;
state.text_documents.retain(|f, _| !f.starts_with(&root));
state.diagnostics.retain(|f, _| !f.starts_with(&root));
let mut signsmap = HashMap::new();
// retain() doubles as a drain here: files under the root are moved
// into signsmap and removed from state.signs.
state.signs.retain(|f, s| {
if f.starts_with(&root) {
signsmap.insert(f.clone(), s.clone());
false
} else {
true
}
});
state
.line_diagnostics
.retain(|fl, _| !fl.0.starts_with(&root));
Ok(signsmap)
})?;
// Unplace the collected signs (update to an empty sign list per file).
for (filename, signs) in signsmap {
let (_, cmd) = get_command_update_signs(&signs, &[], &filename);
self.command(&cmd)?;
}
// Clear diagnostic highlights if a highlight source was allocated.
// NOTE(review): only reads state; failure (no source) is deliberately
// ignored rather than propagated.
let hlsource = self.update(|state| {
state
.highlight_source
.ok_or_else(|| err_msg("No highlight source"))
});
if let Ok(hlsource) = hlsource {
self.call(
None,
"nvim_buf_clear_highlight",
json!([0, hlsource, 1, -1]),
)?;
}
// Fire the user autocmd only if someone is listening.
if self.eval::<_, u64>("exists('#User#LanguageClientStopped')")? == 1 {
self.command("doautocmd User LanguageClientStopped")?;
}
self.command(&format!("let {}=0", VIM__ServerStatus))?;
self.command(&format!("let {}=''", VIM__ServerStatusMessage))?;
Ok(())
}
/// Handle the non-standard `language/status` notification by echoing the
/// reported type and message to the user.
fn language_status(&self, params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__LanguageStatus);
    let status: LanguageStatusParams = serde_json::from_value(params.clone().to_value())?;
    self.echomsg(&format!("{} {}", status.typee, status.message))?;
    info!("End {}", NOTIFICATION__LanguageStatus);
    Ok(())
}
/// Handle the RLS build-begin notification: mark the server as busy and
/// publish a status message for the statusline.
fn rust_handleBeginBuild(&self, _params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__RustBeginBuild);
    let cmd = format!(
        "let {}=1 | let {}='Rust: build begin'",
        VIM__ServerStatus, VIM__ServerStatusMessage
    );
    self.command(&cmd)?;
    info!("End {}", NOTIFICATION__RustBeginBuild);
    Ok(())
}
/// Handle the RLS diagnostics-begin notification: mark the server as busy
/// and publish a status message for the statusline.
fn rust_handleDiagnosticsBegin(&self, _params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__RustDiagnosticsBegin);
    let cmd = format!(
        "let {}=1 | let {}='Rust: diagnostics begin'",
        VIM__ServerStatus, VIM__ServerStatusMessage
    );
    self.command(&cmd)?;
    info!("End {}", NOTIFICATION__RustDiagnosticsBegin);
    Ok(())
}
/// Handle the RLS diagnostics-end notification: mark the server as idle and
/// publish a status message for the statusline.
fn rust_handleDiagnosticsEnd(&self, _params: &Option<Params>) -> Result<()> {
    info!("Begin {}", NOTIFICATION__RustDiagnosticsEnd);
    let cmd = format!(
        "let {}=0 | let {}='Rust: diagnostics end'",
        VIM__ServerStatus, VIM__ServerStatusMessage
    );
    self.command(&cmd)?;
    info!("End {}", NOTIFICATION__RustDiagnosticsEnd);
    Ok(())
}
/// Handle cquery's `$cquery/progress` notification: reflect indexing
/// activity in the server-status variables.
fn cquery_handleProgress(&self, params: &Option<Params>) -> Result<()> {
info!("Begin {}", NOTIFICATION__CqueryProgress);
let params: CqueryProgressParams = serde_json::from_value(params.clone().to_value())?;
// Sum all outstanding job counters to decide busy vs idle.
let total = params.indexRequestCount + params.doIdMapCount + params.loadPreviousIndexCount
+ params.onIdMappedCount + params.onIndexedCount;
if total != 0 {
// NOTE(review): the message reports only indexRequestCount, not the
// computed total — confirm whether that is intentional.
self.command(&format!(
"let {}=1 | let {}='cquery: indexing ({} jobs)'",
VIM__ServerStatus, VIM__ServerStatusMessage, params.indexRequestCount
))?;
} else {
self.command(&format!(
"let {}=0 | let {}='cquery: idle'",
VIM__ServerStatus, VIM__ServerStatusMessage
))?;
}
info!("End {}", NOTIFICATION__CqueryProgress);
Ok(())
}
}
|
use super::Token;
use std::char;
/// The set of compiled token recognizers used by the lexer: one `Token`
/// (regex-based matcher) per terminal of the grammar.
pub struct TokenCollection {
pub open_brace: Token,
pub close_brace: Token,
pub open_bracket: Token,
pub close_bracket: Token,
pub comma: Token,
pub colon: Token,
// Identifier: a letter/mark/connector followed by word characters.
pub identifier: Token,
// Single- or double-quoted string literal with escape sequences.
pub stringlit: Token,
// Integer literal with optional 0x/0o/0b/0d radix prefix or sign.
pub integerlit: Token,
// Float literal with mandatory decimal point and optional exponent.
pub floatlit: Token,
}
impl TokenCollection {
/// Compile every token regex. The patterns are static and known-valid, so
/// the `unwrap()`s cannot fail at runtime.
pub fn new() -> Self {
TokenCollection {
open_brace: Token::new(r"\{").unwrap(),
close_brace: Token::new(r"\}").unwrap(),
open_bracket: Token::new(r"\[").unwrap(),
close_bracket: Token::new(r"\]").unwrap(),
comma: Token::new(r",").unwrap(),
colon: Token::new(r":").unwrap(),
identifier: Token::new(r#"(?x)
[
\p{L} # letters
\p{M} # combining marks
\p{Pc} # connector punctuation (e.g. _)
]
\w* # all word characters
"#).unwrap(),
stringlit: Token::new(r#"(?x)
"(?: [^"\n\\] | \\n | \\r | \\t | \\\\ | \\b
| \\f | \\/ | \\" | \\' | \\u[0-9a-fA-F]{4} )*"
| '(?: [^'\n\\] | \\n | \\r | \\t | \\\\ | \\b
| \\f | \\/ | \\" | \\' | \\u[0-9a-fA-F]{4} )*'
"#).unwrap(),
integerlit: Token::new(r#"(?x)
0[xX][0-9a-fA-F_]+
| 0[oO][0-8_]+
| 0[bB][10_]+
| 0[dD][\d_]+
| [+-]?[\d_]+
"#).unwrap(),
floatlit: Token::new(r#"(?x)
[-+]? # optional sign
(?:
\d+\.\d* # numbers <dot> <optional numbers>
| \.\d+ # <dot> numbers
)
(?:
[eE] # begin exponent
[+-]? # sign
\d+ # exponent
)? # optional exponent part
"#).unwrap(),
}
}
}
/// Unescape a quoted string literal (single- or double-quoted).
///
/// The delimiting quote characters are dropped, standard escapes
/// (`\n`, `\r`, `\t`, `\b`, `\f`, `\uXXXX`, `\<any>`) are decoded, and any
/// bare occurrence of the quote character inside the literal is removed.
///
/// # Panics
/// Panics if `inp` is empty.
pub fn parse_string(inp: String) -> String {
    // The first character determines which quote delimits the literal.
    let quote = inp
        .chars()
        .next()
        .expect("parse_string is parsing an empty string");
    let mut out = String::with_capacity(inp.len());
    let mut chars = inp.chars();
    while let Some(c) = chars.next() {
        if c == quote {
            // Opening/closing (and any stray) quote characters are dropped.
            continue;
        }
        if c != '\\' {
            out.push(c);
            continue;
        }
        // Escape sequence: decode the character following the backslash.
        match chars.next() {
            Some('n') => out.push('\n'),
            Some('r') => out.push('\r'),
            Some('t') => out.push('\t'),
            Some('b') => out.push('\x08'),
            Some('f') => out.push('\x0c'),
            Some('u') => {
                // \uXXXX: accumulate four hex digits; non-hex characters in
                // between are skipped (mirrors the original state machine).
                let mut value: u16 = 0;
                let mut digits = 0;
                while digits < 4 {
                    match chars.next() {
                        Some(d) => {
                            if let Some(hex) = d.to_digit(16) {
                                value = value * 16 + hex as u16;
                                digits += 1;
                            }
                        }
                        // Input ended mid-escape: emit nothing further.
                        None => return out,
                    }
                }
                if let Some(decoded) = char::from_u32(u32::from(value)) {
                    out.push(decoded);
                }
            }
            // Any other escaped character stands for itself (\\, \', \", \/).
            Some(other) => out.push(other),
            // Trailing lone backslash is ignored.
            None => {}
        }
    }
    out
}
/// Parse an integer literal previously matched by `TokenCollection::integerlit`.
///
/// Supports `0x`/`0X` hex, `0o`/`0O` octal, `0b`/`0B` binary and `0d`/`0D`
/// decimal prefixes, an optional leading `+`/`-` on plain decimals, and `_`
/// digit separators. The token regex accepts separators (e.g. `3_4_5`) but
/// `i64::from_str_radix` rejects them, so they are stripped here — the
/// previous version panicked on any matched token containing `_`.
///
/// # Panics
/// Panics if the input is not a valid integer literal or overflows `i64`.
pub fn parse_integer(inp: String) -> i64 {
    // Drop `_` separators before handing the digits to from_str_radix.
    let inp: String = inp.chars().filter(|&c| c != '_').collect();
    // All prefixes are ASCII, so byte slicing below is safe.
    let (digits, radix) = if inp.starts_with("0x") || inp.starts_with("0X") {
        (&inp[2..], 16)
    } else if inp.starts_with("0o") || inp.starts_with("0O") {
        (&inp[2..], 8)
    } else if inp.starts_with("0b") || inp.starts_with("0B") {
        (&inp[2..], 2)
    } else if inp.starts_with("0d") || inp.starts_with("0D") {
        (&inp[2..], 10)
    } else if inp.starts_with('+') {
        // from_str_radix historically rejected an explicit leading '+'.
        (&inp[1..], 10)
    } else {
        (&inp[..], 10)
    };
    i64::from_str_radix(digits, radix).unwrap()
}
/// Parse a float literal previously matched by `TokenCollection::floatlit`.
///
/// An explicit leading `+` is stripped before parsing: `str::parse::<f64>`
/// rejected it on older stable/beta Rust releases (the later revision of
/// this module applies exactly this fix).
///
/// # Panics
/// Panics if the input is not a valid float literal.
pub fn parse_float(inp: String) -> f64 {
    if inp.starts_with('+') {
        // '+' is ASCII, so slicing off the first byte is safe.
        inp[1..].parse().unwrap()
    } else {
        inp.parse().unwrap()
    }
}
#[cfg(test)]
mod tests {
use super::*;
// Literal-parser tests: each exercises parse_* with inputs the token
// regexes accept.
#[test]
fn test_parse_string() {
assert_eq!(parse_string("'hello'".to_string()), "hello");
assert_eq!(parse_string(r#""hello""#.to_string()), "hello");
assert_eq!(parse_string("'he\\nllo'".to_string()), "he\nllo");
assert_eq!(parse_string(r#"'he"llo'"#.to_string()), "he\"llo");
assert_eq!(parse_string(r"'\u0058'".to_string()), "\u{0058}");
}
#[test]
fn test_parse_integer() {
assert_eq!(parse_integer("0d5".to_string()), 5);
assert_eq!(parse_integer("0D5474".to_string()), 5474);
assert_eq!(parse_integer("0x4f3".to_string()), 0x4f3);
assert_eq!(parse_integer("0X7Df31".to_string()), 0x7df31);
assert_eq!(parse_integer("0o443".to_string()), 3 + (8*4) + (64*4));
assert_eq!(parse_integer("0O70131".to_string()), 28761);
assert_eq!(parse_integer("0b01101".to_string()), 0b01101);
assert_eq!(parse_integer("0B10010".to_string()), 0b10010);
assert_eq!(parse_integer("32353".to_string()), 32353);
assert_eq!(parse_integer("-1234".to_string()), -1234);
assert_eq!(parse_integer("+4321".to_string()), 4321);
}
#[test]
fn test_parse_float() {
assert_eq!(parse_float("+1.2".to_string()), 1.2);
assert_eq!(parse_float("-1.2".to_string()), -1.2);
assert_eq!(parse_float("3.4".to_string()), 3.4);
assert_eq!(parse_float("3.".to_string()), 3.0);
assert_eq!(parse_float(".2".to_string()), 0.2);
assert_eq!(parse_float("1.2e2".to_string()), 1.2E+2);
assert_eq!(parse_float("1.2e+2".to_string()), 1.2E+2);
assert_eq!(parse_float("1.2e-2".to_string()), 1.2E-2);
assert_eq!(parse_float("1.2E2".to_string()), 1.2E+2);
assert_eq!(parse_float("1.2E+2".to_string()), 1.2E+2);
assert_eq!(parse_float("1.2E-2".to_string()), 1.2E-2);
}
// Token-regex tests: get_match returns the matched prefix length.
#[test]
fn literals_match() {
let tk = TokenCollection::new();
assert_eq!(tk.open_brace.get_match("{"), Some(1));
assert_eq!(tk.open_brace.get_match("}"), None);
assert_eq!(tk.close_brace.get_match("}"), Some(1));
assert_eq!(tk.close_brace.get_match("d"), None);
assert_eq!(tk.open_bracket.get_match("["), Some(1));
assert_eq!(tk.open_bracket.get_match(" "), None);
assert_eq!(tk.close_bracket.get_match("]"), Some(1));
assert_eq!(tk.close_bracket.get_match("["), None);
assert_eq!(tk.comma.get_match(","), Some(1));
assert_eq!(tk.comma.get_match(""), None);
assert_eq!(tk.colon.get_match(":"), Some(1));
assert_eq!(tk.colon.get_match(";"), None);
}
#[test]
fn identifier_matches() {
let tk = TokenCollection::new();
assert_eq!(tk.identifier.get_match("hello"), Some(5));
assert_eq!(tk.identifier.get_match("HELLO"), Some(5));
assert_eq!(tk.identifier.get_match("45fefe"), None);
assert_eq!(tk.identifier.get_match("fefe45"), Some(6));
assert_eq!(tk.identifier.get_match("true"), Some(4));
assert_eq!(tk.identifier.get_match("$"), None);
}
#[test]
fn string_matches() {
let tk = TokenCollection::new();
assert_eq!(tk.stringlit.get_match(""), None);
assert_eq!(tk.stringlit.get_match("hello"), None);
assert_eq!(tk.stringlit.get_match("'hello'"), Some(7));
assert_eq!(tk.stringlit.get_match("{}[],: 'string'"), None);
assert_eq!(tk.stringlit.get_match(r#""hello""#), Some(7));
assert_eq!(tk.stringlit.get_match(r"'hello\n'"), Some(9));
assert_eq!(tk.stringlit.get_match(r"'hello\''"), Some(9));
assert_eq!(tk.stringlit.get_match(r"'
no newline in strings'"), None);
}
#[test]
fn integer_matches() {
let tk = TokenCollection::new();
assert_eq!(tk.integerlit.get_match("345"), Some(3));
assert_eq!(tk.integerlit.get_match("3_4_5"), Some(5));
assert_eq!(tk.integerlit.get_match("0d3_4_5"), Some(7));
assert_eq!(tk.integerlit.get_match("0x45"), Some(4));
assert_eq!(tk.integerlit.get_match("0Xff"), Some(4));
assert_eq!(tk.integerlit.get_match("0xf_f"), Some(5));
assert_eq!(tk.integerlit.get_match("0xgg"), Some(1));
assert_eq!(tk.integerlit.get_match("0o44"), Some(4));
assert_eq!(tk.integerlit.get_match("0O44"), Some(4));
assert_eq!(tk.integerlit.get_match("0o99"), Some(1));
}
#[test]
fn float_matches() {
let tk = TokenCollection::new();
assert_eq!(tk.floatlit.get_match("300."), Some(4));
assert_eq!(tk.floatlit.get_match("30.0"), Some(4));
assert_eq!(tk.floatlit.get_match(".300"), Some(4));
assert_eq!(tk.floatlit.get_match("300"), None);
assert_eq!(tk.floatlit.get_match("3.e5"), Some(4));
assert_eq!(tk.floatlit.get_match("3.e-5"), Some(5));
assert_eq!(tk.floatlit.get_match("3.E-5"), Some(5));
}
}
Fix failing float tests on stable/beta Rust releases
use super::Token;
use std::char;
/// The set of compiled token recognizers used by the lexer: one `Token`
/// (regex-based matcher) per terminal of the grammar.
pub struct TokenCollection {
pub open_brace: Token,
pub close_brace: Token,
pub open_bracket: Token,
pub close_bracket: Token,
pub comma: Token,
pub colon: Token,
// Identifier: a letter/mark/connector followed by word characters.
pub identifier: Token,
// Single- or double-quoted string literal with escape sequences.
pub stringlit: Token,
// Integer literal with optional 0x/0o/0b/0d radix prefix or sign.
pub integerlit: Token,
// Float literal with mandatory decimal point and optional exponent.
pub floatlit: Token,
}
impl TokenCollection {
/// Compile every token regex. The patterns are static and known-valid, so
/// the `unwrap()`s cannot fail at runtime.
pub fn new() -> Self {
TokenCollection {
open_brace: Token::new(r"\{").unwrap(),
close_brace: Token::new(r"\}").unwrap(),
open_bracket: Token::new(r"\[").unwrap(),
close_bracket: Token::new(r"\]").unwrap(),
comma: Token::new(r",").unwrap(),
colon: Token::new(r":").unwrap(),
identifier: Token::new(r#"(?x)
[
\p{L} # letters
\p{M} # combining marks
\p{Pc} # connector punctuation (e.g. _)
]
\w* # all word characters
"#).unwrap(),
stringlit: Token::new(r#"(?x)
"(?: [^"\n\\] | \\n | \\r | \\t | \\\\ | \\b
| \\f | \\/ | \\" | \\' | \\u[0-9a-fA-F]{4} )*"
| '(?: [^'\n\\] | \\n | \\r | \\t | \\\\ | \\b
| \\f | \\/ | \\" | \\' | \\u[0-9a-fA-F]{4} )*'
"#).unwrap(),
integerlit: Token::new(r#"(?x)
0[xX][0-9a-fA-F_]+
| 0[oO][0-8_]+
| 0[bB][10_]+
| 0[dD][\d_]+
| [+-]?[\d_]+
"#).unwrap(),
floatlit: Token::new(r#"(?x)
[-+]? # optional sign
(?:
\d+\.\d* # numbers <dot> <optional numbers>
| \.\d+ # <dot> numbers
)
(?:
[eE] # begin exponent
[+-]? # sign
\d+ # exponent
)? # optional exponent part
"#).unwrap(),
}
}
}
/// Unescape a quoted string literal (single- or double-quoted).
///
/// The delimiting quote characters are dropped, standard escapes
/// (`\n`, `\r`, `\t`, `\b`, `\f`, `\uXXXX`, `\<any>`) are decoded, and any
/// bare occurrence of the quote character inside the literal is removed.
///
/// # Panics
/// Panics if `inp` is empty.
pub fn parse_string(inp: String) -> String {
    // The first character determines which quote delimits the literal.
    let quote = inp
        .chars()
        .next()
        .expect("parse_string is parsing an empty string");
    let mut out = String::with_capacity(inp.len());
    let mut chars = inp.chars();
    while let Some(c) = chars.next() {
        if c == quote {
            // Opening/closing (and any stray) quote characters are dropped.
            continue;
        }
        if c != '\\' {
            out.push(c);
            continue;
        }
        // Escape sequence: decode the character following the backslash.
        match chars.next() {
            Some('n') => out.push('\n'),
            Some('r') => out.push('\r'),
            Some('t') => out.push('\t'),
            Some('b') => out.push('\x08'),
            Some('f') => out.push('\x0c'),
            Some('u') => {
                // \uXXXX: accumulate four hex digits; non-hex characters in
                // between are skipped (mirrors the original state machine).
                let mut value: u16 = 0;
                let mut digits = 0;
                while digits < 4 {
                    match chars.next() {
                        Some(d) => {
                            if let Some(hex) = d.to_digit(16) {
                                value = value * 16 + hex as u16;
                                digits += 1;
                            }
                        }
                        // Input ended mid-escape: emit nothing further.
                        None => return out,
                    }
                }
                if let Some(decoded) = char::from_u32(u32::from(value)) {
                    out.push(decoded);
                }
            }
            // Any other escaped character stands for itself (\\, \', \", \/).
            Some(other) => out.push(other),
            // Trailing lone backslash is ignored.
            None => {}
        }
    }
    out
}
/// Parse an integer literal previously matched by `TokenCollection::integerlit`.
///
/// Supports `0x`/`0X` hex, `0o`/`0O` octal, `0b`/`0B` binary and `0d`/`0D`
/// decimal prefixes, an optional leading `+`/`-` on plain decimals, and `_`
/// digit separators. The token regex accepts separators (e.g. `3_4_5`) but
/// `i64::from_str_radix` rejects them, so they are stripped here — the
/// previous version panicked on any matched token containing `_`.
///
/// # Panics
/// Panics if the input is not a valid integer literal or overflows `i64`.
pub fn parse_integer(inp: String) -> i64 {
    // Drop `_` separators before handing the digits to from_str_radix.
    let inp: String = inp.chars().filter(|&c| c != '_').collect();
    // All prefixes are ASCII, so byte slicing below is safe.
    let (digits, radix) = if inp.starts_with("0x") || inp.starts_with("0X") {
        (&inp[2..], 16)
    } else if inp.starts_with("0o") || inp.starts_with("0O") {
        (&inp[2..], 8)
    } else if inp.starts_with("0b") || inp.starts_with("0B") {
        (&inp[2..], 2)
    } else if inp.starts_with("0d") || inp.starts_with("0D") {
        (&inp[2..], 10)
    } else if inp.starts_with('+') {
        // from_str_radix historically rejected an explicit leading '+'.
        (&inp[1..], 10)
    } else {
        (&inp[..], 10)
    };
    i64::from_str_radix(digits, radix).unwrap()
}
/// Parse a float literal previously matched by `TokenCollection::floatlit`.
///
/// An explicit leading `+` is stripped first: `str::parse::<f64>` rejected
/// it on older stable/beta Rust releases.
///
/// # Panics
/// Panics if the input is not a valid float literal.
pub fn parse_float(inp: String) -> f64 {
    let digits = inp.strip_prefix('+').unwrap_or_else(|| inp.as_str());
    digits.parse().unwrap()
}
#[cfg(test)]
mod tests {
    use super::*;

    // Literal-parser tests: each exercises parse_* with inputs the token
    // regexes accept.
    #[test]
    fn test_parse_string() {
        assert_eq!(parse_string("'hello'".to_string()), "hello");
        assert_eq!(parse_string(r#""hello""#.to_string()), "hello");
        assert_eq!(parse_string("'he\\nllo'".to_string()), "he\nllo");
        assert_eq!(parse_string(r#"'he"llo'"#.to_string()), "he\"llo");
        assert_eq!(parse_string(r"'\u0058'".to_string()), "\u{0058}");
    }

    #[test]
    fn test_parse_integer() {
        assert_eq!(parse_integer("0d5".to_string()), 5);
        assert_eq!(parse_integer("0D5474".to_string()), 5474);
        assert_eq!(parse_integer("0x4f3".to_string()), 0x4f3);
        assert_eq!(parse_integer("0X7Df31".to_string()), 0x7df31);
        assert_eq!(parse_integer("0o443".to_string()), 3 + (8*4) + (64*4));
        assert_eq!(parse_integer("0O70131".to_string()), 28761);
        assert_eq!(parse_integer("0b01101".to_string()), 0b01101);
        assert_eq!(parse_integer("0B10010".to_string()), 0b10010);
        assert_eq!(parse_integer("32353".to_string()), 32353);
        assert_eq!(parse_integer("-1234".to_string()), -1234);
        assert_eq!(parse_integer("+4321".to_string()), 4321);
    }

    // Leftover `println!("run")` debug statements between the assertions
    // were removed; they were noise from diagnosing the stable/beta
    // `"+1.2".parse::<f64>()` failure and served no purpose in the test.
    #[test]
    fn test_parse_float() {
        assert_eq!(parse_float("+1.2".to_string()), 1.2);
        assert_eq!(parse_float("-1.2".to_string()), -1.2);
        assert_eq!(parse_float("3.4".to_string()), 3.4);
        assert_eq!(parse_float("3.".to_string()), 3.0);
        assert_eq!(parse_float(".2".to_string()), 0.2);
        assert_eq!(parse_float("1.2e2".to_string()), 1.2E+2);
        assert_eq!(parse_float("1.2e+2".to_string()), 1.2E+2);
        assert_eq!(parse_float("1.2e-2".to_string()), 1.2E-2);
        assert_eq!(parse_float("1.2E2".to_string()), 1.2E+2);
        assert_eq!(parse_float("1.2E+2".to_string()), 1.2E+2);
        assert_eq!(parse_float("1.2E-2".to_string()), 1.2E-2);
    }

    // Token-regex tests: get_match returns the matched prefix length.
    #[test]
    fn literals_match() {
        let tk = TokenCollection::new();
        assert_eq!(tk.open_brace.get_match("{"), Some(1));
        assert_eq!(tk.open_brace.get_match("}"), None);
        assert_eq!(tk.close_brace.get_match("}"), Some(1));
        assert_eq!(tk.close_brace.get_match("d"), None);
        assert_eq!(tk.open_bracket.get_match("["), Some(1));
        assert_eq!(tk.open_bracket.get_match(" "), None);
        assert_eq!(tk.close_bracket.get_match("]"), Some(1));
        assert_eq!(tk.close_bracket.get_match("["), None);
        assert_eq!(tk.comma.get_match(","), Some(1));
        assert_eq!(tk.comma.get_match(""), None);
        assert_eq!(tk.colon.get_match(":"), Some(1));
        assert_eq!(tk.colon.get_match(";"), None);
    }

    #[test]
    fn identifier_matches() {
        let tk = TokenCollection::new();
        assert_eq!(tk.identifier.get_match("hello"), Some(5));
        assert_eq!(tk.identifier.get_match("HELLO"), Some(5));
        assert_eq!(tk.identifier.get_match("45fefe"), None);
        assert_eq!(tk.identifier.get_match("fefe45"), Some(6));
        assert_eq!(tk.identifier.get_match("true"), Some(4));
        assert_eq!(tk.identifier.get_match("$"), None);
    }

    #[test]
    fn string_matches() {
        let tk = TokenCollection::new();
        assert_eq!(tk.stringlit.get_match(""), None);
        assert_eq!(tk.stringlit.get_match("hello"), None);
        assert_eq!(tk.stringlit.get_match("'hello'"), Some(7));
        assert_eq!(tk.stringlit.get_match("{}[],: 'string'"), None);
        assert_eq!(tk.stringlit.get_match(r#""hello""#), Some(7));
        assert_eq!(tk.stringlit.get_match(r"'hello\n'"), Some(9));
        assert_eq!(tk.stringlit.get_match(r"'hello\''"), Some(9));
        assert_eq!(tk.stringlit.get_match(r"'
no newline in strings'"), None);
    }

    #[test]
    fn integer_matches() {
        let tk = TokenCollection::new();
        assert_eq!(tk.integerlit.get_match("345"), Some(3));
        assert_eq!(tk.integerlit.get_match("3_4_5"), Some(5));
        assert_eq!(tk.integerlit.get_match("0d3_4_5"), Some(7));
        assert_eq!(tk.integerlit.get_match("0x45"), Some(4));
        assert_eq!(tk.integerlit.get_match("0Xff"), Some(4));
        assert_eq!(tk.integerlit.get_match("0xf_f"), Some(5));
        assert_eq!(tk.integerlit.get_match("0xgg"), Some(1));
        assert_eq!(tk.integerlit.get_match("0o44"), Some(4));
        assert_eq!(tk.integerlit.get_match("0O44"), Some(4));
        assert_eq!(tk.integerlit.get_match("0o99"), Some(1));
    }

    #[test]
    fn float_matches() {
        let tk = TokenCollection::new();
        assert_eq!(tk.floatlit.get_match("300."), Some(4));
        assert_eq!(tk.floatlit.get_match("30.0"), Some(4));
        assert_eq!(tk.floatlit.get_match(".300"), Some(4));
        assert_eq!(tk.floatlit.get_match("300"), None);
        assert_eq!(tk.floatlit.get_match("3.e5"), Some(4));
        assert_eq!(tk.floatlit.get_match("3.e-5"), Some(5));
        assert_eq!(tk.floatlit.get_match("3.E-5"), Some(5));
    }
}
|
//! A pointer type for heap allocation.
//!
//! `Box<T>`, casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
//! drop their contents when they go out of scope.
//!
//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
//! its allocation. It is valid to convert both ways between a [`Box`] and a
//! raw pointer allocated with the [`Global`] allocator, given that the
//! [`Layout`] used with the allocator is correct for the type. More precisely,
//! a `value: *mut T` that has been allocated with the [`Global`] allocator
//! with `Layout::for_value(&*value)` may be converted into a box using
//! `Box::<T>::from_raw(value)`. Conversely, the memory backing a `value: *mut
//! T` obtained from `Box::<T>::into_raw` may be deallocated using the
//! [`Global`] allocator with `Layout::for_value(&*value)`.
//!
//! # Examples
//!
//! Move a value from the stack to the heap by creating a [`Box`]:
//!
//! ```
//! let val: u8 = 5;
//! let boxed: Box<u8> = Box::new(val);
//! ```
//!
//! Move a value from a [`Box`] back to the stack by [dereferencing]:
//!
//! ```
//! let boxed: Box<u8> = Box::new(5);
//! let val: u8 = *boxed;
//! ```
//!
//! Creating a recursive data structure:
//!
//! ```
//! #[derive(Debug)]
//! enum List<T> {
//! Cons(T, Box<List<T>>),
//! Nil,
//! }
//!
//! fn main() {
//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
//! println!("{:?}", list);
//! }
//! ```
//!
//! This will print `Cons(1, Cons(2, Nil))`.
//!
//! Recursive structures must be boxed, because if the definition of `Cons`
//! looked like this:
//!
//! ```compile_fail,E0072
//! # enum List<T> {
//! Cons(T, List<T>),
//! # }
//! ```
//!
//! It wouldn't work. This is because the size of a `List` depends on how many
//! elements are in the list, and so we don't know how much memory to allocate
//! for a `Cons`. By introducing a `Box`, which has a defined size, we know how
//! big `Cons` needs to be.
//!
//! [dereferencing]: ../../std/ops/trait.Deref.html
//! [`Box`]: struct.Box.html
//! [`Global`]: ../alloc/struct.Global.html
//! [`Layout`]: ../alloc/struct.Layout.html
#![stable(feature = "rust1", since = "1.0.0")]
use core::any::Any;
use core::borrow;
use core::cmp::Ordering;
use core::convert::From;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
use core::iter::{Iterator, FromIterator, FusedIterator};
use core::marker::{Unpin, Unsize};
use core::mem;
use core::pin::Pin;
use core::ops::{
CoerceUnsized, DispatchFromDyn, Deref, DerefMut, Receiver, Generator, GeneratorState
};
use core::ptr::{self, NonNull, Unique};
use core::task::{Context, Poll};
use crate::vec::Vec;
use crate::raw_vec::RawVec;
use crate::str::from_boxed_utf8_unchecked;
/// A pointer type for heap allocation.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
// `owned_box` is a lang item: the compiler itself generates allocation for
// `box` expressions and the drop glue for this type.
#[lang = "owned_box"]
// `#[fundamental]` lets downstream crates implement traits for
// `Box<LocalType>` without violating coherence.
#[fundamental]
#[stable(feature = "rust1", since = "1.0.0")]
// The single field is a `Unique<T>`: a covariant, non-null, owning pointer.
pub struct Box<T: ?Sized>(Unique<T>);
impl<T> Box<T> {
/// Allocates memory on the heap and then places `x` into it.
///
/// This doesn't actually allocate if `T` is zero-sized.
///
/// # Examples
///
/// ```
/// let five = Box::new(5);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline(always)]
pub fn new(x: T) -> Box<T> {
// `box` is the compiler-built-in allocation primitive backing `Box::new`.
box x
}
/// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
/// `x` will be pinned in memory and unable to be moved.
#[stable(feature = "pin", since = "1.33.0")]
#[inline(always)]
pub fn pin(x: T) -> Pin<Box<T>> {
// Box the value, then pin it in place via `From<Box<T>> for Pin<Box<T>>`
// (no reallocation or move of the contents).
(box x).into()
}
}
impl<T: ?Sized> Box<T> {
/// Constructs a box from a raw pointer.
///
/// After calling this function, the raw pointer is owned by the
/// resulting `Box`. Specifically, the `Box` destructor will call
/// the destructor of `T` and free the allocated memory. For this
/// to be safe, the memory must have been allocated in the precise
/// way that `Box` expects, namely, using the global allocator
/// with the correct [`Layout`] for holding a value of type `T`. In
/// particular, this will be satisfied for a pointer obtained
/// from a previously existing `Box` using [`Box::into_raw`].
///
/// # Safety
///
/// This function is unsafe because improper use may lead to
/// memory problems. For example, a double-free may occur if the
/// function is called twice on the same raw pointer.
///
/// # Examples
/// Recreate a `Box` which was previously converted to a raw pointer using [`Box::into_raw`]:
/// ```
/// let x = Box::new(5);
/// let ptr = Box::into_raw(x);
/// let x = unsafe { Box::from_raw(ptr) };
/// ```
/// Manually create a `Box` from scratch by using the global allocator:
/// ```
/// use std::alloc::{Layout, alloc};
///
/// let ptr = unsafe{ alloc(Layout::new::<i32>()) } as *mut i32;
/// unsafe{ *ptr = 5; }
/// let x = unsafe{ Box::from_raw(ptr) };
/// ```
///
/// [`Layout`]: ../alloc/struct.Layout.html
/// [`Box::into_raw`]: struct.Box.html#method.into_raw
///
#[stable(feature = "box_raw", since = "1.4.0")]
#[inline]
pub unsafe fn from_raw(raw: *mut T) -> Self {
// SAFETY: per the caller contract above, `raw` is non-null and owns a
// valid `T` allocated by the global allocator with the right layout.
Box(Unique::new_unchecked(raw))
}
/// Consumes the `Box`, returning a wrapped raw pointer.
///
/// The pointer will be properly aligned and non-null.
///
/// After calling this function, the caller is responsible for the
/// memory previously managed by the `Box`. In particular, the
/// caller should properly destroy `T` and release the memory. The
/// easiest way to do so is to convert the raw pointer back into a `Box`
/// with the [`Box::from_raw`] function.
///
/// Note: this is an associated function, which means that you have
/// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
/// is so that there is no conflict with a method on the inner type.
///
/// # Examples
/// Converting the raw pointer back into a `Box` with [`Box::from_raw`]
/// for automatic cleanup:
/// ```
/// let x = Box::new(String::from("Hello"));
/// let ptr = Box::into_raw(x);
/// let x = unsafe{ Box::from_raw(ptr) };
/// ```
/// Manual cleanup by running the destructor and deallocating the memory:
/// ```
/// use std::alloc::{Layout, dealloc};
/// use std::ptr;
///
/// let x = Box::new(String::from("Hello"));
/// let p = Box::into_raw(x);
/// unsafe{ ptr::drop_in_place(p); }
/// unsafe{ dealloc(p as *mut u8, Layout::new::<String>()); }
/// ```
///
/// [`Box::from_raw`]: struct.Box.html#method.from_raw
///
#[stable(feature = "box_raw", since = "1.4.0")]
#[inline]
pub fn into_raw(b: Box<T>) -> *mut T {
// Delegates to `into_raw_non_null`, which forgets the box so its
// destructor never runs on the now-caller-owned allocation.
Box::into_raw_non_null(b).as_ptr()
}
/// Consumes the `Box`, returning the wrapped pointer as `NonNull<T>`.
///
/// After calling this function, the caller is responsible for the
/// memory previously managed by the `Box`. In particular, the
/// caller should properly destroy `T` and release the memory. The
/// easiest way to do so is to convert the `NonNull<T>` pointer
/// into a raw pointer and back into a `Box` with the [`Box::from_raw`]
/// function.
///
/// Note: this is an associated function, which means that you have
/// to call it as `Box::into_raw_non_null(b)`
/// instead of `b.into_raw_non_null()`. This
/// is so that there is no conflict with a method on the inner type.
///
/// [`Box::from_raw`]: struct.Box.html#method.from_raw
///
/// # Examples
///
/// ```
/// #![feature(box_into_raw_non_null)]
///
/// fn main() {
/// let x = Box::new(5);
/// let ptr = Box::into_raw_non_null(x);
///
/// // Clean up the memory by converting the NonNull pointer back
/// // into a Box and letting the Box be dropped.
/// let x = unsafe{ Box::from_raw(ptr.as_ptr()) };
/// }
/// ```
#[unstable(feature = "box_into_raw_non_null", issue = "47336")]
#[inline]
pub fn into_raw_non_null(b: Box<T>) -> NonNull<T> {
// `into_unique` forgets the box; `Unique` converts losslessly to `NonNull`.
Box::into_unique(b).into()
}
#[unstable(feature = "ptr_internals", issue = "0", reason = "use into_raw_non_null instead")]
#[inline]
#[doc(hidden)]
pub fn into_unique(mut b: Box<T>) -> Unique<T> {
// Box is kind-of a library type, but recognized as a "unique pointer" by
// Stacked Borrows. This function here corresponds to "reborrowing to
// a raw pointer", but there is no actual reborrow here -- so
// without some care, the pointer we are returning here still carries
// the `Uniq` tag. We round-trip through a mutable reference to avoid that.
let unique = unsafe { b.0.as_mut() as *mut T };
// Prevent the Box destructor from freeing the allocation we just exposed.
mem::forget(b);
unsafe { Unique::new_unchecked(unique) }
}
/// Consumes and leaks the `Box`, returning a mutable reference,
/// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
/// `'a`. If the type has only static references, or none at all, then this
/// may be chosen to be `'static`.
///
/// This function is mainly useful for data that lives for the remainder of
/// the program's life. Dropping the returned reference will cause a memory
/// leak. If this is not acceptable, the reference should first be wrapped
/// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
/// then be dropped which will properly destroy `T` and release the
/// allocated memory.
///
/// Note: this is an associated function, which means that you have
/// to call it as `Box::leak(b)` instead of `b.leak()`. This
/// is so that there is no conflict with a method on the inner type.
///
/// [`Box::from_raw`]: struct.Box.html#method.from_raw
///
/// # Examples
///
/// Simple usage:
///
/// ```
/// fn main() {
/// let x = Box::new(41);
/// let static_ref: &'static mut usize = Box::leak(x);
/// *static_ref += 1;
/// assert_eq!(*static_ref, 42);
/// }
/// ```
///
/// Unsized data:
///
/// ```
/// fn main() {
/// let x = vec![1, 2, 3].into_boxed_slice();
/// let static_ref = Box::leak(x);
/// static_ref[0] = 4;
/// assert_eq!(*static_ref, [4, 2, 3]);
/// }
/// ```
#[stable(feature = "box_leak", since = "1.26.0")]
#[inline]
pub fn leak<'a>(b: Box<T>) -> &'a mut T
where
T: 'a // Technically not needed, but kept to be explicit.
{
// SAFETY: `into_raw` relinquishes ownership, so the allocation is never
// freed and the reference stays valid for any caller-chosen lifetime.
unsafe { &mut *Box::into_raw(b) }
}
/// Converts a `Box<T>` into a `Pin<Box<T>>`
///
/// This conversion does not allocate on the heap and happens in place.
///
/// This is also available via [`From`].
#[unstable(feature = "box_into_pin", issue = "0")]
pub fn into_pin(boxed: Box<T>) -> Pin<Box<T>> {
// It's not possible to move or replace the insides of a `Pin<Box<T>>`
// when `T: !Unpin`, so it's safe to pin it directly without any
// additional requirements.
unsafe { Pin::new_unchecked(boxed) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
// `#[may_dangle]` (the dropck "eyepatch") promises this destructor does not
// access borrowed data in `T` beyond dropping it, relaxing drop-check borrows.
unsafe impl<#[may_dangle] T: ?Sized> Drop for Box<T> {
fn drop(&mut self) {
// Intentionally empty: because `Box` is a lang item, the compiler emits
// the actual drop glue (drop the contents, then free the allocation).
// FIXME: Do nothing, drop is currently performed by compiler.
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
    /// Creates a `Box<T>`, with the `Default` value for T.
    fn default() -> Box<T> {
        // Build the default value, then move it into a fresh heap allocation.
        Box::new(T::default())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Box<[T]> {
    /// Creates an empty boxed slice.
    fn default() -> Box<[T]> {
        // A boxed zero-length array unsize-coerces to `Box<[T]>` at the
        // return position; no elements means no meaningful allocation.
        let empty: Box<[T; 0]> = Box::new([]);
        empty
    }
}
#[stable(feature = "default_box_extra", since = "1.17.0")]
impl Default for Box<str> {
fn default() -> Box<str> {
// SAFETY: the default `Box<[u8]>` is empty, and the empty byte sequence
// is trivially valid UTF-8.
unsafe { from_boxed_utf8_unchecked(Default::default()) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for Box<T> {
    /// Returns a new box with a `clone()` of this box's contents.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Box::new(5);
    /// let y = x.clone();
    /// ```
    #[inline]
    fn clone(&self) -> Box<T> {
        // Clone the pointee first, then move the clone into its own allocation.
        let contents = (**self).clone();
        Box::new(contents)
    }
    /// Copies `source`'s contents into `self` without creating a new allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Box::new(5);
    /// let mut y = Box::new(10);
    ///
    /// y.clone_from(&x);
    ///
    /// assert_eq!(*y, 5);
    /// ```
    #[inline]
    fn clone_from(&mut self, source: &Box<T>) {
        // Delegate to the pointee's `clone_from` so existing storage is reused.
        (**self).clone_from(&**source);
    }
}
#[stable(feature = "box_slice_clone", since = "1.3.0")]
impl Clone for Box<str> {
fn clone(&self) -> Self {
let len = self.len();
let buf = RawVec::with_capacity(len);
unsafe {
// SAFETY: `buf` was just allocated for exactly `len` bytes and cannot
// overlap `self`; the copied bytes stay valid UTF-8 because `self` is.
ptr::copy_nonoverlapping(self.as_ptr(), buf.ptr(), len);
from_boxed_utf8_unchecked(buf.into_box())
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
    /// Equality compares the boxed values, never the pointers.
    #[inline]
    fn eq(&self, other: &Box<T>) -> bool {
        **self == **other
    }
    #[inline]
    fn ne(&self, other: &Box<T>) -> bool {
        **self != **other
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> {
    /// Ordering is delegated entirely to the boxed contents.
    #[inline]
    fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }
    #[inline]
    fn lt(&self, other: &Box<T>) -> bool {
        **self < **other
    }
    #[inline]
    fn le(&self, other: &Box<T>) -> bool {
        **self <= **other
    }
    #[inline]
    fn ge(&self, other: &Box<T>) -> bool {
        **self >= **other
    }
    #[inline]
    fn gt(&self, other: &Box<T>) -> bool {
        **self > **other
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Box<T> {
    /// Total ordering is delegated to the boxed contents.
    #[inline]
    fn cmp(&self, other: &Box<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Marker only: equality of the contents is reflexive, so the box's is too.
impl<T: ?Sized + Eq> Eq for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
// Hashing a box hashes its contents, matching the content-based `PartialEq`.
impl<T: ?Sized + Hash> Hash for Box<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
// Every `Hasher` method is forwarded so `Box<dyn Hasher>` behaves exactly
// like the hasher it wraps.
impl<T: ?Sized + Hasher> Hasher for Box<T> {
fn finish(&self) -> u64 {
(**self).finish()
}
fn write(&mut self, bytes: &[u8]) {
(**self).write(bytes)
}
fn write_u8(&mut self, i: u8) {
(**self).write_u8(i)
}
fn write_u16(&mut self, i: u16) {
(**self).write_u16(i)
}
fn write_u32(&mut self, i: u32) {
(**self).write_u32(i)
}
fn write_u64(&mut self, i: u64) {
(**self).write_u64(i)
}
fn write_u128(&mut self, i: u128) {
(**self).write_u128(i)
}
fn write_usize(&mut self, i: usize) {
(**self).write_usize(i)
}
fn write_i8(&mut self, i: i8) {
(**self).write_i8(i)
}
fn write_i16(&mut self, i: i16) {
(**self).write_i16(i)
}
fn write_i32(&mut self, i: i32) {
(**self).write_i32(i)
}
fn write_i64(&mut self, i: i64) {
(**self).write_i64(i)
}
fn write_i128(&mut self, i: i128) {
(**self).write_i128(i)
}
fn write_isize(&mut self, i: isize) {
(**self).write_isize(i)
}
}
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Box<T> {
/// Converts a generic type `T` into a `Box<T>`
///
/// The conversion allocates on the heap and moves `t`
/// from the stack into it.
///
/// # Examples
/// ```rust
/// let x = 5;
/// let boxed = Box::new(5);
///
/// assert_eq!(Box::from(x), boxed);
/// ```
fn from(t: T) -> Self {
Box::new(t)
}
}
#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized> From<Box<T>> for Pin<Box<T>> {
/// Converts a `Box<T>` into a `Pin<Box<T>>`
///
/// This conversion does not allocate on the heap and happens in place.
fn from(boxed: Box<T>) -> Self {
// Delegates to `Box::into_pin`; the heap contents never move afterwards.
Box::into_pin(boxed)
}
}
#[stable(feature = "box_from_slice", since = "1.17.0")]
impl<T: Copy> From<&[T]> for Box<[T]> {
/// Converts a `&[T]` into a `Box<[T]>`
///
/// This conversion allocates on the heap
/// and performs a copy of `slice`.
///
/// # Examples
/// ```rust
/// // create a &[u8] which will be used to create a Box<[u8]>
/// let slice: &[u8] = &[104, 101, 108, 108, 111];
/// let boxed_slice: Box<[u8]> = Box::from(slice);
///
/// println!("{:?}", boxed_slice);
/// ```
fn from(slice: &[T]) -> Box<[T]> {
// NOTE(review): `into_box` appears to hand back not-yet-initialized storage
// of the right length; the full-slice copy below writes every element,
// which is sound because `T: Copy`.
let mut boxed = unsafe { RawVec::with_capacity(slice.len()).into_box() };
boxed.copy_from_slice(slice);
boxed
}
}
#[stable(feature = "box_from_slice", since = "1.17.0")]
impl From<&str> for Box<str> {
/// Converts a `&str` into a `Box<str>`
///
/// This conversion allocates on the heap
/// and performs a copy of `s`.
///
/// # Examples
/// ```rust
/// let boxed: Box<str> = Box::from("hello");
/// println!("{}", boxed);
/// ```
#[inline]
fn from(s: &str) -> Box<str> {
// SAFETY: the bytes come straight from a `&str`, so they are valid UTF-8.
unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) }
}
}
#[stable(feature = "boxed_str_conv", since = "1.19.0")]
impl From<Box<str>> for Box<[u8]> {
/// Converts a `Box<str>>` into a `Box<[u8]>`
///
/// This conversion does not allocate on the heap and happens in place.
///
/// # Examples
/// ```rust
/// // create a Box<str> which will be used to create a Box<[u8]>
/// let boxed: Box<str> = Box::from("hello");
/// let boxed_str: Box<[u8]> = Box::from(boxed);
///
/// // create a &[u8] which will be used to create a Box<[u8]>
/// let slice: &[u8] = &[104, 101, 108, 108, 111];
/// let boxed_slice = Box::from(slice);
///
/// assert_eq!(boxed_slice, boxed_str);
/// ```
#[inline]
fn from(s: Box<str>) -> Self {
// SAFETY: `str` has the same representation as `[u8]`, so re-interpreting
// the owning pointer preserves both layout and allocation.
unsafe { Box::from_raw(Box::into_raw(s) as *mut [u8]) }
}
}
impl Box<dyn Any> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
/// Attempt to downcast the box to a concrete type.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(value: Box<dyn Any>) {
/// if let Ok(string) = value.downcast::<String>() {
/// println!("String ({}): {}", string.len(), string);
/// }
/// }
///
/// fn main() {
/// let my_string = "Hello World".to_string();
/// print_if_string(Box::new(my_string));
/// print_if_string(Box::new(0i8));
/// }
/// ```
pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any>> {
if self.is::<T>() {
unsafe {
// SAFETY: `is::<T>()` just confirmed the erased type is `T`, so
// casting the fat pointer down to `*mut T` is valid; ownership is
// transferred straight through `into_raw`/`from_raw`.
let raw: *mut dyn Any = Box::into_raw(self);
Ok(Box::from_raw(raw as *mut T))
}
} else {
// Type mismatch: give the caller their box back unchanged.
Err(self)
}
}
}
impl Box<dyn Any + Send> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
/// Attempt to downcast the box to a concrete type.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(value: Box<dyn Any + Send>) {
/// if let Ok(string) = value.downcast::<String>() {
/// println!("String ({}): {}", string.len(), string);
/// }
/// }
///
/// fn main() {
/// let my_string = "Hello World".to_string();
/// print_if_string(Box::new(my_string));
/// print_if_string(Box::new(0i8));
/// }
/// ```
pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any + Send>> {
// Delegate to the `dyn Any` downcast; on failure the `Send` bound is
// restored on the error value, which is sound because the original
// value was `Send` before the cast erased it.
<Box<dyn Any>>::downcast(self).map_err(|s| unsafe {
// reapply the Send marker
Box::from_raw(Box::into_raw(s) as *mut (dyn Any + Send))
})
}
}
#[stable(feature = "rust1", since = "1.0.0")]
// Boxes format as their contents, so `Display`/`Debug` are transparent.
impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
// `Pointer` is the one formatter that shows the address, not the contents.
impl<T: ?Sized> fmt::Pointer for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// It's not possible to extract the inner Uniq directly from the Box,
// instead we cast it to a *const which aliases the Unique
let ptr: *const T = &**self;
fmt::Pointer::fmt(&ptr, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
// `Deref`/`DerefMut` make `*box` reach the heap value; the compiler supplies
// the underlying dereference because `Box` is a lang item.
impl<T: ?Sized> Deref for Box<T> {
type Target = T;
fn deref(&self) -> &T {
&**self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> DerefMut for Box<T> {
fn deref_mut(&mut self) -> &mut T {
&mut **self
}
}
#[unstable(feature = "receiver_trait", issue = "0")]
// Allows `self: Box<Self>` method receivers.
impl<T: ?Sized> Receiver for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
// The iterator family forwards to the boxed iterator so `Box<dyn Iterator>`
// is directly usable, preserving `size_hint`/`nth` optimizations.
impl<I: Iterator + ?Sized> Iterator for Box<I> {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> {
(**self).next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(**self).size_hint()
}
fn nth(&mut self, n: usize) -> Option<I::Item> {
(**self).nth(n)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> {
fn next_back(&mut self) -> Option<I::Item> {
(**self).next_back()
}
fn nth_back(&mut self, n: usize) -> Option<I::Item> {
(**self).nth_back(n)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {
fn len(&self) -> usize {
(**self).len()
}
fn is_empty(&self) -> bool {
(**self).is_empty()
}
}
#[stable(feature = "fused", since = "1.26.0")]
// Marker forwarding: a boxed fused iterator is still fused.
impl<I: FusedIterator + ?Sized> FusedIterator for Box<I> {}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
// These use the unstable unboxed-closure traits (`FnOnce<A>` and the
// `rust-call` ABI) so that `Box<dyn FnOnce()>` etc. are callable directly.
impl<A, F: FnOnce<A> + ?Sized> FnOnce<A> for Box<F> {
type Output = <F as FnOnce<A>>::Output;
extern "rust-call" fn call_once(self, args: A) -> Self::Output {
// `*self` moves the closure out of the box for the by-value call.
<F as FnOnce<A>>::call_once(*self, args)
}
}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<A, F: FnMut<A> + ?Sized> FnMut<A> for Box<F> {
extern "rust-call" fn call_mut(&mut self, args: A) -> Self::Output {
<F as FnMut<A>>::call_mut(self, args)
}
}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<A, F: Fn<A> + ?Sized> Fn<A> for Box<F> {
extern "rust-call" fn call(&self, args: A) -> Self::Output {
<F as Fn<A>>::call(self, args)
}
}
/// `FnBox` is a version of the `FnOnce` intended for use with boxed
/// closure objects. The idea is that where one would normally store a
/// `Box<dyn FnOnce()>` in a data structure, you should use
/// `Box<dyn FnBox()>`. The two traits behave essentially the same, except
/// that a `FnBox` closure can only be called if it is boxed. (Note
/// that `FnBox` may be deprecated in the future if `Box<dyn FnOnce()>`
/// closures become directly usable.)
///
/// # Examples
///
/// Here is a snippet of code which creates a hashmap full of boxed
/// once closures and then removes them one by one, calling each
/// closure as it is removed. Note that the type of the closures
/// stored in the map is `Box<dyn FnBox() -> i32>` and not `Box<dyn FnOnce()
/// -> i32>`.
///
/// ```
/// #![feature(fnbox)]
///
/// use std::boxed::FnBox;
/// use std::collections::HashMap;
///
/// fn make_map() -> HashMap<i32, Box<dyn FnBox() -> i32>> {
/// let mut map: HashMap<i32, Box<dyn FnBox() -> i32>> = HashMap::new();
/// map.insert(1, Box::new(|| 22));
/// map.insert(2, Box::new(|| 44));
/// map
/// }
///
/// fn main() {
/// let mut map = make_map();
/// for i in &[1, 2] {
/// let f = map.remove(&i).unwrap();
/// assert_eq!(f(), i * 22);
/// }
/// }
/// ```
// `rustc_paren_sugar` lets the trait be written as `FnBox() -> R`
// (parenthesized sugar) like the built-in Fn traits.
#[rustc_paren_sugar]
#[unstable(feature = "fnbox",
reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
pub trait FnBox<A>: FnOnce<A> {
/// Performs the call operation.
fn call_box(self: Box<Self>, args: A) -> Self::Output;
}
#[unstable(feature = "fnbox",
reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
// Blanket impl: any `FnOnce` closure is callable through `Box<dyn FnBox>`,
// because `self: Box<F>` with `F: Sized` can be moved out and invoked.
impl<A, F> FnBox<A> for F
where F: FnOnce<A>
{
fn call_box(self: Box<F>, args: A) -> F::Output {
self.call_once(args)
}
}
#[unstable(feature = "coerce_unsized", issue = "27732")]
// Enables unsizing coercions such as `Box<[T; N]> -> Box<[T]>` and
// `Box<T> -> Box<dyn Trait>`.
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "0")]
// Allows `Box<Self>` to be used as a dispatchable method receiver on
// trait objects.
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
#[stable(feature = "boxed_slice_from_iter", since = "1.32.0")]
impl<A> FromIterator<A> for Box<[A]> {
fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
// Collect into a Vec first, then shed the excess capacity into a
// tightly-sized boxed slice.
iter.into_iter().collect::<Vec<_>>().into_boxed_slice()
}
}
#[stable(feature = "box_slice_clone", since = "1.3.0")]
impl<T: Clone> Clone for Box<[T]> {
fn clone(&self) -> Self {
let mut new = BoxBuilder {
data: RawVec::with_capacity(self.len()),
len: 0,
};
let mut target = new.data.ptr();
for item in self.iter() {
unsafe {
// SAFETY: `target` stays within the `self.len()`-element allocation;
// `ptr::write` initializes the slot without dropping its old garbage.
ptr::write(target, item.clone());
target = target.offset(1);
};
// Count only after a successful clone, so on panic `BoxBuilder::drop`
// frees exactly the initialized prefix.
new.len += 1;
}
return unsafe { new.into_box() };
// Helper type for responding to panics correctly.
struct BoxBuilder<T> {
data: RawVec<T>,
len: usize,
}
impl<T> BoxBuilder<T> {
unsafe fn into_box(self) -> Box<[T]> {
// Move the RawVec out without running `drop` (via `forget`), then
// convert the fully initialized buffer into the final boxed slice.
let raw = ptr::read(&self.data);
mem::forget(self);
raw.into_box()
}
}
impl<T> Drop for BoxBuilder<T> {
fn drop(&mut self) {
// Runs only on panic: drop each of the `len` initialized elements.
// `ptr::read` takes ownership of the value, which drops at end of scope.
let mut data = self.data.ptr();
let max = unsafe { data.add(self.len) };
while data != max {
unsafe {
ptr::read(data);
data = data.offset(1);
}
}
}
}
}
}
#[stable(feature = "box_borrow", since = "1.1.0")]
// `Borrow`/`AsRef` (and their mut variants) all expose the heap value, so a
// `Box<T>` can be used wherever a `&T` borrow is expected.
impl<T: ?Sized> borrow::Borrow<T> for Box<T> {
fn borrow(&self) -> &T {
&**self
}
}
#[stable(feature = "box_borrow", since = "1.1.0")]
impl<T: ?Sized> borrow::BorrowMut<T> for Box<T> {
fn borrow_mut(&mut self) -> &mut T {
&mut **self
}
}
#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Box<T> {
fn as_ref(&self) -> &T {
&**self
}
}
#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsMut<T> for Box<T> {
fn as_mut(&mut self) -> &mut T {
&mut **self
}
}
/* Nota bene
*
* We could have chosen not to add this impl, and instead have written a
* function of Pin<Box<T>> to Pin<T>. Such a function would not be sound,
* because Box<T> implements Unpin even when T does not, as a result of
* this impl.
*
* We chose this API instead of the alternative for a few reasons:
* - Logically, it is helpful to understand pinning in regard to the
* memory region being pointed to. For this reason none of the
* standard library pointer types support projecting through a pin
* (Box<T> is the only pointer type in std for which this would be
* safe.)
* - It is in practice very useful to have Box<T> be unconditionally
* Unpin because of trait objects, for which the structural auto
* trait functionality does not apply (e.g., Box<dyn Foo> would
* otherwise not be Unpin).
*
* Another type with the same semantics as Box but only a conditional
* implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and
* could have a method to project a Pin<T> from it.
*/
#[stable(feature = "pin", since = "1.33.0")]
// See the nota bene above: `Box<T>` is unconditionally `Unpin` because moving
// the box moves only the pointer, never the pinned heap contents.
impl<T: ?Sized> Unpin for Box<T> { }
#[unstable(feature = "generator_trait", issue = "43122")]
impl<G: ?Sized + Generator + Unpin> Generator for Box<G> {
type Yield = G::Yield;
type Return = G::Return;
fn resume(mut self: Pin<&mut Self>) -> GeneratorState<Self::Yield, Self::Return> {
// `Pin::new` (no unsafe) is permitted because `G: Unpin`.
G::resume(Pin::new(&mut *self))
}
}
#[unstable(feature = "generator_trait", issue = "43122")]
impl<G: ?Sized + Generator> Generator for Pin<Box<G>> {
type Yield = G::Yield;
type Return = G::Return;
fn resume(mut self: Pin<&mut Self>) -> GeneratorState<Self::Yield, Self::Return> {
// `Pin<Box<G>>::as_mut` re-projects to `Pin<&mut G>` without needing
// `G: Unpin`, since the contents stay pinned behind the box.
G::resume((*self).as_mut())
}
}
#[stable(feature = "futures_api", since = "1.36.0")]
impl<F: ?Sized + Future + Unpin> Future for Box<F> {
type Output = F::Output;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Safe `Pin::new` is fine here because `F: Unpin`.
F::poll(Pin::new(&mut *self), cx)
}
}
// Remove trailing whitespaces to satisfy tidy
//! A pointer type for heap allocation.
//!
//! `Box<T>`, casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
//! drop their contents when they go out of scope.
//!
//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
//! its allocation. It is valid to convert both ways between a [`Box`] and a
//! raw pointer allocated with the [`Global`] allocator, given that the
//! [`Layout`] used with the allocator is correct for the type. More precisely,
//! a `value: *mut T` that has been allocated with the [`Global`] allocator
//! with `Layout::for_value(&*value)` may be converted into a box using
//! `Box::<T>::from_raw(value)`. Conversely, the memory backing a `value: *mut
//! T` obtained from `Box::<T>::into_raw` may be deallocated using the
//! [`Global`] allocator with `Layout::for_value(&*value)`.
//!
//! # Examples
//!
//! Move a value from the stack to the heap by creating a [`Box`]:
//!
//! ```
//! let val: u8 = 5;
//! let boxed: Box<u8> = Box::new(val);
//! ```
//!
//! Move a value from a [`Box`] back to the stack by [dereferencing]:
//!
//! ```
//! let boxed: Box<u8> = Box::new(5);
//! let val: u8 = *boxed;
//! ```
//!
//! Creating a recursive data structure:
//!
//! ```
//! #[derive(Debug)]
//! enum List<T> {
//! Cons(T, Box<List<T>>),
//! Nil,
//! }
//!
//! fn main() {
//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
//! println!("{:?}", list);
//! }
//! ```
//!
//! This will print `Cons(1, Cons(2, Nil))`.
//!
//! Recursive structures must be boxed, because if the definition of `Cons`
//! looked like this:
//!
//! ```compile_fail,E0072
//! # enum List<T> {
//! Cons(T, List<T>),
//! # }
//! ```
//!
//! It wouldn't work. This is because the size of a `List` depends on how many
//! elements are in the list, and so we don't know how much memory to allocate
//! for a `Cons`. By introducing a `Box`, which has a defined size, we know how
//! big `Cons` needs to be.
//!
//! [dereferencing]: ../../std/ops/trait.Deref.html
//! [`Box`]: struct.Box.html
//! [`Global`]: ../alloc/struct.Global.html
//! [`Layout`]: ../alloc/struct.Layout.html
#![stable(feature = "rust1", since = "1.0.0")]
use core::any::Any;
use core::borrow;
use core::cmp::Ordering;
use core::convert::From;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
use core::iter::{Iterator, FromIterator, FusedIterator};
use core::marker::{Unpin, Unsize};
use core::mem;
use core::pin::Pin;
use core::ops::{
CoerceUnsized, DispatchFromDyn, Deref, DerefMut, Receiver, Generator, GeneratorState
};
use core::ptr::{self, NonNull, Unique};
use core::task::{Context, Poll};
use crate::vec::Vec;
use crate::raw_vec::RawVec;
use crate::str::from_boxed_utf8_unchecked;
/// A pointer type for heap allocation.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
// `owned_box` is a lang item: the compiler itself generates allocation for
// `box` expressions and the drop glue for this type.
#[lang = "owned_box"]
// `#[fundamental]` lets downstream crates implement traits for
// `Box<LocalType>` without violating coherence.
#[fundamental]
#[stable(feature = "rust1", since = "1.0.0")]
// The single field is a `Unique<T>`: a covariant, non-null, owning pointer.
pub struct Box<T: ?Sized>(Unique<T>);
impl<T> Box<T> {
/// Allocates memory on the heap and then places `x` into it.
///
/// This doesn't actually allocate if `T` is zero-sized.
///
/// # Examples
///
/// ```
/// let five = Box::new(5);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline(always)]
pub fn new(x: T) -> Box<T> {
// `box` is the compiler-built-in allocation primitive backing `Box::new`.
box x
}
/// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
/// `x` will be pinned in memory and unable to be moved.
#[stable(feature = "pin", since = "1.33.0")]
#[inline(always)]
pub fn pin(x: T) -> Pin<Box<T>> {
// Box the value, then pin it in place via `From<Box<T>> for Pin<Box<T>>`
// (no reallocation or move of the contents).
(box x).into()
}
}
impl<T: ?Sized> Box<T> {
    /// Constructs a box from a raw pointer.
    ///
    /// After calling this function, the raw pointer is owned by the
    /// resulting `Box`. Specifically, the `Box` destructor will call
    /// the destructor of `T` and free the allocated memory. For this
    /// to be safe, the memory must have been allocated in the precise
    /// way that `Box` expects, namely, using the global allocator
    /// with the correct [`Layout`] for holding a value of type `T`. In
    /// particular, this will be satisfied for a pointer obtained
    /// from a previously existing `Box` using [`Box::into_raw`].
    ///
    /// # Safety
    ///
    /// This function is unsafe because improper use may lead to
    /// memory problems. For example, a double-free may occur if the
    /// function is called twice on the same raw pointer.
    ///
    /// # Examples
    /// Recreate a `Box` which was previously converted to a raw pointer using [`Box::into_raw`]:
    /// ```
    /// let x = Box::new(5);
    /// let ptr = Box::into_raw(x);
    /// let x = unsafe { Box::from_raw(ptr) };
    /// ```
    /// Manually create a `Box` from scratch by using the global allocator:
    /// ```
    /// use std::alloc::{Layout, alloc};
    ///
    /// let ptr = unsafe{ alloc(Layout::new::<i32>()) } as *mut i32;
    /// unsafe{ *ptr = 5; }
    /// let x = unsafe{ Box::from_raw(ptr) };
    /// ```
    ///
    /// [`Layout`]: ../alloc/struct.Layout.html
    /// [`Box::into_raw`]: struct.Box.html#method.into_raw
    ///
    #[stable(feature = "box_raw", since = "1.4.0")]
    #[inline]
    pub unsafe fn from_raw(raw: *mut T) -> Self {
        // Caller guarantees `raw` is non-null and uniquely owned (see # Safety).
        Box(Unique::new_unchecked(raw))
    }
    /// Consumes the `Box`, returning a wrapped raw pointer.
    ///
    /// The pointer will be properly aligned and non-null.
    ///
    /// After calling this function, the caller is responsible for the
    /// memory previously managed by the `Box`. In particular, the
    /// caller should properly destroy `T` and release the memory. The
    /// easiest way to do so is to convert the raw pointer back into a `Box`
    /// with the [`Box::from_raw`] function.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// # Examples
    /// Converting the raw pointer back into a `Box` with [`Box::from_raw`]
    /// for automatic cleanup:
    /// ```
    /// let x = Box::new(String::from("Hello"));
    /// let ptr = Box::into_raw(x);
    /// let x = unsafe{ Box::from_raw(ptr) };
    /// ```
    /// Manual cleanup by running the destructor and deallocating the memory:
    /// ```
    /// use std::alloc::{Layout, dealloc};
    /// use std::ptr;
    ///
    /// let x = Box::new(String::from("Hello"));
    /// let p = Box::into_raw(x);
    /// unsafe{ ptr::drop_in_place(p); }
    /// unsafe{ dealloc(p as *mut u8, Layout::new::<String>()); }
    /// ```
    ///
    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
    ///
    #[stable(feature = "box_raw", since = "1.4.0")]
    #[inline]
    pub fn into_raw(b: Box<T>) -> *mut T {
        // Delegates to the `NonNull` variant; ownership is relinquished there.
        Box::into_raw_non_null(b).as_ptr()
    }
    /// Consumes the `Box`, returning the wrapped pointer as `NonNull<T>`.
    ///
    /// After calling this function, the caller is responsible for the
    /// memory previously managed by the `Box`. In particular, the
    /// caller should properly destroy `T` and release the memory. The
    /// easiest way to do so is to convert the `NonNull<T>` pointer
    /// into a raw pointer and back into a `Box` with the [`Box::from_raw`]
    /// function.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Box::into_raw_non_null(b)`
    /// instead of `b.into_raw_non_null()`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(box_into_raw_non_null)]
    ///
    /// fn main() {
    ///     let x = Box::new(5);
    ///     let ptr = Box::into_raw_non_null(x);
    ///
    ///     // Clean up the memory by converting the NonNull pointer back
    ///     // into a Box and letting the Box be dropped.
    ///     let x = unsafe{ Box::from_raw(ptr.as_ptr()) };
    /// }
    /// ```
    #[unstable(feature = "box_into_raw_non_null", issue = "47336")]
    #[inline]
    pub fn into_raw_non_null(b: Box<T>) -> NonNull<T> {
        // `Unique<T>` converts infallibly into `NonNull<T>`.
        Box::into_unique(b).into()
    }
    #[unstable(feature = "ptr_internals", issue = "0", reason = "use into_raw_non_null instead")]
    #[inline]
    #[doc(hidden)]
    pub fn into_unique(mut b: Box<T>) -> Unique<T> {
        // Box is kind-of a library type, but recognized as a "unique pointer" by
        // Stacked Borrows. This function here corresponds to "reborrowing to
        // a raw pointer", but there is no actual reborrow here -- so
        // without some care, the pointer we are returning here still carries
        // the `Uniq` tag. We round-trip through a mutable reference to avoid that.
        let unique = unsafe { b.0.as_mut() as *mut T };
        // `forget` prevents `b`'s destructor from freeing the allocation we now hand out.
        mem::forget(b);
        unsafe { Unique::new_unchecked(unique) }
    }
    /// Consumes and leaks the `Box`, returning a mutable reference,
    /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
    /// `'a`. If the type has only static references, or none at all, then this
    /// may be chosen to be `'static`.
    ///
    /// This function is mainly useful for data that lives for the remainder of
    /// the program's life. Dropping the returned reference will cause a memory
    /// leak. If this is not acceptable, the reference should first be wrapped
    /// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
    /// then be dropped which will properly destroy `T` and release the
    /// allocated memory.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Box::leak(b)` instead of `b.leak()`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
    ///
    /// # Examples
    ///
    /// Simple usage:
    ///
    /// ```
    /// fn main() {
    ///     let x = Box::new(41);
    ///     let static_ref: &'static mut usize = Box::leak(x);
    ///     *static_ref += 1;
    ///     assert_eq!(*static_ref, 42);
    /// }
    /// ```
    ///
    /// Unsized data:
    ///
    /// ```
    /// fn main() {
    ///     let x = vec![1, 2, 3].into_boxed_slice();
    ///     let static_ref = Box::leak(x);
    ///     static_ref[0] = 4;
    ///     assert_eq!(*static_ref, [4, 2, 3]);
    /// }
    /// ```
    #[stable(feature = "box_leak", since = "1.26.0")]
    #[inline]
    pub fn leak<'a>(b: Box<T>) -> &'a mut T
    where
        T: 'a // Technically not needed, but kept to be explicit.
    {
        // `into_raw` gives up ownership, so the allocation is never freed:
        // the resulting reference may live for any `'a` the caller picks.
        unsafe { &mut *Box::into_raw(b) }
    }
    /// Converts a `Box<T>` into a `Pin<Box<T>>`
    ///
    /// This conversion does not allocate on the heap and happens in place.
    ///
    /// This is also available via [`From`].
    #[unstable(feature = "box_into_pin", issue = "0")]
    pub fn into_pin(boxed: Box<T>) -> Pin<Box<T>> {
        // It's not possible to move or replace the insides of a `Pin<Box<T>>`
        // when `T: !Unpin`, so it's safe to pin it directly without any
        // additional requirements.
        unsafe { Pin::new_unchecked(boxed) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// `#[may_dangle]` asserts the drop impl does not access borrowed data in `T`,
// so `Box<&'a U>` may outlive `'a` as long as it is only dropped.
unsafe impl<#[may_dangle] T: ?Sized> Drop for Box<T> {
    fn drop(&mut self) {
        // FIXME: Do nothing, drop is currently performed by compiler.
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
    /// Creates a `Box<T>`, with the `Default` value for T.
    fn default() -> Box<T> {
        // Heap-allocate the pointee's default value with the built-in `box` expression.
        box Default::default()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Box<[T]> {
    /// Creates an empty boxed slice.
    fn default() -> Box<[T]> {
        // A boxed zero-length array unsizes to `Box<[T]>` at the return site.
        let empty: Box<[T; 0]> = Box::new([]);
        empty
    }
}
#[stable(feature = "default_box_extra", since = "1.17.0")]
impl Default for Box<str> {
    fn default() -> Box<str> {
        // SAFETY: the default `Box<[u8]>` is empty, and the empty byte
        // sequence is trivially valid UTF-8.
        unsafe { from_boxed_utf8_unchecked(Default::default()) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for Box<T> {
    /// Returns a new box with a `clone()` of this box's contents.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Box::new(5);
    /// let y = x.clone();
    /// ```
    #[rustfmt::skip]
    #[inline]
    fn clone(&self) -> Box<T> {
        // Clone the pointee into a fresh heap allocation.
        box { (**self).clone() }
    }
    /// Copies `source`'s contents into `self` without creating a new allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Box::new(5);
    /// let mut y = Box::new(10);
    ///
    /// y.clone_from(&x);
    ///
    /// assert_eq!(*y, 5);
    /// ```
    #[inline]
    fn clone_from(&mut self, source: &Box<T>) {
        // Delegate to the pointee's `clone_from` so it can reuse its own storage.
        (**self).clone_from(&(**source));
    }
}
#[stable(feature = "box_slice_clone", since = "1.3.0")]
impl Clone for Box<str> {
    fn clone(&self) -> Self {
        let len = self.len();
        let buf = RawVec::with_capacity(len);
        // SAFETY: `buf` holds exactly `len` bytes, the source and the fresh
        // allocation cannot overlap, and the copied bytes are valid UTF-8
        // because they come from an existing `str`.
        unsafe {
            ptr::copy_nonoverlapping(self.as_ptr(), buf.ptr(), len);
            from_boxed_utf8_unchecked(buf.into_box())
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Comparison of boxes compares the pointed-to values, never the addresses.
impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
    #[inline]
    fn eq(&self, other: &Box<T>) -> bool {
        **self == **other
    }
    #[inline]
    fn ne(&self, other: &Box<T>) -> bool {
        **self != **other
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Ordering of boxes is the ordering of the pointed-to values.
impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> {
    #[inline]
    fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }
    #[inline]
    fn lt(&self, other: &Box<T>) -> bool {
        **self < **other
    }
    #[inline]
    fn le(&self, other: &Box<T>) -> bool {
        **self <= **other
    }
    #[inline]
    fn ge(&self, other: &Box<T>) -> bool {
        **self >= **other
    }
    #[inline]
    fn gt(&self, other: &Box<T>) -> bool {
        **self > **other
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Total ordering delegates to the pointee.
impl<T: ?Sized + Ord> Ord for Box<T> {
    #[inline]
    fn cmp(&self, other: &Box<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// `Box<T>` is `Eq` exactly when the pointee is: equality is delegated.
impl<T: ?Sized + Eq> Eq for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
// Hashing a box hashes the pointee, so `Box<T>` hashes identically to `T`.
impl<T: ?Sized + Hash> Hash for Box<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state);
    }
}
#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
// A boxed hasher forwards every operation to the pointee, which makes
// `Box<dyn Hasher>` usable anywhere a concrete `Hasher` is expected.
impl<T: ?Sized + Hasher> Hasher for Box<T> {
    fn finish(&self) -> u64 {
        (**self).finish()
    }
    fn write(&mut self, bytes: &[u8]) {
        (**self).write(bytes)
    }
    fn write_u8(&mut self, i: u8) {
        (**self).write_u8(i)
    }
    fn write_u16(&mut self, i: u16) {
        (**self).write_u16(i)
    }
    fn write_u32(&mut self, i: u32) {
        (**self).write_u32(i)
    }
    fn write_u64(&mut self, i: u64) {
        (**self).write_u64(i)
    }
    fn write_u128(&mut self, i: u128) {
        (**self).write_u128(i)
    }
    fn write_usize(&mut self, i: usize) {
        (**self).write_usize(i)
    }
    fn write_i8(&mut self, i: i8) {
        (**self).write_i8(i)
    }
    fn write_i16(&mut self, i: i16) {
        (**self).write_i16(i)
    }
    fn write_i32(&mut self, i: i32) {
        (**self).write_i32(i)
    }
    fn write_i64(&mut self, i: i64) {
        (**self).write_i64(i)
    }
    fn write_i128(&mut self, i: i128) {
        (**self).write_i128(i)
    }
    fn write_isize(&mut self, i: isize) {
        (**self).write_isize(i)
    }
}
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Box<T> {
    /// Converts a generic type `T` into a `Box<T>`
    ///
    /// The conversion allocates on the heap and moves `t`
    /// from the stack into it.
    ///
    /// # Examples
    /// ```rust
    /// let x = 5;
    /// let boxed = Box::new(5);
    ///
    /// assert_eq!(Box::from(x), boxed);
    /// ```
    fn from(t: T) -> Self {
        // Identical to `Box::new`; provided so generic code can use `Into<Box<T>>`.
        Box::new(t)
    }
}
#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized> From<Box<T>> for Pin<Box<T>> {
    /// Converts a `Box<T>` into a `Pin<Box<T>>`
    ///
    /// This conversion does not allocate on the heap and happens in place.
    fn from(boxed: Box<T>) -> Self {
        // `Box::into_pin` carries the soundness argument for the unchecked pin.
        Box::into_pin(boxed)
    }
}
#[stable(feature = "box_from_slice", since = "1.17.0")]
impl<T: Copy> From<&[T]> for Box<[T]> {
    /// Converts a `&[T]` into a `Box<[T]>`
    ///
    /// This conversion allocates on the heap
    /// and performs a copy of `slice`.
    ///
    /// # Examples
    /// ```rust
    /// // create a &[u8] which will be used to create a Box<[u8]>
    /// let slice: &[u8] = &[104, 101, 108, 108, 111];
    /// let boxed_slice: Box<[u8]> = Box::from(slice);
    ///
    /// println!("{:?}", boxed_slice);
    /// ```
    fn from(slice: &[T]) -> Box<[T]> {
        // Allocate a boxed slice of the right length, then bitwise-copy the
        // contents in; `T: Copy` means no clones or drops are involved.
        let mut boxed = unsafe { RawVec::with_capacity(slice.len()).into_box() };
        boxed.copy_from_slice(slice);
        boxed
    }
}
#[stable(feature = "box_from_slice", since = "1.17.0")]
impl From<&str> for Box<str> {
    /// Converts a `&str` into a `Box<str>`
    ///
    /// This conversion allocates on the heap
    /// and performs a copy of `s`.
    ///
    /// # Examples
    /// ```rust
    /// let boxed: Box<str> = Box::from("hello");
    /// println!("{}", boxed);
    /// ```
    #[inline]
    fn from(s: &str) -> Box<str> {
        // SAFETY: the boxed bytes come straight from a `str`, so they are valid UTF-8.
        unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) }
    }
}
#[stable(feature = "boxed_str_conv", since = "1.19.0")]
impl From<Box<str>> for Box<[u8]> {
    /// Converts a `Box<str>` into a `Box<[u8]>`
    ///
    /// This conversion does not allocate on the heap and happens in place.
    ///
    /// # Examples
    /// ```rust
    /// // create a Box<str> which will be used to create a Box<[u8]>
    /// let boxed: Box<str> = Box::from("hello");
    /// let boxed_str: Box<[u8]> = Box::from(boxed);
    ///
    /// // create a &[u8] which will be used to create a Box<[u8]>
    /// let slice: &[u8] = &[104, 101, 108, 108, 111];
    /// let boxed_slice = Box::from(slice);
    ///
    /// assert_eq!(boxed_slice, boxed_str);
    /// ```
    #[inline]
    fn from(s: Box<str>) -> Self {
        // `str` and `[u8]` share layout, so re-interpreting the raw pointer is in-place.
        unsafe { Box::from_raw(Box::into_raw(s) as *mut [u8]) }
    }
}
impl Box<dyn Any> {
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    /// Attempt to downcast the box to a concrete type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::any::Any;
    ///
    /// fn print_if_string(value: Box<dyn Any>) {
    ///     if let Ok(string) = value.downcast::<String>() {
    ///         println!("String ({}): {}", string.len(), string);
    ///     }
    /// }
    ///
    /// fn main() {
    ///     let my_string = "Hello World".to_string();
    ///     print_if_string(Box::new(my_string));
    ///     print_if_string(Box::new(0i8));
    /// }
    /// ```
    pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any>> {
        if self.is::<T>() {
            // SAFETY: `is::<T>()` just confirmed the erased type is `T`,
            // so casting the fat pointer to `*mut T` is valid.
            unsafe {
                let raw: *mut dyn Any = Box::into_raw(self);
                Ok(Box::from_raw(raw as *mut T))
            }
        } else {
            // Hand the box back unchanged so the caller keeps ownership.
            Err(self)
        }
    }
}
impl Box<dyn Any + Send> {
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    /// Attempt to downcast the box to a concrete type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::any::Any;
    ///
    /// fn print_if_string(value: Box<dyn Any + Send>) {
    ///     if let Ok(string) = value.downcast::<String>() {
    ///         println!("String ({}): {}", string.len(), string);
    ///     }
    /// }
    ///
    /// fn main() {
    ///     let my_string = "Hello World".to_string();
    ///     print_if_string(Box::new(my_string));
    ///     print_if_string(Box::new(0i8));
    /// }
    /// ```
    pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any + Send>> {
        // Delegate to the plain `dyn Any` downcast; on failure the `Send`
        // bound is restored by casting the raw pointer back.
        <Box<dyn Any>>::downcast(self).map_err(|s| unsafe {
            // reapply the Send marker
            Box::from_raw(Box::into_raw(s) as *mut (dyn Any + Send))
        })
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Display formatting shows the pointee, not the pointer.
impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Debug formatting shows the pointee, not the pointer.
impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// `{:p}` formatting prints the heap address the box owns.
impl<T: ?Sized> fmt::Pointer for Box<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // It's not possible to extract the inner Uniq directly from the Box,
        // instead we cast it to a *const which aliases the Unique
        let ptr: *const T = &**self;
        fmt::Pointer::fmt(&ptr, f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Box<T> {
    type Target = T;
    fn deref(&self) -> &T {
        // `*` on a `Box` is built into the compiler (it is the `owned_box`
        // lang item), so this does not recurse into `Deref::deref`.
        &**self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> DerefMut for Box<T> {
    fn deref_mut(&mut self) -> &mut T {
        // Built-in deref on the `owned_box` lang item; no recursion (see `Deref`).
        &mut **self
    }
}
#[unstable(feature = "receiver_trait", issue = "0")]
// Marks `Box<Self>` as a valid method-receiver type (`self: Box<Self>`).
impl<T: ?Sized> Receiver for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
// A boxed iterator iterates by forwarding to the pointee, so `Box<dyn Iterator>`
// is itself an iterator.
impl<I: Iterator + ?Sized> Iterator for Box<I> {
    type Item = I::Item;
    fn next(&mut self) -> Option<I::Item> {
        (**self).next()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        (**self).size_hint()
    }
    fn nth(&mut self, n: usize) -> Option<I::Item> {
        (**self).nth(n)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Back-to-front iteration forwards to the pointee as well.
impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> {
    fn next_back(&mut self) -> Option<I::Item> {
        (**self).next_back()
    }
    fn nth_back(&mut self, n: usize) -> Option<I::Item> {
        (**self).nth_back(n)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Length queries forward to the pointee.
impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {
    fn len(&self) -> usize {
        (**self).len()
    }
    fn is_empty(&self) -> bool {
        (**self).is_empty()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
// Fusedness is a property of the pointee, which boxing preserves.
impl<I: FusedIterator + ?Sized> FusedIterator for Box<I> {}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<A, F: FnOnce<A> + ?Sized> FnOnce<A> for Box<F> {
    type Output = <F as FnOnce<A>>::Output;
    extern "rust-call" fn call_once(self, args: A) -> Self::Output {
        // `*self` moves the closure out of the box before the consuming call.
        <F as FnOnce<A>>::call_once(*self, args)
    }
}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<A, F: FnMut<A> + ?Sized> FnMut<A> for Box<F> {
    extern "rust-call" fn call_mut(&mut self, args: A) -> Self::Output {
        // `&mut Box<F>` coerces to `&mut F` for the call; the closure stays boxed.
        <F as FnMut<A>>::call_mut(self, args)
    }
}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<A, F: Fn<A> + ?Sized> Fn<A> for Box<F> {
    extern "rust-call" fn call(&self, args: A) -> Self::Output {
        // `&Box<F>` coerces to `&F` for the call; the closure stays boxed.
        <F as Fn<A>>::call(self, args)
    }
}
/// `FnBox` is a version of the `FnOnce` intended for use with boxed
/// closure objects. The idea is that where one would normally store a
/// `Box<dyn FnOnce()>` in a data structure, you should use
/// `Box<dyn FnBox()>`. The two traits behave essentially the same, except
/// that a `FnBox` closure can only be called if it is boxed. (Note
/// that `FnBox` may be deprecated in the future if `Box<dyn FnOnce()>`
/// closures become directly usable.)
///
/// # Examples
///
/// Here is a snippet of code which creates a hashmap full of boxed
/// once closures and then removes them one by one, calling each
/// closure as it is removed. Note that the type of the closures
/// stored in the map is `Box<dyn FnBox() -> i32>` and not `Box<dyn FnOnce()
/// -> i32>`.
///
/// ```
/// #![feature(fnbox)]
///
/// use std::boxed::FnBox;
/// use std::collections::HashMap;
///
/// fn make_map() -> HashMap<i32, Box<dyn FnBox() -> i32>> {
///     let mut map: HashMap<i32, Box<dyn FnBox() -> i32>> = HashMap::new();
///     map.insert(1, Box::new(|| 22));
///     map.insert(2, Box::new(|| 44));
///     map
/// }
///
/// fn main() {
///     let mut map = make_map();
///     for i in &[1, 2] {
///         let f = map.remove(&i).unwrap();
///         assert_eq!(f(), i * 22);
///     }
/// }
/// ```
// `rustc_paren_sugar` lets the trait be written as `FnBox() -> i32`
// with parenthesized arguments, like the built-in `Fn*` traits.
#[rustc_paren_sugar]
#[unstable(feature = "fnbox",
           reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
pub trait FnBox<A>: FnOnce<A> {
    /// Performs the call operation.
    fn call_box(self: Box<Self>, args: A) -> Self::Output;
}
#[unstable(feature = "fnbox",
           reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
// Blanket impl: every `FnOnce` closure is callable through a box.
impl<A, F> FnBox<A> for F
    where F: FnOnce<A>
{
    fn call_box(self: Box<F>, args: A) -> F::Output {
        // `F` here is sized, so the closure can be moved out of the box and called.
        self.call_once(args)
    }
}
#[unstable(feature = "coerce_unsized", issue = "27732")]
// Enables unsizing coercions such as `Box<[T; N]> -> Box<[T]>` and
// `Box<T> -> Box<dyn Trait>`.
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "0")]
// Allows `self: Box<Self>` methods to be dispatched on trait objects.
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
#[stable(feature = "boxed_slice_from_iter", since = "1.32.0")]
impl<A> FromIterator<A> for Box<[A]> {
    /// Collects an iterator into a boxed slice.
    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
        // Gather into a `Vec` first, then shed any excess capacity.
        let items: Vec<A> = iter.into_iter().collect();
        items.into_boxed_slice()
    }
}
#[stable(feature = "box_slice_clone", since = "1.3.0")]
impl<T: Clone> Clone for Box<[T]> {
    fn clone(&self) -> Self {
        let mut new = BoxBuilder {
            data: RawVec::with_capacity(self.len()),
            len: 0,
        };
        let mut target = new.data.ptr();
        // Clone element by element, tracking in `new.len` how many slots are
        // initialized so a panicking `clone()` only drops the finished prefix.
        for item in self.iter() {
            unsafe {
                ptr::write(target, item.clone());
                target = target.offset(1);
            };
            new.len += 1;
        }
        return unsafe { new.into_box() };
        // Helper type for responding to panics correctly.
        struct BoxBuilder<T> {
            data: RawVec<T>,
            len: usize,
        }
        impl<T> BoxBuilder<T> {
            unsafe fn into_box(self) -> Box<[T]> {
                // `forget` disarms the panic-cleanup Drop below once all
                // elements have been successfully cloned.
                let raw = ptr::read(&self.data);
                mem::forget(self);
                raw.into_box()
            }
        }
        impl<T> Drop for BoxBuilder<T> {
            // Runs only if a `clone()` above panicked: drop the `len`
            // initialized elements; `RawVec`'s own drop then frees the buffer.
            fn drop(&mut self) {
                let mut data = self.data.ptr();
                let max = unsafe { data.add(self.len) };
                while data != max {
                    unsafe {
                        ptr::read(data);
                        data = data.offset(1);
                    }
                }
            }
        }
    }
}
#[stable(feature = "box_borrow", since = "1.1.0")]
// A box can always be borrowed as its pointee.
impl<T: ?Sized> borrow::Borrow<T> for Box<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}
#[stable(feature = "box_borrow", since = "1.1.0")]
// A box can always be mutably borrowed as its pointee.
impl<T: ?Sized> borrow::BorrowMut<T> for Box<T> {
    fn borrow_mut(&mut self) -> &mut T {
        &mut **self
    }
}
#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
// Cheap reference conversion to the pointee.
impl<T: ?Sized> AsRef<T> for Box<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}
#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
// Cheap mutable reference conversion to the pointee.
impl<T: ?Sized> AsMut<T> for Box<T> {
    fn as_mut(&mut self) -> &mut T {
        &mut **self
    }
}
/* Nota bene
*
* We could have chosen not to add this impl, and instead have written a
* function of Pin<Box<T>> to Pin<T>. Such a function would not be sound,
* because Box<T> implements Unpin even when T does not, as a result of
* this impl.
*
* We chose this API instead of the alternative for a few reasons:
* - Logically, it is helpful to understand pinning in regard to the
* memory region being pointed to. For this reason none of the
* standard library pointer types support projecting through a pin
* (Box<T> is the only pointer type in std for which this would be
* safe.)
* - It is in practice very useful to have Box<T> be unconditionally
* Unpin because of trait objects, for which the structural auto
* trait functionality does not apply (e.g., Box<dyn Foo> would
* otherwise not be Unpin).
*
* Another type with the same semantics as Box but only a conditional
* implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and
* could have a method to project a Pin<T> from it.
*/
#[stable(feature = "pin", since = "1.33.0")]
// `Box<T>` is unconditionally `Unpin`: pinning a box pins the heap contents,
// while the box handle itself is freely movable (see the "Nota bene" above).
impl<T: ?Sized> Unpin for Box<T> { }
#[unstable(feature = "generator_trait", issue = "43122")]
impl<G: ?Sized + Generator + Unpin> Generator for Box<G> {
    type Yield = G::Yield;
    type Return = G::Return;
    fn resume(mut self: Pin<&mut Self>) -> GeneratorState<Self::Yield, Self::Return> {
        // `G: Unpin`, so a plain `Pin::new` around `&mut G` is allowed.
        G::resume(Pin::new(&mut *self))
    }
}
#[unstable(feature = "generator_trait", issue = "43122")]
impl<G: ?Sized + Generator> Generator for Pin<Box<G>> {
    type Yield = G::Yield;
    type Return = G::Return;
    fn resume(mut self: Pin<&mut Self>) -> GeneratorState<Self::Yield, Self::Return> {
        // Re-project the pinned box into `Pin<&mut G>`; no `Unpin` bound needed
        // because the generator was already pinned on the heap.
        G::resume((*self).as_mut())
    }
}
#[stable(feature = "futures_api", since = "1.36.0")]
impl<F: ?Sized + Future + Unpin> Future for Box<F> {
    type Output = F::Output;
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // `F: Unpin`, so the inner future may be polled via a fresh `Pin::new`.
        F::poll(Pin::new(&mut *self), cx)
    }
}
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A pointer type for heap allocation.
//!
//! `Box<T>`, casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
//! drop their contents when they go out of scope.
//!
//! # Examples
//!
//! Move a value from the stack to the heap by creating a [`Box`]:
//!
//! ```
//! let val: u8 = 5;
//! let boxed: Box<u8> = Box::new(val);
//! ```
//!
//! Move a value from a [`Box`] back to the stack by [dereferencing]:
//!
//! ```
//! let boxed: Box<u8> = Box::new(5);
//! let val: u8 = *boxed;
//! ```
//!
//! Creating a recursive data structure:
//!
//! ```
//! #[derive(Debug)]
//! enum List<T> {
//! Cons(T, Box<List<T>>),
//! Nil,
//! }
//!
//! fn main() {
//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
//! println!("{:?}", list);
//! }
//! ```
//!
//! This will print `Cons(1, Cons(2, Nil))`.
//!
//! Recursive structures must be boxed, because if the definition of `Cons`
//! looked like this:
//!
//! ```compile_fail,E0072
//! # enum List<T> {
//! Cons(T, List<T>),
//! # }
//! ```
//!
//! It wouldn't work. This is because the size of a `List` depends on how many
//! elements are in the list, and so we don't know how much memory to allocate
//! for a `Cons`. By introducing a `Box`, which has a defined size, we know how
//! big `Cons` needs to be.
//!
//! [dereferencing]: ../../std/ops/trait.Deref.html
//! [`Box`]: struct.Box.html
#![stable(feature = "rust1", since = "1.0.0")]
use core::any::Any;
use core::borrow;
use core::cmp::Ordering;
use core::convert::From;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
use core::iter::FusedIterator;
use core::marker::{Unpin, Unsize};
use core::mem;
use core::pin::Pin;
use core::ops::{CoerceUnsized, Deref, DerefMut, Generator, GeneratorState};
use core::ptr::{self, NonNull, Unique};
use core::task::{LocalWaker, Poll};
use raw_vec::RawVec;
use str::from_boxed_utf8_unchecked;
/// A pointer type for heap allocation.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
// The `owned_box` lang item lets the compiler treat `Box` specially
// (built-in deref, drop glue, and the `box` expression).
#[lang = "owned_box"]
#[fundamental]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Box<T: ?Sized>(Unique<T>);
impl<T> Box<T> {
    /// Allocates memory on the heap and then places `x` into it.
    ///
    /// This doesn't actually allocate if `T` is zero-sized.
    ///
    /// # Examples
    ///
    /// ```
    /// let five = Box::new(5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline(always)]
    pub fn new(x: T) -> Box<T> {
        // `box` is the compiler's built-in heap-allocation expression.
        box x
    }

    /// Constructs a new `Pin<Box<T>>`, pinning `x` on the heap.
    #[unstable(feature = "pin", issue = "49150")]
    #[inline(always)]
    pub fn pinned(x: T) -> Pin<Box<T>> {
        // Allocate, then pin via the `From<Box<T>> for Pin<Box<T>>` impl below.
        (box x).into()
    }
}
impl<T: ?Sized> Box<T> {
    /// Constructs a box from a raw pointer.
    ///
    /// After calling this function, the raw pointer is owned by the
    /// resulting `Box`. Specifically, the `Box` destructor will call
    /// the destructor of `T` and free the allocated memory. Since the
    /// way `Box` allocates and releases memory is unspecified, the
    /// only valid pointer to pass to this function is the one taken
    /// from another `Box` via the [`Box::into_raw`] function.
    ///
    /// This function is unsafe because improper use may lead to
    /// memory problems. For example, a double-free may occur if the
    /// function is called twice on the same raw pointer.
    ///
    /// [`Box::into_raw`]: struct.Box.html#method.into_raw
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Box::new(5);
    /// let ptr = Box::into_raw(x);
    /// let x = unsafe { Box::from_raw(ptr) };
    /// ```
    #[stable(feature = "box_raw", since = "1.4.0")]
    #[inline]
    pub unsafe fn from_raw(raw: *mut T) -> Self {
        // Caller promises `raw` is non-null and came from `Box::into_raw`.
        Box(Unique::new_unchecked(raw))
    }
    /// Consumes the `Box`, returning a wrapped raw pointer.
    ///
    /// The pointer will be properly aligned and non-null.
    ///
    /// After calling this function, the caller is responsible for the
    /// memory previously managed by the `Box`. In particular, the
    /// caller should properly destroy `T` and release the memory. The
    /// proper way to do so is to convert the raw pointer back into a
    /// `Box` with the [`Box::from_raw`] function.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Box::new(5);
    /// let ptr = Box::into_raw(x);
    /// ```
    #[stable(feature = "box_raw", since = "1.4.0")]
    #[inline]
    pub fn into_raw(b: Box<T>) -> *mut T {
        // Delegates to the `NonNull` variant; ownership is relinquished there.
        Box::into_raw_non_null(b).as_ptr()
    }
    /// Consumes the `Box`, returning the wrapped pointer as `NonNull<T>`.
    ///
    /// After calling this function, the caller is responsible for the
    /// memory previously managed by the `Box`. In particular, the
    /// caller should properly destroy `T` and release the memory. The
    /// proper way to do so is to convert the `NonNull<T>` pointer
    /// into a raw pointer and back into a `Box` with the [`Box::from_raw`]
    /// function.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Box::into_raw_non_null(b)`
    /// instead of `b.into_raw_non_null()`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(box_into_raw_non_null)]
    ///
    /// fn main() {
    ///     let x = Box::new(5);
    ///     let ptr = Box::into_raw_non_null(x);
    /// }
    /// ```
    #[unstable(feature = "box_into_raw_non_null", issue = "47336")]
    #[inline]
    pub fn into_raw_non_null(b: Box<T>) -> NonNull<T> {
        // `Unique<T>` converts infallibly into `NonNull<T>`.
        Box::into_unique(b).into()
    }
    #[unstable(feature = "ptr_internals", issue = "0", reason = "use into_raw_non_null instead")]
    #[inline]
    #[doc(hidden)]
    pub fn into_unique(b: Box<T>) -> Unique<T> {
        let unique = b.0;
        // `forget` prevents `b`'s destructor from freeing the allocation
        // whose ownership we are handing out through `unique`.
        mem::forget(b);
        unique
    }
    /// Consumes and leaks the `Box`, returning a mutable reference,
    /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
    /// `'a`. If the type has only static references, or none at all, then this
    /// may be chosen to be `'static`.
    ///
    /// This function is mainly useful for data that lives for the remainder of
    /// the program's life. Dropping the returned reference will cause a memory
    /// leak. If this is not acceptable, the reference should first be wrapped
    /// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
    /// then be dropped which will properly destroy `T` and release the
    /// allocated memory.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Box::leak(b)` instead of `b.leak()`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
    ///
    /// # Examples
    ///
    /// Simple usage:
    ///
    /// ```
    /// fn main() {
    ///     let x = Box::new(41);
    ///     let static_ref: &'static mut usize = Box::leak(x);
    ///     *static_ref += 1;
    ///     assert_eq!(*static_ref, 42);
    /// }
    /// ```
    ///
    /// Unsized data:
    ///
    /// ```
    /// fn main() {
    ///     let x = vec![1, 2, 3].into_boxed_slice();
    ///     let static_ref = Box::leak(x);
    ///     static_ref[0] = 4;
    ///     assert_eq!(*static_ref, [4, 2, 3]);
    /// }
    /// ```
    #[stable(feature = "box_leak", since = "1.26.0")]
    #[inline]
    pub fn leak<'a>(b: Box<T>) -> &'a mut T
    where
        T: 'a // Technically not needed, but kept to be explicit.
    {
        // `into_raw` gives up ownership, so the allocation is never freed:
        // the resulting reference may live for any `'a` the caller picks.
        unsafe { &mut *Box::into_raw(b) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// `#[may_dangle]` asserts the drop impl does not access borrowed data in `T`,
// so `Box<&'a U>` may outlive `'a` as long as it is only dropped.
unsafe impl<#[may_dangle] T: ?Sized> Drop for Box<T> {
    fn drop(&mut self) {
        // FIXME: Do nothing, drop is currently performed by compiler.
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
    /// Creates a `Box<T>`, with the `Default` value for T.
    fn default() -> Box<T> {
        // Heap-allocate the pointee's default value with the built-in `box` expression.
        box Default::default()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Box<[T]> {
    /// Creates an empty boxed slice.
    fn default() -> Box<[T]> {
        // A boxed zero-length array unsizes to `Box<[T]>` at the return site.
        let empty: Box<[T; 0]> = Box::new([]);
        empty
    }
}
#[stable(feature = "default_box_extra", since = "1.17.0")]
impl Default for Box<str> {
    fn default() -> Box<str> {
        // SAFETY: the default `Box<[u8]>` is empty, and the empty byte
        // sequence is trivially valid UTF-8.
        unsafe { from_boxed_utf8_unchecked(Default::default()) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for Box<T> {
    /// Returns a new box with a `clone()` of this box's contents.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Box::new(5);
    /// let y = x.clone();
    /// ```
    // NOTE: old pre-tool-attribute spelling of `#[rustfmt::skip]`, kept as-is
    // for this compiler vintage.
    #[rustfmt_skip]
    #[inline]
    fn clone(&self) -> Box<T> {
        // Clone the pointee into a fresh heap allocation.
        box { (**self).clone() }
    }
    /// Copies `source`'s contents into `self` without creating a new allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Box::new(5);
    /// let mut y = Box::new(10);
    ///
    /// y.clone_from(&x);
    ///
    /// assert_eq!(*y, 5);
    /// ```
    #[inline]
    fn clone_from(&mut self, source: &Box<T>) {
        // Delegate to the pointee's `clone_from` so it can reuse its own storage.
        (**self).clone_from(&(**source));
    }
}
#[stable(feature = "box_slice_clone", since = "1.3.0")]
impl Clone for Box<str> {
    fn clone(&self) -> Self {
        let len = self.len();
        let buf = RawVec::with_capacity(len);
        // SAFETY: `buf` holds exactly `len` bytes, the source and the fresh
        // allocation cannot overlap, and the copied bytes are valid UTF-8
        // because they come from an existing `str`.
        unsafe {
            ptr::copy_nonoverlapping(self.as_ptr(), buf.ptr(), len);
            from_boxed_utf8_unchecked(buf.into_box())
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Comparison of boxes compares the pointed-to values, never the addresses.
impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
    #[inline]
    fn eq(&self, other: &Box<T>) -> bool {
        **self == **other
    }
    #[inline]
    fn ne(&self, other: &Box<T>) -> bool {
        **self != **other
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Ordering of boxes is the ordering of the pointed-to values.
impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> {
    #[inline]
    fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }
    #[inline]
    fn lt(&self, other: &Box<T>) -> bool {
        **self < **other
    }
    #[inline]
    fn le(&self, other: &Box<T>) -> bool {
        **self <= **other
    }
    #[inline]
    fn ge(&self, other: &Box<T>) -> bool {
        **self >= **other
    }
    #[inline]
    fn gt(&self, other: &Box<T>) -> bool {
        **self > **other
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Total ordering delegates to the pointee.
impl<T: ?Sized + Ord> Ord for Box<T> {
    #[inline]
    fn cmp(&self, other: &Box<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// `Box<T>` is `Eq` exactly when the pointee is: equality is delegated.
impl<T: ?Sized + Eq> Eq for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
// Hashing a box hashes the pointee, so `Box<T>` hashes identically to `T`.
impl<T: ?Sized + Hash> Hash for Box<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state);
    }
}
#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
// A boxed hasher forwards every operation to the pointee, which makes
// `Box<dyn Hasher>` usable anywhere a concrete `Hasher` is expected.
impl<T: ?Sized + Hasher> Hasher for Box<T> {
    fn finish(&self) -> u64 {
        (**self).finish()
    }
    fn write(&mut self, bytes: &[u8]) {
        (**self).write(bytes)
    }
    fn write_u8(&mut self, i: u8) {
        (**self).write_u8(i)
    }
    fn write_u16(&mut self, i: u16) {
        (**self).write_u16(i)
    }
    fn write_u32(&mut self, i: u32) {
        (**self).write_u32(i)
    }
    fn write_u64(&mut self, i: u64) {
        (**self).write_u64(i)
    }
    fn write_u128(&mut self, i: u128) {
        (**self).write_u128(i)
    }
    fn write_usize(&mut self, i: usize) {
        (**self).write_usize(i)
    }
    fn write_i8(&mut self, i: i8) {
        (**self).write_i8(i)
    }
    fn write_i16(&mut self, i: i16) {
        (**self).write_i16(i)
    }
    fn write_i32(&mut self, i: i32) {
        (**self).write_i32(i)
    }
    fn write_i64(&mut self, i: i64) {
        (**self).write_i64(i)
    }
    fn write_i128(&mut self, i: i128) {
        (**self).write_i128(i)
    }
    fn write_isize(&mut self, i: isize) {
        (**self).write_isize(i)
    }
}
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Box<T> {
/// Converts a generic type `T` into a `Box<T>`
///
/// The conversion allocates on the heap and moves `t`
/// from the stack into it.
///
/// # Examples
/// ```rust
/// let x = 5;
/// let boxed = Box::new(5);
///
/// assert_eq!(Box::from(x), boxed);
/// ```
fn from(t: T) -> Self {
Box::new(t)
}
}
// Pinning a box is free: the heap allocation already gives the value a
// stable address, so the box is wrapped without moving its contents.
#[unstable(feature = "pin", issue = "49150")]
impl<T> From<Box<T>> for Pin<Box<T>> {
/// Converts a `Box<T>` into a `Pin<Box<T>>`
///
/// This conversion does not allocate on the heap and happens in place.
fn from(boxed: Box<T>) -> Self {
// It's not possible to move or replace the insides of a `Pin<Box<T>>`
// when `T: !Unpin`, so it's safe to pin it directly without any
// additional requirements.
unsafe { Pin::new_unchecked(boxed) }
}
}
#[stable(feature = "box_from_slice", since = "1.17.0")]
impl<'a, T: Copy> From<&'a [T]> for Box<[T]> {
    /// Converts a `&[T]` into a `Box<[T]>`
    ///
    /// This conversion allocates on the heap
    /// and performs a copy of `slice`.
    ///
    /// # Examples
    /// ```rust
    /// // create a &[u8] which will be used to create a Box<[u8]>
    /// let slice: &[u8] = &[104, 101, 108, 108, 111];
    /// // The annotation is required: both `From<&[T]> for Box<[T]>` and the
    /// // blanket `From<T> for Box<T>` (with `T = &[u8]`) apply here, so the
    /// // target type must be named for the call to compile.
    /// let boxed_slice: Box<[u8]> = Box::from(slice);
    ///
    /// println!("{:?}", boxed_slice);
    /// ```
    fn from(slice: &'a [T]) -> Box<[T]> {
        // Allocate an uninitialized boxed slice of exactly `slice.len()`
        // elements, then memcpy the source into it. Sound because `T: Copy`
        // (no drop obligations, bitwise copy is a valid value).
        let mut boxed = unsafe { RawVec::with_capacity(slice.len()).into_box() };
        boxed.copy_from_slice(slice);
        boxed
    }
}
#[stable(feature = "box_from_slice", since = "1.17.0")]
impl<'a> From<&'a str> for Box<str> {
/// Converts a `&str` into a `Box<str>`
///
/// This conversion allocates on the heap
/// and performs a copy of `s`.
///
/// # Examples
/// ```rust
/// let boxed: Box<str> = Box::from("hello");
/// println!("{}", boxed);
/// ```
#[inline]
// Reuses the `&[T] -> Box<[T]>` copy above; the bytes came from a valid
// `&str`, so reinterpreting the boxed bytes as UTF-8 is sound.
fn from(s: &'a str) -> Box<str> {
unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) }
}
}
#[stable(feature = "boxed_str_conv", since = "1.19.0")]
impl From<Box<str>> for Box<[u8]> {
/// Converts a `Box<str>>` into a `Box<[u8]>`
///
/// This conversion does not allocate on the heap and happens in place.
///
/// # Examples
/// ```rust
/// // create a Box<str> which will be used to create a Box<[u8]>
/// let boxed: Box<str> = Box::from("hello");
/// let boxed_str: Box<[u8]> = Box::from(boxed);
///
/// // create a &[u8] which will be used to create a Box<[u8]>
/// let slice: &[u8] = &[104, 101, 108, 108, 111];
/// let boxed_slice = Box::from(slice);
///
/// assert_eq!(boxed_slice, boxed_str);
/// ```
#[inline]
// `str` and `[u8]` have identical layout, so a raw-pointer cast performs
// the conversion with no allocation or copy.
fn from(s: Box<str>) -> Self {
unsafe { Box::from_raw(Box::into_raw(s) as *mut [u8]) }
}
}
impl Box<dyn Any> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
/// Attempt to downcast the box to a concrete type.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(value: Box<Any>) {
/// if let Ok(string) = value.downcast::<String>() {
/// println!("String ({}): {}", string.len(), string);
/// }
/// }
///
/// fn main() {
/// let my_string = "Hello World".to_string();
/// print_if_string(Box::new(my_string));
/// print_if_string(Box::new(0i8));
/// }
/// ```
pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any>> {
if self.is::<T>() {
unsafe {
// SAFETY: `is::<T>()` above confirmed the erased value really is a
// `T`, so casting the fat `*mut dyn Any` to a thin `*mut T` and
// re-boxing it preserves both type and ownership.
let raw: *mut dyn Any = Box::into_raw(self);
Ok(Box::from_raw(raw as *mut T))
}
} else {
// Wrong type: hand the box back unchanged so the caller keeps it.
Err(self)
}
}
}
impl Box<dyn Any + Send> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
/// Attempt to downcast the box to a concrete type.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(value: Box<Any + Send>) {
/// if let Ok(string) = value.downcast::<String>() {
/// println!("String ({}): {}", string.len(), string);
/// }
/// }
///
/// fn main() {
/// let my_string = "Hello World".to_string();
/// print_if_string(Box::new(my_string));
/// print_if_string(Box::new(0i8));
/// }
/// ```
pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any + Send>> {
// Delegate to the plain `dyn Any` downcast (the `+ Send` bound is erased
// by the coercion); on failure, restore the `Send` marker via pointer
// cast — the value never changed, so it is still the same `Send` value.
<Box<dyn Any>>::downcast(self).map_err(|s| unsafe {
// reapply the Send marker
Box::from_raw(Box::into_raw(s) as *mut (dyn Any + Send))
})
}
}
// Formatting for a box shows the contents, not the pointer — `Display` and
// `Debug` both forward to the boxed value.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
// `{:p}` is the one formatter that does show the heap address.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// It's not possible to extract the inner Uniq directly from the Box,
// instead we cast it to a *const which aliases the Unique
let ptr: *const T = &**self;
fmt::Pointer::fmt(&ptr, f)
}
}
// Deref/DerefMut make `Box<T>` transparently usable as a `T`; the `&**self`
// here compiles to the compiler's built-in box deref, not a recursive call.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Box<T> {
type Target = T;
fn deref(&self) -> &T {
&**self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> DerefMut for Box<T> {
fn deref_mut(&mut self) -> &mut T {
&mut **self
}
}
// A boxed iterator is an iterator: forward `next`, plus `size_hint`/`nth` so
// any specialized versions on the inner iterator are preserved.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator + ?Sized> Iterator for Box<I> {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> {
(**self).next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(**self).size_hint()
}
fn nth(&mut self, n: usize) -> Option<I::Item> {
(**self).nth(n)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> {
fn next_back(&mut self) -> Option<I::Item> {
(**self).next_back()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {
fn len(&self) -> usize {
(**self).len()
}
fn is_empty(&self) -> bool {
(**self).is_empty()
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator + ?Sized> FusedIterator for Box<I> {}
/// `FnBox` is a version of the `FnOnce` intended for use with boxed
/// closure objects. The idea is that where one would normally store a
/// `Box<FnOnce()>` in a data structure, you should use
/// `Box<FnBox()>`. The two traits behave essentially the same, except
/// that a `FnBox` closure can only be called if it is boxed. (Note
/// that `FnBox` may be deprecated in the future if `Box<FnOnce()>`
/// closures become directly usable.)
///
/// # Examples
///
/// Here is a snippet of code which creates a hashmap full of boxed
/// once closures and then removes them one by one, calling each
/// closure as it is removed. Note that the type of the closures
/// stored in the map is `Box<FnBox() -> i32>` and not `Box<FnOnce()
/// -> i32>`.
///
/// ```
/// #![feature(fnbox)]
///
/// use std::boxed::FnBox;
/// use std::collections::HashMap;
///
/// fn make_map() -> HashMap<i32, Box<FnBox() -> i32>> {
/// let mut map: HashMap<i32, Box<FnBox() -> i32>> = HashMap::new();
/// map.insert(1, Box::new(|| 22));
/// map.insert(2, Box::new(|| 44));
/// map
/// }
///
/// fn main() {
/// let mut map = make_map();
/// for i in &[1, 2] {
/// let f = map.remove(&i).unwrap();
/// assert_eq!(f(), i * 22);
/// }
/// }
/// ```
#[rustc_paren_sugar]
#[unstable(feature = "fnbox",
reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
pub trait FnBox<A> {
type Output;
// `self: Box<Self>` is the whole point: the receiver is the box itself,
// which makes the trait object-safe where `FnOnce::call_once` is not.
fn call_box(self: Box<Self>, args: A) -> Self::Output;
}
// Blanket impl: any `FnOnce` closure is an `FnBox` once boxed.
#[unstable(feature = "fnbox",
reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
impl<A, F> FnBox<A> for F
where F: FnOnce<A>
{
type Output = F::Output;
fn call_box(self: Box<F>, args: A) -> F::Output {
self.call_once(args)
}
}
// These two impls let a boxed `FnBox` trait object be invoked with ordinary
// call syntax `f(args)`, with and without the `Send` marker.
#[unstable(feature = "fnbox",
reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
impl<'a, A, R> FnOnce<A> for Box<dyn FnBox<A, Output = R> + 'a> {
type Output = R;
extern "rust-call" fn call_once(self, args: A) -> R {
self.call_box(args)
}
}
#[unstable(feature = "fnbox",
reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
impl<'a, A, R> FnOnce<A> for Box<dyn FnBox<A, Output = R> + Send + 'a> {
type Output = R;
extern "rust-call" fn call_once(self, args: A) -> R {
self.call_box(args)
}
}
// Allows `Box<T> -> Box<U>` unsizing coercions (e.g. `Box<String>` to
// `Box<dyn Display>`, `Box<[u8; 4]>` to `Box<[u8]>`).
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
#[stable(feature = "box_slice_clone", since = "1.3.0")]
impl<T: Clone> Clone for Box<[T]> {
fn clone(&self) -> Self {
// Clone element-by-element into freshly allocated storage, tracking how
// many elements are initialized so a panicking `clone()` can be unwound
// safely (see `BoxBuilder::drop` below).
let mut new = BoxBuilder {
data: RawVec::with_capacity(self.len()),
len: 0,
};
let mut target = new.data.ptr();
for item in self.iter() {
unsafe {
ptr::write(target, item.clone());
target = target.offset(1);
};
// Bump the count only after the write, so `drop` never touches an
// uninitialized slot.
new.len += 1;
}
return unsafe { new.into_box() };
// Helper type for responding to panics correctly.
struct BoxBuilder<T> {
data: RawVec<T>,
len: usize,
}
impl<T> BoxBuilder<T> {
// Success path: disarm the panic guard (`mem::forget`) and hand the
// buffer over as a finished boxed slice.
unsafe fn into_box(self) -> Box<[T]> {
let raw = ptr::read(&self.data);
mem::forget(self);
raw.into_box()
}
}
impl<T> Drop for BoxBuilder<T> {
// Panic path: drop exactly the `len` elements that were initialized
// (the `ptr::read` drops each value; `RawVec` then frees the buffer).
fn drop(&mut self) {
let mut data = self.data.ptr();
let max = unsafe { data.add(self.len) };
while data != max {
unsafe {
ptr::read(data);
data = data.offset(1);
}
}
}
}
}
}
// Borrow/AsRef conversions: a `Box<T>` can be viewed as a `&T`/`&mut T`
// anywhere one is expected, which lets e.g. `HashMap<Box<str>, _>` be
// queried with a plain `&str`.
#[stable(feature = "box_borrow", since = "1.1.0")]
impl<T: ?Sized> borrow::Borrow<T> for Box<T> {
fn borrow(&self) -> &T {
&**self
}
}
#[stable(feature = "box_borrow", since = "1.1.0")]
impl<T: ?Sized> borrow::BorrowMut<T> for Box<T> {
fn borrow_mut(&mut self) -> &mut T {
&mut **self
}
}
#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Box<T> {
fn as_ref(&self) -> &T {
&**self
}
}
#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsMut<T> for Box<T> {
fn as_mut(&mut self) -> &mut T {
&mut **self
}
}
/* Nota bene
 *
 * We could have chosen not to add this impl, and instead have written a
 * function of Pin<Box<T>> to Pin<T>. Such a function would not be sound,
 * because Box<T> implements Unpin even when T does not, as a result of
 * this impl.
 *
 * We chose this API instead of the alternative for a few reasons:
 * - Logically, it is helpful to understand pinning in regard to the
 * memory region being pointed to. For this reason none of the
 * standard library pointer types support projecting through a pin
 * (Box<T> is the only pointer type in std for which this would be
 * safe.)
 * - It is in practice very useful to have Box<T> be unconditionally
 * Unpin because of trait objects, for which the structural auto
 * trait functionality does not apply (e.g. Box<dyn Foo> would
 * otherwise not be Unpin).
 *
 * Another type with the same semantics as Box but only a conditional
 * implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and
 * could have a method to project a Pin<T> from it.
 */
#[unstable(feature = "pin", issue = "49150")]
impl<T: ?Sized> Unpin for Box<T> { }
// Generators and futures forward through the box; `Pin::new` in `poll` is
// allowed because `F: Unpin` is required by the bound.
#[unstable(feature = "generator_trait", issue = "43122")]
impl<T> Generator for Box<T>
where T: Generator + ?Sized
{
type Yield = T::Yield;
type Return = T::Return;
unsafe fn resume(&mut self) -> GeneratorState<Self::Yield, Self::Return> {
(**self).resume()
}
}
#[unstable(feature = "futures_api", issue = "50547")]
impl<F: ?Sized + Future + Unpin> Future for Box<F> {
type Output = F::Output;
fn poll(mut self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<Self::Output> {
F::poll(Pin::new(&mut *self), lw)
}
}
Fix compile error in the `From<&[T]>` doc example by adding a type annotation
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A pointer type for heap allocation.
//!
//! `Box<T>`, casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
//! drop their contents when they go out of scope.
//!
//! # Examples
//!
//! Move a value from the stack to the heap by creating a [`Box`]:
//!
//! ```
//! let val: u8 = 5;
//! let boxed: Box<u8> = Box::new(val);
//! ```
//!
//! Move a value from a [`Box`] back to the stack by [dereferencing]:
//!
//! ```
//! let boxed: Box<u8> = Box::new(5);
//! let val: u8 = *boxed;
//! ```
//!
//! Creating a recursive data structure:
//!
//! ```
//! #[derive(Debug)]
//! enum List<T> {
//! Cons(T, Box<List<T>>),
//! Nil,
//! }
//!
//! fn main() {
//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
//! println!("{:?}", list);
//! }
//! ```
//!
//! This will print `Cons(1, Cons(2, Nil))`.
//!
//! Recursive structures must be boxed, because if the definition of `Cons`
//! looked like this:
//!
//! ```compile_fail,E0072
//! # enum List<T> {
//! Cons(T, List<T>),
//! # }
//! ```
//!
//! It wouldn't work. This is because the size of a `List` depends on how many
//! elements are in the list, and so we don't know how much memory to allocate
//! for a `Cons`. By introducing a `Box`, which has a defined size, we know how
//! big `Cons` needs to be.
//!
//! [dereferencing]: ../../std/ops/trait.Deref.html
//! [`Box`]: struct.Box.html
#![stable(feature = "rust1", since = "1.0.0")]
use core::any::Any;
use core::borrow;
use core::cmp::Ordering;
use core::convert::From;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
use core::iter::FusedIterator;
use core::marker::{Unpin, Unsize};
use core::mem;
use core::pin::Pin;
use core::ops::{CoerceUnsized, Deref, DerefMut, Generator, GeneratorState};
use core::ptr::{self, NonNull, Unique};
use core::task::{LocalWaker, Poll};
use raw_vec::RawVec;
use str::from_boxed_utf8_unchecked;
/// A pointer type for heap allocation.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
// `#[lang = "owned_box"]` ties this struct to the compiler's built-in `box`
// expression and deref handling; the single `Unique<T>` field is the owning,
// non-null pointer to the heap value.
#[lang = "owned_box"]
#[fundamental]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Box<T: ?Sized>(Unique<T>);
impl<T> Box<T> {
/// Allocates memory on the heap and then places `x` into it.
///
/// This doesn't actually allocate if `T` is zero-sized.
///
/// # Examples
///
/// ```
/// let five = Box::new(5);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline(always)]
pub fn new(x: T) -> Box<T> {
// `box` is the compiler's primitive allocation expression.
box x
}
// Convenience constructor: allocate and immediately pin. Pinning a box is
// free — the heap address is already stable (see `From<Box<T>> for Pin`).
#[unstable(feature = "pin", issue = "49150")]
#[inline(always)]
pub fn pinned(x: T) -> Pin<Box<T>> {
(box x).into()
}
}
// Raw-pointer round-tripping and leaking. These functions transfer ownership
// of the allocation across the safe/unsafe boundary; each pair
// (`into_raw`/`from_raw`) must be matched exactly once or memory is leaked
// or double-freed.
impl<T: ?Sized> Box<T> {
/// Constructs a box from a raw pointer.
///
/// After calling this function, the raw pointer is owned by the
/// resulting `Box`. Specifically, the `Box` destructor will call
/// the destructor of `T` and free the allocated memory. Since the
/// way `Box` allocates and releases memory is unspecified, the
/// only valid pointer to pass to this function is the one taken
/// from another `Box` via the [`Box::into_raw`] function.
///
/// This function is unsafe because improper use may lead to
/// memory problems. For example, a double-free may occur if the
/// function is called twice on the same raw pointer.
///
/// [`Box::into_raw`]: struct.Box.html#method.into_raw
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let ptr = Box::into_raw(x);
/// let x = unsafe { Box::from_raw(ptr) };
/// ```
#[stable(feature = "box_raw", since = "1.4.0")]
#[inline]
pub unsafe fn from_raw(raw: *mut T) -> Self {
Box(Unique::new_unchecked(raw))
}
/// Consumes the `Box`, returning a wrapped raw pointer.
///
/// The pointer will be properly aligned and non-null.
///
/// After calling this function, the caller is responsible for the
/// memory previously managed by the `Box`. In particular, the
/// caller should properly destroy `T` and release the memory. The
/// proper way to do so is to convert the raw pointer back into a
/// `Box` with the [`Box::from_raw`] function.
///
/// Note: this is an associated function, which means that you have
/// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
/// is so that there is no conflict with a method on the inner type.
///
/// [`Box::from_raw`]: struct.Box.html#method.from_raw
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let ptr = Box::into_raw(x);
/// ```
#[stable(feature = "box_raw", since = "1.4.0")]
#[inline]
pub fn into_raw(b: Box<T>) -> *mut T {
Box::into_raw_non_null(b).as_ptr()
}
/// Consumes the `Box`, returning the wrapped pointer as `NonNull<T>`.
///
/// After calling this function, the caller is responsible for the
/// memory previously managed by the `Box`. In particular, the
/// caller should properly destroy `T` and release the memory. The
/// proper way to do so is to convert the `NonNull<T>` pointer
/// into a raw pointer and back into a `Box` with the [`Box::from_raw`]
/// function.
///
/// Note: this is an associated function, which means that you have
/// to call it as `Box::into_raw_non_null(b)`
/// instead of `b.into_raw_non_null()`. This
/// is so that there is no conflict with a method on the inner type.
///
/// [`Box::from_raw`]: struct.Box.html#method.from_raw
///
/// # Examples
///
/// ```
/// #![feature(box_into_raw_non_null)]
///
/// fn main() {
/// let x = Box::new(5);
/// let ptr = Box::into_raw_non_null(x);
/// }
/// ```
#[unstable(feature = "box_into_raw_non_null", issue = "47336")]
#[inline]
pub fn into_raw_non_null(b: Box<T>) -> NonNull<T> {
Box::into_unique(b).into()
}
#[unstable(feature = "ptr_internals", issue = "0", reason = "use into_raw_non_null instead")]
#[inline]
#[doc(hidden)]
pub fn into_unique(b: Box<T>) -> Unique<T> {
// Copy the pointer out, then `mem::forget` the box so its destructor
// never runs — ownership of the allocation moves to the caller.
let unique = b.0;
mem::forget(b);
unique
}
/// Consumes and leaks the `Box`, returning a mutable reference,
/// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
/// `'a`. If the type has only static references, or none at all, then this
/// may be chosen to be `'static`.
///
/// This function is mainly useful for data that lives for the remainder of
/// the program's life. Dropping the returned reference will cause a memory
/// leak. If this is not acceptable, the reference should first be wrapped
/// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
/// then be dropped which will properly destroy `T` and release the
/// allocated memory.
///
/// Note: this is an associated function, which means that you have
/// to call it as `Box::leak(b)` instead of `b.leak()`. This
/// is so that there is no conflict with a method on the inner type.
///
/// [`Box::from_raw`]: struct.Box.html#method.from_raw
///
/// # Examples
///
/// Simple usage:
///
/// ```
/// fn main() {
/// let x = Box::new(41);
/// let static_ref: &'static mut usize = Box::leak(x);
/// *static_ref += 1;
/// assert_eq!(*static_ref, 42);
/// }
/// ```
///
/// Unsized data:
///
/// ```
/// fn main() {
/// let x = vec![1, 2, 3].into_boxed_slice();
/// let static_ref = Box::leak(x);
/// static_ref[0] = 4;
/// assert_eq!(*static_ref, [4, 2, 3]);
/// }
/// ```
#[stable(feature = "box_leak", since = "1.26.0")]
#[inline]
pub fn leak<'a>(b: Box<T>) -> &'a mut T
where
T: 'a // Technically not needed, but kept to be explicit.
{
// `into_raw` relinquishes ownership, so dereferencing the pointer for
// the caller-chosen lifetime is sound; the allocation is never freed.
unsafe { &mut *Box::into_raw(b) }
}
}
// The destructor body is empty on purpose: the compiler emits the drop of
// `T` and the deallocation for the `owned_box` lang item itself.
// `#[may_dangle]` asserts this impl never reads the `T` value.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T: ?Sized> Drop for Box<T> {
fn drop(&mut self) {
// FIXME: Do nothing, drop is currently performed by compiler.
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
/// Creates a `Box<T>`, with the `Default` value for T.
fn default() -> Box<T> {
box Default::default()
}
}
// Default boxed slice/str: a zero-length allocation-free box, built from an
// empty array and unsize-coerced to `Box<[T]>`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Box<[T]> {
fn default() -> Box<[T]> {
Box::<[T; 0]>::new([])
}
}
#[stable(feature = "default_box_extra", since = "1.17.0")]
impl Default for Box<str> {
fn default() -> Box<str> {
// The empty byte slice is trivially valid UTF-8.
unsafe { from_boxed_utf8_unchecked(Default::default()) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for Box<T> {
/// Returns a new box with a `clone()` of this box's contents.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let y = x.clone();
/// ```
#[rustfmt_skip]
#[inline]
fn clone(&self) -> Box<T> {
box { (**self).clone() }
}
/// Copies `source`'s contents into `self` without creating a new allocation.
///
/// # Examples
///
/// ```
/// let x = Box::new(5);
/// let mut y = Box::new(10);
///
/// y.clone_from(&x);
///
/// assert_eq!(*y, 5);
/// ```
#[inline]
fn clone_from(&mut self, source: &Box<T>) {
// Delegate to `T::clone_from` so the existing allocation is reused.
(**self).clone_from(&(**source));
}
}
#[stable(feature = "box_slice_clone", since = "1.3.0")]
impl Clone for Box<str> {
fn clone(&self) -> Self {
// Allocate an uninitialized buffer of the same length, memcpy the UTF-8
// bytes (`u8` is `Copy`, so no element drops can be left half-done),
// then reinterpret as `Box<str>` — the bytes came from a valid `str`.
let len = self.len();
let buf = RawVec::with_capacity(len);
unsafe {
ptr::copy_nonoverlapping(self.as_ptr(), buf.ptr(), len);
from_boxed_utf8_unchecked(buf.into_box())
}
}
}
// All comparison and hashing impls delegate to the boxed value through
// `&**self`; boxes never compare or hash by address.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
#[inline]
fn eq(&self, other: &Box<T>) -> bool {
PartialEq::eq(&**self, &**other)
}
#[inline]
fn ne(&self, other: &Box<T>) -> bool {
PartialEq::ne(&**self, &**other)
}
}
// `lt`/`le`/`ge`/`gt` are forwarded explicitly (rather than using the trait
// defaults) so a specialized inner implementation is not bypassed.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> {
#[inline]
fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
#[inline]
fn lt(&self, other: &Box<T>) -> bool {
PartialOrd::lt(&**self, &**other)
}
#[inline]
fn le(&self, other: &Box<T>) -> bool {
PartialOrd::le(&**self, &**other)
}
#[inline]
fn ge(&self, other: &Box<T>) -> bool {
PartialOrd::ge(&**self, &**other)
}
#[inline]
fn gt(&self, other: &Box<T>) -> bool {
PartialOrd::gt(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Box<T> {
#[inline]
fn cmp(&self, other: &Box<T>) -> Ordering {
Ord::cmp(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Box<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
// A boxed hasher works as a hasher: all methods, including the fixed-width
// `write_*` shortcuts, forward to the inner hasher.
#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
impl<T: ?Sized + Hasher> Hasher for Box<T> {
fn finish(&self) -> u64 {
(**self).finish()
}
fn write(&mut self, bytes: &[u8]) {
(**self).write(bytes)
}
fn write_u8(&mut self, i: u8) {
(**self).write_u8(i)
}
fn write_u16(&mut self, i: u16) {
(**self).write_u16(i)
}
fn write_u32(&mut self, i: u32) {
(**self).write_u32(i)
}
fn write_u64(&mut self, i: u64) {
(**self).write_u64(i)
}
fn write_u128(&mut self, i: u128) {
(**self).write_u128(i)
}
fn write_usize(&mut self, i: usize) {
(**self).write_usize(i)
}
fn write_i8(&mut self, i: i8) {
(**self).write_i8(i)
}
fn write_i16(&mut self, i: i16) {
(**self).write_i16(i)
}
fn write_i32(&mut self, i: i32) {
(**self).write_i32(i)
}
fn write_i64(&mut self, i: i64) {
(**self).write_i64(i)
}
fn write_i128(&mut self, i: i128) {
(**self).write_i128(i)
}
fn write_isize(&mut self, i: isize) {
(**self).write_isize(i)
}
}
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Box<T> {
/// Converts a generic type `T` into a `Box<T>`
///
/// The conversion allocates on the heap and moves `t`
/// from the stack into it.
///
/// # Examples
/// ```rust
/// let x = 5;
/// let boxed = Box::new(5);
///
/// assert_eq!(Box::from(x), boxed);
/// ```
fn from(t: T) -> Self {
Box::new(t)
}
}
#[unstable(feature = "pin", issue = "49150")]
impl<T> From<Box<T>> for Pin<Box<T>> {
/// Converts a `Box<T>` into a `Pin<Box<T>>`
///
/// This conversion does not allocate on the heap and happens in place.
fn from(boxed: Box<T>) -> Self {
// It's not possible to move or replace the insides of a `Pin<Box<T>>`
// when `T: !Unpin`, so it's safe to pin it directly without any
// additional requirements.
unsafe { Pin::new_unchecked(boxed) }
}
}
#[stable(feature = "box_from_slice", since = "1.17.0")]
impl<'a, T: Copy> From<&'a [T]> for Box<[T]> {
/// Converts a `&[T]` into a `Box<[T]>`
///
/// This conversion allocates on the heap
/// and performs a copy of `slice`.
///
/// # Examples
/// ```rust
/// // create a &[u8] which will be used to create a Box<[u8]>
/// let slice: &[u8] = &[104, 101, 108, 108, 111];
/// let boxed_slice: Box<[u8]> = Box::from(slice);
///
/// println!("{:?}", boxed_slice);
/// ```
fn from(slice: &'a [T]) -> Box<[T]> {
// Uninitialized buffer of the right length, then a memcpy; sound
// because `T: Copy` (bitwise copy is valid, no drops owed).
let mut boxed = unsafe { RawVec::with_capacity(slice.len()).into_box() };
boxed.copy_from_slice(slice);
boxed
}
}
#[stable(feature = "box_from_slice", since = "1.17.0")]
impl<'a> From<&'a str> for Box<str> {
/// Converts a `&str` into a `Box<str>`
///
/// This conversion allocates on the heap
/// and performs a copy of `s`.
///
/// # Examples
/// ```rust
/// let boxed: Box<str> = Box::from("hello");
/// println!("{}", boxed);
/// ```
#[inline]
// Copies the bytes via the slice impl above, then reinterprets as UTF-8 —
// valid because the bytes came from a `&str`.
fn from(s: &'a str) -> Box<str> {
unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) }
}
}
#[stable(feature = "boxed_str_conv", since = "1.19.0")]
impl From<Box<str>> for Box<[u8]> {
/// Converts a `Box<str>>` into a `Box<[u8]>`
///
/// This conversion does not allocate on the heap and happens in place.
///
/// # Examples
/// ```rust
/// // create a Box<str> which will be used to create a Box<[u8]>
/// let boxed: Box<str> = Box::from("hello");
/// let boxed_str: Box<[u8]> = Box::from(boxed);
///
/// // create a &[u8] which will be used to create a Box<[u8]>
/// let slice: &[u8] = &[104, 101, 108, 108, 111];
/// let boxed_slice = Box::from(slice);
///
/// assert_eq!(boxed_slice, boxed_str);
/// ```
#[inline]
// `str` and `[u8]` share a layout, so a pointer cast converts in place.
fn from(s: Box<str>) -> Self {
unsafe { Box::from_raw(Box::into_raw(s) as *mut [u8]) }
}
}
impl Box<dyn Any> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
/// Attempt to downcast the box to a concrete type.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(value: Box<Any>) {
/// if let Ok(string) = value.downcast::<String>() {
/// println!("String ({}): {}", string.len(), string);
/// }
/// }
///
/// fn main() {
/// let my_string = "Hello World".to_string();
/// print_if_string(Box::new(my_string));
/// print_if_string(Box::new(0i8));
/// }
/// ```
pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any>> {
if self.is::<T>() {
unsafe {
// SAFETY: the `is::<T>()` check proved the erased value is a `T`,
// so the fat-to-thin pointer cast and re-boxing are valid.
let raw: *mut dyn Any = Box::into_raw(self);
Ok(Box::from_raw(raw as *mut T))
}
} else {
// Type mismatch: return the untouched box to the caller.
Err(self)
}
}
}
impl Box<dyn Any + Send> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
/// Attempt to downcast the box to a concrete type.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(value: Box<Any + Send>) {
/// if let Ok(string) = value.downcast::<String>() {
/// println!("String ({}): {}", string.len(), string);
/// }
/// }
///
/// fn main() {
/// let my_string = "Hello World".to_string();
/// print_if_string(Box::new(my_string));
/// print_if_string(Box::new(0i8));
/// }
/// ```
pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any + Send>> {
// Reuse the `dyn Any` downcast; on failure the `Send` bound is restored
// by pointer cast — the value is unchanged, so it is still `Send`.
<Box<dyn Any>>::downcast(self).map_err(|s| unsafe {
// reapply the Send marker
Box::from_raw(Box::into_raw(s) as *mut (dyn Any + Send))
})
}
}
// `Display`/`Debug` show the boxed value; only `Pointer` exposes the address.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// It's not possible to extract the inner Uniq directly from the Box,
// instead we cast it to a *const which aliases the Unique
let ptr: *const T = &**self;
fmt::Pointer::fmt(&ptr, f)
}
}
// `&**self` in these bodies is the compiler's built-in deref for the
// `owned_box` lang item, not a recursive call into this impl.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Box<T> {
type Target = T;
fn deref(&self) -> &T {
&**self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> DerefMut for Box<T> {
fn deref_mut(&mut self) -> &mut T {
&mut **self
}
}
// Iterator impls forward each method (not just `next`) so specialized
// `size_hint`/`nth`/`len` implementations survive the boxing.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator + ?Sized> Iterator for Box<I> {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> {
(**self).next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(**self).size_hint()
}
fn nth(&mut self, n: usize) -> Option<I::Item> {
(**self).nth(n)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> {
fn next_back(&mut self) -> Option<I::Item> {
(**self).next_back()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {
fn len(&self) -> usize {
(**self).len()
}
fn is_empty(&self) -> bool {
(**self).is_empty()
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator + ?Sized> FusedIterator for Box<I> {}
/// `FnBox` is a version of the `FnOnce` intended for use with boxed
/// closure objects. The idea is that where one would normally store a
/// `Box<FnOnce()>` in a data structure, you should use
/// `Box<FnBox()>`. The two traits behave essentially the same, except
/// that a `FnBox` closure can only be called if it is boxed. (Note
/// that `FnBox` may be deprecated in the future if `Box<FnOnce()>`
/// closures become directly usable.)
///
/// # Examples
///
/// Here is a snippet of code which creates a hashmap full of boxed
/// once closures and then removes them one by one, calling each
/// closure as it is removed. Note that the type of the closures
/// stored in the map is `Box<FnBox() -> i32>` and not `Box<FnOnce()
/// -> i32>`.
///
/// ```
/// #![feature(fnbox)]
///
/// use std::boxed::FnBox;
/// use std::collections::HashMap;
///
/// fn make_map() -> HashMap<i32, Box<FnBox() -> i32>> {
/// let mut map: HashMap<i32, Box<FnBox() -> i32>> = HashMap::new();
/// map.insert(1, Box::new(|| 22));
/// map.insert(2, Box::new(|| 44));
/// map
/// }
///
/// fn main() {
/// let mut map = make_map();
/// for i in &[1, 2] {
/// let f = map.remove(&i).unwrap();
/// assert_eq!(f(), i * 22);
/// }
/// }
/// ```
#[rustc_paren_sugar]
#[unstable(feature = "fnbox",
reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
pub trait FnBox<A> {
type Output;
// The by-box receiver is what makes this trait object-safe, unlike
// `FnOnce::call_once` which takes `self` by value.
fn call_box(self: Box<Self>, args: A) -> Self::Output;
}
// Blanket impl: every `FnOnce` closure is an `FnBox` once boxed.
#[unstable(feature = "fnbox",
reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
impl<A, F> FnBox<A> for F
where F: FnOnce<A>
{
type Output = F::Output;
fn call_box(self: Box<F>, args: A) -> F::Output {
self.call_once(args)
}
}
// Allow boxed `FnBox` trait objects (with or without `Send`) to be invoked
// with ordinary call syntax `f(args)`.
#[unstable(feature = "fnbox",
reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
impl<'a, A, R> FnOnce<A> for Box<dyn FnBox<A, Output = R> + 'a> {
type Output = R;
extern "rust-call" fn call_once(self, args: A) -> R {
self.call_box(args)
}
}
#[unstable(feature = "fnbox",
reason = "will be deprecated if and when `Box<FnOnce>` becomes usable", issue = "28796")]
impl<'a, A, R> FnOnce<A> for Box<dyn FnBox<A, Output = R> + Send + 'a> {
type Output = R;
extern "rust-call" fn call_once(self, args: A) -> R {
self.call_box(args)
}
}
// Enables unsizing coercions such as `Box<String> -> Box<dyn Display>` and
// `Box<[u8; 4]> -> Box<[u8]>`.
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
#[stable(feature = "box_slice_clone", since = "1.3.0")]
impl<T: Clone> Clone for Box<[T]> {
fn clone(&self) -> Self {
// Clone each element into fresh storage while counting initialized
// slots, so that a panic mid-way drops only what was written
// (see the `BoxBuilder` drop guard below).
let mut new = BoxBuilder {
data: RawVec::with_capacity(self.len()),
len: 0,
};
let mut target = new.data.ptr();
for item in self.iter() {
unsafe {
ptr::write(target, item.clone());
target = target.offset(1);
};
// Incremented only after the write: `drop` must never read an
// uninitialized slot.
new.len += 1;
}
return unsafe { new.into_box() };
// Helper type for responding to panics correctly.
struct BoxBuilder<T> {
data: RawVec<T>,
len: usize,
}
impl<T> BoxBuilder<T> {
// Success path: `mem::forget` disarms the guard, then the buffer is
// handed over as the finished boxed slice.
unsafe fn into_box(self) -> Box<[T]> {
let raw = ptr::read(&self.data);
mem::forget(self);
raw.into_box()
}
}
impl<T> Drop for BoxBuilder<T> {
// Unwind path: drop the `len` initialized elements in place
// (`ptr::read` drops each value; `RawVec` then frees the storage).
fn drop(&mut self) {
let mut data = self.data.ptr();
let max = unsafe { data.add(self.len) };
while data != max {
unsafe {
ptr::read(data);
data = data.offset(1);
}
}
}
}
}
}
#[stable(feature = "box_borrow", since = "1.1.0")]
impl<T: ?Sized> borrow::Borrow<T> for Box<T> {
    /// Returns a shared reference to the value owned by the box.
    fn borrow(&self) -> &T {
        // Deref the outer `&Box<T>`, then the box itself.
        let contents: &T = &**self;
        contents
    }
}
#[stable(feature = "box_borrow", since = "1.1.0")]
impl<T: ?Sized> borrow::BorrowMut<T> for Box<T> {
    /// Returns a mutable reference to the value owned by the box.
    fn borrow_mut(&mut self) -> &mut T {
        let contents: &mut T = &mut **self;
        contents
    }
}
#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Box<T> {
    /// Converts `&Box<T>` into `&T` by borrowing the contained value.
    fn as_ref(&self) -> &T {
        let value: &T = &**self;
        value
    }
}
#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsMut<T> for Box<T> {
    /// Converts `&mut Box<T>` into `&mut T` by borrowing the contained value.
    fn as_mut(&mut self) -> &mut T {
        let value: &mut T = &mut **self;
        value
    }
}
/* Nota bene
*
* We could have chosen not to add this impl, and instead have written a
* function of Pin<Box<T>> to Pin<T>. Such a function would not be sound,
* because Box<T> implements Unpin even when T does not, as a result of
* this impl.
*
* We chose this API instead of the alternative for a few reasons:
* - Logically, it is helpful to understand pinning in regard to the
* memory region being pointed to. For this reason none of the
* standard library pointer types support projecting through a pin
* (Box<T> is the only pointer type in std for which this would be
* safe.)
* - It is in practice very useful to have Box<T> be unconditionally
* Unpin because of trait objects, for which the structural auto
* trait functionality does not apply (e.g. Box<dyn Foo> would
* otherwise not be Unpin).
*
* Another type with the same semantics as Box but only a conditional
* implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and
* could have a method to project a Pin<T> from it.
*/
#[unstable(feature = "pin", issue = "49150")]
// Unconditional `Unpin`, even when `T` is not `Unpin`; see the `Nota bene`
// comment above for the rationale behind this design.
impl<T: ?Sized> Unpin for Box<T> { }
#[unstable(feature = "generator_trait", issue = "43122")]
impl<T> Generator for Box<T>
    where T: Generator + ?Sized
{
    type Yield = T::Yield;
    type Return = T::Return;
    // Forward `resume` to the generator owned by the box.
    unsafe fn resume(&mut self) -> GeneratorState<Self::Yield, Self::Return> {
        (**self).resume()
    }
}
#[unstable(feature = "futures_api", issue = "50547")]
impl<F: ?Sized + Future + Unpin> Future for Box<F> {
    type Output = F::Output;
    // The `F: Unpin` bound is what allows constructing a fresh `Pin` around
    // the inner future on every poll.
    fn poll(mut self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<Self::Output> {
        F::poll(Pin::new(&mut *self), lw)
    }
}
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A module for working with borrowed data.
#![stable(feature = "rust1", since = "1.0.0")]
/// A trait identifying how borrowed data behaves.
///
/// In Rust, it is common to provide different representations of a type for
/// different use cases. For instance, storage location and management for a
/// value can be specifically chosen as appropriate for a particular use via
/// pointer types such as [`Box<T>`] or [`Rc<T>`] or one can opt into
/// concurrency via synchronization types such as [`Mutex<T>`], avoiding the
/// associated cost when parallelism isn’t needed. Beyond these generic
/// wrappers that can be used with any type, some types provide optional
/// facets providing potentially costly functionality. An example for such a
/// type is [`String`] which adds the ability to extend a string to the basic
/// [`str`]. This requires keeping additional information unnecessary for a
/// simple, immutable string.
///
/// These types signal that they are a specialized representation of a basic
/// type `T` by implementing `Borrow<T>`. The method `borrow` provides a way
/// to convert a reference to the type into a reference to the underlying
/// basic type.
///
/// If a type implementing `Borrow<T>` implements other traits also
/// implemented by `T`, these implementations behave identically if the trait
/// is concerned with the data rather than its representation. For instance,
/// the comparison traits such as `PartialEq` or `PartialOrd` must behave
/// identically for `T` and any type implementing `Borrow<T>`.
///
/// When writing generic code, a use of `Borrow` should always be justified
/// by additional trait bounds, making it clear that the two types need to
/// behave identically in a certain context. If the code should merely be
/// able to operate on any type that can produce a reference to a given type,
/// you should use [`AsRef`] instead.
///
/// The companion trait [`BorrowMut`] provides the same guarantees for
/// mutable references.
///
/// [`AsRef`]: ../../std/convert/trait.AsRef.html
/// [`BorrowMut`]: trait.BorrowMut.html
/// [`Box<T>`]: ../../std/boxed/struct.Box.html
/// [`Mutex<T>`]: ../../std/sync/struct.Mutex.html
/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
/// [`str`]: ../../std/primitive.str.html
/// [`String`]: ../../std/string/struct.String.html
///
/// # Examples
///
/// As a data collection, [`HashMap<K, V>`] owns both keys and values. If
/// the key’s actual data is wrapped in a managing type of some kind, it
/// should, however, still be possible to search for a value using a
/// reference to the key’s data. For instance, if the key is a string, then
/// it is likely stored with the hash map as a [`String`], while it should
/// be possible to search using a [`&str`][`str`]. Thus, `insert` needs to
/// operate on a `String` while `get` needs to be able to use a `&str`.
///
/// Slightly simplified, the relevant parts of `HashMap<K, V>` look like
/// this:
///
/// ```
/// use std::borrow::Borrow;
/// use std::hash::Hash;
///
/// pub struct HashMap<K, V> {
/// # marker: ::std::marker::PhantomData<(K, V)>,
/// // fields omitted
/// }
///
/// impl<K, V> HashMap<K, V> {
/// pub fn insert(&self, key: K, value: V) -> Option<V>
/// where K: Hash + Eq
/// {
/// # unimplemented!()
/// // ...
/// }
///
/// pub fn get<Q>(&self, k: &Q) -> Option<&V>
/// where
/// K: Borrow<Q>,
/// Q: Hash + Eq + ?Sized
/// {
/// # unimplemented!()
/// // ...
/// }
/// }
/// ```
///
/// The entire hash map is generic over a key type `K`. Because these keys
/// are stored with the hash map, this type has to own the key’s data.
/// When inserting a key-value pair, the map is given such a `K` and needs
/// to find the correct hash bucket and check if the key is already present
/// based on that `K`. It therefore requires `K: Hash + Eq`.
///
/// When searching for a value in the map, however, having to provide a
/// reference to a `K` as the key to search for would require to always
/// create such an owned value. For string keys, this would mean a `String`
/// value needs to be created just for the search for cases where only a
/// `str` is available.
///
/// Instead, the `get` method is generic over the type of the underlying key
/// data, called `Q` in the method signature above. It states that `K` is a
/// representation of `Q` by requiring that `K: Borrow<Q>`. By additionally
/// requiring `Q: Hash + Eq`, it demands that `K` and `Q` have
/// implementations of the `Hash` and `Eq` traits that produce identical
/// results.
///
/// The implementation of `get` relies in particular on identical
/// implementations of `Hash` by determining the key’s hash bucket by calling
/// `Hash::hash` on the `Q` value even though it inserted the key based on
/// the hash value calculated from the `K` value.
///
/// As a consequence, the hash map breaks if a `K` wrapping a `Q` value
/// produces a different hash than `Q`. For instance, imagine you have a
/// type that wraps a string but compares ASCII letters ignoring their case:
///
/// ```
/// pub struct CIString(String);
///
/// impl PartialEq for CIString {
/// fn eq(&self, other: &Self) -> bool {
/// self.0.eq_ignore_ascii_case(&other.0)
/// }
/// }
///
/// impl Eq for CIString { }
/// ```
///
/// Because two equal values need to produce the same hash value, the
/// implementation of `Hash` needs to reflect that, too:
///
/// ```
/// # use std::hash::{Hash, Hasher};
/// # pub struct CIString(String);
/// impl Hash for CIString {
/// fn hash<H: Hasher>(&self, state: &mut H) {
/// for c in self.0.as_bytes() {
/// c.to_ascii_lowercase().hash(state)
/// }
/// }
/// }
/// ```
///
/// Can `CIString` implement `Borrow<str>`? It certainly can provide a
/// reference to a string slice via its contained owned string. But because
/// its `Hash` implementation differs, it cannot fulfill the guarantee for
/// `Borrow` that all common trait implementations must behave the same way
/// and must not, in fact, implement `Borrow<str>`. If it wants to allow
/// others access to the underlying `str`, it can do that via `AsRef<str>`
/// which doesn’t carry any such restrictions.
///
/// [`Hash`]: ../../std/hash/trait.Hash.html
/// [`HashMap<K, V>`]: ../../std/collections/struct.HashMap.html
/// [`String`]: ../../std/string/struct.String.html
/// [`str`]: ../../std/primitive.str.html
///
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Borrow<Borrowed: ?Sized> {
    /// Immutably borrows from an owned value.
    ///
    /// # Examples
    ///
    /// The function below accepts both an owned `String` and a plain
    /// `&str` because both can be borrowed as a `str`:
    ///
    /// ```
    /// use std::borrow::Borrow;
    ///
    /// fn check<T: Borrow<str>>(s: T) {
    ///     assert_eq!("Hello", s.borrow());
    /// }
    ///
    /// let s = "Hello".to_string();
    ///
    /// check(s);
    ///
    /// let s = "Hello";
    ///
    /// check(s);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn borrow(&self) -> &Borrowed;
}
/// A trait for mutably borrowing data.
///
/// Similar to [`Borrow`], but for mutable borrows; implementations are
/// expected to uphold the same consistency guarantees described there.
///
/// [`Borrow`]: trait.Borrow.html
#[stable(feature = "rust1", since = "1.0.0")]
pub trait BorrowMut<Borrowed: ?Sized> : Borrow<Borrowed> {
    /// Mutably borrows from an owned value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::borrow::BorrowMut;
    ///
    /// fn check<T: BorrowMut<[i32]>>(mut v: T) {
    ///     assert_eq!(&mut [1, 2, 3], v.borrow_mut());
    /// }
    ///
    /// let v = vec![1, 2, 3];
    ///
    /// check(v);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn borrow_mut(&mut self) -> &mut Borrowed;
}
// Blanket impl: every type can be borrowed as itself.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Borrow<T> for T {
    fn borrow(&self) -> &T { self }
}
// Blanket impl: every type can be mutably borrowed as itself.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> BorrowMut<T> for T {
    fn borrow_mut(&mut self) -> &mut T { self }
}
// A shared reference can be borrowed as its referent.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Borrow<T> for &'a T {
    fn borrow(&self) -> &T { &**self }
}
// A mutable reference can be immutably borrowed as its referent.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Borrow<T> for &'a mut T {
    fn borrow(&self) -> &T { &**self }
}
// A mutable reference can be mutably borrowed as its referent.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> BorrowMut<T> for &'a mut T {
    fn borrow_mut(&mut self) -> &mut T { &mut **self }
}
Further refinement of Borrow documentation.
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A module for working with borrowed data.
#![stable(feature = "rust1", since = "1.0.0")]
/// A trait identifying how borrowed data behaves.
///
/// In Rust, it is common to provide different representations of a type for
/// different use cases. For instance, storage location and management for a
/// value can be specifically chosen as appropriate for a particular use via
/// pointer types such as [`Box<T>`] or [`Rc<T>`]. Beyond these generic
/// wrappers that can be used with any type, some types provide optional
/// facets providing potentially costly functionality. An example for such a
/// type is [`String`] which adds the ability to extend a string to the basic
/// [`str`]. This requires keeping additional information unnecessary for a
/// simple, immutable string.
///
/// These types signal that they are a specialized representation of a basic
/// type `T` by implementing `Borrow<T>`. The method `borrow` provides a way
/// to convert a reference to the type into a reference to this basic type
/// `T`.
///
/// Further, when providing implementations for additional traits, it needs
/// to be considered whether they should behave identical to those of the
/// underlying type as a consequence of acting as a representation of that
/// underlying type.
///
/// Generic code typically uses `Borrow<T>` when it not only needs access
/// to a reference of the underlying type but relies on the identical
/// behavior of these additional trait implementations. These traits are
/// likely to appear as additional trait bounds.
///
/// If generic code merely needs to work for all types that can
/// provide a reference to related type `T`, it is often better to use
/// [`AsRef<T>`] as more types can safely implement it.
///
/// If a type implementing `Borrow<T>` also wishes to allow mutable access
/// to the underlying type `T`, it can do so by implementing the companion
/// trait [`BorrowMut`].
///
/// Note also that it is perfectly fine for a single type to have multiple
/// implementations of `Borrow<T>` for different `T`s. In fact, a blanket
/// implementation lets every type be at least a borrow of itself.
///
/// [`AsRef<T>`]: ../../std/convert/trait.AsRef.html
/// [`BorrowMut`]: trait.BorrowMut.html
/// [`Box<T>`]: ../../std/boxed/struct.Box.html
/// [`Mutex<T>`]: ../../std/sync/struct.Mutex.html
/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
/// [`str`]: ../../std/primitive.str.html
/// [`String`]: ../../std/string/struct.String.html
///
/// # Examples
///
/// As a data collection, [`HashMap<K, V>`] owns both keys and values. If
/// the key’s actual data is wrapped in a managing type of some kind, it
/// should, however, still be possible to search for a value using a
/// reference to the key’s data. For instance, if the key is a string, then
/// it is likely stored with the hash map as a [`String`], while it should
/// be possible to search using a [`&str`][`str`]. Thus, `insert` needs to
/// operate on a `String` while `get` needs to be able to use a `&str`.
///
/// Slightly simplified, the relevant parts of `HashMap<K, V>` look like
/// this:
///
/// ```
/// use std::borrow::Borrow;
/// use std::hash::Hash;
///
/// pub struct HashMap<K, V> {
/// # marker: ::std::marker::PhantomData<(K, V)>,
/// // fields omitted
/// }
///
/// impl<K, V> HashMap<K, V> {
/// pub fn insert(&self, key: K, value: V) -> Option<V>
/// where K: Hash + Eq
/// {
/// # unimplemented!()
/// // ...
/// }
///
/// pub fn get<Q>(&self, k: &Q) -> Option<&V>
/// where
/// K: Borrow<Q>,
/// Q: Hash + Eq + ?Sized
/// {
/// # unimplemented!()
/// // ...
/// }
/// }
/// ```
///
/// The entire hash map is generic over a key type `K`. Because these keys
/// are stored with the hash map, this type has to own the key’s data.
/// When inserting a key-value pair, the map is given such a `K` and needs
/// to find the correct hash bucket and check if the key is already present
/// based on that `K`. It therefore requires `K: Hash + Eq`.
///
/// When searching for a value in the map, however, having to provide a
/// reference to a `K` as the key to search for would require to always
/// create such an owned value. For string keys, this would mean a `String`
/// value needs to be created just for the search for cases where only a
/// `str` is available.
///
/// Instead, the `get` method is generic over the type of the underlying key
/// data, called `Q` in the method signature above. It states that `K` is a
/// representation of `Q` by requiring that `K: Borrow<Q>`. By additionally
/// requiring `Q: Hash + Eq`, it demands that `K` and `Q` have
/// implementations of the `Hash` and `Eq` traits that produce identical
/// results.
///
/// The implementation of `get` relies in particular on identical
/// implementations of `Hash` by determining the key’s hash bucket by calling
/// `Hash::hash` on the `Q` value even though it inserted the key based on
/// the hash value calculated from the `K` value.
///
/// As a consequence, the hash map breaks if a `K` wrapping a `Q` value
/// produces a different hash than `Q`. For instance, imagine you have a
/// type that wraps a string but compares ASCII letters ignoring their case:
///
/// ```
/// pub struct CaseInsensitiveString(String);
///
/// impl PartialEq for CaseInsensitiveString {
/// fn eq(&self, other: &Self) -> bool {
/// self.0.eq_ignore_ascii_case(&other.0)
/// }
/// }
///
/// impl Eq for CaseInsensitiveString { }
/// ```
///
/// Because two equal values need to produce the same hash value, the
/// implementation of `Hash` needs to reflect that, too:
///
/// ```
/// # use std::hash::{Hash, Hasher};
/// # pub struct CaseInsensitiveString(String);
/// impl Hash for CaseInsensitiveString {
/// fn hash<H: Hasher>(&self, state: &mut H) {
/// for c in self.0.as_bytes() {
/// c.to_ascii_lowercase().hash(state)
/// }
/// }
/// }
/// ```
///
/// Can `CaseInsensitiveString` implement `Borrow<str>`? It certainly can
/// provide a reference to a string slice via its contained owned string.
/// But because its `Hash` implementation differs, it behaves differently
/// from `str` and therefore must not, in fact, implement `Borrow<str>`.
/// If it wants to allow others access to the underlying `str`, it can do
/// that via `AsRef<str>` which doesn’t carry any extra requirements.
///
/// [`Hash`]: ../../std/hash/trait.Hash.html
/// [`HashMap<K, V>`]: ../../std/collections/struct.HashMap.html
/// [`String`]: ../../std/string/struct.String.html
/// [`str`]: ../../std/primitive.str.html
///
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Borrow<Borrowed: ?Sized> {
    /// Immutably borrows from an owned value.
    ///
    /// # Examples
    ///
    /// The function below accepts both an owned `String` and a plain
    /// `&str` because both can be borrowed as a `str`:
    ///
    /// ```
    /// use std::borrow::Borrow;
    ///
    /// fn check<T: Borrow<str>>(s: T) {
    ///     assert_eq!("Hello", s.borrow());
    /// }
    ///
    /// let s = "Hello".to_string();
    ///
    /// check(s);
    ///
    /// let s = "Hello";
    ///
    /// check(s);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn borrow(&self) -> &Borrowed;
}
/// A trait for mutably borrowing data.
///
/// Similar to [`Borrow`], but for mutable borrows; implementations are
/// expected to uphold the same consistency guarantees described there.
///
/// [`Borrow`]: trait.Borrow.html
#[stable(feature = "rust1", since = "1.0.0")]
pub trait BorrowMut<Borrowed: ?Sized> : Borrow<Borrowed> {
    /// Mutably borrows from an owned value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::borrow::BorrowMut;
    ///
    /// fn check<T: BorrowMut<[i32]>>(mut v: T) {
    ///     assert_eq!(&mut [1, 2, 3], v.borrow_mut());
    /// }
    ///
    /// let v = vec![1, 2, 3];
    ///
    /// check(v);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn borrow_mut(&mut self) -> &mut Borrowed;
}
// Blanket impl: every type can be borrowed as itself.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Borrow<T> for T {
    fn borrow(&self) -> &T { self }
}
// Blanket impl: every type can be mutably borrowed as itself.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> BorrowMut<T> for T {
    fn borrow_mut(&mut self) -> &mut T { self }
}
// A shared reference can be borrowed as its referent.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Borrow<T> for &'a T {
    fn borrow(&self) -> &T { &**self }
}
// A mutable reference can be immutably borrowed as its referent.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Borrow<T> for &'a mut T {
    fn borrow(&self) -> &T { &**self }
}
// A mutable reference can be mutably borrowed as its referent.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> BorrowMut<T> for &'a mut T {
    fn borrow_mut(&mut self) -> &mut T { &mut **self }
}
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Entry point of thread panic, for details, see std::macros
#[macro_export]
#[allow_internal_unstable]
#[stable(feature = "core", since = "1.6.0")]
macro_rules! panic {
    // No arguments: use the canonical message.
    () => (
        panic!("explicit panic")
    );
    // Single message expression; the `static` tuple requires it to be a
    // `&'static str` constant, captured with file!/line! for the runtime.
    ($msg:expr) => ({
        static _MSG_FILE_LINE: (&'static str, &'static str, u32) = ($msg, file!(), line!());
        $crate::panicking::panic(&_MSG_FILE_LINE)
    });
    ($fmt:expr, $($arg:tt)*) => ({
        // The leading _'s are to avoid dead code warnings if this is
        // used inside a dead function. Just `#[allow(dead_code)]` is
        // insufficient, since the user may have
        // `#[forbid(dead_code)]` and which cannot be overridden.
        static _FILE_LINE: (&'static str, u32) = (file!(), line!());
        $crate::panicking::panic_fmt(format_args!($fmt, $($arg)*), &_FILE_LINE)
    });
}
/// Ensure that a boolean expression is `true` at runtime.
///
/// This will invoke the `panic!` macro if the provided expression cannot be
/// evaluated to `true` at runtime.
///
/// This macro has a second version, where a custom panic message can be provided.
///
/// # Examples
///
/// ```
/// // the panic message for these assertions is the stringified value of the
/// // expression given.
/// assert!(true);
///
/// fn some_computation() -> bool { true } // a very simple function
///
/// assert!(some_computation());
///
/// // assert with a custom message
/// let x = true;
/// assert!(x, "x wasn't true!");
///
/// let a = 3; let b = 27;
/// assert!(a + b == 30, "a = {}, b = {}", a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! assert {
    // Bare condition: the panic message is the stringified expression.
    ($cond:expr) => (
        if !$cond {
            panic!(concat!("assertion failed: ", stringify!($cond)))
        }
    );
    // Condition plus a custom format message forwarded to `panic!`.
    ($cond:expr, $($arg:tt)+) => (
        if !$cond {
            panic!($($arg)+)
        }
    );
}
/// Asserts that two expressions are equal to each other.
///
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
///
/// # Examples
///
/// ```
/// let a = 3;
/// let b = 1 + 2;
/// assert_eq!(a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! assert_eq {
    ($left:expr , $right:expr) => ({
        // Borrow both operands so each is evaluated exactly once and need
        // not be `Copy`; compare through the references.
        match (&($left), &($right)) {
            (left_val, right_val) => {
                if !(*left_val == *right_val) {
                    panic!("assertion failed: `(left == right)` \
                           (left: `{:?}`, right: `{:?}`)", left_val, right_val)
                }
            }
        }
    });
    // Accept a trailing comma, e.g. `assert_eq!(a, b,)`, by forwarding to
    // the arm above.
    ($left:expr , $right:expr,) => ({
        assert_eq!($left, $right)
    });
}
/// Ensure that a boolean expression is `true` at runtime.
///
/// This will invoke the `panic!` macro if the provided expression cannot be
/// evaluated to `true` at runtime.
///
/// Like `assert!`, this macro also has a second version, where a custom panic
/// message can be provided.
///
/// Unlike `assert!`, `debug_assert!` statements are only enabled in non
/// optimized builds by default. An optimized build will omit all
/// `debug_assert!` statements unless `-C debug-assertions` is passed to the
/// compiler. This makes `debug_assert!` useful for checks that are too
/// expensive to be present in a release build but may be helpful during
/// development.
///
/// # Examples
///
/// ```
/// // the panic message for these assertions is the stringified value of the
/// // expression given.
/// debug_assert!(true);
///
/// fn some_expensive_computation() -> bool { true } // a very simple function
/// debug_assert!(some_expensive_computation());
///
/// // assert with a custom message
/// let x = true;
/// debug_assert!(x, "x wasn't true!");
///
/// let a = 3; let b = 27;
/// debug_assert!(a + b == 30, "a = {}, b = {}", a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! debug_assert {
    // Forwards to `assert!` only when `debug_assertions` is enabled, so the
    // check compiles away in optimized builds (unless -C debug-assertions).
    ($($arg:tt)*) => (if cfg!(debug_assertions) { assert!($($arg)*); })
}
/// Asserts that two expressions are equal to each other.
///
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
///
/// Unlike `assert_eq!`, `debug_assert_eq!` statements are only enabled in non
/// optimized builds by default. An optimized build will omit all
/// `debug_assert_eq!` statements unless `-C debug-assertions` is passed to the
/// compiler. This makes `debug_assert_eq!` useful for checks that are too
/// expensive to be present in a release build but may be helpful during
/// development.
///
/// # Examples
///
/// ```
/// let a = 3;
/// let b = 1 + 2;
/// debug_assert_eq!(a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! debug_assert_eq {
    // Forwards to `assert_eq!` only when `debug_assertions` is enabled.
    ($($arg:tt)*) => (if cfg!(debug_assertions) { assert_eq!($($arg)*); })
}
/// Helper macro for unwrapping `Result` values while returning early with an
/// error if the value of the expression is `Err`. Can only be used in
/// functions that return `Result` because of the early return of `Err` that
/// it provides.
///
/// # Examples
///
/// ```
/// use std::io;
/// use std::fs::File;
/// use std::io::prelude::*;
///
/// fn write_to_file_using_try() -> Result<(), io::Error> {
/// let mut file = try!(File::create("my_best_friends.txt"));
/// try!(file.write_all(b"This is a list of my best friends."));
/// println!("I wrote to the file");
/// Ok(())
/// }
/// // This is equivalent to:
/// fn write_to_file_using_match() -> Result<(), io::Error> {
///     let mut file = match File::create("my_best_friends.txt") {
///         Ok(f) => f,
///         Err(e) => return Err(e),
///     };
///     match file.write_all(b"This is a list of my best friends.") {
///         Ok(_) => (),
///         Err(e) => return Err(e),
///     }
/// println!("I wrote to the file");
/// Ok(())
/// }
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! try {
    ($expr:expr) => (match $expr {
        $crate::result::Result::Ok(val) => val,
        // Note the `From::from`: the error is converted into the caller's
        // error type, so `try!` also works across differing error types.
        $crate::result::Result::Err(err) => {
            return $crate::result::Result::Err($crate::convert::From::from(err))
        }
    })
}
/// Use the `format!` syntax to write data into a buffer.
///
/// This macro is typically used with a buffer of `&mut `[`Write`][write].
///
/// See [`std::fmt`][fmt] for more information on format syntax.
///
/// [fmt]: ../std/fmt/index.html
/// [write]: ../std/io/trait.Write.html
///
/// # Examples
///
/// ```
/// use std::io::Write;
///
/// let mut w = Vec::new();
/// write!(&mut w, "test").unwrap();
/// write!(&mut w, "formatted {}", "arguments").unwrap();
///
/// assert_eq!(w, b"testformatted arguments");
/// ```
#[macro_export]
#[stable(feature = "core", since = "1.6.0")]
macro_rules! write {
    // Delegates to the receiver's `write_fmt` with pre-built `Arguments`.
    ($dst:expr, $($arg:tt)*) => ($dst.write_fmt(format_args!($($arg)*)))
}
/// Use the `format!` syntax to write data into a buffer, appending a newline.
///
/// This macro is typically used with a buffer of `&mut `[`Write`][write].
///
/// See [`std::fmt`][fmt] for more information on format syntax.
///
/// [fmt]: ../std/fmt/index.html
/// [write]: ../std/io/trait.Write.html
///
/// # Examples
///
/// ```
/// use std::io::Write;
///
/// let mut w = Vec::new();
/// writeln!(&mut w, "test").unwrap();
/// writeln!(&mut w, "formatted {}", "arguments").unwrap();
///
/// assert_eq!(&w[..], "test\nformatted arguments\n".as_bytes());
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! writeln {
    // Appends "\n" to the format string at compile time via `concat!`.
    ($dst:expr, $fmt:expr) => (
        write!($dst, concat!($fmt, "\n"))
    );
    ($dst:expr, $fmt:expr, $($arg:tt)*) => (
        write!($dst, concat!($fmt, "\n"), $($arg)*)
    );
}
/// A utility macro for indicating unreachable code.
///
/// This is useful any time that the compiler can't determine that some code is unreachable. For
/// example:
///
/// * Match arms with guard conditions.
/// * Loops that dynamically terminate.
/// * Iterators that dynamically terminate.
///
/// # Panics
///
/// This will always panic.
///
/// # Examples
///
/// Match arms:
///
/// ```
/// # #[allow(dead_code)]
/// fn foo(x: Option<i32>) {
/// match x {
/// Some(n) if n >= 0 => println!("Some(Non-negative)"),
/// Some(n) if n < 0 => println!("Some(Negative)"),
/// Some(_) => unreachable!(), // compile error if commented out
/// None => println!("None")
/// }
/// }
/// ```
///
/// Iterators:
///
/// ```
/// # #[allow(dead_code)]
/// fn divide_by_three(x: u32) -> u32 { // one of the poorest implementations of x/3
/// for i in 0.. {
/// if 3*i < i { panic!("u32 overflow"); }
/// if x < 3*i { return i-1; }
/// }
/// unreachable!();
/// }
/// ```
#[macro_export]
#[stable(feature = "core", since = "1.6.0")]
macro_rules! unreachable {
    () => ({
        panic!("internal error: entered unreachable code")
    });
    // With a message: re-dispatch to the formatted arm below so the message
    // is rendered via `{}`.
    ($msg:expr) => ({
        unreachable!("{}", $msg)
    });
    ($fmt:expr, $($arg:tt)*) => ({
        panic!(concat!("internal error: entered unreachable code: ", $fmt), $($arg)*)
    });
}
/// A standardized placeholder for marking unfinished code. It panics with the
/// message `"not yet implemented"` when executed.
///
/// This can be useful if you are prototyping and are just looking to have your
/// code typecheck, or if you're implementing a trait that requires multiple
/// methods, and you're only planning on using one of them.
///
/// # Examples
///
/// Here's an example of some in-progress code. We have a trait `Foo`:
///
/// ```
/// trait Foo {
/// fn bar(&self);
/// fn baz(&self);
/// }
/// ```
///
/// We want to implement `Foo` on one of our types, but we also want to work on
/// just `bar()` first. In order for our code to compile, we need to implement
/// `baz()`, so we can use `unimplemented!`:
///
/// ```
/// # trait Foo {
/// # fn bar(&self);
/// # fn baz(&self);
/// # }
/// struct MyStruct;
///
/// impl Foo for MyStruct {
/// fn bar(&self) {
/// // implementation goes here
/// }
///
/// fn baz(&self) {
/// // let's not worry about implementing baz() for now
/// unimplemented!();
/// }
/// }
///
/// fn main() {
/// let s = MyStruct;
/// s.bar();
///
/// // we aren't even using baz() yet, so this is fine.
/// }
/// ```
#[macro_export]
#[stable(feature = "core", since = "1.6.0")]
macro_rules! unimplemented {
    // Placeholder that always panics with a fixed message.
    () => (panic!("not yet implemented"))
}
Clarify try! doc example
The original is correct, but a bit misleading.
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Entry point of thread panic, for details, see std::macros
#[macro_export]
#[allow_internal_unstable]
#[stable(feature = "core", since = "1.6.0")]
macro_rules! panic {
    // No arguments: use the canonical message.
    () => (
        panic!("explicit panic")
    );
    // Single message expression; the `static` tuple requires it to be a
    // `&'static str` constant, captured with file!/line! for the runtime.
    ($msg:expr) => ({
        static _MSG_FILE_LINE: (&'static str, &'static str, u32) = ($msg, file!(), line!());
        $crate::panicking::panic(&_MSG_FILE_LINE)
    });
    ($fmt:expr, $($arg:tt)*) => ({
        // The leading _'s are to avoid dead code warnings if this is
        // used inside a dead function. Just `#[allow(dead_code)]` is
        // insufficient, since the user may have
        // `#[forbid(dead_code)]` and which cannot be overridden.
        static _FILE_LINE: (&'static str, u32) = (file!(), line!());
        $crate::panicking::panic_fmt(format_args!($fmt, $($arg)*), &_FILE_LINE)
    });
}
/// Ensure that a boolean expression is `true` at runtime.
///
/// This will invoke the `panic!` macro if the provided expression cannot be
/// evaluated to `true` at runtime.
///
/// This macro has a second version, where a custom panic message can be provided.
///
/// # Examples
///
/// ```
/// // the panic message for these assertions is the stringified value of the
/// // expression given.
/// assert!(true);
///
/// fn some_computation() -> bool { true } // a very simple function
///
/// assert!(some_computation());
///
/// // assert with a custom message
/// let x = true;
/// assert!(x, "x wasn't true!");
///
/// let a = 3; let b = 27;
/// assert!(a + b == 30, "a = {}, b = {}", a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! assert {
    // Bare condition: the panic message is the stringified expression.
    ($cond:expr) => (
        if !$cond {
            panic!(concat!("assertion failed: ", stringify!($cond)))
        }
    );
    // Condition plus a custom format message forwarded to `panic!`.
    ($cond:expr, $($arg:tt)+) => (
        if !$cond {
            panic!($($arg)+)
        }
    );
}
/// Asserts that two expressions are equal to each other.
///
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
///
/// # Examples
///
/// ```
/// let a = 3;
/// let b = 1 + 2;
/// assert_eq!(a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! assert_eq {
    ($left:expr , $right:expr) => ({
        // Evaluate each operand exactly once and bind references to the results.
        // Using `match` (rather than `let`) keeps any temporaries created by the
        // operand expressions alive for the whole comparison/panic body.
        match (&($left), &($right)) {
            (left_val, right_val) => {
                // Written as `!(a == b)` instead of `a != b` so only `PartialEq`
                // (not a separate `ne` implementation) determines the outcome.
                if !(*left_val == *right_val) {
                    panic!("assertion failed: `(left == right)` \
                           (left: `{:?}`, right: `{:?}`)", left_val, right_val)
                }
            }
        }
    })
}
/// Ensure that a boolean expression is `true` at runtime.
///
/// This will invoke the `panic!` macro if the provided expression cannot be
/// evaluated to `true` at runtime.
///
/// Like `assert!`, this macro also has a second version, where a custom panic
/// message can be provided.
///
/// Unlike `assert!`, `debug_assert!` statements are only enabled in non
/// optimized builds by default. An optimized build will omit all
/// `debug_assert!` statements unless `-C debug-assertions` is passed to the
/// compiler. This makes `debug_assert!` useful for checks that are too
/// expensive to be present in a release build but may be helpful during
/// development.
///
/// # Examples
///
/// ```
/// // the panic message for these assertions is the stringified value of the
/// // expression given.
/// debug_assert!(true);
///
/// fn some_expensive_computation() -> bool { true } // a very simple function
/// debug_assert!(some_expensive_computation());
///
/// // assert with a custom message
/// let x = true;
/// debug_assert!(x, "x wasn't true!");
///
/// let a = 3; let b = 27;
/// debug_assert!(a + b == 30, "a = {}, b = {}", a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! debug_assert {
    // Forwards everything to `assert!`, but only when `debug_assertions` is set
    // (debug builds, or `-C debug-assertions`). In other builds this expands to
    // an empty `if false` branch, so the condition is not evaluated at runtime —
    // though it must still typecheck.
    ($($arg:tt)*) => (if cfg!(debug_assertions) { assert!($($arg)*); })
}
/// Asserts that two expressions are equal to each other.
///
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
///
/// Unlike `assert_eq!`, `debug_assert_eq!` statements are only enabled in non
/// optimized builds by default. An optimized build will omit all
/// `debug_assert_eq!` statements unless `-C debug-assertions` is passed to the
/// compiler. This makes `debug_assert_eq!` useful for checks that are too
/// expensive to be present in a release build but may be helpful during
/// development.
///
/// # Examples
///
/// ```
/// let a = 3;
/// let b = 1 + 2;
/// debug_assert_eq!(a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! debug_assert_eq {
    // Same gating strategy as `debug_assert!`: delegate to `assert_eq!` inside a
    // `cfg!(debug_assertions)` check so release builds compile the check away.
    ($($arg:tt)*) => (if cfg!(debug_assertions) { assert_eq!($($arg)*); })
}
/// Helper macro for unwrapping `Result` values while returning early with an
/// error if the value of the expression is `Err`. Can only be used in
/// functions that return `Result` because of the early return of `Err` that
/// it provides.
///
/// # Examples
///
/// ```
/// use std::io;
/// use std::fs::File;
/// use std::io::prelude::*;
///
/// fn write_to_file_using_try() -> Result<(), io::Error> {
/// let mut file = try!(File::create("my_best_friends.txt"));
/// try!(file.write_all(b"This is a list of my best friends."));
/// println!("I wrote to the file");
/// Ok(())
/// }
/// // This is equivalent to:
/// fn write_to_file_using_match() -> Result<(), io::Error> {
/// let mut file = try!(File::create("my_best_friends.txt"));
/// match file.write_all(b"This is a list of my best friends.") {
/// Ok(v) => v,
/// Err(e) => return Err(e),
/// }
/// println!("I wrote to the file");
/// Ok(())
/// }
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! try {
    // Unwrap `Ok`, or early-return the error converted into the caller's error
    // type via `From`. Paths are spelled with `$crate::...` so the expansion
    // resolves correctly regardless of what names the call site has imported.
    ($expr:expr) => (match $expr {
        $crate::result::Result::Ok(val) => val,
        $crate::result::Result::Err(err) => {
            return $crate::result::Result::Err($crate::convert::From::from(err))
        }
    })
}
/// Use the `format!` syntax to write data into a buffer.
///
/// This macro is typically used with a buffer of `&mut `[`Write`][write].
///
/// See [`std::fmt`][fmt] for more information on format syntax.
///
/// [fmt]: ../std/fmt/index.html
/// [write]: ../std/io/trait.Write.html
///
/// # Examples
///
/// ```
/// use std::io::Write;
///
/// let mut w = Vec::new();
/// write!(&mut w, "test").unwrap();
/// write!(&mut w, "formatted {}", "arguments").unwrap();
///
/// assert_eq!(w, b"testformatted arguments");
/// ```
#[macro_export]
#[stable(feature = "core", since = "1.6.0")]
macro_rules! write {
    // Thin wrapper: build a `fmt::Arguments` with `format_args!` and pass it to
    // the destination's `write_fmt` method. `write_fmt` is resolved by method
    // lookup on `$dst`, so this works for both `fmt::Write` and `io::Write`.
    ($dst:expr, $($arg:tt)*) => ($dst.write_fmt(format_args!($($arg)*)))
}
/// Use the `format!` syntax to write data into a buffer, appending a newline.
///
/// This macro is typically used with a buffer of `&mut `[`Write`][write].
///
/// See [`std::fmt`][fmt] for more information on format syntax.
///
/// [fmt]: ../std/fmt/index.html
/// [write]: ../std/io/trait.Write.html
///
/// # Examples
///
/// ```
/// use std::io::Write;
///
/// let mut w = Vec::new();
/// writeln!(&mut w, "test").unwrap();
/// writeln!(&mut w, "formatted {}", "arguments").unwrap();
///
/// assert_eq!(&w[..], "test\nformatted arguments\n".as_bytes());
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! writeln {
    // Append the "\n" to the format string at compile time with `concat!`, then
    // delegate to `write!` — the newline costs nothing extra at runtime.
    ($dst:expr, $fmt:expr) => (
        write!($dst, concat!($fmt, "\n"))
    );
    ($dst:expr, $fmt:expr, $($arg:tt)*) => (
        write!($dst, concat!($fmt, "\n"), $($arg)*)
    );
}
/// A utility macro for indicating unreachable code.
///
/// This is useful any time that the compiler can't determine that some code is unreachable. For
/// example:
///
/// * Match arms with guard conditions.
/// * Loops that dynamically terminate.
/// * Iterators that dynamically terminate.
///
/// # Panics
///
/// This will always panic.
///
/// # Examples
///
/// Match arms:
///
/// ```
/// # #[allow(dead_code)]
/// fn foo(x: Option<i32>) {
/// match x {
/// Some(n) if n >= 0 => println!("Some(Non-negative)"),
/// Some(n) if n < 0 => println!("Some(Negative)"),
/// Some(_) => unreachable!(), // compile error if commented out
/// None => println!("None")
/// }
/// }
/// ```
///
/// Iterators:
///
/// ```
/// # #[allow(dead_code)]
/// fn divide_by_three(x: u32) -> u32 { // one of the poorest implementations of x/3
/// for i in 0.. {
/// if 3*i < i { panic!("u32 overflow"); }
/// if x < 3*i { return i-1; }
/// }
/// unreachable!();
/// }
/// ```
#[macro_export]
#[stable(feature = "core", since = "1.6.0")]
macro_rules! unreachable {
    // No-argument form: fixed panic message.
    () => ({
        panic!("internal error: entered unreachable code")
    });
    // Single-message form: re-enter the macro through the format-string arm so
    // the message is rendered with `{}` (i.e. via `Display`).
    ($msg:expr) => ({
        unreachable!("{}", $msg)
    });
    // Format-string form: prefix the standard preamble onto the user's format
    // string at compile time.
    ($fmt:expr, $($arg:tt)*) => ({
        panic!(concat!("internal error: entered unreachable code: ", $fmt), $($arg)*)
    });
}
/// A standardized placeholder for marking unfinished code. It panics with the
/// message `"not yet implemented"` when executed.
///
/// This can be useful if you are prototyping and are just looking to have your
/// code typecheck, or if you're implementing a trait that requires multiple
/// methods, and you're only planning on using one of them.
///
/// # Examples
///
/// Here's an example of some in-progress code. We have a trait `Foo`:
///
/// ```
/// trait Foo {
/// fn bar(&self);
/// fn baz(&self);
/// }
/// ```
///
/// We want to implement `Foo` on one of our types, but we also want to work on
/// just `bar()` first. In order for our code to compile, we need to implement
/// `baz()`, so we can use `unimplemented!`:
///
/// ```
/// # trait Foo {
/// # fn bar(&self);
/// # fn baz(&self);
/// # }
/// struct MyStruct;
///
/// impl Foo for MyStruct {
/// fn bar(&self) {
/// // implementation goes here
/// }
///
/// fn baz(&self) {
/// // let's not worry about implementing baz() for now
/// unimplemented!();
/// }
/// }
///
/// fn main() {
/// let s = MyStruct;
/// s.bar();
///
/// // we aren't even using baz() yet, so this is fine.
/// }
/// ```
#[macro_export]
#[stable(feature = "core", since = "1.6.0")]
macro_rules! unimplemented {
    // Simply panics with a fixed message; exists so unfinished code reads as
    // intent rather than as an anonymous `panic!`.
    () => (panic!("not yet implemented"))
}
|
//! Optional values.
//!
//! Type [`Option`] represents an optional value: every [`Option`]
//! is either [`Some`] and contains a value, or [`None`], and
//! does not. [`Option`] types are very common in Rust code, as
//! they have a number of uses:
//!
//! * Initial values
//! * Return values for functions that are not defined
//! over their entire input range (partial functions)
//! * Return values for functions that report simple errors, where `None` is
//!   returned on error
//! * Optional struct fields
//! * Struct fields that can be loaned or "taken"
//! * Optional function arguments
//! * Nullable pointers
//! * Swapping things out of difficult situations
//!
//! [`Option`]s are commonly paired with pattern matching to query the presence
//! of a value and take action, always accounting for the [`None`] case.
//!
//! ```
//! fn divide(numerator: f64, denominator: f64) -> Option<f64> {
//! if denominator == 0.0 {
//! None
//! } else {
//! Some(numerator / denominator)
//! }
//! }
//!
//! // The return value of the function is an option
//! let result = divide(2.0, 3.0);
//!
//! // Pattern match to retrieve the value
//! match result {
//! // The division was valid
//! Some(x) => println!("Result: {}", x),
//! // The division was invalid
//! None => println!("Cannot divide by 0"),
//! }
//! ```
//!
//
// FIXME: Show how `Option` is used in practice, with lots of methods
//
//! # Options and pointers ("nullable" pointers)
//!
//! Rust's pointer types must always point to a valid location; there are
//! no "null" pointers. Instead, Rust has *optional* pointers, like
//! the optional owned box, [`Option`]`<`[`Box<T>`]`>`.
//!
//! The following example uses [`Option`] to create an optional box of
//! [`i32`]. Notice that in order to use the inner [`i32`] value first, the
//! `check_optional` function needs to use pattern matching to
//! determine whether the box has a value (i.e., it is [`Some(...)`][`Some`]) or
//! not ([`None`]).
//!
//! ```
//! let optional = None;
//! check_optional(optional);
//!
//! let optional = Some(Box::new(9000));
//! check_optional(optional);
//!
//! fn check_optional(optional: Option<Box<i32>>) {
//! match optional {
//! Some(ref p) => println!("has value {}", p),
//! None => println!("has no value"),
//! }
//! }
//! ```
//!
//! This usage of [`Option`] to create safe nullable pointers is so
//! common that Rust does special optimizations to make the
//! representation of [`Option`]`<`[`Box<T>`]`>` a single pointer. Optional pointers
//! in Rust are stored as efficiently as any other pointer type.
//!
//! # Examples
//!
//! Basic pattern matching on [`Option`]:
//!
//! ```
//! let msg = Some("howdy");
//!
//! // Take a reference to the contained string
//! if let Some(ref m) = msg {
//! println!("{}", *m);
//! }
//!
//! // Remove the contained string, destroying the Option
//! let unwrapped_msg = msg.unwrap_or("default message");
//! ```
//!
//! Initialize a result to [`None`] before a loop:
//!
//! ```
//! enum Kingdom { Plant(u32, &'static str), Animal(u32, &'static str) }
//!
//! // A list of data to search through.
//! let all_the_big_things = [
//! Kingdom::Plant(250, "redwood"),
//! Kingdom::Plant(230, "noble fir"),
//! Kingdom::Plant(229, "sugar pine"),
//! Kingdom::Animal(25, "blue whale"),
//! Kingdom::Animal(19, "fin whale"),
//! Kingdom::Animal(15, "north pacific right whale"),
//! ];
//!
//! // We're going to search for the name of the biggest animal,
//! // but to start with we've just got `None`.
//! let mut name_of_biggest_animal = None;
//! let mut size_of_biggest_animal = 0;
//! for big_thing in &all_the_big_things {
//! match *big_thing {
//! Kingdom::Animal(size, name) if size > size_of_biggest_animal => {
//! // Now we've found the name of some big animal
//! size_of_biggest_animal = size;
//! name_of_biggest_animal = Some(name);
//! }
//! Kingdom::Animal(..) | Kingdom::Plant(..) => ()
//! }
//! }
//!
//! match name_of_biggest_animal {
//! Some(name) => println!("the biggest animal is {}", name),
//! None => println!("there are no animals :("),
//! }
//! ```
//!
//! [`Option`]: enum.Option.html
//! [`Some`]: enum.Option.html#variant.Some
//! [`None`]: enum.Option.html#variant.None
//! [`Box<T>`]: ../../std/boxed/struct.Box.html
//! [`i32`]: ../../std/primitive.i32.html
#![stable(feature = "rust1", since = "1.0.0")]
use crate::iter::{FromIterator, FusedIterator, TrustedLen, OptionShunt};
use crate::{convert, fmt, hint, mem, ops::{self, Deref, DerefMut}};
use crate::pin::Pin;
// Note that this is not a lang item per se, but it has a hidden dependency on
// `Iterator`, which is one. The compiler assumes that the `next` method of
// `Iterator` is an enumeration with one type parameter and two variants,
// which basically means it must be `Option`.
/// The `Option` type. See [the module level documentation](index.html) for more.
// `Clone` is deliberately absent from this derive list — presumably it is
// implemented manually elsewhere in this module; confirm before adding it here.
#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Option<T> {
    /// No value
    #[stable(feature = "rust1", since = "1.0.0")]
    None,
    /// Some value `T`
    // The stability attribute on the field (not just the variant) governs
    // direct access to the payload in pattern positions.
    #[stable(feature = "rust1", since = "1.0.0")]
    Some(#[stable(feature = "rust1", since = "1.0.0")] T),
}
/////////////////////////////////////////////////////////////////////////////
// Type implementation
/////////////////////////////////////////////////////////////////////////////
impl<T> Option<T> {
    /////////////////////////////////////////////////////////////////////////
    // Querying the contained values
    /////////////////////////////////////////////////////////////////////////
    /// Returns `true` if the option is a [`Some`] value.
    ///
    /// # Examples
    ///
    /// ```
    /// let x: Option<u32> = Some(2);
    /// assert_eq!(x.is_some(), true);
    ///
    /// let x: Option<u32> = None;
    /// assert_eq!(x.is_some(), false);
    /// ```
    ///
    /// [`Some`]: #variant.Some
    #[must_use = "if you intended to assert that this has a value, consider `.unwrap()` instead"]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_some(&self) -> bool {
        match *self {
            Some(_) => true,
            None => false,
        }
    }
    /// Returns `true` if the option is a [`None`] value.
    ///
    /// # Examples
    ///
    /// ```
    /// let x: Option<u32> = Some(2);
    /// assert_eq!(x.is_none(), false);
    ///
    /// let x: Option<u32> = None;
    /// assert_eq!(x.is_none(), true);
    /// ```
    ///
    /// [`None`]: #variant.None
    #[must_use = "if you intended to assert that this doesn't have a value, consider \
                  `.and_then(|| panic!(\"`Option` had a value when expected `None`\"))` instead"]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_none(&self) -> bool {
        // Defined as the negation of `is_some` so the two can never disagree.
        !self.is_some()
    }
    /// Returns `true` if the option is a [`Some`] value containing the given value.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(option_result_contains)]
    ///
    /// let x: Option<u32> = Some(2);
    /// assert_eq!(x.contains(&2), true);
    ///
    /// let x: Option<u32> = Some(3);
    /// assert_eq!(x.contains(&2), false);
    ///
    /// let x: Option<u32> = None;
    /// assert_eq!(x.contains(&2), false);
    /// ```
    #[must_use]
    #[inline]
    #[unstable(feature = "option_result_contains", issue = "62358")]
    pub fn contains<U>(&self, x: &U) -> bool where U: PartialEq<T> {
        // Note the comparison is `x == y` (the probe on the left), so it is the
        // caller-supplied `U: PartialEq<T>` impl that decides equality.
        match self {
            Some(y) => x == y,
            None => false,
        }
    }
    /////////////////////////////////////////////////////////////////////////
    // Adapter for working with references
    /////////////////////////////////////////////////////////////////////////
    /// Converts from `&Option<T>` to `Option<&T>`.
    ///
    /// # Examples
    ///
    /// Converts an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, preserving the original.
    /// The [`map`] method takes the `self` argument by value, consuming the original,
    /// so this technique uses `as_ref` to first take an `Option` to a reference
    /// to the value inside the original.
    ///
    /// [`map`]: enum.Option.html#method.map
    /// [`String`]: ../../std/string/struct.String.html
    /// [`usize`]: ../../std/primitive.usize.html
    ///
    /// ```
    /// let text: Option<String> = Some("Hello, world!".to_string());
    /// // First, cast `Option<String>` to `Option<&String>` with `as_ref`,
    /// // then consume *that* with `map`, leaving `text` on the stack.
    /// let text_length: Option<usize> = text.as_ref().map(|s| s.len());
    /// println!("still can print text: {:?}", text);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn as_ref(&self) -> Option<&T> {
        match *self {
            Some(ref x) => Some(x),
            None => None,
        }
    }
    /// Converts from `&mut Option<T>` to `Option<&mut T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut x = Some(2);
    /// match x.as_mut() {
    ///     Some(v) => *v = 42,
    ///     None => {},
    /// }
    /// assert_eq!(x, Some(42));
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn as_mut(&mut self) -> Option<&mut T> {
        match *self {
            Some(ref mut x) => Some(x),
            None => None,
        }
    }
    /// Converts from [`Pin`]`<&Option<T>>` to `Option<`[`Pin`]`<&T>>`.
    ///
    /// [`Pin`]: ../pin/struct.Pin.html
    #[inline]
    #[stable(feature = "pin", since = "1.33.0")]
    pub fn as_pin_ref<'a>(self: Pin<&'a Option<T>>) -> Option<Pin<&'a T>> {
        // NOTE(review): standard pin-projection — the inner `T` is treated as
        // pinned because the `Option` containing it is pinned; confirm against
        // the `Pin` documentation before modifying.
        unsafe {
            Pin::get_ref(self).as_ref().map(|x| Pin::new_unchecked(x))
        }
    }
    /// Converts from [`Pin`]`<&mut Option<T>>` to `Option<`[`Pin`]`<&mut T>>`.
    ///
    /// [`Pin`]: ../pin/struct.Pin.html
    #[inline]
    #[stable(feature = "pin", since = "1.33.0")]
    pub fn as_pin_mut<'a>(self: Pin<&'a mut Option<T>>) -> Option<Pin<&'a mut T>> {
        // NOTE(review): mutable pin-projection; the `&mut Option<T>` obtained
        // here is never used to move the value out, only re-wrapped in `Pin`.
        unsafe {
            Pin::get_unchecked_mut(self).as_mut().map(|x| Pin::new_unchecked(x))
        }
    }
    /////////////////////////////////////////////////////////////////////////
    // Getting to contained values
    /////////////////////////////////////////////////////////////////////////
    /// Unwraps an option, yielding the content of a [`Some`].
    ///
    /// # Panics
    ///
    /// Panics if the value is a [`None`] with a custom panic message provided by
    /// `msg`.
    ///
    /// [`Some`]: #variant.Some
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some("value");
    /// assert_eq!(x.expect("the world is ending"), "value");
    /// ```
    ///
    /// ```{.should_panic}
    /// let x: Option<&str> = None;
    /// x.expect("the world is ending"); // panics with `the world is ending`
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn expect(self, msg: &str) -> T {
        match self {
            Some(val) => val,
            // Panic path is delegated to a helper defined elsewhere in this
            // module — presumably a `#[cold]` out-of-line function to keep this
            // method small; confirm at the helper's definition.
            None => expect_failed(msg),
        }
    }
    /// Moves the value `v` out of the `Option<T>` if it is [`Some(v)`].
    ///
    /// In general, because this function may panic, its use is discouraged.
    /// Instead, prefer to use pattern matching and handle the [`None`]
    /// case explicitly.
    ///
    /// # Panics
    ///
    /// Panics if the self value equals [`None`].
    ///
    /// [`Some(v)`]: #variant.Some
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some("air");
    /// assert_eq!(x.unwrap(), "air");
    /// ```
    ///
    /// ```{.should_panic}
    /// let x: Option<&str> = None;
    /// assert_eq!(x.unwrap(), "air"); // fails
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn unwrap(self) -> T {
        match self {
            Some(val) => val,
            None => panic!("called `Option::unwrap()` on a `None` value"),
        }
    }
    /// Returns the contained value or a default.
    ///
    /// Arguments passed to `unwrap_or` are eagerly evaluated; if you are passing
    /// the result of a function call, it is recommended to use [`unwrap_or_else`],
    /// which is lazily evaluated.
    ///
    /// [`unwrap_or_else`]: #method.unwrap_or_else
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(Some("car").unwrap_or("bike"), "car");
    /// assert_eq!(None.unwrap_or("bike"), "bike");
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn unwrap_or(self, def: T) -> T {
        match self {
            Some(x) => x,
            None => def,
        }
    }
    /// Returns the contained value or computes it from a closure.
    ///
    /// # Examples
    ///
    /// ```
    /// let k = 10;
    /// assert_eq!(Some(4).unwrap_or_else(|| 2 * k), 4);
    /// assert_eq!(None.unwrap_or_else(|| 2 * k), 20);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn unwrap_or_else<F: FnOnce() -> T>(self, f: F) -> T {
        match self {
            Some(x) => x,
            // The closure runs only on the `None` path — this is the lazy
            // counterpart to `unwrap_or`.
            None => f(),
        }
    }
    /////////////////////////////////////////////////////////////////////////
    // Transforming contained values
    /////////////////////////////////////////////////////////////////////////
    /// Maps an `Option<T>` to `Option<U>` by applying a function to a contained value.
    ///
    /// # Examples
    ///
    /// Converts an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, consuming the original:
    ///
    /// [`String`]: ../../std/string/struct.String.html
    /// [`usize`]: ../../std/primitive.usize.html
    ///
    /// ```
    /// let maybe_some_string = Some(String::from("Hello, World!"));
    /// // `Option::map` takes self *by value*, consuming `maybe_some_string`
    /// let maybe_some_len = maybe_some_string.map(|s| s.len());
    ///
    /// assert_eq!(maybe_some_len, Some(13));
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Option<U> {
        match self {
            Some(x) => Some(f(x)),
            None => None,
        }
    }
    /// Applies a function to the contained value (if any),
    /// or returns the provided default (if not).
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some("foo");
    /// assert_eq!(x.map_or(42, |v| v.len()), 3);
    ///
    /// let x: Option<&str> = None;
    /// assert_eq!(x.map_or(42, |v| v.len()), 42);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn map_or<U, F: FnOnce(T) -> U>(self, default: U, f: F) -> U {
        // `default` is evaluated eagerly by the caller; see `map_or_else` for
        // the lazy variant.
        match self {
            Some(t) => f(t),
            None => default,
        }
    }
    /// Applies a function to the contained value (if any),
    /// or computes a default (if not).
    ///
    /// # Examples
    ///
    /// ```
    /// let k = 21;
    ///
    /// let x = Some("foo");
    /// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 3);
    ///
    /// let x: Option<&str> = None;
    /// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 42);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn map_or_else<U, D: FnOnce() -> U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U {
        match self {
            Some(t) => f(t),
            None => default(),
        }
    }
    /// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
    /// [`Ok(v)`] and [`None`] to [`Err(err)`].
    ///
    /// Arguments passed to `ok_or` are eagerly evaluated; if you are passing the
    /// result of a function call, it is recommended to use [`ok_or_else`], which is
    /// lazily evaluated.
    ///
    /// [`Result<T, E>`]: ../../std/result/enum.Result.html
    /// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok
    /// [`Err(err)`]: ../../std/result/enum.Result.html#variant.Err
    /// [`None`]: #variant.None
    /// [`Some(v)`]: #variant.Some
    /// [`ok_or_else`]: #method.ok_or_else
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some("foo");
    /// assert_eq!(x.ok_or(0), Ok("foo"));
    ///
    /// let x: Option<&str> = None;
    /// assert_eq!(x.ok_or(0), Err(0));
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn ok_or<E>(self, err: E) -> Result<T, E> {
        match self {
            Some(v) => Ok(v),
            None => Err(err),
        }
    }
    /// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
    /// [`Ok(v)`] and [`None`] to [`Err(err())`].
    ///
    /// [`Result<T, E>`]: ../../std/result/enum.Result.html
    /// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok
    /// [`Err(err())`]: ../../std/result/enum.Result.html#variant.Err
    /// [`None`]: #variant.None
    /// [`Some(v)`]: #variant.Some
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some("foo");
    /// assert_eq!(x.ok_or_else(|| 0), Ok("foo"));
    ///
    /// let x: Option<&str> = None;
    /// assert_eq!(x.ok_or_else(|| 0), Err(0));
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn ok_or_else<E, F: FnOnce() -> E>(self, err: F) -> Result<T, E> {
        match self {
            Some(v) => Ok(v),
            None => Err(err()),
        }
    }
    /////////////////////////////////////////////////////////////////////////
    // Iterator constructors
    /////////////////////////////////////////////////////////////////////////
    /// Returns an iterator over the possibly contained value.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some(4);
    /// assert_eq!(x.iter().next(), Some(&4));
    ///
    /// let x: Option<u32> = None;
    /// assert_eq!(x.iter().next(), None);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter(&self) -> Iter<'_, T> {
        // `Iter`/`Item` are iterator adapters defined elsewhere in this module;
        // the iterator yields at most one `&T`.
        Iter { inner: Item { opt: self.as_ref() } }
    }
    /// Returns a mutable iterator over the possibly contained value.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut x = Some(4);
    /// match x.iter_mut().next() {
    ///     Some(v) => *v = 42,
    ///     None => {},
    /// }
    /// assert_eq!(x, Some(42));
    ///
    /// let mut x: Option<u32> = None;
    /// assert_eq!(x.iter_mut().next(), None);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter_mut(&mut self) -> IterMut<'_, T> {
        IterMut { inner: Item { opt: self.as_mut() } }
    }
    /////////////////////////////////////////////////////////////////////////
    // Boolean operations on the values, eager and lazy
    /////////////////////////////////////////////////////////////////////////
    /// Returns [`None`] if the option is [`None`], otherwise returns `optb`.
    ///
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some(2);
    /// let y: Option<&str> = None;
    /// assert_eq!(x.and(y), None);
    ///
    /// let x: Option<u32> = None;
    /// let y = Some("foo");
    /// assert_eq!(x.and(y), None);
    ///
    /// let x = Some(2);
    /// let y = Some("foo");
    /// assert_eq!(x.and(y), Some("foo"));
    ///
    /// let x: Option<u32> = None;
    /// let y: Option<&str> = None;
    /// assert_eq!(x.and(y), None);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn and<U>(self, optb: Option<U>) -> Option<U> {
        // `self`'s payload (if any) is dropped here — only its Some/None state
        // is consulted.
        match self {
            Some(_) => optb,
            None => None,
        }
    }
    /// Returns [`None`] if the option is [`None`], otherwise calls `f` with the
    /// wrapped value and returns the result.
    ///
    /// Some languages call this operation flatmap.
    ///
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// fn sq(x: u32) -> Option<u32> { Some(x * x) }
    /// fn nope(_: u32) -> Option<u32> { None }
    ///
    /// assert_eq!(Some(2).and_then(sq).and_then(sq), Some(16));
    /// assert_eq!(Some(2).and_then(sq).and_then(nope), None);
    /// assert_eq!(Some(2).and_then(nope).and_then(sq), None);
    /// assert_eq!(None.and_then(sq).and_then(sq), None);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn and_then<U, F: FnOnce(T) -> Option<U>>(self, f: F) -> Option<U> {
        match self {
            Some(x) => f(x),
            None => None,
        }
    }
    /// Returns [`None`] if the option is [`None`], otherwise calls `predicate`
    /// with the wrapped value and returns:
    ///
    /// - [`Some(t)`] if `predicate` returns `true` (where `t` is the wrapped
    ///   value), and
    /// - [`None`] if `predicate` returns `false`.
    ///
    /// This function works similar to [`Iterator::filter()`]. You can imagine
    /// the `Option<T>` being an iterator over one or zero elements. `filter()`
    /// lets you decide which elements to keep.
    ///
    /// # Examples
    ///
    /// ```rust
    /// fn is_even(n: &i32) -> bool {
    ///     n % 2 == 0
    /// }
    ///
    /// assert_eq!(None.filter(is_even), None);
    /// assert_eq!(Some(3).filter(is_even), None);
    /// assert_eq!(Some(4).filter(is_even), Some(4));
    /// ```
    ///
    /// [`None`]: #variant.None
    /// [`Some(t)`]: #variant.Some
    /// [`Iterator::filter()`]: ../../std/iter/trait.Iterator.html#method.filter
    #[inline]
    #[stable(feature = "option_filter", since = "1.27.0")]
    pub fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self {
        // The predicate receives `&x` (a borrow) while the value itself is
        // owned here, so it can be returned untouched when the predicate holds.
        if let Some(x) = self {
            if predicate(&x) {
                return Some(x)
            }
        }
        None
    }
    /// Returns the option if it contains a value, otherwise returns `optb`.
    ///
    /// Arguments passed to `or` are eagerly evaluated; if you are passing the
    /// result of a function call, it is recommended to use [`or_else`], which is
    /// lazily evaluated.
    ///
    /// [`or_else`]: #method.or_else
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some(2);
    /// let y = None;
    /// assert_eq!(x.or(y), Some(2));
    ///
    /// let x = None;
    /// let y = Some(100);
    /// assert_eq!(x.or(y), Some(100));
    ///
    /// let x = Some(2);
    /// let y = Some(100);
    /// assert_eq!(x.or(y), Some(2));
    ///
    /// let x: Option<u32> = None;
    /// let y = None;
    /// assert_eq!(x.or(y), None);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn or(self, optb: Option<T>) -> Option<T> {
        match self {
            Some(_) => self,
            None => optb,
        }
    }
    /// Returns the option if it contains a value, otherwise calls `f` and
    /// returns the result.
    ///
    /// # Examples
    ///
    /// ```
    /// fn nobody() -> Option<&'static str> { None }
    /// fn vikings() -> Option<&'static str> { Some("vikings") }
    ///
    /// assert_eq!(Some("barbarians").or_else(vikings), Some("barbarians"));
    /// assert_eq!(None.or_else(vikings), Some("vikings"));
    /// assert_eq!(None.or_else(nobody), None);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn or_else<F: FnOnce() -> Option<T>>(self, f: F) -> Option<T> {
        match self {
            Some(_) => self,
            None => f(),
        }
    }
    /// Returns [`Some`] if exactly one of `self`, `optb` is [`Some`], otherwise returns `None`.
    ///
    /// [`Some`]: #variant.Some
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some(2);
    /// let y: Option<u32> = None;
    /// assert_eq!(x.xor(y), Some(2));
    ///
    /// let x: Option<u32> = None;
    /// let y = Some(2);
    /// assert_eq!(x.xor(y), Some(2));
    ///
    /// let x = Some(2);
    /// let y = Some(2);
    /// assert_eq!(x.xor(y), None);
    ///
    /// let x: Option<u32> = None;
    /// let y: Option<u32> = None;
    /// assert_eq!(x.xor(y), None);
    /// ```
    #[inline]
    #[stable(feature = "option_xor", since = "1.37.0")]
    pub fn xor(self, optb: Option<T>) -> Option<T> {
        // Exclusive-or on presence: both-`Some` and both-`None` collapse to
        // `None` via the catch-all arm.
        match (self, optb) {
            (Some(a), None) => Some(a),
            (None, Some(b)) => Some(b),
            _ => None,
        }
    }
    /////////////////////////////////////////////////////////////////////////
    // Entry-like operations to insert if None and return a reference
    /////////////////////////////////////////////////////////////////////////
    /// Inserts `v` into the option if it is [`None`], then
    /// returns a mutable reference to the contained value.
    ///
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// let mut x = None;
    ///
    /// {
    ///     let y: &mut u32 = x.get_or_insert(5);
    ///     assert_eq!(y, &5);
    ///
    ///     *y = 7;
    /// }
    ///
    /// assert_eq!(x, Some(7));
    /// ```
    #[inline]
    #[stable(feature = "option_entry", since = "1.20.0")]
    pub fn get_or_insert(&mut self, v: T) -> &mut T {
        // Delegates to the lazy variant; `v` is already constructed, so the
        // closure merely moves it into place when needed.
        self.get_or_insert_with(|| v)
    }
    /// Inserts a value computed from `f` into the option if it is [`None`], then
    /// returns a mutable reference to the contained value.
    ///
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// let mut x = None;
    ///
    /// {
    ///     let y: &mut u32 = x.get_or_insert_with(|| 5);
    ///     assert_eq!(y, &5);
    ///
    ///     *y = 7;
    /// }
    ///
    /// assert_eq!(x, Some(7));
    /// ```
    #[inline]
    #[stable(feature = "option_entry", since = "1.20.0")]
    pub fn get_or_insert_with<F: FnOnce() -> T>(&mut self, f: F) -> &mut T {
        // First pass: fill in the value if absent.
        match *self {
            None => *self = Some(f()),
            _ => (),
        }
        // Second pass: borrow the (now guaranteed) value.
        match *self {
            Some(ref mut v) => v,
            // SAFETY: the match above just stored `Some(f())` whenever `self`
            // was `None`, so this arm can never be reached.
            None => unsafe { hint::unreachable_unchecked() },
        }
    }
    /////////////////////////////////////////////////////////////////////////
    // Misc
    /////////////////////////////////////////////////////////////////////////
    /// Takes the value out of the option, leaving a [`None`] in its place.
    ///
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// let mut x = Some(2);
    /// let y = x.take();
    /// assert_eq!(x, None);
    /// assert_eq!(y, Some(2));
    ///
    /// let mut x: Option<u32> = None;
    /// let y = x.take();
    /// assert_eq!(x, None);
    /// assert_eq!(y, None);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn take(&mut self) -> Option<T> {
        // `mem::take` swaps in the `Default` value — relies on `Option`'s
        // `Default` impl (defined elsewhere in this module) being `None`.
        mem::take(self)
    }
    /// Replaces the actual value in the option by the value given in parameter,
    /// returning the old value if present,
    /// leaving a [`Some`] in its place without deinitializing either one.
    ///
    /// [`Some`]: #variant.Some
    ///
    /// # Examples
    ///
    /// ```
    /// let mut x = Some(2);
    /// let old = x.replace(5);
    /// assert_eq!(x, Some(5));
    /// assert_eq!(old, Some(2));
    ///
    /// let mut x = None;
    /// let old = x.replace(3);
    /// assert_eq!(x, Some(3));
    /// assert_eq!(old, None);
    /// ```
    #[inline]
    #[stable(feature = "option_replace", since = "1.31.0")]
    pub fn replace(&mut self, value: T) -> Option<T> {
        // Unconditionally stores `Some(value)` and hands back whatever was
        // there before.
        mem::replace(self, Some(value))
    }
}
impl<T: Copy> Option<&T> {
    /// Produces an `Option<T>` from an `Option<&T>` by copying the referent.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = 12;
    /// let opt_x = Some(&x);
    /// assert_eq!(opt_x, Some(&12));
    /// let copied = opt_x.copied();
    /// assert_eq!(copied, Some(12));
    /// ```
    #[stable(feature = "copied", since = "1.35.0")]
    pub fn copied(self) -> Option<T> {
        // Destructure directly: the `&v` pattern dereferences the shared
        // reference, and `T: Copy` lets `v` bind by copy.
        match self {
            Some(&v) => Some(v),
            None => None,
        }
    }
}
impl<T: Copy> Option<&mut T> {
    /// Maps an `Option<&mut T>` to an `Option<T>` by copying the contents of
    /// the option.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut x = 12;
    /// let copied = Some(&mut x).copied();
    /// assert_eq!(copied, Some(12));
    /// ```
    #[stable(feature = "copied", since = "1.35.0")]
    pub fn copied(self) -> Option<T> {
        // Copy the pointee out through the mutable borrow.
        match self {
            Some(&mut v) => Some(v),
            None => None,
        }
    }
}
impl<T: Clone> Option<&T> {
    /// Maps an `Option<&T>` to an `Option<T>` by cloning the contents of the
    /// option.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = 12;
    /// let opt_x = Some(&x);
    /// let cloned = opt_x.cloned();
    /// assert_eq!(cloned, Some(12));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn cloned(self) -> Option<T> {
        // Clone the referenced value into an owned `Option<T>`.
        match self {
            Some(v) => Some(v.clone()),
            None => None,
        }
    }
}
impl<T: Clone> Option<&mut T> {
    /// Maps an `Option<&mut T>` to an `Option<T>` by cloning the contents of
    /// the option.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut x = 12;
    /// let cloned = Some(&mut x).cloned();
    /// assert_eq!(cloned, Some(12));
    /// ```
    #[stable(since = "1.26.0", feature = "option_ref_mut_cloned")]
    pub fn cloned(self) -> Option<T> {
        // Clone the value behind the mutable borrow into an owned `Option<T>`.
        match self {
            Some(v) => Some(v.clone()),
            None => None,
        }
    }
}
impl<T: fmt::Debug> Option<T> {
    /// Unwraps an option, expecting [`None`] and returning nothing.
    ///
    /// # Panics
    ///
    /// Panics if the value is a [`Some`], with a panic message that combines
    /// `msg` with the `Debug` rendering of the contained value.
    ///
    /// [`Some`]: #variant.Some
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(option_expect_none)]
    ///
    /// use std::collections::HashMap;
    /// let mut squares = HashMap::new();
    /// for i in -10..=10 {
    ///     // This will not panic, since all keys are unique.
    ///     squares.insert(i, i * i).expect_none("duplicate key");
    /// }
    /// ```
    ///
    /// ```{.should_panic}
    /// #![feature(option_expect_none)]
    ///
    /// use std::collections::HashMap;
    /// let mut sqrts = HashMap::new();
    /// for i in -10..=10 {
    ///     // This will panic, since both negative and positive `i` will
    ///     // insert the same `i * i` key, returning the old `Some(i)`.
    ///     sqrts.insert(i * i, i).expect_none("duplicate key");
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "option_expect_none", reason = "newly added", issue = "62633")]
    pub fn expect_none(self, msg: &str) {
        // Panic through the cold, out-of-line helper so this fn stays small.
        match self {
            Some(val) => expect_none_failed(msg, &val),
            None => {}
        }
    }

    /// Unwraps an option, expecting [`None`] and returning nothing.
    ///
    /// # Panics
    ///
    /// Panics if the value is a [`Some`], with a panic message that shows the
    /// contained value.
    ///
    /// [`Some(v)`]: #variant.Some
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(option_unwrap_none)]
    ///
    /// use std::collections::HashMap;
    /// let mut squares = HashMap::new();
    /// for i in -10..=10 {
    ///     // This will not panic, since all keys are unique.
    ///     squares.insert(i, i * i).unwrap_none();
    /// }
    /// ```
    ///
    /// ```{.should_panic}
    /// #![feature(option_unwrap_none)]
    ///
    /// use std::collections::HashMap;
    /// let mut sqrts = HashMap::new();
    /// for i in -10..=10 {
    ///     // This will panic, since both negative and positive `i` will
    ///     // insert the same `i * i` key, returning the old `Some(i)`.
    ///     sqrts.insert(i * i, i).unwrap_none();
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "option_unwrap_none", reason = "newly added", issue = "62633")]
    pub fn unwrap_none(self) {
        match self {
            Some(val) => {
                expect_none_failed("called `Option::unwrap_none()` on a `Some` value", &val)
            }
            None => {}
        }
    }
}
impl<T: Default> Option<T> {
    /// Returns the contained value or a default.
    ///
    /// Consumes `self`; if it is [`Some`], the contained value is returned,
    /// otherwise the [default value] for `T` is produced.
    ///
    /// # Examples
    ///
    /// Converts a string to an integer, turning poorly-formed strings
    /// into 0 (the default value for integers). [`parse`] converts
    /// a string to any other type that implements [`FromStr`], returning
    /// [`None`] on error.
    ///
    /// ```
    /// let good_year_from_input = "1909";
    /// let bad_year_from_input = "190blarg";
    /// let good_year = good_year_from_input.parse().ok().unwrap_or_default();
    /// let bad_year = bad_year_from_input.parse().ok().unwrap_or_default();
    ///
    /// assert_eq!(1909, good_year);
    /// assert_eq!(0, bad_year);
    /// ```
    ///
    /// [`Some`]: #variant.Some
    /// [`None`]: #variant.None
    /// [default value]: ../default/trait.Default.html#tymethod.default
    /// [`parse`]: ../../std/primitive.str.html#method.parse
    /// [`FromStr`]: ../../std/str/trait.FromStr.html
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn unwrap_or_default(self) -> T {
        // The default is only constructed when the option is empty.
        if let Some(x) = self {
            x
        } else {
            T::default()
        }
    }
}
#[unstable(feature = "inner_deref", reason = "newly added", issue = "50264")]
impl<T: Deref> Option<T> {
    /// Converts from `Option<T>` (or `&Option<T>`) to `Option<&T::Target>`.
    ///
    /// Leaves the original `Option` in place, producing a new one that holds
    /// a reference to the original value, coerced through [`Deref`].
    ///
    /// [`Deref`]: ../../std/ops/trait.Deref.html
    pub fn as_deref(&self) -> Option<&T::Target> {
        // `&**t` takes `&T` through `Deref` to `&T::Target`.
        match self.as_ref() {
            Some(t) => Some(&**t),
            None => None,
        }
    }
}
#[unstable(feature = "inner_deref", reason = "newly added", issue = "50264")]
impl<T: DerefMut> Option<T> {
    /// Converts from `Option<T>` (or `&mut Option<T>`) to
    /// `Option<&mut T::Target>`.
    ///
    /// Leaves the original `Option` in place, producing a new one containing
    /// a mutable reference to the inner type's `Deref::Target` type.
    pub fn as_deref_mut(&mut self) -> Option<&mut T::Target> {
        // `&mut **t` takes `&mut T` through `DerefMut` to `&mut T::Target`.
        match self.as_mut() {
            Some(t) => Some(&mut **t),
            None => None,
        }
    }
}
impl<T, E> Option<Result<T, E>> {
    /// Transposes an `Option` of a [`Result`] into a [`Result`] of an `Option`.
    ///
    /// [`None`] will be mapped to [`Ok`]`(`[`None`]`)`.
    /// [`Some`]`(`[`Ok`]`(_))` and [`Some`]`(`[`Err`]`(_))` will be mapped to
    /// [`Ok`]`(`[`Some`]`(_))` and [`Err`]`(_)`.
    ///
    /// [`None`]: #variant.None
    /// [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
    /// [`Some`]: #variant.Some
    /// [`Err`]: ../../std/result/enum.Result.html#variant.Err
    ///
    /// # Examples
    ///
    /// ```
    /// #[derive(Debug, Eq, PartialEq)]
    /// struct SomeErr;
    ///
    /// let x: Result<Option<i32>, SomeErr> = Ok(Some(5));
    /// let y: Option<Result<i32, SomeErr>> = Some(Ok(5));
    /// assert_eq!(x, y.transpose());
    /// ```
    #[inline]
    #[stable(feature = "transpose_result", since = "1.33.0")]
    pub fn transpose(self) -> Result<Option<T>, E> {
        // `Result::map` wraps the success value in `Some` while letting any
        // error pass straight through.
        match self {
            None => Ok(None),
            Some(res) => res.map(Some),
        }
    }
}
// This is a separate function to reduce the code size of .expect() itself.
#[inline(never)]
#[cold]
// Never-inlined, cold panic shim: keeps the hot path of `expect` small and
// hints the branch predictor/code layout that this path is rare.
fn expect_failed(msg: &str) -> ! {
    panic!("{}", msg)
}
// This is a separate function to reduce the code size of .expect_none() itself.
#[inline(never)]
#[cold]
// Takes the value as `&dyn fmt::Debug` so one monomorphization serves every
// `T: fmt::Debug`, further reducing code size at each call site.
fn expect_none_failed(msg: &str, value: &dyn fmt::Debug) -> ! {
    panic!("{}: {:?}", msg, value)
}
/////////////////////////////////////////////////////////////////////////////
// Trait implementations
/////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for Option<T> {
    #[inline]
    fn clone(&self) -> Self {
        // Clone the payload, if one is present.
        match *self {
            Some(ref x) => Some(x.clone()),
            None => None,
        }
    }
    #[inline]
    fn clone_from(&mut self, source: &Self) {
        // When both sides are `Some`, delegate to the payload's `clone_from`
        // so existing allocations can be reused; otherwise plain-clone over.
        match (self, source) {
            (Some(to), Some(from)) => to.clone_from(from),
            (to, from) => *to = from.clone(),
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Option<T> {
    /// Returns [`None`][Option::None].
    #[inline]
    fn default() -> Option<T> {
        None
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for Option<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;
    /// Returns a consuming iterator over the possibly contained value.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some("string");
    /// let v: Vec<&str> = x.into_iter().collect();
    /// assert_eq!(v, ["string"]);
    ///
    /// let x = None;
    /// let v: Vec<&str> = x.into_iter().collect();
    /// assert!(v.is_empty());
    /// ```
    #[inline]
    fn into_iter(self) -> IntoIter<T> {
        // Wrap the option in the shared single-item adapter.
        let inner = Item { opt: self };
        IntoIter { inner }
    }
}
#[stable(since = "1.4.0", feature = "option_iter")]
// Borrowing `IntoIterator`: lets `for x in &opt` iterate by shared reference.
impl<'a, T> IntoIterator for &'a Option<T> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;
    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}
#[stable(since = "1.4.0", feature = "option_iter")]
// Borrowing `IntoIterator`: lets `for x in &mut opt` iterate by mutable
// reference.
impl<'a, T> IntoIterator for &'a mut Option<T> {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;
    fn into_iter(self) -> IterMut<'a, T> {
        self.iter_mut()
    }
}
#[stable(since = "1.12.0", feature = "option_from")]
impl<T> From<T> for Option<T> {
    /// Wraps a bare value in `Some`.
    fn from(val: T) -> Option<T> {
        Some(val)
    }
}
#[stable(feature = "option_ref_from_ref_option", since = "1.30.0")]
impl<'a, T> From<&'a Option<T>> for Option<&'a T> {
    /// Converts a reference to an option into an option of a reference,
    /// delegating to `Option::as_ref`.
    fn from(o: &'a Option<T>) -> Option<&'a T> {
        o.as_ref()
    }
}
#[stable(feature = "option_ref_from_ref_option", since = "1.30.0")]
impl<'a, T> From<&'a mut Option<T>> for Option<&'a mut T> {
    /// Converts a mutable reference to an option into an option of a mutable
    /// reference, delegating to `Option::as_mut`.
    fn from(o: &'a mut Option<T>) -> Option<&'a mut T> {
        o.as_mut()
    }
}
/////////////////////////////////////////////////////////////////////////////
// The Option Iterators
/////////////////////////////////////////////////////////////////////////////
#[derive(Clone, Debug)]
// Private adapter shared by `Iter`, `IterMut`, and `IntoIter`: an `Option`
// viewed as an iterator that yields its value at most once.
struct Item<A> {
    opt: Option<A>
}
impl<A> Iterator for Item<A> {
    type Item = A;
    #[inline]
    fn next(&mut self) -> Option<A> {
        // `take` moves the value out, leaving `None` so iteration then ends.
        self.opt.take()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exactly zero or one element remains, and we always know which.
        let remaining = if self.opt.is_some() { 1 } else { 0 };
        (remaining, Some(remaining))
    }
}
impl<A> DoubleEndedIterator for Item<A> {
    #[inline]
    fn next_back(&mut self) -> Option<A> {
        // With at most one element, front and back iteration coincide.
        self.opt.take()
    }
}
// At most one element and an exact `size_hint`, so these marker traits hold.
impl<A> ExactSizeIterator for Item<A> {}
impl<A> FusedIterator for Item<A> {}
// SAFETY-relevant: `size_hint` reports the precise length, as `TrustedLen`
// requires.
unsafe impl<A> TrustedLen for Item<A> {}
/// An iterator over a reference to the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`Option::iter`] function.
///
/// [`Option`]: enum.Option.html
/// [`Some`]: enum.Option.html#variant.Some
/// [`Option::iter`]: enum.Option.html#method.iter
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
// Thin wrapper over the shared `Item` adapter, yielding `&A` at most once.
pub struct Iter<'a, A: 'a> { inner: Item<&'a A> }
// All iterator behavior is forwarded to the inner `Item` adapter.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> Iterator for Iter<'a, A> {
    type Item = &'a A;
    #[inline]
    fn next(&mut self) -> Option<&'a A> { self.inner.next() }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> DoubleEndedIterator for Iter<'a, A> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a A> { self.inner.next_back() }
}
// The inner `Item` yields at most one element with an exact `size_hint`,
// so these marker traits are sound to forward.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> ExactSizeIterator for Iter<'_, A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<A> FusedIterator for Iter<'_, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A> TrustedLen for Iter<'_, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
// Manual impl (rather than `#[derive(Clone)]`) so `Iter` is `Clone` even when
// `A` is not: only the inner shared reference is cloned.
impl<A> Clone for Iter<'_, A> {
    #[inline]
    fn clone(&self) -> Self {
        Iter { inner: self.inner.clone() }
    }
}
/// An iterator over a mutable reference to the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`Option::iter_mut`] function.
///
/// [`Option`]: enum.Option.html
/// [`Some`]: enum.Option.html#variant.Some
/// [`Option::iter_mut`]: enum.Option.html#method.iter_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
// Mutable counterpart of `Iter`; not `Clone`, since cloning would alias the
// contained `&mut A`.
pub struct IterMut<'a, A: 'a> { inner: Item<&'a mut A> }
// All iterator behavior is forwarded to the inner `Item` adapter.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> Iterator for IterMut<'a, A> {
    type Item = &'a mut A;
    #[inline]
    fn next(&mut self) -> Option<&'a mut A> { self.inner.next() }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> DoubleEndedIterator for IterMut<'a, A> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut A> { self.inner.next_back() }
}
// At most one element and an exact `size_hint`, inherited from `Item`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> ExactSizeIterator for IterMut<'_, A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<A> FusedIterator for IterMut<'_, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A> TrustedLen for IterMut<'_, A> {}
/// An iterator over the value in [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`Option::into_iter`] function.
///
/// [`Option`]: enum.Option.html
/// [`Some`]: enum.Option.html#variant.Some
/// [`Option::into_iter`]: enum.Option.html#method.into_iter
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
// By-value counterpart of `Iter`/`IterMut`; owns the value it yields.
pub struct IntoIter<A> { inner: Item<A> }
// All iterator behavior is forwarded to the inner `Item` adapter.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Iterator for IntoIter<A> {
    type Item = A;
    #[inline]
    fn next(&mut self) -> Option<A> { self.inner.next() }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> DoubleEndedIterator for IntoIter<A> {
    #[inline]
    fn next_back(&mut self) -> Option<A> { self.inner.next_back() }
}
// At most one element and an exact `size_hint`, inherited from `Item`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> ExactSizeIterator for IntoIter<A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<A> FusedIterator for IntoIter<A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A> TrustedLen for IntoIter<A> {}
/////////////////////////////////////////////////////////////////////////////
// FromIterator
/////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, V: FromIterator<A>> FromIterator<Option<A>> for Option<V> {
    /// Takes each element in the [`Iterator`]: if it is [`None`][Option::None],
    /// no further elements are taken, and the [`None`][Option::None] is
    /// returned. Should no [`None`][Option::None] occur, a container with the
    /// values of each [`Option`] is returned.
    ///
    /// # Examples
    ///
    /// Here is an example which increments every integer in a vector.
    /// We use the checked variant of `add` that returns `None` when the
    /// calculation would result in an overflow.
    ///
    /// ```
    /// let items = vec![0_u16, 1, 2];
    ///
    /// let res: Option<Vec<u16>> = items
    ///     .iter()
    ///     .map(|x| x.checked_add(1))
    ///     .collect();
    ///
    /// assert_eq!(res, Some(vec![1, 2, 3]));
    /// ```
    ///
    /// As you can see, this will return the expected, valid items.
    ///
    /// Here is another example that tries to subtract one from another list
    /// of integers, this time checking for underflow:
    ///
    /// ```
    /// let items = vec![2_u16, 1, 0];
    ///
    /// let res: Option<Vec<u16>> = items
    ///     .iter()
    ///     .map(|x| x.checked_sub(1))
    ///     .collect();
    ///
    /// assert_eq!(res, None);
    /// ```
    ///
    /// Since the last element is zero, it would underflow. Thus, the resulting
    /// value is `None`.
    ///
    /// Here is a variation on the previous example, showing that no
    /// further elements are taken from `iter` after the first `None`.
    ///
    /// ```
    /// let items = vec![3_u16, 2, 1, 10];
    ///
    /// let mut shared = 0;
    ///
    /// let res: Option<Vec<u16>> = items
    ///     .iter()
    ///     .map(|x| { shared += x; x.checked_sub(2) })
    ///     .collect();
    ///
    /// assert_eq!(res, None);
    /// assert_eq!(shared, 6);
    /// ```
    ///
    /// Since the third element caused an underflow, no further elements were taken,
    /// so the final value of `shared` is 6 (= `3 + 2 + 1`), not 16.
    ///
    /// [`Iterator`]: ../iter/trait.Iterator.html
    #[inline]
    fn from_iter<I: IntoIterator<Item=Option<A>>>(iter: I) -> Option<V> {
        // FIXME(#11084): This could be replaced with Iterator::scan when this
        // performance bug is closed.
        // `OptionShunt` short-circuits at the first `None`, exposing the
        // `Some` payloads to `collect` as a plain iterator.
        OptionShunt::process(iter.into_iter(), |i| i.collect())
    }
}
/// The error type that results from applying the try operator (`?`) to a `None` value. If you wish
/// to allow `x?` (where `x` is an `Option<T>`) to be converted into your error type, you can
/// implement `impl From<NoneError>` for `YourErrorType`. In that case, `x?` within a function that
/// returns `Result<_, YourErrorType>` will translate a `None` value into an `Err` result.
#[unstable(feature = "try_trait", issue = "42327")]
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
// Zero-sized: `None` carries no information, so neither does its error form.
pub struct NoneError;
#[unstable(feature = "try_trait", issue = "42327")]
// The `?` operator on `Option` desugars through these three hooks.
impl<T> ops::Try for Option<T> {
    type Ok = T;
    type Error = NoneError;
    #[inline]
    fn into_result(self) -> Result<T, NoneError> {
        // `None` becomes the unit error that `From<NoneError>` impls consume.
        match self {
            Some(v) => Ok(v),
            None => Err(NoneError),
        }
    }
    #[inline]
    fn from_ok(v: T) -> Self {
        Some(v)
    }
    #[inline]
    fn from_error(_: NoneError) -> Self {
        None
    }
}
impl<T> Option<Option<T>> {
    /// Converts from `Option<Option<T>>` to `Option<T>`.
    ///
    /// # Examples
    /// Basic usage:
    /// ```
    /// #![feature(option_flattening)]
    /// let x: Option<Option<u32>> = Some(Some(6));
    /// assert_eq!(Some(6), x.flatten());
    ///
    /// let x: Option<Option<u32>> = Some(None);
    /// assert_eq!(None, x.flatten());
    ///
    /// let x: Option<Option<u32>> = None;
    /// assert_eq!(None, x.flatten());
    /// ```
    /// Flattening only removes one level of nesting at a time:
    /// ```
    /// #![feature(option_flattening)]
    /// let x: Option<Option<Option<u32>>> = Some(Some(Some(6)));
    /// assert_eq!(Some(Some(6)), x.flatten());
    /// assert_eq!(Some(6), x.flatten().flatten());
    /// ```
    #[inline]
    #[unstable(feature = "option_flattening", issue = "60258")]
    pub fn flatten(self) -> Option<T> {
        // Peel off exactly one layer of `Option`.
        match self {
            Some(inner) => inner,
            None => None,
        }
    }
}
// NOTE(review): stray commit title accidentally pasted into the file —
// commented out so it does not break compilation: "Add links to None in
// Option doc".
//! Optional values.
//!
//! Type [`Option`] represents an optional value: every [`Option`]
//! is either [`Some`] and contains a value, or [`None`], and
//! does not. [`Option`] types are very common in Rust code, as
//! they have a number of uses:
//!
//! * Initial values
//! * Return values for functions that are not defined
//! over their entire input range (partial functions)
//! * Return value for otherwise reporting simple errors, where [`None`] is
//! returned on error
//! * Optional struct fields
//! * Struct fields that can be loaned or "taken"
//! * Optional function arguments
//! * Nullable pointers
//! * Swapping things out of difficult situations
//!
//! [`Option`]s are commonly paired with pattern matching to query the presence
//! of a value and take action, always accounting for the [`None`] case.
//!
//! ```
//! fn divide(numerator: f64, denominator: f64) -> Option<f64> {
//! if denominator == 0.0 {
//! None
//! } else {
//! Some(numerator / denominator)
//! }
//! }
//!
//! // The return value of the function is an option
//! let result = divide(2.0, 3.0);
//!
//! // Pattern match to retrieve the value
//! match result {
//! // The division was valid
//! Some(x) => println!("Result: {}", x),
//! // The division was invalid
//! None => println!("Cannot divide by 0"),
//! }
//! ```
//!
//
// FIXME: Show how `Option` is used in practice, with lots of methods
//
//! # Options and pointers ("nullable" pointers)
//!
//! Rust's pointer types must always point to a valid location; there are
//! no "null" pointers. Instead, Rust has *optional* pointers, like
//! the optional owned box, [`Option`]`<`[`Box<T>`]`>`.
//!
//! The following example uses [`Option`] to create an optional box of
//! [`i32`]. Notice that in order to use the inner [`i32`] value first, the
//! `check_optional` function needs to use pattern matching to
//! determine whether the box has a value (i.e., it is [`Some(...)`][`Some`]) or
//! not ([`None`]).
//!
//! ```
//! let optional = None;
//! check_optional(optional);
//!
//! let optional = Some(Box::new(9000));
//! check_optional(optional);
//!
//! fn check_optional(optional: Option<Box<i32>>) {
//! match optional {
//! Some(ref p) => println!("has value {}", p),
//! None => println!("has no value"),
//! }
//! }
//! ```
//!
//! This usage of [`Option`] to create safe nullable pointers is so
//! common that Rust does special optimizations to make the
//! representation of [`Option`]`<`[`Box<T>`]`>` a single pointer. Optional pointers
//! in Rust are stored as efficiently as any other pointer type.
//!
//! # Examples
//!
//! Basic pattern matching on [`Option`]:
//!
//! ```
//! let msg = Some("howdy");
//!
//! // Take a reference to the contained string
//! if let Some(ref m) = msg {
//! println!("{}", *m);
//! }
//!
//! // Remove the contained string, destroying the Option
//! let unwrapped_msg = msg.unwrap_or("default message");
//! ```
//!
//! Initialize a result to [`None`] before a loop:
//!
//! ```
//! enum Kingdom { Plant(u32, &'static str), Animal(u32, &'static str) }
//!
//! // A list of data to search through.
//! let all_the_big_things = [
//! Kingdom::Plant(250, "redwood"),
//! Kingdom::Plant(230, "noble fir"),
//! Kingdom::Plant(229, "sugar pine"),
//! Kingdom::Animal(25, "blue whale"),
//! Kingdom::Animal(19, "fin whale"),
//! Kingdom::Animal(15, "north pacific right whale"),
//! ];
//!
//! // We're going to search for the name of the biggest animal,
//! // but to start with we've just got `None`.
//! let mut name_of_biggest_animal = None;
//! let mut size_of_biggest_animal = 0;
//! for big_thing in &all_the_big_things {
//! match *big_thing {
//! Kingdom::Animal(size, name) if size > size_of_biggest_animal => {
//! // Now we've found the name of some big animal
//! size_of_biggest_animal = size;
//! name_of_biggest_animal = Some(name);
//! }
//! Kingdom::Animal(..) | Kingdom::Plant(..) => ()
//! }
//! }
//!
//! match name_of_biggest_animal {
//! Some(name) => println!("the biggest animal is {}", name),
//! None => println!("there are no animals :("),
//! }
//! ```
//!
//! [`Option`]: enum.Option.html
//! [`Some`]: enum.Option.html#variant.Some
//! [`None`]: enum.Option.html#variant.None
//! [`Box<T>`]: ../../std/boxed/struct.Box.html
//! [`i32`]: ../../std/primitive.i32.html
#![stable(feature = "rust1", since = "1.0.0")]
use crate::iter::{FromIterator, FusedIterator, TrustedLen, OptionShunt};
use crate::{convert, fmt, hint, mem, ops::{self, Deref, DerefMut}};
use crate::pin::Pin;
// Note that this is not a lang item per se, but it has a hidden dependency on
// `Iterator`, which is one. The compiler assumes that the `next` method of
// `Iterator` is an enumeration with one type parameter and two variants,
// which basically means it must be `Option`.
/// The `Option` type. See [the module level documentation](index.html) for more.
// `Clone` is deliberately not derived: a manual impl elsewhere in this file
// provides an allocation-reusing `clone_from`.
#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Option<T> {
    /// No value
    #[stable(feature = "rust1", since = "1.0.0")]
    None,
    /// Some value `T`
    #[stable(feature = "rust1", since = "1.0.0")]
    Some(#[stable(feature = "rust1", since = "1.0.0")] T),
}
/////////////////////////////////////////////////////////////////////////////
// Type implementation
/////////////////////////////////////////////////////////////////////////////
impl<T> Option<T> {
/////////////////////////////////////////////////////////////////////////
// Querying the contained values
/////////////////////////////////////////////////////////////////////////
/// Returns `true` if the option is a [`Some`] value.
///
/// # Examples
///
/// ```
/// let x: Option<u32> = Some(2);
/// assert_eq!(x.is_some(), true);
///
/// let x: Option<u32> = None;
/// assert_eq!(x.is_some(), false);
/// ```
///
/// [`Some`]: #variant.Some
#[must_use = "if you intended to assert that this has a value, consider `.unwrap()` instead"]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_some(&self) -> bool {
    // True exactly when a payload is present.
    match self {
        Some(_) => true,
        _ => false,
    }
}
/// Returns `true` if the option is a [`None`] value.
///
/// # Examples
///
/// ```
/// let x: Option<u32> = Some(2);
/// assert_eq!(x.is_none(), false);
///
/// let x: Option<u32> = None;
/// assert_eq!(x.is_none(), true);
/// ```
///
/// [`None`]: #variant.None
#[must_use = "if you intended to assert that this doesn't have a value, consider \
`.and_then(|| panic!(\"`Option` had a value when expected `None`\"))` instead"]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_none(&self) -> bool {
    // The exact complement of `is_some`.
    match *self {
        Some(_) => false,
        None => true,
    }
}
/// Returns `true` if the option is a [`Some`] value containing the given value.
///
/// # Examples
///
/// ```
/// #![feature(option_result_contains)]
///
/// let x: Option<u32> = Some(2);
/// assert_eq!(x.contains(&2), true);
///
/// let x: Option<u32> = Some(3);
/// assert_eq!(x.contains(&2), false);
///
/// let x: Option<u32> = None;
/// assert_eq!(x.contains(&2), false);
/// ```
#[must_use]
#[inline]
#[unstable(feature = "option_result_contains", issue = "62358")]
pub fn contains<U>(&self, x: &U) -> bool where U: PartialEq<T> {
    // Compare against the payload, if any; `None` never contains anything.
    if let Some(y) = self {
        x == y
    } else {
        false
    }
}
/////////////////////////////////////////////////////////////////////////
// Adapter for working with references
/////////////////////////////////////////////////////////////////////////
/// Converts from `&Option<T>` to `Option<&T>`.
///
/// # Examples
///
/// Converts an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, preserving the original.
/// The [`map`] method takes the `self` argument by value, consuming the original,
/// so this technique uses `as_ref` to first take an `Option` to a reference
/// to the value inside the original.
///
/// [`map`]: enum.Option.html#method.map
/// [`String`]: ../../std/string/struct.String.html
/// [`usize`]: ../../std/primitive.usize.html
///
/// ```
/// let text: Option<String> = Some("Hello, world!".to_string());
/// // First, cast `Option<String>` to `Option<&String>` with `as_ref`,
/// // then consume *that* with `map`, leaving `text` on the stack.
/// let text_length: Option<usize> = text.as_ref().map(|s| s.len());
/// println!("still can print text: {:?}", text);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_ref(&self) -> Option<&T> {
    // Match ergonomics bind `x` as `&T` when matching through `&self`.
    match self {
        Some(x) => Some(x),
        None => None,
    }
}
/// Converts from `&mut Option<T>` to `Option<&mut T>`.
///
/// # Examples
///
/// ```
/// let mut x = Some(2);
/// match x.as_mut() {
/// Some(v) => *v = 42,
/// None => {},
/// }
/// assert_eq!(x, Some(42));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_mut(&mut self) -> Option<&mut T> {
    // Match ergonomics bind `x` as `&mut T` when matching through `&mut self`.
    match self {
        Some(x) => Some(x),
        None => None,
    }
}
/// Converts from [`Pin`]`<&Option<T>>` to `Option<`[`Pin`]`<&T>>`.
///
/// [`Pin`]: ../pin/struct.Pin.html
#[inline]
#[stable(feature = "pin", since = "1.33.0")]
pub fn as_pin_ref<'a>(self: Pin<&'a Option<T>>) -> Option<Pin<&'a T>> {
    // SAFETY: a value inside a pinned `Option` is itself pinned, and this
    // projection only re-wraps the borrow — it never moves the value.
    unsafe {
        Pin::get_ref(self).as_ref().map(|x| Pin::new_unchecked(x))
    }
}
/// Converts from [`Pin`]`<&mut Option<T>>` to `Option<`[`Pin`]`<&mut T>>`.
///
/// [`Pin`]: ../pin/struct.Pin.html
#[inline]
#[stable(feature = "pin", since = "1.33.0")]
pub fn as_pin_mut<'a>(self: Pin<&'a mut Option<T>>) -> Option<Pin<&'a mut T>> {
    // SAFETY: structural pin projection — the payload stays pinned inside the
    // pinned `Option` and is never moved out through the returned reference.
    unsafe {
        Pin::get_unchecked_mut(self).as_mut().map(|x| Pin::new_unchecked(x))
    }
}
/////////////////////////////////////////////////////////////////////////
// Getting to contained values
/////////////////////////////////////////////////////////////////////////
/// Unwraps an option, yielding the content of a [`Some`].
///
/// # Panics
///
/// Panics if the value is a [`None`] with a custom panic message provided by
/// `msg`.
///
/// [`Some`]: #variant.Some
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let x = Some("value");
/// assert_eq!(x.expect("the world is ending"), "value");
/// ```
///
/// ```{.should_panic}
/// let x: Option<&str> = None;
/// x.expect("the world is ending"); // panics with `the world is ending`
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn expect(self, msg: &str) -> T {
    // Panic through the cold out-of-line helper when the option is empty.
    match self {
        None => expect_failed(msg),
        Some(val) => val,
    }
}
/// Moves the value `v` out of the `Option<T>` if it is [`Some(v)`].
///
/// In general, because this function may panic, its use is discouraged.
/// Instead, prefer to use pattern matching and handle the [`None`]
/// case explicitly.
///
/// # Panics
///
/// Panics if the self value equals [`None`].
///
/// [`Some(v)`]: #variant.Some
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let x = Some("air");
/// assert_eq!(x.unwrap(), "air");
/// ```
///
/// ```{.should_panic}
/// let x: Option<&str> = None;
/// assert_eq!(x.unwrap(), "air"); // fails
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn unwrap(self) -> T {
    // Extract the payload or panic with the canonical message.
    match self {
        None => panic!("called `Option::unwrap()` on a `None` value"),
        Some(val) => val,
    }
}
/// Returns the contained value or a default.
///
/// Arguments passed to `unwrap_or` are eagerly evaluated; if you are passing
/// the result of a function call, it is recommended to use [`unwrap_or_else`],
/// which is lazily evaluated.
///
/// [`unwrap_or_else`]: #method.unwrap_or_else
///
/// # Examples
///
/// ```
/// assert_eq!(Some("car").unwrap_or("bike"), "car");
/// assert_eq!(None.unwrap_or("bike"), "bike");
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn unwrap_or(self, def: T) -> T {
    // `def` was already evaluated by the caller; just pick one of the two.
    if let Some(x) = self {
        x
    } else {
        def
    }
}
/// Returns the contained value or computes it from a closure.
///
/// # Examples
///
/// ```
/// let k = 10;
/// assert_eq!(Some(4).unwrap_or_else(|| 2 * k), 4);
/// assert_eq!(None.unwrap_or_else(|| 2 * k), 20);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn unwrap_or_else<F: FnOnce() -> T>(self, f: F) -> T {
    // `f` runs only when the option is empty.
    if let Some(x) = self {
        x
    } else {
        f()
    }
}
/////////////////////////////////////////////////////////////////////////
// Transforming contained values
/////////////////////////////////////////////////////////////////////////
/// Maps an `Option<T>` to `Option<U>` by applying a function to a contained value.
///
/// # Examples
///
/// Converts an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, consuming the original:
///
/// [`String`]: ../../std/string/struct.String.html
/// [`usize`]: ../../std/primitive.usize.html
///
/// ```
/// let maybe_some_string = Some(String::from("Hello, World!"));
/// // `Option::map` takes self *by value*, consuming `maybe_some_string`
/// let maybe_some_len = maybe_some_string.map(|s| s.len());
///
/// assert_eq!(maybe_some_len, Some(13));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Option<U> {
    // Apply `f` inside the `Some`; `None` passes through untouched.
    match self {
        None => None,
        Some(x) => Some(f(x)),
    }
}
/// Applies a function to the contained value (if any),
/// or returns the provided default (if not).
///
/// # Examples
///
/// ```
/// let x = Some("foo");
/// assert_eq!(x.map_or(42, |v| v.len()), 3);
///
/// let x: Option<&str> = None;
/// assert_eq!(x.map_or(42, |v| v.len()), 42);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn map_or<U, F: FnOnce(T) -> U>(self, default: U, f: F) -> U {
    // `default` is already evaluated; `f` runs only on a present value.
    if let Some(t) = self {
        f(t)
    } else {
        default
    }
}
/// Applies a function to the contained value (if any),
/// or computes a default (if not).
///
/// # Examples
///
/// ```
/// let k = 21;
///
/// let x = Some("foo");
/// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 3);
///
/// let x: Option<&str> = None;
/// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 42);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn map_or_else<U, D: FnOnce() -> U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U {
    // Exactly one of the two closures runs.
    if let Some(t) = self {
        f(t)
    } else {
        default()
    }
}
/// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
/// [`Ok(v)`] and [`None`] to [`Err(err)`].
///
/// Arguments passed to `ok_or` are eagerly evaluated; if you are passing the
/// result of a function call, it is recommended to use [`ok_or_else`], which is
/// lazily evaluated.
///
/// [`Result<T, E>`]: ../../std/result/enum.Result.html
/// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok
/// [`Err(err)`]: ../../std/result/enum.Result.html#variant.Err
/// [`None`]: #variant.None
/// [`Some(v)`]: #variant.Some
/// [`ok_or_else`]: #method.ok_or_else
///
/// # Examples
///
/// ```
/// let x = Some("foo");
/// assert_eq!(x.ok_or(0), Ok("foo"));
///
/// let x: Option<&str> = None;
/// assert_eq!(x.ok_or(0), Err(0));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ok_or<E>(self, err: E) -> Result<T, E> {
    // `Some(v)` becomes `Ok(v)`; `None` is reported as `Err(err)`.
    if let Some(value) = self {
        Ok(value)
    } else {
        Err(err)
    }
}
/// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
/// [`Ok(v)`] and [`None`] to [`Err(err())`].
///
/// [`Result<T, E>`]: ../../std/result/enum.Result.html
/// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok
/// [`Err(err())`]: ../../std/result/enum.Result.html#variant.Err
/// [`None`]: #variant.None
/// [`Some(v)`]: #variant.Some
///
/// # Examples
///
/// ```
/// let x = Some("foo");
/// assert_eq!(x.ok_or_else(|| 0), Ok("foo"));
///
/// let x: Option<&str> = None;
/// assert_eq!(x.ok_or_else(|| 0), Err(0));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ok_or_else<E, F: FnOnce() -> E>(self, err: F) -> Result<T, E> {
    // The error constructor runs only when the option is actually empty.
    if let Some(value) = self {
        Ok(value)
    } else {
        Err(err())
    }
}
/////////////////////////////////////////////////////////////////////////
// Iterator constructors
/////////////////////////////////////////////////////////////////////////
/// Returns an iterator over the possibly contained value.
///
/// # Examples
///
/// ```
/// let x = Some(4);
/// assert_eq!(x.iter().next(), Some(&4));
///
/// let x: Option<u32> = None;
/// assert_eq!(x.iter().next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<'_, T> {
    // Borrow the contained value (if any) and wrap it in the internal
    // single-shot `Item` iterator.
    let inner = Item { opt: self.as_ref() };
    Iter { inner }
}
/// Returns a mutable iterator over the possibly contained value.
///
/// # Examples
///
/// ```
/// let mut x = Some(4);
/// match x.iter_mut().next() {
/// Some(v) => *v = 42,
/// None => {},
/// }
/// assert_eq!(x, Some(42));
///
/// let mut x: Option<u32> = None;
/// assert_eq!(x.iter_mut().next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
    // Mutably borrow the contained value (if any) and wrap it in the
    // internal single-shot `Item` iterator.
    let inner = Item { opt: self.as_mut() };
    IterMut { inner }
}
/////////////////////////////////////////////////////////////////////////
// Boolean operations on the values, eager and lazy
/////////////////////////////////////////////////////////////////////////
/// Returns [`None`] if the option is [`None`], otherwise returns `optb`.
///
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let x = Some(2);
/// let y: Option<&str> = None;
/// assert_eq!(x.and(y), None);
///
/// let x: Option<u32> = None;
/// let y = Some("foo");
/// assert_eq!(x.and(y), None);
///
/// let x = Some(2);
/// let y = Some("foo");
/// assert_eq!(x.and(y), Some("foo"));
///
/// let x: Option<u32> = None;
/// let y: Option<&str> = None;
/// assert_eq!(x.and(y), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn and<U>(self, optb: Option<U>) -> Option<U> {
    // Boolean-AND shape: the result can only be `Some` if `self` is.
    // The contained value of `self` is discarded.
    if self.is_some() { optb } else { None }
}
/// Returns [`None`] if the option is [`None`], otherwise calls `f` with the
/// wrapped value and returns the result.
///
/// Some languages call this operation flatmap.
///
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// fn sq(x: u32) -> Option<u32> { Some(x * x) }
/// fn nope(_: u32) -> Option<u32> { None }
///
/// assert_eq!(Some(2).and_then(sq).and_then(sq), Some(16));
/// assert_eq!(Some(2).and_then(sq).and_then(nope), None);
/// assert_eq!(Some(2).and_then(nope).and_then(sq), None);
/// assert_eq!(None.and_then(sq).and_then(sq), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn and_then<U, F: FnOnce(T) -> Option<U>>(self, f: F) -> Option<U> {
    // Monadic bind: feed the wrapped value to `f`, short-circuit on `None`.
    match self {
        None => None,
        Some(value) => f(value),
    }
}
/// Returns [`None`] if the option is [`None`], otherwise calls `predicate`
/// with the wrapped value and returns:
///
/// - [`Some(t)`] if `predicate` returns `true` (where `t` is the wrapped
/// value), and
/// - [`None`] if `predicate` returns `false`.
///
/// This function works similar to [`Iterator::filter()`]. You can imagine
/// the `Option<T>` being an iterator over one or zero elements. `filter()`
/// lets you decide which elements to keep.
///
/// # Examples
///
/// ```rust
/// fn is_even(n: &i32) -> bool {
/// n % 2 == 0
/// }
///
/// assert_eq!(None.filter(is_even), None);
/// assert_eq!(Some(3).filter(is_even), None);
/// assert_eq!(Some(4).filter(is_even), Some(4));
/// ```
///
/// [`None`]: #variant.None
/// [`Some(t)`]: #variant.Some
/// [`Iterator::filter()`]: ../../std/iter/trait.Iterator.html#method.filter
#[inline]
#[stable(feature = "option_filter", since = "1.27.0")]
pub fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self {
    // Keep the value only when the predicate approves of a borrow of it;
    // everything else collapses to `None`.
    match self {
        Some(value) if predicate(&value) => Some(value),
        _ => None,
    }
}
/// Returns the option if it contains a value, otherwise returns `optb`.
///
/// Arguments passed to `or` are eagerly evaluated; if you are passing the
/// result of a function call, it is recommended to use [`or_else`], which is
/// lazily evaluated.
///
/// [`or_else`]: #method.or_else
///
/// # Examples
///
/// ```
/// let x = Some(2);
/// let y = None;
/// assert_eq!(x.or(y), Some(2));
///
/// let x = None;
/// let y = Some(100);
/// assert_eq!(x.or(y), Some(100));
///
/// let x = Some(2);
/// let y = Some(100);
/// assert_eq!(x.or(y), Some(2));
///
/// let x: Option<u32> = None;
/// let y = None;
/// assert_eq!(x.or(y), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn or(self, optb: Option<T>) -> Option<T> {
    // Boolean-OR shape: prefer `self` when it holds a value.
    if self.is_some() { self } else { optb }
}
/// Returns the option if it contains a value, otherwise calls `f` and
/// returns the result.
///
/// # Examples
///
/// ```
/// fn nobody() -> Option<&'static str> { None }
/// fn vikings() -> Option<&'static str> { Some("vikings") }
///
/// assert_eq!(Some("barbarians").or_else(vikings), Some("barbarians"));
/// assert_eq!(None.or_else(vikings), Some("vikings"));
/// assert_eq!(None.or_else(nobody), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn or_else<F: FnOnce() -> Option<T>>(self, f: F) -> Option<T> {
    // The fallback closure runs only when `self` is empty.
    if self.is_none() { f() } else { self }
}
/// Returns [`Some`] if exactly one of `self`, `optb` is [`Some`], otherwise returns [`None`].
///
/// [`Some`]: #variant.Some
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let x = Some(2);
/// let y: Option<u32> = None;
/// assert_eq!(x.xor(y), Some(2));
///
/// let x: Option<u32> = None;
/// let y = Some(2);
/// assert_eq!(x.xor(y), Some(2));
///
/// let x = Some(2);
/// let y = Some(2);
/// assert_eq!(x.xor(y), None);
///
/// let x: Option<u32> = None;
/// let y: Option<u32> = None;
/// assert_eq!(x.xor(y), None);
/// ```
#[inline]
#[stable(feature = "option_xor", since = "1.37.0")]
pub fn xor(self, optb: Option<T>) -> Option<T> {
    // Exactly one side may be `Some`; both-or-neither yields `None`.
    match (self, optb) {
        (Some(value), None) | (None, Some(value)) => Some(value),
        _ => None,
    }
}
/////////////////////////////////////////////////////////////////////////
// Entry-like operations to insert if None and return a reference
/////////////////////////////////////////////////////////////////////////
/// Inserts `v` into the option if it is [`None`], then
/// returns a mutable reference to the contained value.
///
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let mut x = None;
///
/// {
/// let y: &mut u32 = x.get_or_insert(5);
/// assert_eq!(y, &5);
///
/// *y = 7;
/// }
///
/// assert_eq!(x, Some(7));
/// ```
#[inline]
#[stable(feature = "option_entry", since = "1.20.0")]
pub fn get_or_insert(&mut self, v: T) -> &mut T {
    // `v` was already evaluated by the caller; the closure only defers
    // *moving* it into the slot until `get_or_insert_with` decides it is
    // needed (i.e. when `self` is `None`).
    self.get_or_insert_with(|| v)
}
/// Inserts a value computed from `f` into the option if it is [`None`], then
/// returns a mutable reference to the contained value.
///
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let mut x = None;
///
/// {
/// let y: &mut u32 = x.get_or_insert_with(|| 5);
/// assert_eq!(y, &5);
///
/// *y = 7;
/// }
///
/// assert_eq!(x, Some(7));
/// ```
#[inline]
#[stable(feature = "option_entry", since = "1.20.0")]
pub fn get_or_insert_with<F: FnOnce() -> T>(&mut self, f: F) -> &mut T {
    // Fill the slot first, so the match below can only observe `Some`.
    if self.is_none() {
        *self = Some(f());
    }
    match *self {
        Some(ref mut value) => value,
        // SAFETY: `self` was set to `Some` just above whenever it was `None`,
        // so this arm is unreachable.
        None => unsafe { hint::unreachable_unchecked() },
    }
}
/////////////////////////////////////////////////////////////////////////
// Misc
/////////////////////////////////////////////////////////////////////////
/// Takes the value out of the option, leaving a [`None`] in its place.
///
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let mut x = Some(2);
/// let y = x.take();
/// assert_eq!(x, None);
/// assert_eq!(y, Some(2));
///
/// let mut x: Option<u32> = None;
/// let y = x.take();
/// assert_eq!(x, None);
/// assert_eq!(y, None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn take(&mut self) -> Option<T> {
    // Swap a fresh `None` into place and hand back whatever was stored.
    // (`mem::take` is exactly `mem::replace` with the `Default` value,
    // which for `Option` is `None`.)
    mem::replace(self, None)
}
/// Replaces the actual value in the option by the value given in parameter,
/// returning the old value if present,
/// leaving a [`Some`] in its place without deinitializing either one.
///
/// [`Some`]: #variant.Some
///
/// # Examples
///
/// ```
/// let mut x = Some(2);
/// let old = x.replace(5);
/// assert_eq!(x, Some(5));
/// assert_eq!(old, Some(2));
///
/// let mut x = None;
/// let old = x.replace(3);
/// assert_eq!(x, Some(3));
/// assert_eq!(old, None);
/// ```
#[inline]
#[stable(feature = "option_replace", since = "1.31.0")]
pub fn replace(&mut self, value: T) -> Option<T> {
mem::replace(self, Some(value))
}
}
impl<T: Copy> Option<&T> {
    /// Maps an `Option<&T>` to an `Option<T>` by copying the contents of the
    /// option.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = 12;
    /// let opt_x = Some(&x);
    /// assert_eq!(opt_x, Some(&12));
    /// let copied = opt_x.copied();
    /// assert_eq!(copied, Some(12));
    /// ```
    #[stable(feature = "copied", since = "1.35.0")]
    pub fn copied(self) -> Option<T> {
        // `T: Copy`, so dereferencing in the pattern copies the value out.
        match self {
            Some(&value) => Some(value),
            None => None,
        }
    }
}
impl<T: Copy> Option<&mut T> {
    /// Maps an `Option<&mut T>` to an `Option<T>` by copying the contents of the
    /// option.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut x = 12;
    /// let opt_x = Some(&mut x);
    /// assert_eq!(opt_x, Some(&mut 12));
    /// let copied = opt_x.copied();
    /// assert_eq!(copied, Some(12));
    /// ```
    #[stable(feature = "copied", since = "1.35.0")]
    pub fn copied(self) -> Option<T> {
        // `T: Copy`, so the pattern dereference copies the value out of the
        // mutable borrow without touching it.
        match self {
            Some(&mut value) => Some(value),
            None => None,
        }
    }
}
impl<T: Clone> Option<&T> {
    /// Maps an `Option<&T>` to an `Option<T>` by cloning the contents of the
    /// option.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = 12;
    /// let opt_x = Some(&x);
    /// assert_eq!(opt_x, Some(&12));
    /// let cloned = opt_x.cloned();
    /// assert_eq!(cloned, Some(12));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn cloned(self) -> Option<T> {
        // Clone through the shared reference; `None` stays `None`.
        match self {
            Some(value) => Some(value.clone()),
            None => None,
        }
    }
}
impl<T: Clone> Option<&mut T> {
    /// Maps an `Option<&mut T>` to an `Option<T>` by cloning the contents of the
    /// option.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut x = 12;
    /// let opt_x = Some(&mut x);
    /// assert_eq!(opt_x, Some(&mut 12));
    /// let cloned = opt_x.cloned();
    /// assert_eq!(cloned, Some(12));
    /// ```
    #[stable(since = "1.26.0", feature = "option_ref_mut_cloned")]
    pub fn cloned(self) -> Option<T> {
        // Clone through the mutable reference; `None` stays `None`.
        match self {
            Some(value) => Some(value.clone()),
            None => None,
        }
    }
}
impl<T: fmt::Debug> Option<T> {
    /// Unwraps an option, expecting [`None`] and returning nothing.
    ///
    /// # Panics
    ///
    /// Panics if the value is a [`Some`], with a panic message including the
    /// passed message, and the content of the [`Some`].
    ///
    /// [`Some`]: #variant.Some
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(option_expect_none)]
    ///
    /// use std::collections::HashMap;
    /// let mut squares = HashMap::new();
    /// for i in -10..=10 {
    ///     // This will not panic, since all keys are unique.
    ///     squares.insert(i, i * i).expect_none("duplicate key");
    /// }
    /// ```
    ///
    /// ```{.should_panic}
    /// #![feature(option_expect_none)]
    ///
    /// use std::collections::HashMap;
    /// let mut sqrts = HashMap::new();
    /// for i in -10..=10 {
    ///     // This will panic, since both negative and positive `i` will
    ///     // insert the same `i * i` key, returning the old `Some(i)`.
    ///     sqrts.insert(i * i, i).expect_none("duplicate key");
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "option_expect_none", reason = "newly added", issue = "62633")]
    pub fn expect_none(self, msg: &str) {
        // `None` is the happy path; a present value reports via the shared
        // `#[cold]` panic helper so this method stays small.
        match self {
            None => {}
            Some(val) => expect_none_failed(msg, &val),
        }
    }

    /// Unwraps an option, expecting [`None`] and returning nothing.
    ///
    /// # Panics
    ///
    /// Panics if the value is a [`Some`], with a custom panic message provided
    /// by the [`Some`]'s value.
    ///
    /// [`Some(v)`]: #variant.Some
    /// [`None`]: #variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(option_unwrap_none)]
    ///
    /// use std::collections::HashMap;
    /// let mut squares = HashMap::new();
    /// for i in -10..=10 {
    ///     // This will not panic, since all keys are unique.
    ///     squares.insert(i, i * i).unwrap_none();
    /// }
    /// ```
    ///
    /// ```{.should_panic}
    /// #![feature(option_unwrap_none)]
    ///
    /// use std::collections::HashMap;
    /// let mut sqrts = HashMap::new();
    /// for i in -10..=10 {
    ///     // This will panic, since both negative and positive `i` will
    ///     // insert the same `i * i` key, returning the old `Some(i)`.
    ///     sqrts.insert(i * i, i).unwrap_none();
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "option_unwrap_none", reason = "newly added", issue = "62633")]
    pub fn unwrap_none(self) {
        // Same shape as `expect_none` but with a fixed message.
        match self {
            None => {}
            Some(val) => expect_none_failed("called `Option::unwrap_none()` on a `Some` value", &val),
        }
    }
}
impl<T: Default> Option<T> {
    /// Returns the contained value or a default
    ///
    /// Consumes the `self` argument then, if [`Some`], returns the contained
    /// value, otherwise if [`None`], returns the [default value] for that
    /// type.
    ///
    /// # Examples
    ///
    /// Converts a string to an integer, turning poorly-formed strings
    /// into 0 (the default value for integers). [`parse`] converts
    /// a string to any other type that implements [`FromStr`], returning
    /// [`None`] on error.
    ///
    /// ```
    /// let good_year_from_input = "1909";
    /// let bad_year_from_input = "190blarg";
    /// let good_year = good_year_from_input.parse().ok().unwrap_or_default();
    /// let bad_year = bad_year_from_input.parse().ok().unwrap_or_default();
    ///
    /// assert_eq!(1909, good_year);
    /// assert_eq!(0, bad_year);
    /// ```
    ///
    /// [`Some`]: #variant.Some
    /// [`None`]: #variant.None
    /// [default value]: ../default/trait.Default.html#tymethod.default
    /// [`parse`]: ../../std/primitive.str.html#method.parse
    /// [`FromStr`]: ../../std/str/trait.FromStr.html
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn unwrap_or_default(self) -> T {
        // Fall back to `T`'s `Default` impl when the option is empty.
        if let Some(value) = self {
            value
        } else {
            T::default()
        }
    }
}
#[unstable(feature = "inner_deref", reason = "newly added", issue = "50264")]
impl<T: Deref> Option<T> {
    /// Converts from `Option<T>` (or `&Option<T>`) to `Option<&T::Target>`.
    ///
    /// Leaves the original Option in-place, creating a new one with a reference
    /// to the original one, additionally coercing the contents via [`Deref`].
    ///
    /// [`Deref`]: ../../std/ops/trait.Deref.html
    pub fn as_deref(&self) -> Option<&T::Target> {
        // Borrow first, then run the borrowed value through `Deref`.
        match self.as_ref() {
            Some(value) => Some(value.deref()),
            None => None,
        }
    }
}
#[unstable(feature = "inner_deref", reason = "newly added", issue = "50264")]
impl<T: DerefMut> Option<T> {
    /// Converts from `Option<T>` (or `&mut Option<T>`) to `Option<&mut T::Target>`.
    ///
    /// Leaves the original `Option` in-place, creating a new one containing a mutable reference to
    /// the inner type's `Deref::Target` type.
    pub fn as_deref_mut(&mut self) -> Option<&mut T::Target> {
        // Borrow mutably first, then run the borrow through `DerefMut`.
        match self.as_mut() {
            Some(value) => Some(value.deref_mut()),
            None => None,
        }
    }
}
impl<T, E> Option<Result<T, E>> {
    /// Transposes an `Option` of a [`Result`] into a [`Result`] of an `Option`.
    ///
    /// [`None`] will be mapped to [`Ok`]`(`[`None`]`)`.
    /// [`Some`]`(`[`Ok`]`(_))` and [`Some`]`(`[`Err`]`(_))` will be mapped to
    /// [`Ok`]`(`[`Some`]`(_))` and [`Err`]`(_)`.
    ///
    /// [`None`]: #variant.None
    /// [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
    /// [`Some`]: #variant.Some
    /// [`Err`]: ../../std/result/enum.Result.html#variant.Err
    ///
    /// # Examples
    ///
    /// ```
    /// #[derive(Debug, Eq, PartialEq)]
    /// struct SomeErr;
    ///
    /// let x: Result<Option<i32>, SomeErr> = Ok(Some(5));
    /// let y: Option<Result<i32, SomeErr>> = Some(Ok(5));
    /// assert_eq!(x, y.transpose());
    /// ```
    #[inline]
    #[stable(feature = "transpose_result", since = "1.33.0")]
    pub fn transpose(self) -> Result<Option<T>, E> {
        match self {
            // An absent value is a *successful* absence.
            None => Ok(None),
            // `Result::map(Some)` wraps the success value, propagating errors.
            Some(result) => result.map(Some),
        }
    }
}
// This is a separate function to reduce the code size of .expect() itself.
#[inline(never)]
#[cold]
fn expect_failed(msg: &str) -> ! {
    // Diverges; `#[cold]` + `#[inline(never)]` keep this panic path out of
    // callers' hot code.
    panic!("{}", msg)
}
// This is a separate function to reduce the code size of .expect_none() itself.
#[inline(never)]
#[cold]
fn expect_none_failed(msg: &str, value: &dyn fmt::Debug) -> ! {
    // Takes `&dyn Debug` so one monomorphization serves every `T`; the
    // offending value is appended to the panic message.
    panic!("{}: {:?}", msg, value)
}
/////////////////////////////////////////////////////////////////////////////
// Trait implementations
/////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for Option<T> {
    #[inline]
    fn clone(&self) -> Self {
        match self {
            None => None,
            Some(value) => Some(value.clone()),
        }
    }

    #[inline]
    fn clone_from(&mut self, source: &Self) {
        match (self, source) {
            // Both sides present: reuse the destination's existing value
            // via its own `clone_from`.
            (Some(dst), Some(src)) => dst.clone_from(src),
            // Otherwise fall back to a plain clone-and-assign.
            (dst, src) => *dst = src.clone(),
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Option<T> {
    /// Returns [`None`][Option::None].
    // `None` requires no `T: Default` bound, so this impl covers every `T`.
    #[inline]
    fn default() -> Option<T> { None }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for Option<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    /// Returns a consuming iterator over the possibly contained value.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = Some("string");
    /// let v: Vec<&str> = x.into_iter().collect();
    /// assert_eq!(v, ["string"]);
    ///
    /// let x = None;
    /// let v: Vec<&str> = x.into_iter().collect();
    /// assert!(v.is_empty());
    /// ```
    #[inline]
    fn into_iter(self) -> IntoIter<T> {
        // Move the option into the internal single-shot `Item` iterator.
        let inner = Item { opt: self };
        IntoIter { inner }
    }
}
#[stable(since = "1.4.0", feature = "option_iter")]
impl<'a, T> IntoIterator for &'a Option<T> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    // Enables `for x in &opt { ... }` by delegating to `Option::iter`.
    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}
#[stable(since = "1.4.0", feature = "option_iter")]
impl<'a, T> IntoIterator for &'a mut Option<T> {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;

    // Enables `for x in &mut opt { ... }` by delegating to `Option::iter_mut`.
    fn into_iter(self) -> IterMut<'a, T> {
        self.iter_mut()
    }
}
#[stable(since = "1.12.0", feature = "option_from")]
impl<T> From<T> for Option<T> {
    /// Moves `val` into a new [`Some`].
    ///
    /// [`Some`]: #variant.Some
    fn from(val: T) -> Option<T> {
        Some(val)
    }
}
#[stable(feature = "option_ref_from_ref_option", since = "1.30.0")]
impl<'a, T> From<&'a Option<T>> for Option<&'a T> {
    /// Converts from `&Option<T>` to `Option<&T>` via [`Option::as_ref`].
    ///
    /// [`Option::as_ref`]: enum.Option.html#method.as_ref
    fn from(o: &'a Option<T>) -> Option<&'a T> {
        o.as_ref()
    }
}
#[stable(feature = "option_ref_from_ref_option", since = "1.30.0")]
impl<'a, T> From<&'a mut Option<T>> for Option<&'a mut T> {
    /// Converts from `&mut Option<T>` to `Option<&mut T>` via [`Option::as_mut`].
    ///
    /// [`Option::as_mut`]: enum.Option.html#method.as_mut
    fn from(o: &'a mut Option<T>) -> Option<&'a mut T> {
        o.as_mut()
    }
}
/////////////////////////////////////////////////////////////////////////////
// The Option Iterators
/////////////////////////////////////////////////////////////////////////////
// Private workhorse behind `Iter`, `IterMut` and `IntoIter`: an iterator that
// yields the wrapped value at most once.
#[derive(Clone, Debug)]
struct Item<A> {
    opt: Option<A>
}

impl<A> Iterator for Item<A> {
    type Item = A;

    #[inline]
    fn next(&mut self) -> Option<A> {
        // `take` moves the value out and leaves `None`, so subsequent calls
        // return `None` — this is what makes the iterator fused.
        self.opt.take()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The remaining length is known exactly: 1 or 0.
        match self.opt {
            Some(_) => (1, Some(1)),
            None => (0, Some(0)),
        }
    }
}

impl<A> DoubleEndedIterator for Item<A> {
    #[inline]
    fn next_back(&mut self) -> Option<A> {
        // With at most one element, front and back iteration coincide.
        self.opt.take()
    }
}

impl<A> ExactSizeIterator for Item<A> {}
impl<A> FusedIterator for Item<A> {}
// SAFETY-relevant marker: justified because `size_hint` above is always exact.
unsafe impl<A> TrustedLen for Item<A> {}
/// An iterator over a reference to the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`Option::iter`] function.
///
/// [`Option`]: enum.Option.html
/// [`Some`]: enum.Option.html#variant.Some
/// [`Option::iter`]: enum.Option.html#method.iter
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
// Thin wrapper: all iterator behavior is delegated to the private `Item`.
pub struct Iter<'a, A: 'a> { inner: Item<&'a A> }

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> Iterator for Iter<'a, A> {
    type Item = &'a A;

    #[inline]
    fn next(&mut self) -> Option<&'a A> { self.inner.next() }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> DoubleEndedIterator for Iter<'a, A> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a A> { self.inner.next_back() }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> ExactSizeIterator for Iter<'_, A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<A> FusedIterator for Iter<'_, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
// SAFETY-relevant marker: inherited from `Item`, whose size_hint is exact.
unsafe impl<A> TrustedLen for Iter<'_, A> {}

#[stable(feature = "rust1", since = "1.0.0")]
// Manual impl instead of `#[derive(Clone)]`: deriving would add an `A: Clone`
// bound, which is unnecessary since only the `&A` inside is cloned.
impl<A> Clone for Iter<'_, A> {
    #[inline]
    fn clone(&self) -> Self {
        Iter { inner: self.inner.clone() }
    }
}
/// An iterator over a mutable reference to the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`Option::iter_mut`] function.
///
/// [`Option`]: enum.Option.html
/// [`Some`]: enum.Option.html#variant.Some
/// [`Option::iter_mut`]: enum.Option.html#method.iter_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
// Thin wrapper: all iterator behavior is delegated to the private `Item`.
// Not `Clone` — cloning a `&mut` borrow would alias mutable state.
pub struct IterMut<'a, A: 'a> { inner: Item<&'a mut A> }

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> Iterator for IterMut<'a, A> {
    type Item = &'a mut A;

    #[inline]
    fn next(&mut self) -> Option<&'a mut A> { self.inner.next() }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> DoubleEndedIterator for IterMut<'a, A> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut A> { self.inner.next_back() }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> ExactSizeIterator for IterMut<'_, A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<A> FusedIterator for IterMut<'_, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
// SAFETY-relevant marker: inherited from `Item`, whose size_hint is exact.
unsafe impl<A> TrustedLen for IterMut<'_, A> {}
/// An iterator over the value in [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`Option::into_iter`] function.
///
/// [`Option`]: enum.Option.html
/// [`Some`]: enum.Option.html#variant.Some
/// [`Option::into_iter`]: enum.Option.html#method.into_iter
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
// Thin wrapper: owns the value and delegates to the private `Item`.
pub struct IntoIter<A> { inner: Item<A> }

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Iterator for IntoIter<A> {
    type Item = A;

    #[inline]
    fn next(&mut self) -> Option<A> { self.inner.next() }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> DoubleEndedIterator for IntoIter<A> {
    #[inline]
    fn next_back(&mut self) -> Option<A> { self.inner.next_back() }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> ExactSizeIterator for IntoIter<A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<A> FusedIterator for IntoIter<A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
// SAFETY-relevant marker: inherited from `Item`, whose size_hint is exact.
unsafe impl<A> TrustedLen for IntoIter<A> {}
/////////////////////////////////////////////////////////////////////////////
// FromIterator
/////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, V: FromIterator<A>> FromIterator<Option<A>> for Option<V> {
    /// Takes each element in the [`Iterator`]: if it is [`None`][Option::None],
    /// no further elements are taken, and the [`None`][Option::None] is
    /// returned. Should no [`None`][Option::None] occur, a container with the
    /// values of each [`Option`] is returned.
    ///
    /// # Examples
    ///
    /// Here is an example which increments every integer in a vector.
    /// We use the checked variant of `add` that returns `None` when the
    /// calculation would result in an overflow.
    ///
    /// ```
    /// let items = vec![0_u16, 1, 2];
    ///
    /// let res: Option<Vec<u16>> = items
    ///     .iter()
    ///     .map(|x| x.checked_add(1))
    ///     .collect();
    ///
    /// assert_eq!(res, Some(vec![1, 2, 3]));
    /// ```
    ///
    /// As you can see, this will return the expected, valid items.
    ///
    /// Here is another example that tries to subtract one from another list
    /// of integers, this time checking for underflow:
    ///
    /// ```
    /// let items = vec![2_u16, 1, 0];
    ///
    /// let res: Option<Vec<u16>> = items
    ///     .iter()
    ///     .map(|x| x.checked_sub(1))
    ///     .collect();
    ///
    /// assert_eq!(res, None);
    /// ```
    ///
    /// Since the last element is zero, it would underflow. Thus, the resulting
    /// value is `None`.
    ///
    /// Here is a variation on the previous example, showing that no
    /// further elements are taken from `iter` after the first `None`.
    ///
    /// ```
    /// let items = vec![3_u16, 2, 1, 10];
    ///
    /// let mut shared = 0;
    ///
    /// let res: Option<Vec<u16>> = items
    ///     .iter()
    ///     .map(|x| { shared += x; x.checked_sub(2) })
    ///     .collect();
    ///
    /// assert_eq!(res, None);
    /// assert_eq!(shared, 6);
    /// ```
    ///
    /// Since the third element caused an underflow, no further elements were taken,
    /// so the final value of `shared` is 6 (= `3 + 2 + 1`), not 16.
    ///
    /// [`Iterator`]: ../iter/trait.Iterator.html
    #[inline]
    fn from_iter<I: IntoIterator<Item=Option<A>>>(iter: I) -> Option<V> {
        // FIXME(#11084): This could be replaced with Iterator::scan when this
        // performance bug is closed.

        // `OptionShunt` (defined elsewhere in this module) adapts the iterator
        // so the inner `collect` stops at the first `None` — presumably the
        // short-circuiting documented above; confirm against its definition.
        OptionShunt::process(iter.into_iter(), |i| i.collect())
    }
}
/// The error type that results from applying the try operator (`?`) to a `None` value. If you wish
/// to allow `x?` (where `x` is an `Option<T>`) to be converted into your error type, you can
/// implement `impl From<NoneError>` for `YourErrorType`. In that case, `x?` within a function that
/// returns `Result<_, YourErrorType>` will translate a `None` value into an `Err` result.
#[unstable(feature = "try_trait", issue = "42327")]
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
// Zero-sized marker type: it carries no information beyond "the value was `None`".
pub struct NoneError;
#[unstable(feature = "try_trait", issue = "42327")]
impl<T> ops::Try for Option<T> {
type Ok = T;
type Error = NoneError;
#[inline]
fn into_result(self) -> Result<T, NoneError> {
self.ok_or(NoneError)
}
#[inline]
fn from_ok(v: T) -> Self {
Some(v)
}
#[inline]
fn from_error(_: NoneError) -> Self {
None
}
}
impl<T> Option<Option<T>> {
    /// Converts from `Option<Option<T>>` to `Option<T>`
    ///
    /// # Examples
    /// Basic usage:
    /// ```
    /// #![feature(option_flattening)]
    /// let x: Option<Option<u32>> = Some(Some(6));
    /// assert_eq!(Some(6), x.flatten());
    ///
    /// let x: Option<Option<u32>> = Some(None);
    /// assert_eq!(None, x.flatten());
    ///
    /// let x: Option<Option<u32>> = None;
    /// assert_eq!(None, x.flatten());
    /// ```
    /// Flattening once only removes one level of nesting:
    /// ```
    /// #![feature(option_flattening)]
    /// let x: Option<Option<Option<u32>>> = Some(Some(Some(6)));
    /// assert_eq!(Some(Some(6)), x.flatten());
    /// assert_eq!(Some(6), x.flatten().flatten());
    /// ```
    #[inline]
    #[unstable(feature = "option_flattening", issue = "60258")]
    pub fn flatten(self) -> Option<T> {
        // Peel exactly one layer: the inner option is returned unchanged.
        match self {
            Some(inner) => inner,
            None => None,
        }
    }
}
// ---------------------------------------------------------------------------
use bindings::{
jit_context_t,
jit_context_create,
jit_context_destroy,
jit_context_build_start,
jit_context_build_end,
jit_function_t,
jit_function_next
};
use function::Function;
use std::kinds::marker::ContravariantLifetime;
use util::NativeRef;
/// Holds all of the functions you have built and compiled. There can be multiple, but normally there is only one.
// NOTE(review): legacy pre-1.0 Rust (`||` closure types, `ContravariantLifetime`,
// macro invocation without a trailing semicolon). Documented as-is; do not
// modernize without the matching historical toolchain.
native_ref!(Context, _context, jit_context_t, ContravariantLifetime)

impl<'a> Context<'a> {
    /// Create a new JIT Context
    pub fn new() -> Context<'a> {
        // Wraps the raw pointer from the C API; assumes `jit_context_create`
        // returns a valid context pointer — TODO confirm against libjit docs.
        unsafe {
            NativeRef::from_ptr(jit_context_create())
        }
    }

    /// Run a closure that can generate IR
    pub fn build<R>(&self, cb: || -> R) -> R {
        // Brackets the callback between libjit's build start/end calls.
        // NOTE(review): if `cb` unwinds, `jit_context_build_end` is never
        // called — verify whether that is acceptable here.
        unsafe {
            jit_context_build_start(self.as_ptr());
            let rv = cb();
            jit_context_build_end(self.as_ptr());
            rv
        }
    }

    /// Iterate through all the functions in this context
    pub fn iter_funcs(&self) -> Functions<'a> {
        Functions::new(self)
    }
}
#[unsafe_destructor]
impl<'a> Drop for Context<'a> {
    #[inline]
    fn drop(&mut self) {
        // Releases the underlying libjit context when the wrapper is dropped.
        unsafe {
            jit_context_destroy(self.as_ptr());
        }
    }
}

/// Any JIT object which is in a context
pub trait InContext<'a> {
    /// Get the context for this object
    fn get_context(&self) -> Context<'a>;
}

/// An iterator over a context's functions
pub struct Functions<'a> {
    // Raw context handle the iteration walks over.
    ctx: jit_context_t,
    // Last function handle returned; `jit_function_next` resumes after it.
    last: jit_function_t,
    // Ties the iterator's lifetime to the `Context` it came from.
    marker: ContravariantLifetime<'a>
}
impl<'a> Functions<'a> {
    fn new(ctx: &Context<'a>) -> Functions<'a> {
        // Starts with a null `last`, which makes `jit_function_next` begin at
        // the first function in the context.
        unsafe {
            Functions {
                ctx: ctx.as_ptr(),
                // NOTE(review): `RawPtr::null()` — presumably resolved via the
                // old std prelude; confirm it exists on this toolchain.
                last: RawPtr::null(),
                marker: ContravariantLifetime::<'a>
            }
        }
    }
}

impl<'a> Iterator<Function<'a>> for Functions<'a> {
    fn next(&mut self) -> Option<Function> {
        // Advance the cursor; a null result is mapped to `None` by
        // `from_ptr_opt`, ending the iteration.
        unsafe {
            let native_next = jit_function_next(self.ctx, self.last);
            self.last = native_next;
            NativeRef::from_ptr_opt(native_next)
        }
    }

    fn size_hint(&self) -> (uint, Option<uint>) {
        // Walks the remaining functions to produce an exact count — O(n) per
        // call. The whole body sits in one wide `unsafe` block in this
        // revision, even though only the FFI call needs it.
        unsafe {
            let mut size: uint = 0;
            let mut last = self.last;
            loop {
                last = jit_function_next(self.ctx, last);
                if last.is_null() {
                    break;
                } else {
                    size += 1;
                }
            }
            (size, Some(size))
        }
    }
}
// Make unsafe block in `size_hint` of `Functions` minimal
use bindings::{
jit_context_t,
jit_context_create,
jit_context_destroy,
jit_context_build_start,
jit_context_build_end,
jit_function_t,
jit_function_next
};
use function::Function;
use std::kinds::marker::ContravariantLifetime;
use util::NativeRef;
/// Holds all of the functions you have built and compiled. There can be multiple, but normally there is only one.
// NOTE(review): legacy pre-1.0 Rust; this is the revised copy of the bindings
// (same API, `size_hint` below narrows its `unsafe` block).
native_ref!(Context, _context, jit_context_t, ContravariantLifetime)

impl<'a> Context<'a> {
    /// Create a new JIT Context
    pub fn new() -> Context<'a> {
        // Wraps the raw pointer from the C API; assumes `jit_context_create`
        // returns a valid context pointer — TODO confirm against libjit docs.
        unsafe {
            NativeRef::from_ptr(jit_context_create())
        }
    }

    /// Run a closure that can generate IR
    pub fn build<R>(&self, cb: || -> R) -> R {
        // Brackets the callback between libjit's build start/end calls.
        // NOTE(review): if `cb` unwinds, `jit_context_build_end` is never
        // called — verify whether that is acceptable here.
        unsafe {
            jit_context_build_start(self.as_ptr());
            let rv = cb();
            jit_context_build_end(self.as_ptr());
            rv
        }
    }

    /// Iterate through all the functions in this context
    pub fn iter_funcs(&self) -> Functions<'a> {
        Functions::new(self)
    }
}
#[unsafe_destructor]
impl<'a> Drop for Context<'a> {
    #[inline]
    fn drop(&mut self) {
        // Releases the underlying libjit context when the wrapper is dropped.
        unsafe {
            jit_context_destroy(self.as_ptr());
        }
    }
}

/// Any JIT object which is in a context
pub trait InContext<'a> {
    /// Get the context for this object
    fn get_context(&self) -> Context<'a>;
}

/// An iterator over a context's functions
pub struct Functions<'a> {
    // Raw context handle the iteration walks over.
    ctx: jit_context_t,
    // Last function handle returned; `jit_function_next` resumes after it.
    last: jit_function_t,
    // Ties the iterator's lifetime to the `Context` it came from.
    marker: ContravariantLifetime<'a>
}
impl<'a> Functions<'a> {
    fn new(ctx: &Context<'a>) -> Functions<'a> {
        // Starts with a null `last`, which makes `jit_function_next` begin at
        // the first function in the context.
        unsafe {
            Functions {
                ctx: ctx.as_ptr(),
                // NOTE(review): `RawPtr::null()` — presumably resolved via the
                // old std prelude; confirm it exists on this toolchain.
                last: RawPtr::null(),
                marker: ContravariantLifetime::<'a>
            }
        }
    }
}

impl<'a> Iterator<Function<'a>> for Functions<'a> {
    fn next(&mut self) -> Option<Function> {
        // Advance the cursor; a null result is mapped to `None` by
        // `from_ptr_opt`, ending the iteration.
        unsafe {
            let native_next = jit_function_next(self.ctx, self.last);
            self.last = native_next;
            NativeRef::from_ptr_opt(native_next)
        }
    }

    fn size_hint(&self) -> (uint, Option<uint>) {
        // Walks the remaining functions to produce an exact count — O(n) per
        // call. The `unsafe` block is kept minimal here: it covers only the
        // FFI call, not the surrounding safe bookkeeping.
        let mut size: uint = 0;
        let mut last = self.last;
        loop {
            last = unsafe {
                jit_function_next(self.ctx, last)
            };
            if last.is_null() {
                break;
            } else {
                size += 1;
            }
        }
        (size, Some(size))
    }
}
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use io::prelude::*;
use fmt;
use io;
use net::{ToSocketAddrs, SocketAddr, Shutdown};
use sys_common::io::read_to_end_uninitialized;
use sys_common::net as net_imp;
use sys_common::{AsInner, FromInner, IntoInner};
use time::Duration;
/// A structure which represents a TCP stream between a local socket and a
/// remote socket.
///
/// The socket will be closed when the value is dropped.
///
/// # Examples
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::net::TcpStream;
///
/// {
///     let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
///
///     // ignore the Result
///     let _ = stream.write(&[1]);
///     let _ = stream.read(&mut [0; 128]); // ignore here too
/// } // the stream is closed here
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
// Newtype over the platform-specific socket in `sys_common::net`.
pub struct TcpStream(net_imp::TcpStream);
/// A structure representing a socket server.
///
/// # Examples
///
/// ```no_run
/// use std::net::{TcpListener, TcpStream};
/// use std::thread;
///
/// let listener = TcpListener::bind("127.0.0.1:80").unwrap();
///
/// fn handle_client(stream: TcpStream) {
///     // ...
/// }
///
/// // accept connections and process them, spawning a new thread for each one
/// for stream in listener.incoming() {
///     match stream {
///         Ok(stream) => {
///             thread::spawn(move|| {
///                 // connection succeeded
///                 handle_client(stream)
///             });
///         }
///         Err(e) => { /* connection failed */ }
///     }
/// }
///
/// // close the socket server
/// drop(listener);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
// Thin newtype over the platform-specific implementation in `sys_common::net`.
pub struct TcpListener(net_imp::TcpListener);
/// An infinite iterator over the connections from a `TcpListener`.
///
/// This iterator will infinitely yield `Some` of the accepted connections. It
/// is equivalent to calling `accept` in a loop.
#[stable(feature = "rust1", since = "1.0.0")]
// Borrows the listener it was created from; see `TcpListener::incoming`.
pub struct Incoming<'a> { listener: &'a TcpListener }
impl TcpStream {
/// Opens a TCP connection to a remote host.
///
/// `addr` is an address of the remote host. Anything which implements
/// `ToSocketAddrs` trait can be supplied for the address; see this trait
/// documentation for concrete examples.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn connect<A: ToSocketAddrs>(addr: A) -> io::Result<TcpStream> {
super::each_addr(addr, net_imp::TcpStream::connect).map(TcpStream)
}
/// Returns the socket address of the remote peer of this TCP connection.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn peer_addr(&self) -> io::Result<SocketAddr> {
self.0.peer_addr()
}
/// Returns the socket address of the local half of this TCP connection.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.0.socket_addr()
}
/// Shuts down the read, write, or both halves of this connection.
///
/// This function will cause all pending and future I/O on the specified
/// portions to return immediately with an appropriate value (see the
/// documentation of `Shutdown`).
#[stable(feature = "rust1", since = "1.0.0")]
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
self.0.shutdown(how)
}
/// Creates a new independently owned handle to the underlying socket.
///
/// The returned `TcpStream` is a reference to the same stream that this
/// object references. Both handles will read and write the same stream of
/// data, and options set on one stream will be propagated to the other
/// stream.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn try_clone(&self) -> io::Result<TcpStream> {
self.0.duplicate().map(TcpStream)
}
/// Sets the read timeout to the timeout specified.
///
/// If the value specified is `None`, then `read` calls will block
/// indefinitely. It is an error to pass the zero `Duration` to this
/// method.
///
/// # Note
///
/// Platforms may return a different error code whenever a read times out as
/// a result of setting this option. For example Unix typically returns an
/// error of the kind `WouldBlock`, but Windows may return `TimedOut`.
#[stable(feature = "socket_timeout", since = "1.4.0")]
pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
self.0.set_read_timeout(dur)
}
/// Sets the write timeout to the timeout specified.
///
/// If the value specified is `None`, then `write` calls will block
/// indefinitely. It is an error to pass the zero `Duration` to this
/// method.
///
/// # Note
///
/// Platforms may return a different error code whenever a write times out
/// as a result of setting this option. For example Unix typically returns
/// an error of the kind `WouldBlock`, but Windows may return `TimedOut`.
#[stable(feature = "socket_timeout", since = "1.4.0")]
pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
self.0.set_write_timeout(dur)
}
/// Returns the read timeout of this socket.
///
/// If the timeout is `None`, then `read` calls will block indefinitely.
///
/// # Note
///
/// Some platforms do not provide access to the current timeout.
#[stable(feature = "socket_timeout", since = "1.4.0")]
pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
self.0.read_timeout()
}
/// Returns the write timeout of this socket.
///
/// If the timeout is `None`, then `write` calls will block indefinitely.
///
/// # Note
///
/// Some platforms do not provide access to the current timeout.
#[stable(feature = "socket_timeout", since = "1.4.0")]
pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
self.0.write_timeout()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for TcpStream {
    // Delegate straight to the platform socket.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        // Fill `buf` without zero-initializing its spare capacity first.
        unsafe { read_to_end_uninitialized(self, buf) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for TcpStream {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }
    // No userspace buffering, so flushing has nothing to do.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Read for &'a TcpStream {
    // A shared reference suffices: the platform socket is internally
    // usable through `&self`.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        unsafe { read_to_end_uninitialized(self, buf) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Write for &'a TcpStream {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
impl AsInner<net_imp::TcpStream> for TcpStream {
fn as_inner(&self) -> &net_imp::TcpStream { &self.0 }
}
impl FromInner<net_imp::TcpStream> for TcpStream {
fn from_inner(inner: net_imp::TcpStream) -> TcpStream { TcpStream(inner) }
}
impl IntoInner<net_imp::TcpStream> for TcpStream {
fn into_inner(self) -> net_imp::TcpStream { self.0 }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for TcpStream {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl TcpListener {
    /// Creates a new `TcpListener` which will be bound to the specified
    /// address, ready to accept connections.
    ///
    /// Binding with a port number of 0 will request that the OS assigns a
    /// port to this listener. The port allocated can be queried via the
    /// `local_addr` method.
    // Fix: the docs previously referenced a nonexistent `socket_addr`
    // function; the public accessor on this type is `local_addr` (renamed
    // in 1.0 beta).
    ///
    /// The address type can be any implementor of `ToSocketAddrs` trait. See
    /// its documentation for concrete examples.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<TcpListener> {
        super::each_addr(addr, net_imp::TcpListener::bind).map(TcpListener)
    }
    /// Returns the local socket address this listener is bound to.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.0.socket_addr()
    }
    /// Duplicates the underlying socket, yielding a second independently
    /// owned handle. Either handle may accept incoming connections, and
    /// options set on one listener affect the other.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_clone(&self) -> io::Result<TcpListener> {
        self.0.duplicate().map(TcpListener)
    }
    /// Accept a new incoming connection from this listener.
    ///
    /// Blocks the calling thread until a connection is established, then
    /// returns the corresponding `TcpStream` and the remote peer's address.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
        self.0.accept().map(|(a, b)| (TcpStream(a), b))
    }
    /// Returns an iterator over the connections being received on this
    /// listener.
    ///
    /// The returned iterator will never return `None` and will also not
    /// yield the peer's `SocketAddr` structure.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn incoming(&self) -> Incoming {
        Incoming { listener: self }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for Incoming<'a> {
    type Item = io::Result<TcpStream>;
    /// Blocks on `accept` and always yields `Some`; the peer's address
    /// is discarded.
    fn next(&mut self) -> Option<io::Result<TcpStream>> {
        let accepted = self.listener.accept();
        Some(accepted.map(|(stream, _peer)| stream))
    }
}
impl AsInner<net_imp::TcpListener> for TcpListener {
fn as_inner(&self) -> &net_imp::TcpListener { &self.0 }
}
impl FromInner<net_imp::TcpListener> for TcpListener {
fn from_inner(inner: net_imp::TcpListener) -> TcpListener {
TcpListener(inner)
}
}
impl IntoInner<net_imp::TcpListener> for TcpListener {
fn into_inner(self) -> net_imp::TcpListener { self.0 }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for TcpListener {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
#[cfg(test)]
mod tests {
    use prelude::v1::*;
    use io::ErrorKind;
    use io::prelude::*;
    use net::*;
    use net::test::{next_test_ip4, next_test_ip6};
    use sync::mpsc::channel;
    use sys_common::AsInner;
    use time::Duration;
    use thread;

    // Run `f` once against a fresh IPv4 test address and once against a
    // fresh IPv6 test address.
    fn each_ip(f: &mut FnMut(SocketAddr)) {
        f(next_test_ip4());
        f(next_test_ip6());
    }

    // Unwrap a Result, panicking with the text of the failing expression.
    macro_rules! t {
        ($e:expr) => {
            match $e {
                Ok(t) => t,
                Err(e) => panic!("received error for `{}`: {}", stringify!($e), e),
            }
        }
    }

    // Binding to a foreign, unroutable address must fail.
    #[test]
    fn bind_error() {
        match TcpListener::bind("1.1.1.1:9999") {
            Ok(..) => panic!(),
            Err(e) =>
                assert_eq!(e.kind(), ErrorKind::AddrNotAvailable),
        }
    }

    // Connecting to 0.0.0.0 must fail; the exact kind is platform-dependent.
    #[test]
    fn connect_error() {
        match TcpStream::connect("0.0.0.0:1") {
            Ok(..) => panic!(),
            Err(e) => assert!(e.kind() == ErrorKind::ConnectionRefused ||
                              e.kind() == ErrorKind::InvalidInput ||
                              e.kind() == ErrorKind::AddrInUse ||
                              e.kind() == ErrorKind::AddrNotAvailable,
                              "bad error: {} {:?}", e, e.kind()),
        }
    }

    #[test]
    fn listen_localhost() {
        let socket_addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&socket_addr));
        let _t = thread::spawn(move || {
            let mut stream = t!(TcpStream::connect(&("localhost",
                                                     socket_addr.port())));
            t!(stream.write(&[144]));
        });
        let mut stream = t!(listener.accept()).0;
        let mut buf = [0];
        t!(stream.read(&mut buf));
        assert!(buf[0] == 144);
    }

    #[test]
    fn connect_loopback() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                // Pick the loopback literal matching the address family.
                let host = match addr {
                    SocketAddr::V4(..) => "127.0.0.1",
                    SocketAddr::V6(..) => "::1",
                };
                let mut stream = t!(TcpStream::connect(&(host, addr.port())));
                t!(stream.write(&[66]));
            });
            let mut stream = t!(acceptor.accept()).0;
            let mut buf = [0];
            t!(stream.read(&mut buf));
            assert!(buf[0] == 66);
        })
    }

    #[test]
    fn smoke_test() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let (tx, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut stream = t!(TcpStream::connect(&addr));
                t!(stream.write(&[99]));
                tx.send(t!(stream.local_addr())).unwrap();
            });
            // The accepted peer address must match the connector's
            // self-reported local address.
            let (mut stream, addr) = t!(acceptor.accept());
            let mut buf = [0];
            t!(stream.read(&mut buf));
            assert!(buf[0] == 99);
            assert_eq!(addr, t!(rx.recv()));
        })
    }

    // A closed peer yields 0-byte reads (EOF), repeatedly.
    #[test]
    fn read_eof() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let _stream = t!(TcpStream::connect(&addr));
                // Close
            });
            let mut stream = t!(acceptor.accept()).0;
            let mut buf = [0];
            let nread = t!(stream.read(&mut buf));
            assert_eq!(nread, 0);
            let nread = t!(stream.read(&mut buf));
            assert_eq!(nread, 0);
        })
    }

    // Writing to a peer that already closed either succeeds (buffered)
    // or fails with a disconnect-flavored error.
    #[test]
    fn write_close() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let (tx, rx) = channel();
            let _t = thread::spawn(move|| {
                drop(t!(TcpStream::connect(&addr)));
                tx.send(()).unwrap();
            });
            let mut stream = t!(acceptor.accept()).0;
            rx.recv().unwrap();
            let buf = [0];
            match stream.write(&buf) {
                Ok(..) => {}
                Err(e) => {
                    assert!(e.kind() == ErrorKind::ConnectionReset ||
                            e.kind() == ErrorKind::BrokenPipe ||
                            e.kind() == ErrorKind::ConnectionAborted,
                            "unknown error: {}", e);
                }
            }
        })
    }

    #[test]
    fn multiple_connect_serial() {
        each_ip(&mut |addr| {
            let max = 10;
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                for _ in 0..max {
                    let mut stream = t!(TcpStream::connect(&addr));
                    t!(stream.write(&[99]));
                }
            });
            for stream in acceptor.incoming().take(max) {
                let mut stream = t!(stream);
                let mut buf = [0];
                t!(stream.read(&mut buf));
                assert_eq!(buf[0], 99);
            }
        })
    }

    #[test]
    fn multiple_connect_interleaved_greedy_schedule() {
        const MAX: usize = 10;
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let acceptor = acceptor;
                for (i, stream) in acceptor.incoming().enumerate().take(MAX) {
                    // Start another thread to handle the connection
                    let _t = thread::spawn(move|| {
                        let mut stream = t!(stream);
                        let mut buf = [0];
                        t!(stream.read(&mut buf));
                        assert!(buf[0] == i as u8);
                    });
                }
            });
            connect(0, addr);
        });
        // Recursively open MAX nested connections before any byte is written.
        fn connect(i: usize, addr: SocketAddr) {
            if i == MAX { return }
            let t = thread::spawn(move|| {
                let mut stream = t!(TcpStream::connect(&addr));
                // Connect again before writing
                connect(i + 1, addr);
                t!(stream.write(&[i as u8]));
            });
            t.join().ok().unwrap();
        }
    }

    #[test]
    fn multiple_connect_interleaved_lazy_schedule() {
        const MAX: usize = 10;
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                for stream in acceptor.incoming().take(MAX) {
                    // Start another thread to handle the connection
                    let _t = thread::spawn(move|| {
                        let mut stream = t!(stream);
                        let mut buf = [0];
                        t!(stream.read(&mut buf));
                        assert!(buf[0] == 99);
                    });
                }
            });
            connect(0, addr);
        });
        fn connect(i: usize, addr: SocketAddr) {
            if i == MAX { return }
            let t = thread::spawn(move|| {
                let mut stream = t!(TcpStream::connect(&addr));
                connect(i + 1, addr);
                t!(stream.write(&[99]));
            });
            t.join().ok().unwrap();
        }
    }

    #[test]
    fn socket_and_peer_name() {
        each_ip(&mut |addr| {
            let listener = t!(TcpListener::bind(&addr));
            let so_name = t!(listener.local_addr());
            assert_eq!(addr, so_name);
            let _t = thread::spawn(move|| {
                t!(listener.accept());
            });
            let stream = t!(TcpStream::connect(&addr));
            assert_eq!(addr, t!(stream.peer_addr()));
        })
    }

    #[test]
    fn partial_read() {
        each_ip(&mut |addr| {
            let (tx, rx) = channel();
            let srv = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let mut cl = t!(srv.accept()).0;
                cl.write(&[10]).unwrap();
                let mut b = [0];
                t!(cl.read(&mut b));
                tx.send(()).unwrap();
            });
            let mut c = t!(TcpStream::connect(&addr));
            let mut b = [0; 10];
            // Only one byte was sent, so a 10-byte read returns just 1.
            assert_eq!(c.read(&mut b).unwrap(), 1);
            t!(c.write(&[1]));
            rx.recv().unwrap();
        })
    }

    #[test]
    fn double_bind() {
        each_ip(&mut |addr| {
            let _listener = t!(TcpListener::bind(&addr));
            match TcpListener::bind(&addr) {
                Ok(..) => panic!(),
                Err(e) => {
                    assert!(e.kind() == ErrorKind::ConnectionRefused ||
                            e.kind() == ErrorKind::Other ||
                            e.kind() == ErrorKind::AddrInUse,
                            "unknown error: {} {:?}", e, e.kind());
                }
            }
        })
    }

    // Rebinding the same address right after dropping the listener must work.
    #[test]
    fn fast_rebind() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                t!(TcpStream::connect(&addr));
            });
            t!(acceptor.accept());
            drop(acceptor);
            t!(TcpListener::bind(&addr));
        });
    }

    #[test]
    fn tcp_clone_smoke() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let mut s = t!(TcpStream::connect(&addr));
                let mut buf = [0, 0];
                assert_eq!(s.read(&mut buf).unwrap(), 1);
                assert_eq!(buf[0], 1);
                t!(s.write(&[2]));
            });
            let mut s1 = t!(acceptor.accept()).0;
            let s2 = t!(s1.try_clone());
            let (tx1, rx1) = channel();
            let (tx2, rx2) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                rx1.recv().unwrap();
                t!(s2.write(&[1]));
                tx2.send(()).unwrap();
            });
            tx1.send(()).unwrap();
            let mut buf = [0, 0];
            assert_eq!(s1.read(&mut buf).unwrap(), 1);
            rx2.recv().unwrap();
        })
    }

    #[test]
    fn tcp_clone_two_read() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let (tx1, rx) = channel();
            let tx2 = tx1.clone();
            let _t = thread::spawn(move|| {
                let mut s = t!(TcpStream::connect(&addr));
                t!(s.write(&[1]));
                rx.recv().unwrap();
                t!(s.write(&[2]));
                rx.recv().unwrap();
            });
            let mut s1 = t!(acceptor.accept()).0;
            let s2 = t!(s1.try_clone());
            let (done, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                let mut buf = [0, 0];
                t!(s2.read(&mut buf));
                tx2.send(()).unwrap();
                done.send(()).unwrap();
            });
            let mut buf = [0, 0];
            t!(s1.read(&mut buf));
            tx1.send(()).unwrap();
            rx.recv().unwrap();
        })
    }

    #[test]
    fn tcp_clone_two_write() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let mut s = t!(TcpStream::connect(&addr));
                let mut buf = [0, 1];
                t!(s.read(&mut buf));
                t!(s.read(&mut buf));
            });
            let mut s1 = t!(acceptor.accept()).0;
            let s2 = t!(s1.try_clone());
            let (done, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                t!(s2.write(&[1]));
                done.send(()).unwrap();
            });
            t!(s1.write(&[2]));
            rx.recv().unwrap();
        })
    }

    // After shutting down the write half, writes fail but reads still work.
    #[test]
    fn shutdown_smoke() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let mut c = t!(a.accept()).0;
                let mut b = [0];
                assert_eq!(c.read(&mut b).unwrap(), 0);
                t!(c.write(&[1]));
            });
            let mut s = t!(TcpStream::connect(&addr));
            t!(s.shutdown(Shutdown::Write));
            assert!(s.write(&[1]).is_err());
            let mut b = [0, 0];
            assert_eq!(t!(s.read(&mut b)), 1);
            assert_eq!(b[0], 1);
        })
    }

    #[test]
    fn close_readwrite_smoke() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let (tx, rx) = channel::<()>();
            let _t = thread::spawn(move|| {
                let _s = t!(a.accept());
                let _ = rx.recv();
            });
            let mut b = [0];
            let mut s = t!(TcpStream::connect(&addr));
            let mut s2 = t!(s.try_clone());
            // closing should prevent reads/writes
            t!(s.shutdown(Shutdown::Write));
            assert!(s.write(&[0]).is_err());
            t!(s.shutdown(Shutdown::Read));
            assert_eq!(s.read(&mut b).unwrap(), 0);
            // closing should affect previous handles
            assert!(s2.write(&[0]).is_err());
            assert_eq!(s2.read(&mut b).unwrap(), 0);
            // closing should affect new handles
            let mut s3 = t!(s.try_clone());
            assert!(s3.write(&[0]).is_err());
            assert_eq!(s3.read(&mut b).unwrap(), 0);
            // make sure these don't die
            let _ = s2.shutdown(Shutdown::Read);
            let _ = s2.shutdown(Shutdown::Write);
            let _ = s3.shutdown(Shutdown::Read);
            let _ = s3.shutdown(Shutdown::Write);
            drop(tx);
        })
    }

    #[test]
    fn close_read_wakes_up() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let (tx1, rx) = channel::<()>();
            let _t = thread::spawn(move|| {
                let _s = t!(a.accept());
                let _ = rx.recv();
            });
            let s = t!(TcpStream::connect(&addr));
            let s2 = t!(s.try_clone());
            let (tx, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                assert_eq!(t!(s2.read(&mut [0])), 0);
                tx.send(()).unwrap();
            });
            // this should wake up the child thread
            t!(s.shutdown(Shutdown::Read));
            // this test will never finish if the child doesn't wake up
            rx.recv().unwrap();
            drop(tx1);
        })
    }

    #[test]
    fn clone_while_reading() {
        each_ip(&mut |addr| {
            let accept = t!(TcpListener::bind(&addr));
            // Enqueue a thread to write to a socket
            let (tx, rx) = channel();
            let (txdone, rxdone) = channel();
            let txdone2 = txdone.clone();
            let _t = thread::spawn(move|| {
                let mut tcp = t!(TcpStream::connect(&addr));
                rx.recv().unwrap();
                t!(tcp.write(&[0]));
                txdone2.send(()).unwrap();
            });
            // Spawn off a reading clone
            let tcp = t!(accept.accept()).0;
            let tcp2 = t!(tcp.try_clone());
            let txdone3 = txdone.clone();
            let _t = thread::spawn(move|| {
                let mut tcp2 = tcp2;
                t!(tcp2.read(&mut [0]));
                txdone3.send(()).unwrap();
            });
            // Try to ensure that the reading clone is indeed reading
            for _ in 0..50 {
                thread::yield_now();
            }
            // clone the handle again while it's reading, then let it finish the
            // read.
            let _ = t!(tcp.try_clone());
            tx.send(()).unwrap();
            rxdone.recv().unwrap();
            rxdone.recv().unwrap();
        })
    }

    #[test]
    fn clone_accept_smoke() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let a2 = t!(a.try_clone());
            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });
            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });
            t!(a.accept());
            t!(a2.accept());
        })
    }

    #[test]
    fn clone_accept_concurrent() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let a2 = t!(a.try_clone());
            let (tx, rx) = channel();
            let tx2 = tx.clone();
            let _t = thread::spawn(move|| {
                tx.send(t!(a.accept())).unwrap();
            });
            let _t = thread::spawn(move|| {
                tx2.send(t!(a2.accept())).unwrap();
            });
            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });
            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });
            rx.recv().unwrap();
            rx.recv().unwrap();
        })
    }

    // The Debug output embeds the raw fd (Unix) or socket handle (Windows).
    #[test]
    fn debug() {
        let name = if cfg!(windows) {"socket"} else {"fd"};
        let socket_addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&socket_addr));
        let listener_inner = listener.0.socket().as_inner();
        let compare = format!("TcpListener {{ addr: {:?}, {}: {:?} }}",
                              socket_addr, name, listener_inner);
        assert_eq!(format!("{:?}", listener), compare);
        let stream = t!(TcpStream::connect(&("localhost",
                                             socket_addr.port())));
        let stream_inner = stream.0.socket().as_inner();
        let compare = format!("TcpStream {{ addr: {:?}, \
                              peer: {:?}, {}: {:?} }}",
                              stream.local_addr().unwrap(),
                              stream.peer_addr().unwrap(),
                              name,
                              stream_inner);
        assert_eq!(format!("{:?}", stream), compare);
    }

    // FIXME: re-enable bitrig/openbsd tests once their socket timeout code
    // no longer has rounding errors.
    #[cfg_attr(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"), ignore)]
    #[test]
    fn timeouts() {
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));
        let stream = t!(TcpStream::connect(&("localhost", addr.port())));
        let dur = Duration::new(15410, 0);
        assert_eq!(None, t!(stream.read_timeout()));
        t!(stream.set_read_timeout(Some(dur)));
        assert_eq!(Some(dur), t!(stream.read_timeout()));
        assert_eq!(None, t!(stream.write_timeout()));
        t!(stream.set_write_timeout(Some(dur)));
        assert_eq!(Some(dur), t!(stream.write_timeout()));
        t!(stream.set_read_timeout(None));
        assert_eq!(None, t!(stream.read_timeout()));
        t!(stream.set_write_timeout(None));
        assert_eq!(None, t!(stream.write_timeout()));
    }

    #[test]
    fn test_read_timeout() {
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));
        let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
        t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
        let mut buf = [0; 10];
        let wait = Duration::span(|| {
            let kind = stream.read(&mut buf).err().expect("expected error").kind();
            assert!(kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut);
        });
        assert!(wait > Duration::from_millis(400));
    }

    #[test]
    fn test_read_with_timeout() {
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));
        let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
        t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
        let mut other_end = t!(listener.accept()).0;
        t!(other_end.write_all(b"hello world"));
        // Data already queued arrives before any timeout can fire.
        let mut buf = [0; 11];
        t!(stream.read(&mut buf));
        assert_eq!(b"hello world", &buf[..]);
        let wait = Duration::span(|| {
            let kind = stream.read(&mut buf).err().expect("expected error").kind();
            assert!(kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut);
        });
        assert!(wait > Duration::from_millis(400));
    }
}
Auto merge of #30052 - Ryman:bind_docs, r=apasel422
`socket_addr` was renamed to `local_addr` in 1.0beta.
See: f798674b86382929ca17c88de422a6e2fdb27f2a
r? @steveklabnik
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use io::prelude::*;
use fmt;
use io;
use net::{ToSocketAddrs, SocketAddr, Shutdown};
use sys_common::io::read_to_end_uninitialized;
use sys_common::net as net_imp;
use sys_common::{AsInner, FromInner, IntoInner};
use time::Duration;
/// A structure which represents a TCP stream between a local socket and a
/// remote socket.
///
/// The socket will be closed when the value is dropped.
///
/// # Examples
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::net::TcpStream;
///
/// {
///     let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
///
///     // ignore the Result
///     let _ = stream.write(&[1]);
///     let _ = stream.read(&mut [0; 128]); // ignore here too
/// } // the stream is closed here
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
// Thin newtype over the platform-specific implementation in `sys_common::net`.
pub struct TcpStream(net_imp::TcpStream);
/// A structure representing a socket server.
///
/// # Examples
///
/// ```no_run
/// use std::net::{TcpListener, TcpStream};
/// use std::thread;
///
/// let listener = TcpListener::bind("127.0.0.1:80").unwrap();
///
/// fn handle_client(stream: TcpStream) {
///     // ...
/// }
///
/// // accept connections and process them, spawning a new thread for each one
/// for stream in listener.incoming() {
///     match stream {
///         Ok(stream) => {
///             thread::spawn(move|| {
///                 // connection succeeded
///                 handle_client(stream)
///             });
///         }
///         Err(e) => { /* connection failed */ }
///     }
/// }
///
/// // close the socket server
/// drop(listener);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
// Thin newtype over the platform-specific implementation in `sys_common::net`.
pub struct TcpListener(net_imp::TcpListener);
/// An infinite iterator over the connections from a `TcpListener`.
///
/// This iterator will infinitely yield `Some` of the accepted connections. It
/// is equivalent to calling `accept` in a loop.
#[stable(feature = "rust1", since = "1.0.0")]
// Borrows the listener it was created from; see `TcpListener::incoming`.
pub struct Incoming<'a> { listener: &'a TcpListener }
impl TcpStream {
/// Opens a TCP connection to a remote host.
///
/// `addr` is an address of the remote host. Anything which implements
/// `ToSocketAddrs` trait can be supplied for the address; see this trait
/// documentation for concrete examples.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn connect<A: ToSocketAddrs>(addr: A) -> io::Result<TcpStream> {
super::each_addr(addr, net_imp::TcpStream::connect).map(TcpStream)
}
/// Returns the socket address of the remote peer of this TCP connection.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn peer_addr(&self) -> io::Result<SocketAddr> {
self.0.peer_addr()
}
/// Returns the socket address of the local half of this TCP connection.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.0.socket_addr()
}
/// Shuts down the read, write, or both halves of this connection.
///
/// This function will cause all pending and future I/O on the specified
/// portions to return immediately with an appropriate value (see the
/// documentation of `Shutdown`).
#[stable(feature = "rust1", since = "1.0.0")]
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
self.0.shutdown(how)
}
/// Creates a new independently owned handle to the underlying socket.
///
/// The returned `TcpStream` is a reference to the same stream that this
/// object references. Both handles will read and write the same stream of
/// data, and options set on one stream will be propagated to the other
/// stream.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn try_clone(&self) -> io::Result<TcpStream> {
self.0.duplicate().map(TcpStream)
}
/// Sets the read timeout to the timeout specified.
///
/// If the value specified is `None`, then `read` calls will block
/// indefinitely. It is an error to pass the zero `Duration` to this
/// method.
///
/// # Note
///
/// Platforms may return a different error code whenever a read times out as
/// a result of setting this option. For example Unix typically returns an
/// error of the kind `WouldBlock`, but Windows may return `TimedOut`.
#[stable(feature = "socket_timeout", since = "1.4.0")]
pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
self.0.set_read_timeout(dur)
}
/// Sets the write timeout to the timeout specified.
///
/// If the value specified is `None`, then `write` calls will block
/// indefinitely. It is an error to pass the zero `Duration` to this
/// method.
///
/// # Note
///
/// Platforms may return a different error code whenever a write times out
/// as a result of setting this option. For example Unix typically returns
/// an error of the kind `WouldBlock`, but Windows may return `TimedOut`.
#[stable(feature = "socket_timeout", since = "1.4.0")]
pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
self.0.set_write_timeout(dur)
}
/// Returns the read timeout of this socket.
///
/// If the timeout is `None`, then `read` calls will block indefinitely.
///
/// # Note
///
/// Some platforms do not provide access to the current timeout.
#[stable(feature = "socket_timeout", since = "1.4.0")]
pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
self.0.read_timeout()
}
/// Returns the write timeout of this socket.
///
/// If the timeout is `None`, then `write` calls will block indefinitely.
///
/// # Note
///
/// Some platforms do not provide access to the current timeout.
#[stable(feature = "socket_timeout", since = "1.4.0")]
pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
self.0.write_timeout()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for TcpStream {
    // Delegate straight to the platform socket.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        // Fill `buf` without zero-initializing its spare capacity first.
        unsafe { read_to_end_uninitialized(self, buf) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for TcpStream {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }
    // No userspace buffering, so flushing has nothing to do.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Read for &'a TcpStream {
    // A shared reference suffices: the platform socket is internally
    // usable through `&self`.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        unsafe { read_to_end_uninitialized(self, buf) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Write for &'a TcpStream {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
impl AsInner<net_imp::TcpStream> for TcpStream {
fn as_inner(&self) -> &net_imp::TcpStream { &self.0 }
}
impl FromInner<net_imp::TcpStream> for TcpStream {
fn from_inner(inner: net_imp::TcpStream) -> TcpStream { TcpStream(inner) }
}
impl IntoInner<net_imp::TcpStream> for TcpStream {
fn into_inner(self) -> net_imp::TcpStream { self.0 }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for TcpStream {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl TcpListener {
    /// Creates a new `TcpListener` which will be bound to the specified
    /// address.
    ///
    /// The returned listener is ready for accepting connections.
    ///
    /// Binding with a port number of 0 will request that the OS assigns a port
    /// to this listener. The port allocated can be queried via the
    /// `local_addr` method.
    ///
    /// The address type can be any implementor of `ToSocketAddrs` trait. See
    /// its documentation for concrete examples.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<TcpListener> {
        // Tries each resolved address in turn, returning the first
        // successful bind (or the last error).
        super::each_addr(addr, net_imp::TcpListener::bind).map(TcpListener)
    }

    /// Returns the local socket address of this listener.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.0.socket_addr()
    }

    /// Creates a new independently owned handle to the underlying socket.
    ///
    /// The returned `TcpListener` is a reference to the same socket that this
    /// object references. Both handles can be used to accept incoming
    /// connections and options set on one listener will affect the other.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_clone(&self) -> io::Result<TcpListener> {
        self.0.duplicate().map(TcpListener)
    }

    /// Accept a new incoming connection from this listener.
    ///
    /// This function will block the calling thread until a new TCP connection
    /// is established. When established, the corresponding `TcpStream` and the
    /// remote peer's address will be returned.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
        self.0.accept().map(|(a, b)| (TcpStream(a), b))
    }

    /// Returns an iterator over the connections being received on this
    /// listener.
    ///
    /// The returned iterator will never return `None` and will also not yield
    /// the peer's `SocketAddr` structure.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn incoming(&self) -> Incoming {
        Incoming { listener: self }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for Incoming<'a> {
    type Item = io::Result<TcpStream>;

    /// Blocks on `accept` and yields the accepted stream, discarding the
    /// peer address. This iterator never produces `None`.
    fn next(&mut self) -> Option<io::Result<TcpStream>> {
        let accepted = self.listener.accept();
        Some(accepted.map(|(stream, _addr)| stream))
    }
}
// Internal conversions between the public `TcpListener` wrapper and the
// platform-specific `net_imp::TcpListener` it contains.
impl AsInner<net_imp::TcpListener> for TcpListener {
    fn as_inner(&self) -> &net_imp::TcpListener { &self.0 }
}
impl FromInner<net_imp::TcpListener> for TcpListener {
    fn from_inner(inner: net_imp::TcpListener) -> TcpListener {
        TcpListener(inner)
    }
}
impl IntoInner<net_imp::TcpListener> for TcpListener {
    fn into_inner(self) -> net_imp::TcpListener { self.0 }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for TcpListener {
    // Delegates to the inner listener's Debug impl; the `debug` test below
    // shows it renders the bound address and the raw fd/socket.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}
// Integration-style tests for TCP streams and listeners. Most tests pair a
// listener with a connecting thread; `each_ip` runs each body over both an
// IPv4 and an IPv6 loopback test address.
#[cfg(test)]
mod tests {
    use prelude::v1::*;

    use io::ErrorKind;
    use io::prelude::*;
    use net::*;
    use net::test::{next_test_ip4, next_test_ip6};
    use sync::mpsc::channel;
    use sys_common::AsInner;
    use time::Duration;
    use thread;

    // Runs `f` once with a fresh IPv4 test address and once with IPv6.
    fn each_ip(f: &mut FnMut(SocketAddr)) {
        f(next_test_ip4());
        f(next_test_ip6());
    }

    // Unwraps a `Result`, panicking with the failing expression's text.
    macro_rules! t {
        ($e:expr) => {
            match $e {
                Ok(t) => t,
                Err(e) => panic!("received error for `{}`: {}", stringify!($e), e),
            }
        }
    }

    #[test]
    fn bind_error() {
        // 1.1.1.1 is not a local address, so binding must fail.
        match TcpListener::bind("1.1.1.1:9999") {
            Ok(..) => panic!(),
            Err(e) =>
                assert_eq!(e.kind(), ErrorKind::AddrNotAvailable),
        }
    }

    #[test]
    fn connect_error() {
        // Connecting to 0.0.0.0 is invalid; accepted error kinds differ per
        // platform, so several are allowed.
        match TcpStream::connect("0.0.0.0:1") {
            Ok(..) => panic!(),
            Err(e) => assert!(e.kind() == ErrorKind::ConnectionRefused ||
                              e.kind() == ErrorKind::InvalidInput ||
                              e.kind() == ErrorKind::AddrInUse ||
                              e.kind() == ErrorKind::AddrNotAvailable,
                              "bad error: {} {:?}", e, e.kind()),
        }
    }

    #[test]
    fn listen_localhost() {
        let socket_addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&socket_addr));

        let _t = thread::spawn(move || {
            let mut stream = t!(TcpStream::connect(&("localhost",
                                                     socket_addr.port())));
            t!(stream.write(&[144]));
        });

        let mut stream = t!(listener.accept()).0;
        let mut buf = [0];
        t!(stream.read(&mut buf));
        assert!(buf[0] == 144);
    }

    #[test]
    fn connect_loopback() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));

            let _t = thread::spawn(move|| {
                // Pick the loopback literal matching the address family.
                let host = match addr {
                    SocketAddr::V4(..) => "127.0.0.1",
                    SocketAddr::V6(..) => "::1",
                };
                let mut stream = t!(TcpStream::connect(&(host, addr.port())));
                t!(stream.write(&[66]));
            });

            let mut stream = t!(acceptor.accept()).0;
            let mut buf = [0];
            t!(stream.read(&mut buf));
            assert!(buf[0] == 66);
        })
    }

    #[test]
    fn smoke_test() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));

            let (tx, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut stream = t!(TcpStream::connect(&addr));
                t!(stream.write(&[99]));
                tx.send(t!(stream.local_addr())).unwrap();
            });

            // The accepted peer address must match the connector's local one.
            let (mut stream, addr) = t!(acceptor.accept());
            let mut buf = [0];
            t!(stream.read(&mut buf));
            assert!(buf[0] == 99);
            assert_eq!(addr, t!(rx.recv()));
        })
    }

    #[test]
    fn read_eof() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));

            let _t = thread::spawn(move|| {
                let _stream = t!(TcpStream::connect(&addr));
                // Close
            });

            // After the peer closes, reads report EOF (0 bytes) repeatedly.
            let mut stream = t!(acceptor.accept()).0;
            let mut buf = [0];
            let nread = t!(stream.read(&mut buf));
            assert_eq!(nread, 0);
            let nread = t!(stream.read(&mut buf));
            assert_eq!(nread, 0);
        })
    }

    #[test]
    fn write_close() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));

            let (tx, rx) = channel();
            let _t = thread::spawn(move|| {
                drop(t!(TcpStream::connect(&addr)));
                tx.send(()).unwrap();
            });

            let mut stream = t!(acceptor.accept()).0;
            rx.recv().unwrap();
            let buf = [0];
            // Writing to a peer-closed socket may succeed (buffered) or fail
            // with one of a few platform-dependent error kinds.
            match stream.write(&buf) {
                Ok(..) => {}
                Err(e) => {
                    assert!(e.kind() == ErrorKind::ConnectionReset ||
                            e.kind() == ErrorKind::BrokenPipe ||
                            e.kind() == ErrorKind::ConnectionAborted,
                            "unknown error: {}", e);
                }
            }
        })
    }

    #[test]
    fn multiple_connect_serial() {
        each_ip(&mut |addr| {
            let max = 10;
            let acceptor = t!(TcpListener::bind(&addr));

            let _t = thread::spawn(move|| {
                for _ in 0..max {
                    let mut stream = t!(TcpStream::connect(&addr));
                    t!(stream.write(&[99]));
                }
            });

            for stream in acceptor.incoming().take(max) {
                let mut stream = t!(stream);
                let mut buf = [0];
                t!(stream.read(&mut buf));
                assert_eq!(buf[0], 99);
            }
        })
    }

    #[test]
    fn multiple_connect_interleaved_greedy_schedule() {
        const MAX: usize = 10;
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));

            let _t = thread::spawn(move|| {
                let acceptor = acceptor;
                for (i, stream) in acceptor.incoming().enumerate().take(MAX) {
                    // Start another thread to handle the connection
                    let _t = thread::spawn(move|| {
                        let mut stream = t!(stream);
                        let mut buf = [0];
                        t!(stream.read(&mut buf));
                        assert!(buf[0] == i as u8);
                    });
                }
            });

            connect(0, addr);
        });

        // Recursively opens MAX connections, each nested inside the previous
        // one's lifetime, writing its index after the deeper connects finish.
        fn connect(i: usize, addr: SocketAddr) {
            if i == MAX { return }

            let t = thread::spawn(move|| {
                let mut stream = t!(TcpStream::connect(&addr));
                // Connect again before writing
                connect(i + 1, addr);
                t!(stream.write(&[i as u8]));
            });
            t.join().ok().unwrap();
        }
    }

    #[test]
    fn multiple_connect_interleaved_lazy_schedule() {
        const MAX: usize = 10;
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));

            let _t = thread::spawn(move|| {
                for stream in acceptor.incoming().take(MAX) {
                    // Start another thread to handle the connection
                    let _t = thread::spawn(move|| {
                        let mut stream = t!(stream);
                        let mut buf = [0];
                        t!(stream.read(&mut buf));
                        assert!(buf[0] == 99);
                    });
                }
            });

            connect(0, addr);
        });

        // Same nested-connect pattern as the greedy test, but every
        // connection writes the same byte.
        fn connect(i: usize, addr: SocketAddr) {
            if i == MAX { return }

            let t = thread::spawn(move|| {
                let mut stream = t!(TcpStream::connect(&addr));
                connect(i + 1, addr);
                t!(stream.write(&[99]));
            });
            t.join().ok().unwrap();
        }
    }

    #[test]
    fn socket_and_peer_name() {
        each_ip(&mut |addr| {
            let listener = t!(TcpListener::bind(&addr));
            let so_name = t!(listener.local_addr());
            assert_eq!(addr, so_name);
            let _t = thread::spawn(move|| {
                t!(listener.accept());
            });

            let stream = t!(TcpStream::connect(&addr));
            assert_eq!(addr, t!(stream.peer_addr()));
        })
    }

    #[test]
    fn partial_read() {
        each_ip(&mut |addr| {
            let (tx, rx) = channel();
            let srv = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let mut cl = t!(srv.accept()).0;
                cl.write(&[10]).unwrap();
                let mut b = [0];
                t!(cl.read(&mut b));
                tx.send(()).unwrap();
            });

            // A 10-byte buffer read returns after the single available byte.
            let mut c = t!(TcpStream::connect(&addr));
            let mut b = [0; 10];
            assert_eq!(c.read(&mut b).unwrap(), 1);
            t!(c.write(&[1]));
            rx.recv().unwrap();
        })
    }

    #[test]
    fn double_bind() {
        each_ip(&mut |addr| {
            let _listener = t!(TcpListener::bind(&addr));
            match TcpListener::bind(&addr) {
                Ok(..) => panic!(),
                Err(e) => {
                    assert!(e.kind() == ErrorKind::ConnectionRefused ||
                            e.kind() == ErrorKind::Other ||
                            e.kind() == ErrorKind::AddrInUse,
                            "unknown error: {} {:?}", e, e.kind());
                }
            }
        })
    }

    #[test]
    fn fast_rebind() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));

            let _t = thread::spawn(move|| {
                t!(TcpStream::connect(&addr));
            });

            // Dropping the listener must allow an immediate re-bind.
            t!(acceptor.accept());
            drop(acceptor);
            t!(TcpListener::bind(&addr));
        });
    }

    #[test]
    fn tcp_clone_smoke() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));

            let _t = thread::spawn(move|| {
                let mut s = t!(TcpStream::connect(&addr));
                let mut buf = [0, 0];
                assert_eq!(s.read(&mut buf).unwrap(), 1);
                assert_eq!(buf[0], 1);
                t!(s.write(&[2]));
            });

            let mut s1 = t!(acceptor.accept()).0;
            let s2 = t!(s1.try_clone());

            let (tx1, rx1) = channel();
            let (tx2, rx2) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                rx1.recv().unwrap();
                t!(s2.write(&[1]));
                tx2.send(()).unwrap();
            });
            tx1.send(()).unwrap();
            let mut buf = [0, 0];
            assert_eq!(s1.read(&mut buf).unwrap(), 1);
            rx2.recv().unwrap();
        })
    }

    #[test]
    fn tcp_clone_two_read() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let (tx1, rx) = channel();
            let tx2 = tx1.clone();

            let _t = thread::spawn(move|| {
                let mut s = t!(TcpStream::connect(&addr));
                t!(s.write(&[1]));
                rx.recv().unwrap();
                t!(s.write(&[2]));
                rx.recv().unwrap();
            });

            let mut s1 = t!(acceptor.accept()).0;
            let s2 = t!(s1.try_clone());

            let (done, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                let mut buf = [0, 0];
                t!(s2.read(&mut buf));
                tx2.send(()).unwrap();
                done.send(()).unwrap();
            });
            let mut buf = [0, 0];
            t!(s1.read(&mut buf));
            tx1.send(()).unwrap();

            rx.recv().unwrap();
        })
    }

    #[test]
    fn tcp_clone_two_write() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));

            let _t = thread::spawn(move|| {
                let mut s = t!(TcpStream::connect(&addr));
                let mut buf = [0, 1];
                t!(s.read(&mut buf));
                t!(s.read(&mut buf));
            });

            let mut s1 = t!(acceptor.accept()).0;
            let s2 = t!(s1.try_clone());

            let (done, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                t!(s2.write(&[1]));
                done.send(()).unwrap();
            });
            t!(s1.write(&[2]));

            rx.recv().unwrap();
        })
    }

    #[test]
    fn shutdown_smoke() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let mut c = t!(a.accept()).0;
                let mut b = [0];
                assert_eq!(c.read(&mut b).unwrap(), 0);
                t!(c.write(&[1]));
            });

            // After shutting down the write half: writes fail, reads still work.
            let mut s = t!(TcpStream::connect(&addr));
            t!(s.shutdown(Shutdown::Write));
            assert!(s.write(&[1]).is_err());
            let mut b = [0, 0];
            assert_eq!(t!(s.read(&mut b)), 1);
            assert_eq!(b[0], 1);
        })
    }

    #[test]
    fn close_readwrite_smoke() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let (tx, rx) = channel::<()>();
            let _t = thread::spawn(move|| {
                let _s = t!(a.accept());
                let _ = rx.recv();
            });

            let mut b = [0];
            let mut s = t!(TcpStream::connect(&addr));
            let mut s2 = t!(s.try_clone());

            // closing should prevent reads/writes
            t!(s.shutdown(Shutdown::Write));
            assert!(s.write(&[0]).is_err());
            t!(s.shutdown(Shutdown::Read));
            assert_eq!(s.read(&mut b).unwrap(), 0);

            // closing should affect previous handles
            assert!(s2.write(&[0]).is_err());
            assert_eq!(s2.read(&mut b).unwrap(), 0);

            // closing should affect new handles
            let mut s3 = t!(s.try_clone());
            assert!(s3.write(&[0]).is_err());
            assert_eq!(s3.read(&mut b).unwrap(), 0);

            // make sure these don't die
            let _ = s2.shutdown(Shutdown::Read);
            let _ = s2.shutdown(Shutdown::Write);
            let _ = s3.shutdown(Shutdown::Read);
            let _ = s3.shutdown(Shutdown::Write);
            drop(tx);
        })
    }

    #[test]
    fn close_read_wakes_up() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let (tx1, rx) = channel::<()>();
            let _t = thread::spawn(move|| {
                let _s = t!(a.accept());
                let _ = rx.recv();
            });

            let s = t!(TcpStream::connect(&addr));
            let s2 = t!(s.try_clone());
            let (tx, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                assert_eq!(t!(s2.read(&mut [0])), 0);
                tx.send(()).unwrap();
            });
            // this should wake up the child thread
            t!(s.shutdown(Shutdown::Read));

            // this test will never finish if the child doesn't wake up
            rx.recv().unwrap();
            drop(tx1);
        })
    }

    #[test]
    fn clone_while_reading() {
        each_ip(&mut |addr| {
            let accept = t!(TcpListener::bind(&addr));

            // Enqueue a thread to write to a socket
            let (tx, rx) = channel();
            let (txdone, rxdone) = channel();
            let txdone2 = txdone.clone();
            let _t = thread::spawn(move|| {
                let mut tcp = t!(TcpStream::connect(&addr));
                rx.recv().unwrap();
                t!(tcp.write(&[0]));
                txdone2.send(()).unwrap();
            });

            // Spawn off a reading clone
            let tcp = t!(accept.accept()).0;
            let tcp2 = t!(tcp.try_clone());
            let txdone3 = txdone.clone();
            let _t = thread::spawn(move|| {
                let mut tcp2 = tcp2;
                t!(tcp2.read(&mut [0]));
                txdone3.send(()).unwrap();
            });

            // Try to ensure that the reading clone is indeed reading
            for _ in 0..50 {
                thread::yield_now();
            }

            // clone the handle again while it's reading, then let it finish the
            // read.
            let _ = t!(tcp.try_clone());
            tx.send(()).unwrap();
            rxdone.recv().unwrap();
            rxdone.recv().unwrap();
        })
    }

    #[test]
    fn clone_accept_smoke() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let a2 = t!(a.try_clone());

            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });
            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });

            // Both the original and the clone can accept.
            t!(a.accept());
            t!(a2.accept());
        })
    }

    #[test]
    fn clone_accept_concurrent() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let a2 = t!(a.try_clone());

            let (tx, rx) = channel();
            let tx2 = tx.clone();

            let _t = thread::spawn(move|| {
                tx.send(t!(a.accept())).unwrap();
            });
            let _t = thread::spawn(move|| {
                tx2.send(t!(a2.accept())).unwrap();
            });

            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });
            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });

            rx.recv().unwrap();
            rx.recv().unwrap();
        })
    }

    #[test]
    fn debug() {
        // Windows Debug output names the handle "socket", Unix names it "fd".
        let name = if cfg!(windows) {"socket"} else {"fd"};
        let socket_addr = next_test_ip4();

        let listener = t!(TcpListener::bind(&socket_addr));
        let listener_inner = listener.0.socket().as_inner();
        let compare = format!("TcpListener {{ addr: {:?}, {}: {:?} }}",
                              socket_addr, name, listener_inner);
        assert_eq!(format!("{:?}", listener), compare);

        let stream = t!(TcpStream::connect(&("localhost",
                                             socket_addr.port())));
        let stream_inner = stream.0.socket().as_inner();
        let compare = format!("TcpStream {{ addr: {:?}, \
                              peer: {:?}, {}: {:?} }}",
                              stream.local_addr().unwrap(),
                              stream.peer_addr().unwrap(),
                              name,
                              stream_inner);
        assert_eq!(format!("{:?}", stream), compare);
    }

    // FIXME: re-enabled bitrig/openbsd tests once their socket timeout code
    //        no longer has rounding errors.
    #[cfg_attr(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"), ignore)]
    #[test]
    fn timeouts() {
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));

        let stream = t!(TcpStream::connect(&("localhost", addr.port())));
        let dur = Duration::new(15410, 0);

        assert_eq!(None, t!(stream.read_timeout()));

        t!(stream.set_read_timeout(Some(dur)));
        assert_eq!(Some(dur), t!(stream.read_timeout()));

        assert_eq!(None, t!(stream.write_timeout()));

        t!(stream.set_write_timeout(Some(dur)));
        assert_eq!(Some(dur), t!(stream.write_timeout()));

        t!(stream.set_read_timeout(None));
        assert_eq!(None, t!(stream.read_timeout()));

        t!(stream.set_write_timeout(None));
        assert_eq!(None, t!(stream.write_timeout()));
    }

    #[test]
    fn test_read_timeout() {
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));

        let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
        t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));

        let mut buf = [0; 10];
        let wait = Duration::span(|| {
            // Unix reports WouldBlock, Windows TimedOut.
            let kind = stream.read(&mut buf).err().expect("expected error").kind();
            assert!(kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut);
        });
        assert!(wait > Duration::from_millis(400));
    }

    #[test]
    fn test_read_with_timeout() {
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));

        let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
        t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));

        let mut other_end = t!(listener.accept()).0;
        t!(other_end.write_all(b"hello world"));

        // A read with data available succeeds despite the timeout being set.
        let mut buf = [0; 11];
        t!(stream.read(&mut buf));
        assert_eq!(b"hello world", &buf[..]);

        let wait = Duration::span(|| {
            let kind = stream.read(&mut buf).err().expect("expected error").kind();
            assert!(kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut);
        });
        assert!(wait > Duration::from_millis(400));
    }
}
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use io::prelude::*;
use fmt;
use io;
use net::{ToSocketAddrs, SocketAddr, Shutdown};
use sys_common::net as net_imp;
use sys_common::{AsInner, FromInner, IntoInner};
use time::Duration;
/// A structure which represents a TCP stream between a local socket and a
/// remote socket.
///
/// The socket will be closed when the value is dropped.
///
/// # Examples
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::net::TcpStream;
///
/// {
///     let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
///
///     // ignore the Result
///     let _ = stream.write(&[1]);
///     let _ = stream.read(&mut [0; 128]); // ignore here too
/// } // the stream is closed here
/// ```
// Newtype over the platform-specific implementation in `sys_common::net`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct TcpStream(net_imp::TcpStream);
/// A structure representing a socket server.
///
/// # Examples
///
/// ```no_run
/// use std::net::{TcpListener, TcpStream};
/// use std::thread;
///
/// let listener = TcpListener::bind("127.0.0.1:80").unwrap();
///
/// fn handle_client(stream: TcpStream) {
///     // ...
/// }
///
/// // accept connections and process them, spawning a new thread for each one
/// for stream in listener.incoming() {
///     match stream {
///         Ok(stream) => {
///             thread::spawn(move|| {
///                 // connection succeeded
///                 handle_client(stream)
///             });
///         }
///         Err(e) => { /* connection failed */ }
///     }
/// }
///
/// // close the socket server
/// drop(listener);
/// ```
// Newtype over the platform-specific implementation in `sys_common::net`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct TcpListener(net_imp::TcpListener);
/// An infinite iterator over the connections from a `TcpListener`.
///
/// This iterator will infinitely yield `Some` of the accepted connections. It
/// is equivalent to calling `accept` in a loop.
// Borrows the listener, so it cannot outlive it.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Incoming<'a> { listener: &'a TcpListener }
impl TcpStream {
    /// Opens a TCP connection to a remote host.
    ///
    /// `addr` is an address of the remote host. Anything which implements
    /// `ToSocketAddrs` trait can be supplied for the address; see this trait
    /// documentation for concrete examples.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn connect<A: ToSocketAddrs>(addr: A) -> io::Result<TcpStream> {
        // Tries each resolved address in turn, returning the first
        // successful connection (or the last error).
        super::each_addr(addr, net_imp::TcpStream::connect).map(TcpStream)
    }

    /// Returns the socket address of the remote peer of this TCP connection.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
        self.0.peer_addr()
    }

    /// Returns the socket address of the local half of this TCP connection.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.0.socket_addr()
    }

    /// Shuts down the read, write, or both halves of this connection.
    ///
    /// This function will cause all pending and future I/O on the specified
    /// portions to return immediately with an appropriate value (see the
    /// documentation of `Shutdown`).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
        self.0.shutdown(how)
    }

    /// Creates a new independently owned handle to the underlying socket.
    ///
    /// The returned `TcpStream` is a reference to the same stream that this
    /// object references. Both handles will read and write the same stream of
    /// data, and options set on one stream will be propagated to the other
    /// stream.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_clone(&self) -> io::Result<TcpStream> {
        self.0.duplicate().map(TcpStream)
    }

    /// Sets the read timeout to the timeout specified.
    ///
    /// If the value specified is `None`, then `read` calls will block
    /// indefinitely. It is an error to pass the zero `Duration` to this
    /// method.
    ///
    /// # Note
    ///
    /// Platforms may return a different error code whenever a read times out as
    /// a result of setting this option. For example Unix typically returns an
    /// error of the kind `WouldBlock`, but Windows may return `TimedOut`.
    #[stable(feature = "socket_timeout", since = "1.4.0")]
    pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
        self.0.set_read_timeout(dur)
    }

    /// Sets the write timeout to the timeout specified.
    ///
    /// If the value specified is `None`, then `write` calls will block
    /// indefinitely. It is an error to pass the zero `Duration` to this
    /// method.
    ///
    /// # Note
    ///
    /// Platforms may return a different error code whenever a write times out
    /// as a result of setting this option. For example Unix typically returns
    /// an error of the kind `WouldBlock`, but Windows may return `TimedOut`.
    #[stable(feature = "socket_timeout", since = "1.4.0")]
    pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
        self.0.set_write_timeout(dur)
    }

    /// Returns the read timeout of this socket.
    ///
    /// If the timeout is `None`, then `read` calls will block indefinitely.
    ///
    /// # Note
    ///
    /// Some platforms do not provide access to the current timeout.
    #[stable(feature = "socket_timeout", since = "1.4.0")]
    pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
        self.0.read_timeout()
    }

    /// Returns the write timeout of this socket.
    ///
    /// If the timeout is `None`, then `write` calls will block indefinitely.
    ///
    /// # Note
    ///
    /// Some platforms do not provide access to the current timeout.
    #[stable(feature = "socket_timeout", since = "1.4.0")]
    pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
        self.0.write_timeout()
    }

    /// Sets the value of the `TCP_NODELAY` option on this socket.
    ///
    /// If set, this option disables the Nagle algorithm. This means that
    /// segments are always sent as soon as possible, even if there is only a
    /// small amount of data. When not set, data is buffered until there is a
    /// sufficient amount to send out, thereby avoiding the frequent sending of
    /// small packets.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
        self.0.set_nodelay(nodelay)
    }

    /// Gets the value of the `TCP_NODELAY` option on this socket.
    ///
    /// For more information about this option, see [`set_nodelay`][link].
    ///
    /// [link]: #method.set_nodelay
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn nodelay(&self) -> io::Result<bool> {
        self.0.nodelay()
    }

    /// Sets the value for the `IP_TTL` option on this socket.
    ///
    /// This value sets the time-to-live field that is used in every packet sent
    /// from this socket.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
        self.0.set_ttl(ttl)
    }

    /// Gets the value of the `IP_TTL` option for this socket.
    ///
    /// For more information about this option, see [`set_ttl`][link].
    ///
    /// [link]: #method.set_ttl
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn ttl(&self) -> io::Result<u32> {
        self.0.ttl()
    }

    /// Sets the value for the `IPV6_V6ONLY` option on this socket.
    ///
    /// If this is set to `true` then the socket is restricted to sending and
    /// receiving IPv6 packets only. If this is the case, an IPv4 and an IPv6
    /// application can each bind the same port at the same time.
    ///
    /// If this is set to `false` then the socket can be used to send and
    /// receive packets from an IPv4-mapped IPv6 address.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
        self.0.set_only_v6(only_v6)
    }

    /// Gets the value of the `IPV6_V6ONLY` option for this socket.
    ///
    /// For more information about this option, see [`set_only_v6`][link].
    ///
    /// [link]: #method.set_only_v6
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn only_v6(&self) -> io::Result<bool> {
        self.0.only_v6()
    }

    /// Get the value of the `SO_ERROR` option on this socket.
    ///
    /// This will retrieve the stored error in the underlying socket, clearing
    /// the field in the process. This can be useful for checking errors between
    /// calls.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        self.0.take_error()
    }

    /// Moves this TCP stream into or out of nonblocking mode.
    ///
    /// On Unix this corresponds to calling fcntl, and on Windows this
    /// corresponds to calling ioctlsocket.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
        self.0.set_nonblocking(nonblocking)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for TcpStream {
    /// Reads from the socket by delegating to the inner stream.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }

    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        self.0.read_to_end(buf)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for TcpStream {
    /// Writes to the socket by delegating to the inner stream.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }

    /// Nothing is buffered in this wrapper, so flushing has no work to do.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Read for &'a TcpStream {
    /// Reads through a shared reference by delegating to the inner stream.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }

    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        self.0.read_to_end(buf)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Write for &'a TcpStream {
    /// Writes through a shared reference by delegating to the inner stream.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }

    /// Nothing is buffered in this wrapper, so flushing has no work to do.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
// Internal conversions between the public `TcpStream` wrapper and the
// platform-specific `net_imp::TcpStream` it contains.
impl AsInner<net_imp::TcpStream> for TcpStream {
    fn as_inner(&self) -> &net_imp::TcpStream { &self.0 }
}
impl FromInner<net_imp::TcpStream> for TcpStream {
    fn from_inner(inner: net_imp::TcpStream) -> TcpStream { TcpStream(inner) }
}
impl IntoInner<net_imp::TcpStream> for TcpStream {
    fn into_inner(self) -> net_imp::TcpStream { self.0 }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for TcpStream {
    // Delegates to the inner stream's Debug impl (addresses + fd/socket).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}
impl TcpListener {
    /// Creates a new `TcpListener` which will be bound to the specified
    /// address.
    ///
    /// The returned listener is ready for accepting connections.
    ///
    /// Binding with a port number of 0 will request that the OS assigns a port
    /// to this listener. The port allocated can be queried via the
    /// `local_addr` method.
    ///
    /// The address type can be any implementor of `ToSocketAddrs` trait. See
    /// its documentation for concrete examples.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<TcpListener> {
        // Tries each resolved address in turn, returning the first
        // successful bind (or the last error).
        super::each_addr(addr, net_imp::TcpListener::bind).map(TcpListener)
    }

    /// Returns the local socket address of this listener.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.0.socket_addr()
    }

    /// Creates a new independently owned handle to the underlying socket.
    ///
    /// The returned `TcpListener` is a reference to the same socket that this
    /// object references. Both handles can be used to accept incoming
    /// connections and options set on one listener will affect the other.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_clone(&self) -> io::Result<TcpListener> {
        self.0.duplicate().map(TcpListener)
    }

    /// Accept a new incoming connection from this listener.
    ///
    /// This function will block the calling thread until a new TCP connection
    /// is established. When established, the corresponding `TcpStream` and the
    /// remote peer's address will be returned.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
        self.0.accept().map(|(a, b)| (TcpStream(a), b))
    }

    /// Returns an iterator over the connections being received on this
    /// listener.
    ///
    /// The returned iterator will never return `None` and will also not yield
    /// the peer's `SocketAddr` structure.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn incoming(&self) -> Incoming {
        Incoming { listener: self }
    }

    /// Sets the value for the `IP_TTL` option on this socket.
    ///
    /// This value sets the time-to-live field that is used in every packet sent
    /// from this socket.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
        self.0.set_ttl(ttl)
    }

    /// Gets the value of the `IP_TTL` option for this socket.
    ///
    /// For more information about this option, see [`set_ttl`][link].
    ///
    /// [link]: #method.set_ttl
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn ttl(&self) -> io::Result<u32> {
        self.0.ttl()
    }

    /// Sets the value for the `IPV6_V6ONLY` option on this socket.
    ///
    /// If this is set to `true` then the socket is restricted to sending and
    /// receiving IPv6 packets only. If this is the case, an IPv4 and an IPv6
    /// application can each bind the same port at the same time.
    ///
    /// If this is set to `false` then the socket can be used to send and
    /// receive packets from an IPv4-mapped IPv6 address.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
        self.0.set_only_v6(only_v6)
    }

    /// Gets the value of the `IPV6_V6ONLY` option for this socket.
    ///
    /// For more information about this option, see [`set_only_v6`][link].
    ///
    /// [link]: #method.set_only_v6
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn only_v6(&self) -> io::Result<bool> {
        self.0.only_v6()
    }

    /// Get the value of the `SO_ERROR` option on this socket.
    ///
    /// This will retrieve the stored error in the underlying socket, clearing
    /// the field in the process. This can be useful for checking errors between
    /// calls.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        self.0.take_error()
    }

    /// Moves this TCP listener into or out of nonblocking mode.
    ///
    /// On Unix this corresponds to calling fcntl, and on Windows this
    /// corresponds to calling ioctlsocket.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
        self.0.set_nonblocking(nonblocking)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for Incoming<'a> {
    type Item = io::Result<TcpStream>;
    // Blocks on `accept`, yielding only the stream (the peer address is
    // dropped). Never returns `None`.
    fn next(&mut self) -> Option<io::Result<TcpStream>> {
        Some(self.listener.accept().map(|p| p.0))
    }
}
// Internal conversions between the public `TcpListener` wrapper and the
// platform-specific `net_imp::TcpListener` it contains.
impl AsInner<net_imp::TcpListener> for TcpListener {
    fn as_inner(&self) -> &net_imp::TcpListener { &self.0 }
}
impl FromInner<net_imp::TcpListener> for TcpListener {
    fn from_inner(inner: net_imp::TcpListener) -> TcpListener {
        TcpListener(inner)
    }
}
impl IntoInner<net_imp::TcpListener> for TcpListener {
    fn into_inner(self) -> net_imp::TcpListener { self.0 }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for TcpListener {
    // Delegates to the inner listener's Debug impl (address + fd/socket).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}
#[cfg(test)]
// Integration-style tests for `TcpStream`/`TcpListener`. Most tests bind a
// listener on a fresh local port (`next_test_ip4`/`next_test_ip6`) and drive
// the other end of the connection from a spawned thread, using mpsc channels
// where an ordering guarantee is needed.
mod tests {
    use prelude::v1::*;
    use io::ErrorKind;
    use io::prelude::*;
    use net::*;
    use net::test::{next_test_ip4, next_test_ip6};
    use sync::mpsc::channel;
    use sys_common::AsInner;
    use time::{Instant, Duration};
    use thread;
    // Runs `f` once with a fresh IPv4 test address and once with IPv6.
    fn each_ip(f: &mut FnMut(SocketAddr)) {
        f(next_test_ip4());
        f(next_test_ip6());
    }
    // Unwraps a `Result`, panicking with the stringified expression on `Err`.
    macro_rules! t {
        ($e:expr) => {
            match $e {
                Ok(t) => t,
                Err(e) => panic!("received error for `{}`: {}", stringify!($e), e),
            }
        }
    }
    // Binding an address that is not local must fail with `AddrNotAvailable`.
    #[test]
    fn bind_error() {
        match TcpListener::bind("1.1.1.1:9999") {
            Ok(..) => panic!(),
            Err(e) =>
                assert_eq!(e.kind(), ErrorKind::AddrNotAvailable),
        }
    }
    // Connecting to 0.0.0.0 must fail; the exact kind is platform-dependent.
    #[test]
    fn connect_error() {
        match TcpStream::connect("0.0.0.0:1") {
            Ok(..) => panic!(),
            Err(e) => assert!(e.kind() == ErrorKind::ConnectionRefused ||
                              e.kind() == ErrorKind::InvalidInput ||
                              e.kind() == ErrorKind::AddrInUse ||
                              e.kind() == ErrorKind::AddrNotAvailable,
                              "bad error: {} {:?}", e, e.kind()),
        }
    }
    // Basic bind/connect/accept round trip using the "localhost" hostname.
    #[test]
    fn listen_localhost() {
        let socket_addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&socket_addr));
        let _t = thread::spawn(move || {
            let mut stream = t!(TcpStream::connect(&("localhost",
                                                     socket_addr.port())));
            t!(stream.write(&[144]));
        });
        let mut stream = t!(listener.accept()).0;
        let mut buf = [0];
        t!(stream.read(&mut buf));
        assert!(buf[0] == 144);
    }
    // Connect via the explicit loopback literal for each address family.
    #[test]
    fn connect_loopback() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let host = match addr {
                    SocketAddr::V4(..) => "127.0.0.1",
                    SocketAddr::V6(..) => "::1",
                };
                let mut stream = t!(TcpStream::connect(&(host, addr.port())));
                t!(stream.write(&[66]));
            });
            let mut stream = t!(acceptor.accept()).0;
            let mut buf = [0];
            t!(stream.read(&mut buf));
            assert!(buf[0] == 66);
        })
    }
    // `accept` reports the same peer address the client sees as its local one.
    #[test]
    fn smoke_test() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let (tx, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut stream = t!(TcpStream::connect(&addr));
                t!(stream.write(&[99]));
                tx.send(t!(stream.local_addr())).unwrap();
            });
            let (mut stream, addr) = t!(acceptor.accept());
            let mut buf = [0];
            t!(stream.read(&mut buf));
            assert!(buf[0] == 99);
            assert_eq!(addr, t!(rx.recv()));
        })
    }
    // After the peer closes, `read` must keep returning 0 (EOF), not error.
    #[test]
    fn read_eof() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let _stream = t!(TcpStream::connect(&addr));
                // Close
            });
            let mut stream = t!(acceptor.accept()).0;
            let mut buf = [0];
            let nread = t!(stream.read(&mut buf));
            assert_eq!(nread, 0);
            let nread = t!(stream.read(&mut buf));
            assert_eq!(nread, 0);
        })
    }
    // Writing to a peer-closed socket either succeeds (buffered) or fails
    // with a disconnect-flavored error kind.
    #[test]
    fn write_close() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let (tx, rx) = channel();
            let _t = thread::spawn(move|| {
                drop(t!(TcpStream::connect(&addr)));
                tx.send(()).unwrap();
            });
            let mut stream = t!(acceptor.accept()).0;
            rx.recv().unwrap();
            let buf = [0];
            match stream.write(&buf) {
                Ok(..) => {}
                Err(e) => {
                    assert!(e.kind() == ErrorKind::ConnectionReset ||
                            e.kind() == ErrorKind::BrokenPipe ||
                            e.kind() == ErrorKind::ConnectionAborted,
                            "unknown error: {}", e);
                }
            }
        })
    }
    // One listener serving several sequential connections via `incoming()`.
    #[test]
    fn multiple_connect_serial() {
        each_ip(&mut |addr| {
            let max = 10;
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                for _ in 0..max {
                    let mut stream = t!(TcpStream::connect(&addr));
                    t!(stream.write(&[99]));
                }
            });
            for stream in acceptor.incoming().take(max) {
                let mut stream = t!(stream);
                let mut buf = [0];
                t!(stream.read(&mut buf));
                assert_eq!(buf[0], 99);
            }
        })
    }
    // All MAX connections are established (recursively) before any write;
    // each accepted connection is handled on its own thread.
    #[test]
    fn multiple_connect_interleaved_greedy_schedule() {
        const MAX: usize = 10;
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let acceptor = acceptor;
                for (i, stream) in acceptor.incoming().enumerate().take(MAX) {
                    // Start another thread to handle the connection
                    let _t = thread::spawn(move|| {
                        let mut stream = t!(stream);
                        let mut buf = [0];
                        t!(stream.read(&mut buf));
                        assert!(buf[0] == i as u8);
                    });
                }
            });
            connect(0, addr);
        });
        fn connect(i: usize, addr: SocketAddr) {
            if i == MAX { return }
            let t = thread::spawn(move|| {
                let mut stream = t!(TcpStream::connect(&addr));
                // Connect again before writing
                connect(i + 1, addr);
                t!(stream.write(&[i as u8]));
            });
            t.join().ok().unwrap();
        }
    }
    // Same shape as the greedy test, but every connection writes the same
    // byte, so accept order does not matter.
    #[test]
    fn multiple_connect_interleaved_lazy_schedule() {
        const MAX: usize = 10;
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                for stream in acceptor.incoming().take(MAX) {
                    // Start another thread to handle the connection
                    let _t = thread::spawn(move|| {
                        let mut stream = t!(stream);
                        let mut buf = [0];
                        t!(stream.read(&mut buf));
                        assert!(buf[0] == 99);
                    });
                }
            });
            connect(0, addr);
        });
        fn connect(i: usize, addr: SocketAddr) {
            if i == MAX { return }
            let t = thread::spawn(move|| {
                let mut stream = t!(TcpStream::connect(&addr));
                connect(i + 1, addr);
                t!(stream.write(&[99]));
            });
            t.join().ok().unwrap();
        }
    }
    // `local_addr` on the listener and `peer_addr` on the stream agree with
    // the bound address.
    #[test]
    fn socket_and_peer_name() {
        each_ip(&mut |addr| {
            let listener = t!(TcpListener::bind(&addr));
            let so_name = t!(listener.local_addr());
            assert_eq!(addr, so_name);
            let _t = thread::spawn(move|| {
                t!(listener.accept());
            });
            let stream = t!(TcpStream::connect(&addr));
            assert_eq!(addr, t!(stream.peer_addr()));
        })
    }
    // A read with a large buffer returns as soon as *some* data is available.
    #[test]
    fn partial_read() {
        each_ip(&mut |addr| {
            let (tx, rx) = channel();
            let srv = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let mut cl = t!(srv.accept()).0;
                cl.write(&[10]).unwrap();
                let mut b = [0];
                t!(cl.read(&mut b));
                tx.send(()).unwrap();
            });
            let mut c = t!(TcpStream::connect(&addr));
            let mut b = [0; 10];
            assert_eq!(c.read(&mut b).unwrap(), 1);
            t!(c.write(&[1]));
            rx.recv().unwrap();
        })
    }
    // Binding the same address twice fails with an in-use-flavored error.
    #[test]
    fn double_bind() {
        each_ip(&mut |addr| {
            let _listener = t!(TcpListener::bind(&addr));
            match TcpListener::bind(&addr) {
                Ok(..) => panic!(),
                Err(e) => {
                    assert!(e.kind() == ErrorKind::ConnectionRefused ||
                            e.kind() == ErrorKind::Other ||
                            e.kind() == ErrorKind::AddrInUse,
                            "unknown error: {} {:?}", e, e.kind());
                }
            }
        })
    }
    // Rebinding immediately after drop must succeed (no lingering bind).
    #[test]
    fn fast_rebind() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                t!(TcpStream::connect(&addr));
            });
            t!(acceptor.accept());
            drop(acceptor);
            t!(TcpListener::bind(&addr));
        });
    }
    // A `try_clone`d stream shares the same underlying connection: data
    // written on the clone is visible to the peer of the original.
    #[test]
    fn tcp_clone_smoke() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let mut s = t!(TcpStream::connect(&addr));
                let mut buf = [0, 0];
                assert_eq!(s.read(&mut buf).unwrap(), 1);
                assert_eq!(buf[0], 1);
                t!(s.write(&[2]));
            });
            let mut s1 = t!(acceptor.accept()).0;
            let s2 = t!(s1.try_clone());
            let (tx1, rx1) = channel();
            let (tx2, rx2) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                rx1.recv().unwrap();
                t!(s2.write(&[1]));
                tx2.send(()).unwrap();
            });
            tx1.send(()).unwrap();
            let mut buf = [0, 0];
            assert_eq!(s1.read(&mut buf).unwrap(), 1);
            rx2.recv().unwrap();
        })
    }
    // Two threads reading from clones of the same stream both make progress.
    #[test]
    fn tcp_clone_two_read() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let (tx1, rx) = channel();
            let tx2 = tx1.clone();
            let _t = thread::spawn(move|| {
                let mut s = t!(TcpStream::connect(&addr));
                t!(s.write(&[1]));
                rx.recv().unwrap();
                t!(s.write(&[2]));
                rx.recv().unwrap();
            });
            let mut s1 = t!(acceptor.accept()).0;
            let s2 = t!(s1.try_clone());
            let (done, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                let mut buf = [0, 0];
                t!(s2.read(&mut buf));
                tx2.send(()).unwrap();
                done.send(()).unwrap();
            });
            let mut buf = [0, 0];
            t!(s1.read(&mut buf));
            tx1.send(()).unwrap();
            rx.recv().unwrap();
        })
    }
    // Two threads writing through clones of the same stream both succeed.
    #[test]
    fn tcp_clone_two_write() {
        each_ip(&mut |addr| {
            let acceptor = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let mut s = t!(TcpStream::connect(&addr));
                let mut buf = [0, 1];
                t!(s.read(&mut buf));
                t!(s.read(&mut buf));
            });
            let mut s1 = t!(acceptor.accept()).0;
            let s2 = t!(s1.try_clone());
            let (done, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                t!(s2.write(&[1]));
                done.send(()).unwrap();
            });
            t!(s1.write(&[2]));
            rx.recv().unwrap();
        })
    }
    // After `shutdown(Write)`, writes fail but the read half still works.
    #[test]
    fn shutdown_smoke() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let _t = thread::spawn(move|| {
                let mut c = t!(a.accept()).0;
                let mut b = [0];
                assert_eq!(c.read(&mut b).unwrap(), 0);
                t!(c.write(&[1]));
            });
            let mut s = t!(TcpStream::connect(&addr));
            t!(s.shutdown(Shutdown::Write));
            assert!(s.write(&[1]).is_err());
            let mut b = [0, 0];
            assert_eq!(t!(s.read(&mut b)), 1);
            assert_eq!(b[0], 1);
        })
    }
    // Shutdown is a property of the connection, so it affects existing and
    // future clones of the stream, not just the handle it was called on.
    #[test]
    fn close_readwrite_smoke() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let (tx, rx) = channel::<()>();
            let _t = thread::spawn(move|| {
                let _s = t!(a.accept());
                let _ = rx.recv();
            });
            let mut b = [0];
            let mut s = t!(TcpStream::connect(&addr));
            let mut s2 = t!(s.try_clone());
            // closing should prevent reads/writes
            t!(s.shutdown(Shutdown::Write));
            assert!(s.write(&[0]).is_err());
            t!(s.shutdown(Shutdown::Read));
            assert_eq!(s.read(&mut b).unwrap(), 0);
            // closing should affect previous handles
            assert!(s2.write(&[0]).is_err());
            assert_eq!(s2.read(&mut b).unwrap(), 0);
            // closing should affect new handles
            let mut s3 = t!(s.try_clone());
            assert!(s3.write(&[0]).is_err());
            assert_eq!(s3.read(&mut b).unwrap(), 0);
            // make sure these don't die
            let _ = s2.shutdown(Shutdown::Read);
            let _ = s2.shutdown(Shutdown::Write);
            let _ = s3.shutdown(Shutdown::Read);
            let _ = s3.shutdown(Shutdown::Write);
            drop(tx);
        })
    }
    // `shutdown(Read)` must unblock a clone that is blocked in `read`.
    #[test]
    fn close_read_wakes_up() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let (tx1, rx) = channel::<()>();
            let _t = thread::spawn(move|| {
                let _s = t!(a.accept());
                let _ = rx.recv();
            });
            let s = t!(TcpStream::connect(&addr));
            let s2 = t!(s.try_clone());
            let (tx, rx) = channel();
            let _t = thread::spawn(move|| {
                let mut s2 = s2;
                assert_eq!(t!(s2.read(&mut [0])), 0);
                tx.send(()).unwrap();
            });
            // this should wake up the child thread
            t!(s.shutdown(Shutdown::Read));
            // this test will never finish if the child doesn't wake up
            rx.recv().unwrap();
            drop(tx1);
        })
    }
    // `try_clone` must succeed even while another clone is blocked reading.
    #[test]
    fn clone_while_reading() {
        each_ip(&mut |addr| {
            let accept = t!(TcpListener::bind(&addr));
            // Enqueue a thread to write to a socket
            let (tx, rx) = channel();
            let (txdone, rxdone) = channel();
            let txdone2 = txdone.clone();
            let _t = thread::spawn(move|| {
                let mut tcp = t!(TcpStream::connect(&addr));
                rx.recv().unwrap();
                t!(tcp.write(&[0]));
                txdone2.send(()).unwrap();
            });
            // Spawn off a reading clone
            let tcp = t!(accept.accept()).0;
            let tcp2 = t!(tcp.try_clone());
            let txdone3 = txdone.clone();
            let _t = thread::spawn(move|| {
                let mut tcp2 = tcp2;
                t!(tcp2.read(&mut [0]));
                txdone3.send(()).unwrap();
            });
            // Try to ensure that the reading clone is indeed reading
            for _ in 0..50 {
                thread::yield_now();
            }
            // clone the handle again while it's reading, then let it finish the
            // read.
            let _ = t!(tcp.try_clone());
            tx.send(()).unwrap();
            rxdone.recv().unwrap();
            rxdone.recv().unwrap();
        })
    }
    // A cloned listener can accept connections just like the original.
    #[test]
    fn clone_accept_smoke() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let a2 = t!(a.try_clone());
            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });
            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });
            t!(a.accept());
            t!(a2.accept());
        })
    }
    // Both listener clones can block in `accept` concurrently.
    #[test]
    fn clone_accept_concurrent() {
        each_ip(&mut |addr| {
            let a = t!(TcpListener::bind(&addr));
            let a2 = t!(a.try_clone());
            let (tx, rx) = channel();
            let tx2 = tx.clone();
            let _t = thread::spawn(move|| {
                tx.send(t!(a.accept())).unwrap();
            });
            let _t = thread::spawn(move|| {
                tx2.send(t!(a2.accept())).unwrap();
            });
            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });
            let _t = thread::spawn(move|| {
                let _ = TcpStream::connect(&addr);
            });
            rx.recv().unwrap();
            rx.recv().unwrap();
        })
    }
    // The `Debug` output includes the address and the raw fd/socket handle.
    #[test]
    fn debug() {
        let name = if cfg!(windows) {"socket"} else {"fd"};
        let socket_addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&socket_addr));
        let listener_inner = listener.0.socket().as_inner();
        let compare = format!("TcpListener {{ addr: {:?}, {}: {:?} }}",
                              socket_addr, name, listener_inner);
        assert_eq!(format!("{:?}", listener), compare);
        let stream = t!(TcpStream::connect(&("localhost",
                                             socket_addr.port())));
        let stream_inner = stream.0.socket().as_inner();
        let compare = format!("TcpStream {{ addr: {:?}, \
                              peer: {:?}, {}: {:?} }}",
                              stream.local_addr().unwrap(),
                              stream.peer_addr().unwrap(),
                              name,
                              stream_inner);
        assert_eq!(format!("{:?}", stream), compare);
    }
    // FIXME: re-enabled bitrig/openbsd tests once their socket timeout code
    //        no longer has rounding errors.
    #[cfg_attr(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"), ignore)]
    // Setting/getting/clearing read and write timeouts round-trips.
    #[test]
    fn timeouts() {
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));
        let stream = t!(TcpStream::connect(&("localhost", addr.port())));
        let dur = Duration::new(15410, 0);
        assert_eq!(None, t!(stream.read_timeout()));
        t!(stream.set_read_timeout(Some(dur)));
        assert_eq!(Some(dur), t!(stream.read_timeout()));
        assert_eq!(None, t!(stream.write_timeout()));
        t!(stream.set_write_timeout(Some(dur)));
        assert_eq!(Some(dur), t!(stream.write_timeout()));
        t!(stream.set_read_timeout(None));
        assert_eq!(None, t!(stream.read_timeout()));
        t!(stream.set_write_timeout(None));
        assert_eq!(None, t!(stream.write_timeout()));
        drop(listener);
    }
    // A read on a silent connection times out with WouldBlock/TimedOut
    // (the kind is platform-dependent) after roughly the configured duration.
    #[test]
    fn test_read_timeout() {
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));
        let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
        t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
        let mut buf = [0; 10];
        let start = Instant::now();
        let kind = stream.read(&mut buf).err().expect("expected error").kind();
        assert!(kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut);
        assert!(start.elapsed() > Duration::from_millis(400));
        drop(listener);
    }
    // With a timeout set, available data is still read normally; only the
    // subsequent empty read times out.
    #[test]
    fn test_read_with_timeout() {
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));
        let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
        t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
        let mut other_end = t!(listener.accept()).0;
        t!(other_end.write_all(b"hello world"));
        let mut buf = [0; 11];
        t!(stream.read(&mut buf));
        assert_eq!(b"hello world", &buf[..]);
        let start = Instant::now();
        let kind = stream.read(&mut buf).err().expect("expected error").kind();
        assert!(kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut);
        assert!(start.elapsed() > Duration::from_millis(400));
        drop(listener);
    }
    // TCP_NODELAY can be toggled and read back.
    #[test]
    fn nodelay() {
        let addr = next_test_ip4();
        let _listener = t!(TcpListener::bind(&addr));
        let stream = t!(TcpStream::connect(&("localhost", addr.port())));
        assert_eq!(false, t!(stream.nodelay()));
        t!(stream.set_nodelay(true));
        assert_eq!(true, t!(stream.nodelay()));
        t!(stream.set_nodelay(false));
        assert_eq!(false, t!(stream.nodelay()));
    }
    // IP_TTL can be set and read back on both listener and stream.
    #[test]
    fn ttl() {
        let ttl = 100;
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));
        t!(listener.set_ttl(ttl));
        assert_eq!(ttl, t!(listener.ttl()));
        let stream = t!(TcpStream::connect(&("localhost", addr.port())));
        t!(stream.set_ttl(ttl));
        assert_eq!(ttl, t!(stream.ttl()));
    }
    // In nonblocking mode a read with no data pending fails with WouldBlock.
    #[test]
    fn set_nonblocking() {
        let addr = next_test_ip4();
        let listener = t!(TcpListener::bind(&addr));
        t!(listener.set_nonblocking(true));
        t!(listener.set_nonblocking(false));
        let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
        t!(stream.set_nonblocking(false));
        t!(stream.set_nonblocking(true));
        let mut buf = [0];
        match stream.read(&mut buf) {
            Ok(_) => panic!("expected error"),
            Err(ref e) if e.kind() == ErrorKind::WouldBlock => {}
            Err(e) => panic!("unexpected error {}", e),
        }
    }
}
// clarify documentation of TcpStream::connect() for multiple valid addresses
// Signed-off-by: benaryorg <7e57cfe843145135aee1f4d0d63ceb7842093712@benary.org>
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use io::prelude::*;
use fmt;
use io;
use net::{ToSocketAddrs, SocketAddr, Shutdown};
use sys_common::net as net_imp;
use sys_common::{AsInner, FromInner, IntoInner};
use time::Duration;
/// A structure which represents a TCP stream between a local socket and a
/// remote socket.
///
/// The socket will be closed when the value is dropped.
///
/// Reading and writing go through the `Read` and `Write` implementations on
/// `TcpStream` (and on `&TcpStream`, so a shared reference can be used
/// concurrently from multiple places).
///
/// # Examples
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::net::TcpStream;
///
/// {
///     let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
///
///     // ignore the Result
///     let _ = stream.write(&[1]);
///     let _ = stream.read(&mut [0; 128]); // ignore here too
/// } // the stream is closed here
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct TcpStream(net_imp::TcpStream);
/// A structure representing a socket server.
///
/// Incoming connections can be accepted one at a time with `accept`, or
/// iterated over with `incoming`, as shown below.
///
/// # Examples
///
/// ```no_run
/// use std::net::{TcpListener, TcpStream};
/// use std::thread;
///
/// let listener = TcpListener::bind("127.0.0.1:80").unwrap();
///
/// fn handle_client(stream: TcpStream) {
///     // ...
/// }
///
/// // accept connections and process them, spawning a new thread for each one
/// for stream in listener.incoming() {
///     match stream {
///         Ok(stream) => {
///             thread::spawn(move|| {
///                 // connection succeeded
///                 handle_client(stream)
///             });
///         }
///         Err(_) => { /* connection failed */ }
///     }
/// }
///
/// // close the socket server
/// drop(listener);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct TcpListener(net_imp::TcpListener);
/// An infinite iterator over the connections from a `TcpListener`.
///
/// This iterator will infinitely yield `Some` of the accepted connections. It
/// is equivalent to calling `accept` in a loop.
///
/// Values of this type are created by the `incoming` method on `TcpListener`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Incoming<'a> { listener: &'a TcpListener }
impl TcpStream {
    /// Opens a TCP connection to a remote host.
    ///
    /// `addr` is an address of the remote host. Anything which implements
    /// the `ToSocketAddrs` trait can be supplied for the address; see this
    /// trait's documentation for concrete examples.
    ///
    /// If `ToSocketAddrs::to_socket_addrs()` returns more than one entry,
    /// each of the addresses is attempted in order, and the connection to
    /// the first valid and reachable address is returned.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn connect<A: ToSocketAddrs>(addr: A) -> io::Result<TcpStream> {
        super::each_addr(addr, net_imp::TcpStream::connect).map(TcpStream)
    }
    /// Returns the socket address of the remote peer of this TCP connection.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
        self.0.peer_addr()
    }
    /// Returns the socket address of the local half of this TCP connection.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.0.socket_addr()
    }
    /// Shuts down the read, write, or both halves of this connection.
    ///
    /// This function will cause all pending and future I/O on the specified
    /// portions to return immediately with an appropriate value (see the
    /// documentation of `Shutdown`).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
        self.0.shutdown(how)
    }
    /// Creates a new independently owned handle to the underlying socket.
    ///
    /// The returned `TcpStream` is a reference to the same stream that this
    /// object references. Both handles will read and write the same stream of
    /// data, and options set on one stream will be propagated to the other
    /// stream.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_clone(&self) -> io::Result<TcpStream> {
        self.0.duplicate().map(TcpStream)
    }
    /// Sets the read timeout to the timeout specified.
    ///
    /// If the value specified is `None`, then `read` calls will block
    /// indefinitely. It is an error to pass the zero `Duration` to this
    /// method.
    ///
    /// # Note
    ///
    /// Platforms may return a different error code whenever a read times out as
    /// a result of setting this option. For example Unix typically returns an
    /// error of the kind `WouldBlock`, but Windows may return `TimedOut`.
    #[stable(feature = "socket_timeout", since = "1.4.0")]
    pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
        self.0.set_read_timeout(dur)
    }
    /// Sets the write timeout to the timeout specified.
    ///
    /// If the value specified is `None`, then `write` calls will block
    /// indefinitely. It is an error to pass the zero `Duration` to this
    /// method.
    ///
    /// # Note
    ///
    /// Platforms may return a different error code whenever a write times out
    /// as a result of setting this option. For example Unix typically returns
    /// an error of the kind `WouldBlock`, but Windows may return `TimedOut`.
    #[stable(feature = "socket_timeout", since = "1.4.0")]
    pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
        self.0.set_write_timeout(dur)
    }
    /// Returns the read timeout of this socket.
    ///
    /// If the timeout is `None`, then `read` calls will block indefinitely.
    ///
    /// # Note
    ///
    /// Some platforms do not provide access to the current timeout.
    #[stable(feature = "socket_timeout", since = "1.4.0")]
    pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
        self.0.read_timeout()
    }
    /// Returns the write timeout of this socket.
    ///
    /// If the timeout is `None`, then `write` calls will block indefinitely.
    ///
    /// # Note
    ///
    /// Some platforms do not provide access to the current timeout.
    #[stable(feature = "socket_timeout", since = "1.4.0")]
    pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
        self.0.write_timeout()
    }
    /// Sets the value of the `TCP_NODELAY` option on this socket.
    ///
    /// If set, this option disables the Nagle algorithm. This means that
    /// segments are always sent as soon as possible, even if there is only a
    /// small amount of data. When not set, data is buffered until there is a
    /// sufficient amount to send out, thereby avoiding the frequent sending of
    /// small packets.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
        self.0.set_nodelay(nodelay)
    }
    /// Gets the value of the `TCP_NODELAY` option on this socket.
    ///
    /// For more information about this option, see [`set_nodelay`][link].
    ///
    /// [link]: #method.set_nodelay
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn nodelay(&self) -> io::Result<bool> {
        self.0.nodelay()
    }
    /// Sets the value for the `IP_TTL` option on this socket.
    ///
    /// This value sets the time-to-live field that is used in every packet sent
    /// from this socket.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
        self.0.set_ttl(ttl)
    }
    /// Gets the value of the `IP_TTL` option for this socket.
    ///
    /// For more information about this option, see [`set_ttl`][link].
    ///
    /// [link]: #method.set_ttl
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn ttl(&self) -> io::Result<u32> {
        self.0.ttl()
    }
    /// Sets the value for the `IPV6_V6ONLY` option on this socket.
    ///
    /// If this is set to `true` then the socket is restricted to sending and
    /// receiving IPv6 packets only. If this is the case, an IPv4 and an IPv6
    /// application can each bind the same port at the same time.
    ///
    /// If this is set to `false` then the socket can be used to send and
    /// receive packets from an IPv4-mapped IPv6 address.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
        self.0.set_only_v6(only_v6)
    }
    /// Gets the value of the `IPV6_V6ONLY` option for this socket.
    ///
    /// For more information about this option, see [`set_only_v6`][link].
    ///
    /// [link]: #method.set_only_v6
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn only_v6(&self) -> io::Result<bool> {
        self.0.only_v6()
    }
    /// Get the value of the `SO_ERROR` option on this socket.
    ///
    /// This will retrieve the stored error in the underlying socket, clearing
    /// the field in the process. This can be useful for checking errors between
    /// calls.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        self.0.take_error()
    }
    /// Moves this TCP stream into or out of nonblocking mode.
    ///
    /// On Unix this corresponds to calling fcntl, and on Windows this
    /// corresponds to calling ioctlsocket.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
        self.0.set_nonblocking(nonblocking)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for TcpStream {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    // Forwarded so the platform implementation can provide its own strategy.
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        self.0.read_to_end(buf)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for TcpStream {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }
    // No userspace buffering at this layer, so flushing is a no-op.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Read for &'a TcpStream {
    // Reading only needs `&TcpStream`; the inner type handles the rest.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        self.0.read_to_end(buf)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Write for &'a TcpStream {
    // Writing only needs `&TcpStream` as well.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
impl AsInner<net_imp::TcpStream> for TcpStream {
    // Borrow the platform-specific stream.
    fn as_inner(&self) -> &net_imp::TcpStream {
        &self.0
    }
}
impl FromInner<net_imp::TcpStream> for TcpStream {
    // Wrap a platform-specific stream; no extra state is needed.
    fn from_inner(inner: net_imp::TcpStream) -> TcpStream {
        TcpStream(inner)
    }
}
impl IntoInner<net_imp::TcpStream> for TcpStream {
    // Unwrap, transferring ownership of the underlying socket.
    fn into_inner(self) -> net_imp::TcpStream {
        self.0
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for TcpStream {
    // Delegate to the inner implementation, which knows the fd/socket handle.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.0, f)
    }
}
impl TcpListener {
    /// Creates a new `TcpListener` which will be bound to the specified
    /// address.
    ///
    /// The returned listener is ready for accepting connections.
    ///
    /// Binding with a port number of 0 will request that the OS assigns a port
    /// to this listener. The port allocated can be queried via the
    /// `local_addr` method.
    ///
    /// The address type can be any implementor of `ToSocketAddrs` trait. See
    /// its documentation for concrete examples.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<TcpListener> {
        super::each_addr(addr, net_imp::TcpListener::bind).map(TcpListener)
    }
    /// Returns the local socket address of this listener.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.0.socket_addr()
    }
    /// Creates a new independently owned handle to the underlying socket.
    ///
    /// The returned `TcpListener` is a reference to the same socket that this
    /// object references. Both handles can be used to accept incoming
    /// connections and options set on one listener will affect the other.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_clone(&self) -> io::Result<TcpListener> {
        self.0.duplicate().map(TcpListener)
    }
    /// Accept a new incoming connection from this listener.
    ///
    /// This function will block the calling thread until a new TCP connection
    /// is established. When established, the corresponding `TcpStream` and the
    /// remote peer's address will be returned.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
        self.0.accept().map(|(a, b)| (TcpStream(a), b))
    }
    /// Returns an iterator over the connections being received on this
    /// listener.
    ///
    /// The returned iterator will never return `None` and will also not yield
    /// the peer's `SocketAddr` structure.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn incoming(&self) -> Incoming {
        Incoming { listener: self }
    }
    /// Sets the value for the `IP_TTL` option on this socket.
    ///
    /// This value sets the time-to-live field that is used in every packet sent
    /// from this socket.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
        self.0.set_ttl(ttl)
    }
    /// Gets the value of the `IP_TTL` option for this socket.
    ///
    /// For more information about this option, see [`set_ttl`][link].
    ///
    /// [link]: #method.set_ttl
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn ttl(&self) -> io::Result<u32> {
        self.0.ttl()
    }
    /// Sets the value for the `IPV6_V6ONLY` option on this socket.
    ///
    /// If this is set to `true` then the socket is restricted to sending and
    /// receiving IPv6 packets only. If this is the case, an IPv4 and an IPv6
    /// application can each bind the same port at the same time.
    ///
    /// If this is set to `false` then the socket can be used to send and
    /// receive packets from an IPv4-mapped IPv6 address.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
        self.0.set_only_v6(only_v6)
    }
    /// Gets the value of the `IPV6_V6ONLY` option for this socket.
    ///
    /// For more information about this option, see [`set_only_v6`][link].
    ///
    /// [link]: #method.set_only_v6
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn only_v6(&self) -> io::Result<bool> {
        self.0.only_v6()
    }
    /// Get the value of the `SO_ERROR` option on this socket.
    ///
    /// This will retrieve the stored error in the underlying socket, clearing
    /// the field in the process. This can be useful for checking errors between
    /// calls.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        self.0.take_error()
    }
    /// Moves this listener into or out of nonblocking mode.
    ///
    /// On Unix this corresponds to calling fcntl, and on Windows this
    /// corresponds to calling ioctlsocket.
    #[stable(feature = "net2_mutators", since = "1.9.0")]
    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
        self.0.set_nonblocking(nonblocking)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for Incoming<'a> {
    type Item = io::Result<TcpStream>;

    /// Blocks until a connection is accepted; the peer address is dropped
    /// and the result is always `Some`.
    fn next(&mut self) -> Option<io::Result<TcpStream>> {
        let result = self.listener.accept().map(|(stream, _addr)| stream);
        Some(result)
    }
}
impl AsInner<net_imp::TcpListener> for TcpListener {
    // Borrow the platform-specific listener.
    fn as_inner(&self) -> &net_imp::TcpListener {
        &self.0
    }
}
impl FromInner<net_imp::TcpListener> for TcpListener {
    // Wrap a platform-specific listener without any extra state.
    fn from_inner(inner: net_imp::TcpListener) -> TcpListener { TcpListener(inner) }
}
impl IntoInner<net_imp::TcpListener> for TcpListener {
    // Unwrap, transferring ownership of the underlying socket.
    fn into_inner(self) -> net_imp::TcpListener {
        self.0
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for TcpListener {
    // Delegate to the inner implementation, which knows the fd/socket handle.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.0, f)
    }
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use io::ErrorKind;
use io::prelude::*;
use net::*;
use net::test::{next_test_ip4, next_test_ip6};
use sync::mpsc::channel;
use sys_common::AsInner;
use time::{Instant, Duration};
use thread;
fn each_ip(f: &mut FnMut(SocketAddr)) {
f(next_test_ip4());
f(next_test_ip6());
}
macro_rules! t {
($e:expr) => {
match $e {
Ok(t) => t,
Err(e) => panic!("received error for `{}`: {}", stringify!($e), e),
}
}
}
#[test]
fn bind_error() {
match TcpListener::bind("1.1.1.1:9999") {
Ok(..) => panic!(),
Err(e) =>
assert_eq!(e.kind(), ErrorKind::AddrNotAvailable),
}
}
#[test]
fn connect_error() {
match TcpStream::connect("0.0.0.0:1") {
Ok(..) => panic!(),
Err(e) => assert!(e.kind() == ErrorKind::ConnectionRefused ||
e.kind() == ErrorKind::InvalidInput ||
e.kind() == ErrorKind::AddrInUse ||
e.kind() == ErrorKind::AddrNotAvailable,
"bad error: {} {:?}", e, e.kind()),
}
}
#[test]
fn listen_localhost() {
let socket_addr = next_test_ip4();
let listener = t!(TcpListener::bind(&socket_addr));
let _t = thread::spawn(move || {
let mut stream = t!(TcpStream::connect(&("localhost",
socket_addr.port())));
t!(stream.write(&[144]));
});
let mut stream = t!(listener.accept()).0;
let mut buf = [0];
t!(stream.read(&mut buf));
assert!(buf[0] == 144);
}
#[test]
fn connect_loopback() {
each_ip(&mut |addr| {
let acceptor = t!(TcpListener::bind(&addr));
let _t = thread::spawn(move|| {
let host = match addr {
SocketAddr::V4(..) => "127.0.0.1",
SocketAddr::V6(..) => "::1",
};
let mut stream = t!(TcpStream::connect(&(host, addr.port())));
t!(stream.write(&[66]));
});
let mut stream = t!(acceptor.accept()).0;
let mut buf = [0];
t!(stream.read(&mut buf));
assert!(buf[0] == 66);
})
}
#[test]
fn smoke_test() {
each_ip(&mut |addr| {
let acceptor = t!(TcpListener::bind(&addr));
let (tx, rx) = channel();
let _t = thread::spawn(move|| {
let mut stream = t!(TcpStream::connect(&addr));
t!(stream.write(&[99]));
tx.send(t!(stream.local_addr())).unwrap();
});
let (mut stream, addr) = t!(acceptor.accept());
let mut buf = [0];
t!(stream.read(&mut buf));
assert!(buf[0] == 99);
assert_eq!(addr, t!(rx.recv()));
})
}
#[test]
fn read_eof() {
each_ip(&mut |addr| {
let acceptor = t!(TcpListener::bind(&addr));
let _t = thread::spawn(move|| {
let _stream = t!(TcpStream::connect(&addr));
// Close
});
let mut stream = t!(acceptor.accept()).0;
let mut buf = [0];
let nread = t!(stream.read(&mut buf));
assert_eq!(nread, 0);
let nread = t!(stream.read(&mut buf));
assert_eq!(nread, 0);
})
}
#[test]
// Writing to a socket whose peer has disconnected either succeeds (data sits
// in kernel buffers) or fails with one of the expected disconnect errors.
fn write_close() {
    each_ip(&mut |addr| {
        let acceptor = t!(TcpListener::bind(&addr));
        let (tx, rx) = channel();
        let _t = thread::spawn(move|| {
            drop(t!(TcpStream::connect(&addr)));
            tx.send(()).unwrap();
        });
        let mut stream = t!(acceptor.accept()).0;
        // Wait until the client side has definitely been dropped.
        rx.recv().unwrap();
        let buf = [0];
        match stream.write(&buf) {
            Ok(..) => {}
            Err(e) => {
                assert!(e.kind() == ErrorKind::ConnectionReset ||
                        e.kind() == ErrorKind::BrokenPipe ||
                        e.kind() == ErrorKind::ConnectionAborted,
                        "unknown error: {}", e);
            }
        }
    })
}
#[test]
// Ten sequential connections from one client thread, each writing one byte;
// the acceptor consumes exactly ten via incoming().take(max).
fn multiple_connect_serial() {
    each_ip(&mut |addr| {
        let max = 10;
        let acceptor = t!(TcpListener::bind(&addr));
        let _t = thread::spawn(move|| {
            for _ in 0..max {
                let mut stream = t!(TcpStream::connect(&addr));
                t!(stream.write(&[99]));
            }
        });
        for stream in acceptor.incoming().take(max) {
            let mut stream = t!(stream);
            let mut buf = [0];
            t!(stream.read(&mut buf));
            assert_eq!(buf[0], 99);
        }
    })
}
#[test]
// Recursively opens MAX connections before any byte is written, then writes
// i on connection i; per-connection handler threads check the payload. The
// greedy join order stresses interleaved accept/connect scheduling.
fn multiple_connect_interleaved_greedy_schedule() {
    const MAX: usize = 10;
    each_ip(&mut |addr| {
        let acceptor = t!(TcpListener::bind(&addr));
        let _t = thread::spawn(move|| {
            let acceptor = acceptor;
            for (i, stream) in acceptor.incoming().enumerate().take(MAX) {
                // Start another thread to handle the connection
                let _t = thread::spawn(move|| {
                    let mut stream = t!(stream);
                    let mut buf = [0];
                    t!(stream.read(&mut buf));
                    assert!(buf[0] == i as u8);
                });
            }
        });
        connect(0, addr);
    });
    // Each level connects, recurses to open the next connection, writes its
    // own index only after all deeper connections exist, then joins.
    fn connect(i: usize, addr: SocketAddr) {
        if i == MAX { return }
        let t = thread::spawn(move|| {
            let mut stream = t!(TcpStream::connect(&addr));
            // Connect again before writing
            connect(i + 1, addr);
            t!(stream.write(&[i as u8]));
        });
        t.join().ok().unwrap();
    }
}
#[test]
// Same recursive connect pattern as the greedy variant, but every payload is
// the constant 99, so acceptance order doesn't matter.
fn multiple_connect_interleaved_lazy_schedule() {
    const MAX: usize = 10;
    each_ip(&mut |addr| {
        let acceptor = t!(TcpListener::bind(&addr));
        let _t = thread::spawn(move|| {
            for stream in acceptor.incoming().take(MAX) {
                // Start another thread to handle the connection
                let _t = thread::spawn(move|| {
                    let mut stream = t!(stream);
                    let mut buf = [0];
                    t!(stream.read(&mut buf));
                    assert!(buf[0] == 99);
                });
            }
        });
        connect(0, addr);
    });
    fn connect(i: usize, addr: SocketAddr) {
        if i == MAX { return }
        let t = thread::spawn(move|| {
            let mut stream = t!(TcpStream::connect(&addr));
            connect(i + 1, addr);
            t!(stream.write(&[99]));
        });
        t.join().ok().unwrap();
    }
}
#[test]
// local_addr() on a listener and peer_addr() on a connected stream must both
// equal the address that was bound.
fn socket_and_peer_name() {
    each_ip(&mut |addr| {
        let listener = t!(TcpListener::bind(&addr));
        let so_name = t!(listener.local_addr());
        assert_eq!(addr, so_name);
        let _t = thread::spawn(move|| {
            t!(listener.accept());
        });
        let stream = t!(TcpStream::connect(&addr));
        assert_eq!(addr, t!(stream.peer_addr()));
    })
}
#[test]
// A read with a 10-byte buffer against a 1-byte send must return 1, not
// block waiting to fill the buffer.
fn partial_read() {
    each_ip(&mut |addr| {
        let (tx, rx) = channel();
        let srv = t!(TcpListener::bind(&addr));
        let _t = thread::spawn(move|| {
            let mut cl = t!(srv.accept()).0;
            cl.write(&[10]).unwrap();
            let mut b = [0];
            t!(cl.read(&mut b));
            tx.send(()).unwrap();
        });
        let mut c = t!(TcpStream::connect(&addr));
        let mut b = [0; 10];
        assert_eq!(c.read(&mut b).unwrap(), 1);
        t!(c.write(&[1]));
        rx.recv().unwrap();
    })
}
#[test]
// Binding the same address twice must fail; accept the platform-dependent
// error kinds (AddrInUse on most systems).
fn double_bind() {
    each_ip(&mut |addr| {
        let _listener = t!(TcpListener::bind(&addr));
        match TcpListener::bind(&addr) {
            Ok(..) => panic!(),
            Err(e) => {
                assert!(e.kind() == ErrorKind::ConnectionRefused ||
                        e.kind() == ErrorKind::Other ||
                        e.kind() == ErrorKind::AddrInUse,
                        "unknown error: {} {:?}", e, e.kind());
            }
        }
    })
}
#[test]
// Rebinding an address immediately after dropping its listener must succeed
// (relies on SO_REUSEADDR-like behavior; a TIME_WAIT socket must not block it).
fn fast_rebind() {
    each_ip(&mut |addr| {
        let acceptor = t!(TcpListener::bind(&addr));
        let _t = thread::spawn(move|| {
            t!(TcpStream::connect(&addr));
        });
        t!(acceptor.accept());
        drop(acceptor);
        t!(TcpListener::bind(&addr));
    });
}
#[test]
// try_clone() produces a second handle to the same connection: a write on
// the clone is observed by the peer, and a reply is readable on the original.
fn tcp_clone_smoke() {
    each_ip(&mut |addr| {
        let acceptor = t!(TcpListener::bind(&addr));
        let _t = thread::spawn(move|| {
            let mut s = t!(TcpStream::connect(&addr));
            let mut buf = [0, 0];
            assert_eq!(s.read(&mut buf).unwrap(), 1);
            assert_eq!(buf[0], 1);
            t!(s.write(&[2]));
        });
        let mut s1 = t!(acceptor.accept()).0;
        let s2 = t!(s1.try_clone());
        // Channels sequence the clone's write before the original's read.
        let (tx1, rx1) = channel();
        let (tx2, rx2) = channel();
        let _t = thread::spawn(move|| {
            let mut s2 = s2;
            rx1.recv().unwrap();
            t!(s2.write(&[1]));
            tx2.send(()).unwrap();
        });
        tx1.send(()).unwrap();
        let mut buf = [0, 0];
        assert_eq!(s1.read(&mut buf).unwrap(), 1);
        rx2.recv().unwrap();
    })
}
#[test]
// Two handles (original + clone) reading concurrently from one connection;
// the client paces its two writes off acks so each reader gets one byte.
fn tcp_clone_two_read() {
    each_ip(&mut |addr| {
        let acceptor = t!(TcpListener::bind(&addr));
        let (tx1, rx) = channel();
        let tx2 = tx1.clone();
        let _t = thread::spawn(move|| {
            let mut s = t!(TcpStream::connect(&addr));
            t!(s.write(&[1]));
            rx.recv().unwrap();
            t!(s.write(&[2]));
            rx.recv().unwrap();
        });
        let mut s1 = t!(acceptor.accept()).0;
        let s2 = t!(s1.try_clone());
        let (done, rx) = channel();
        let _t = thread::spawn(move|| {
            let mut s2 = s2;
            let mut buf = [0, 0];
            t!(s2.read(&mut buf));
            tx2.send(()).unwrap();
            done.send(()).unwrap();
        });
        let mut buf = [0, 0];
        t!(s1.read(&mut buf));
        tx1.send(()).unwrap();
        rx.recv().unwrap();
    })
}
#[test]
// Two handles writing concurrently to one connection; the client performs
// two reads, so both writes must be delivered without error.
fn tcp_clone_two_write() {
    each_ip(&mut |addr| {
        let acceptor = t!(TcpListener::bind(&addr));
        let _t = thread::spawn(move|| {
            let mut s = t!(TcpStream::connect(&addr));
            let mut buf = [0, 1];
            t!(s.read(&mut buf));
            t!(s.read(&mut buf));
        });
        let mut s1 = t!(acceptor.accept()).0;
        let s2 = t!(s1.try_clone());
        let (done, rx) = channel();
        let _t = thread::spawn(move|| {
            let mut s2 = s2;
            t!(s2.write(&[1]));
            done.send(()).unwrap();
        });
        t!(s1.write(&[2]));
        rx.recv().unwrap();
    })
}
#[test]
// shutdown(Write) makes further writes fail and delivers EOF to the peer,
// while the read half stays usable for the server's reply.
fn shutdown_smoke() {
    each_ip(&mut |addr| {
        let a = t!(TcpListener::bind(&addr));
        let _t = thread::spawn(move|| {
            let mut c = t!(a.accept()).0;
            let mut b = [0];
            assert_eq!(c.read(&mut b).unwrap(), 0);
            t!(c.write(&[1]));
        });
        let mut s = t!(TcpStream::connect(&addr));
        t!(s.shutdown(Shutdown::Write));
        assert!(s.write(&[1]).is_err());
        let mut b = [0, 0];
        assert_eq!(t!(s.read(&mut b)), 1);
        assert_eq!(b[0], 1);
    })
}
#[test]
// Shutting down read/write halves affects the socket itself, so the effect
// is visible through existing clones and through clones made afterwards.
fn close_readwrite_smoke() {
    each_ip(&mut |addr| {
        let a = t!(TcpListener::bind(&addr));
        let (tx, rx) = channel::<()>();
        let _t = thread::spawn(move|| {
            let _s = t!(a.accept());
            let _ = rx.recv();
        });
        let mut b = [0];
        let mut s = t!(TcpStream::connect(&addr));
        let mut s2 = t!(s.try_clone());
        // closing should prevent reads/writes
        t!(s.shutdown(Shutdown::Write));
        assert!(s.write(&[0]).is_err());
        t!(s.shutdown(Shutdown::Read));
        assert_eq!(s.read(&mut b).unwrap(), 0);
        // closing should affect previous handles
        assert!(s2.write(&[0]).is_err());
        assert_eq!(s2.read(&mut b).unwrap(), 0);
        // closing should affect new handles
        let mut s3 = t!(s.try_clone());
        assert!(s3.write(&[0]).is_err());
        assert_eq!(s3.read(&mut b).unwrap(), 0);
        // make sure these don't die
        let _ = s2.shutdown(Shutdown::Read);
        let _ = s2.shutdown(Shutdown::Write);
        let _ = s3.shutdown(Shutdown::Read);
        let _ = s3.shutdown(Shutdown::Write);
        drop(tx);
    })
}
#[test]
// A thread blocked in read() on a clone must be woken (with EOF) when the
// original handle shuts down the read half.
fn close_read_wakes_up() {
    each_ip(&mut |addr| {
        let a = t!(TcpListener::bind(&addr));
        let (tx1, rx) = channel::<()>();
        let _t = thread::spawn(move|| {
            let _s = t!(a.accept());
            let _ = rx.recv();
        });
        let s = t!(TcpStream::connect(&addr));
        let s2 = t!(s.try_clone());
        let (tx, rx) = channel();
        let _t = thread::spawn(move|| {
            let mut s2 = s2;
            assert_eq!(t!(s2.read(&mut [0])), 0);
            tx.send(()).unwrap();
        });
        // this should wake up the child thread
        t!(s.shutdown(Shutdown::Read));
        // this test will never finish if the child doesn't wake up
        rx.recv().unwrap();
        drop(tx1);
    })
}
#[test]
// try_clone() must succeed even while another clone is blocked in read();
// both the writer and the reader then signal completion.
fn clone_while_reading() {
    each_ip(&mut |addr| {
        let accept = t!(TcpListener::bind(&addr));
        // Enqueue a thread to write to a socket
        let (tx, rx) = channel();
        let (txdone, rxdone) = channel();
        let txdone2 = txdone.clone();
        let _t = thread::spawn(move|| {
            let mut tcp = t!(TcpStream::connect(&addr));
            rx.recv().unwrap();
            t!(tcp.write(&[0]));
            txdone2.send(()).unwrap();
        });
        // Spawn off a reading clone
        let tcp = t!(accept.accept()).0;
        let tcp2 = t!(tcp.try_clone());
        let txdone3 = txdone.clone();
        let _t = thread::spawn(move|| {
            let mut tcp2 = tcp2;
            t!(tcp2.read(&mut [0]));
            txdone3.send(()).unwrap();
        });
        // Try to ensure that the reading clone is indeed reading
        // (best-effort: yielding does not guarantee the reader has blocked).
        for _ in 0..50 {
            thread::yield_now();
        }
        // clone the handle again while it's reading, then let it finish the
        // read.
        let _ = t!(tcp.try_clone());
        tx.send(()).unwrap();
        rxdone.recv().unwrap();
        rxdone.recv().unwrap();
    })
}
#[test]
// A cloned listener can accept() independently: two clients, one accept on
// each handle.
fn clone_accept_smoke() {
    each_ip(&mut |addr| {
        let a = t!(TcpListener::bind(&addr));
        let a2 = t!(a.try_clone());
        let _t = thread::spawn(move|| {
            let _ = TcpStream::connect(&addr);
        });
        let _t = thread::spawn(move|| {
            let _ = TcpStream::connect(&addr);
        });
        t!(a.accept());
        t!(a2.accept());
    })
}
#[test]
// Original and cloned listeners blocked in accept() concurrently must each
// receive one of the two incoming connections.
fn clone_accept_concurrent() {
    each_ip(&mut |addr| {
        let a = t!(TcpListener::bind(&addr));
        let a2 = t!(a.try_clone());
        let (tx, rx) = channel();
        let tx2 = tx.clone();
        let _t = thread::spawn(move|| {
            tx.send(t!(a.accept())).unwrap();
        });
        let _t = thread::spawn(move|| {
            tx2.send(t!(a2.accept())).unwrap();
        });
        let _t = thread::spawn(move|| {
            let _ = TcpStream::connect(&addr);
        });
        let _t = thread::spawn(move|| {
            let _ = TcpStream::connect(&addr);
        });
        rx.recv().unwrap();
        rx.recv().unwrap();
    })
}
#[test]
// The Debug output of TcpListener/TcpStream must include addresses and the
// raw handle, whose field name is "socket" on Windows and "fd" elsewhere.
fn debug() {
    let name = if cfg!(windows) {"socket"} else {"fd"};
    let socket_addr = next_test_ip4();
    let listener = t!(TcpListener::bind(&socket_addr));
    // Reaches into the private sys wrapper to get the raw fd/handle value.
    let listener_inner = listener.0.socket().as_inner();
    let compare = format!("TcpListener {{ addr: {:?}, {}: {:?} }}",
                          socket_addr, name, listener_inner);
    assert_eq!(format!("{:?}", listener), compare);
    let stream = t!(TcpStream::connect(&("localhost",
                                         socket_addr.port())));
    let stream_inner = stream.0.socket().as_inner();
    let compare = format!("TcpStream {{ addr: {:?}, \
                          peer: {:?}, {}: {:?} }}",
                          stream.local_addr().unwrap(),
                          stream.peer_addr().unwrap(),
                          name,
                          stream_inner);
    assert_eq!(format!("{:?}", stream), compare);
}
// FIXME: re-enabled bitrig/openbsd tests once their socket timeout code
// no longer has rounding errors.
#[cfg_attr(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"), ignore)]
#[test]
// set_read_timeout/set_write_timeout must round-trip through the getters,
// and passing None must clear a previously-set timeout.
fn timeouts() {
    let addr = next_test_ip4();
    let listener = t!(TcpListener::bind(&addr));
    let stream = t!(TcpStream::connect(&("localhost", addr.port())));
    let dur = Duration::new(15410, 0);
    assert_eq!(None, t!(stream.read_timeout()));
    t!(stream.set_read_timeout(Some(dur)));
    assert_eq!(Some(dur), t!(stream.read_timeout()));
    assert_eq!(None, t!(stream.write_timeout()));
    t!(stream.set_write_timeout(Some(dur)));
    assert_eq!(Some(dur), t!(stream.write_timeout()));
    t!(stream.set_read_timeout(None));
    assert_eq!(None, t!(stream.read_timeout()));
    t!(stream.set_write_timeout(None));
    assert_eq!(None, t!(stream.write_timeout()));
    drop(listener);
}
#[test]
// A read with a 1s timeout and no incoming data must fail with
// WouldBlock/TimedOut, after at least ~400ms (loose lower bound to tolerate
// platform timer slop).
fn test_read_timeout() {
    let addr = next_test_ip4();
    let listener = t!(TcpListener::bind(&addr));
    let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
    t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
    let mut buf = [0; 10];
    let start = Instant::now();
    let kind = stream.read(&mut buf).err().expect("expected error").kind();
    assert!(kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut);
    assert!(start.elapsed() > Duration::from_millis(400));
    drop(listener);
}
#[test]
// With a read timeout set, available data is returned immediately; only the
// subsequent read on an empty socket times out.
fn test_read_with_timeout() {
    let addr = next_test_ip4();
    let listener = t!(TcpListener::bind(&addr));
    let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
    t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
    let mut other_end = t!(listener.accept()).0;
    t!(other_end.write_all(b"hello world"));
    let mut buf = [0; 11];
    t!(stream.read(&mut buf));
    assert_eq!(b"hello world", &buf[..]);
    let start = Instant::now();
    let kind = stream.read(&mut buf).err().expect("expected error").kind();
    assert!(kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut);
    assert!(start.elapsed() > Duration::from_millis(400));
    drop(listener);
}
#[test]
// set_nodelay (TCP_NODELAY) must round-trip through the nodelay() getter in
// both directions.
fn nodelay() {
    let addr = next_test_ip4();
    let _listener = t!(TcpListener::bind(&addr));
    let stream = t!(TcpStream::connect(&("localhost", addr.port())));
    assert_eq!(false, t!(stream.nodelay()));
    t!(stream.set_nodelay(true));
    assert_eq!(true, t!(stream.nodelay()));
    t!(stream.set_nodelay(false));
    assert_eq!(false, t!(stream.nodelay()));
}
#[test]
// set_ttl must round-trip through the ttl() getter on both listeners and
// streams.
fn ttl() {
    let ttl = 100;
    let addr = next_test_ip4();
    let listener = t!(TcpListener::bind(&addr));
    t!(listener.set_ttl(ttl));
    assert_eq!(ttl, t!(listener.ttl()));
    let stream = t!(TcpStream::connect(&("localhost", addr.port())));
    t!(stream.set_ttl(ttl));
    assert_eq!(ttl, t!(stream.ttl()));
}
#[test]
// set_nonblocking can be toggled on listener and stream; a read on an empty
// nonblocking stream must fail with WouldBlock rather than block.
fn set_nonblocking() {
    let addr = next_test_ip4();
    let listener = t!(TcpListener::bind(&addr));
    t!(listener.set_nonblocking(true));
    t!(listener.set_nonblocking(false));
    let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
    t!(stream.set_nonblocking(false));
    t!(stream.set_nonblocking(true));
    let mut buf = [0];
    match stream.read(&mut buf) {
        Ok(_) => panic!("expected error"),
        Err(ref e) if e.kind() == ErrorKind::WouldBlock => {}
        Err(e) => panic!("unexpected error {}", e),
    }
}
}
|
// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Interfaces to the operating system provided random number
//! generators.
pub use self::imp::OsRng;
// Unix (non-iOS) implementation: prefer the getrandom(2) syscall when the
// kernel supports it, otherwise fall back to reading /dev/urandom.
#[cfg(all(unix, not(target_os = "ios")))]
mod imp {
    use prelude::v1::*;
    use self::OsRngInner::*;
    use fs::File;
    use io;
    use libc;
    use mem;
    use rand::Rng;
    use rand::reader::ReaderRng;
    use sys::os::errno;
    // Raw wrapper over the getrandom(2) syscall; only compiled on Linux
    // targets whose syscall number is hard-coded below. Returns the raw
    // syscall result (-1 on error, otherwise bytes written).
    #[cfg(all(target_os = "linux",
              any(target_arch = "x86_64",
                  target_arch = "x86",
                  target_arch = "arm",
                  target_arch = "aarch64",
                  target_arch = "powerpc")))]
    fn getrandom(buf: &mut [u8]) -> libc::c_long {
        extern "C" {
            fn syscall(number: libc::c_long, ...) -> libc::c_long;
        }
        #[cfg(target_arch = "x86_64")]
        const NR_GETRANDOM: libc::c_long = 318;
        #[cfg(target_arch = "x86")]
        const NR_GETRANDOM: libc::c_long = 355;
        #[cfg(any(target_arch = "arm", target_arch = "aarch64", target_arch = "powerpc"))]
        const NR_GETRANDOM: libc::c_long = 384;
        unsafe {
            syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), 0)
        }
    }
    // Stub for platforms without the syscall: always report failure.
    #[cfg(not(all(target_os = "linux",
                  any(target_arch = "x86_64",
                      target_arch = "x86",
                      target_arch = "arm",
                      target_arch = "aarch64",
                      target_arch = "powerpc"))))]
    fn getrandom(_buf: &mut [u8]) -> libc::c_long { -1 }
    // Fill `v` completely, retrying on EINTR and on short reads; panics on
    // any other getrandom error.
    fn getrandom_fill_bytes(v: &mut [u8]) {
        let mut read = 0;
        let len = v.len();
        while read < len {
            let result = getrandom(&mut v[read..]);
            if result == -1 {
                let err = errno() as libc::c_int;
                if err == libc::EINTR {
                    continue;
                } else {
                    panic!("unexpected getrandom error: {}", err);
                }
            } else {
                read += result as usize;
            }
        }
    }
    fn getrandom_next_u32() -> u32 {
        let mut buf: [u8; 4] = [0; 4];
        getrandom_fill_bytes(&mut buf);
        // Native-endian reinterpretation of the 4 random bytes; byte order
        // is irrelevant for randomness.
        unsafe { mem::transmute::<[u8; 4], u32>(buf) }
    }
    fn getrandom_next_u64() -> u64 {
        let mut buf: [u8; 8] = [0; 8];
        getrandom_fill_bytes(&mut buf);
        unsafe { mem::transmute::<[u8; 8], u64>(buf) }
    }
    // Probe once (cached via Once + AtomicBool) whether the running kernel
    // implements getrandom(2): a zero-length call fails with ENOSYS iff not.
    #[cfg(all(target_os = "linux",
              any(target_arch = "x86_64",
                  target_arch = "x86",
                  target_arch = "arm",
                  target_arch = "aarch64",
                  target_arch = "powerpc")))]
    fn is_getrandom_available() -> bool {
        use sync::atomic::{AtomicBool, Ordering};
        use sync::Once;
        static CHECKER: Once = Once::new();
        static AVAILABLE: AtomicBool = AtomicBool::new(false);
        CHECKER.call_once(|| {
            let mut buf: [u8; 0] = [];
            let result = getrandom(&mut buf);
            let available = if result == -1 {
                let err = io::Error::last_os_error().raw_os_error();
                err != Some(libc::ENOSYS)
            } else {
                true
            };
            AVAILABLE.store(available, Ordering::Relaxed);
        });
        AVAILABLE.load(Ordering::Relaxed)
    }
    #[cfg(not(all(target_os = "linux",
                  any(target_arch = "x86_64",
                      target_arch = "x86",
                      target_arch = "arm",
                      target_arch = "aarch64",
                      target_arch = "powerpc"))))]
    fn is_getrandom_available() -> bool { false }
    /// A random number generator that retrieves randomness straight from
    /// the operating system. Platform sources:
    ///
    /// - Unix-like systems (Linux, Android, Mac OSX): read directly from
    ///   `/dev/urandom`, or from `getrandom(2)` system call if available.
    /// - Windows: calls `CryptGenRandom`, using the default cryptographic
    ///   service provider with the `PROV_RSA_FULL` type.
    /// - iOS: calls SecRandomCopyBytes as /dev/(u)random is sandboxed.
    ///
    /// This does not block.
    pub struct OsRng {
        inner: OsRngInner,
    }
    // Which backend this instance uses, decided once at construction.
    enum OsRngInner {
        OsGetrandomRng,
        OsReaderRng(ReaderRng<File>),
    }
    impl OsRng {
        /// Create a new `OsRng`.
        pub fn new() -> io::Result<OsRng> {
            if is_getrandom_available() {
                return Ok(OsRng { inner: OsGetrandomRng });
            }
            // Fallback: keep /dev/urandom open for the lifetime of the RNG.
            let reader = try!(File::open("/dev/urandom"));
            let reader_rng = ReaderRng::new(reader);
            Ok(OsRng { inner: OsReaderRng(reader_rng) })
        }
    }
    impl Rng for OsRng {
        fn next_u32(&mut self) -> u32 {
            match self.inner {
                OsGetrandomRng => getrandom_next_u32(),
                OsReaderRng(ref mut rng) => rng.next_u32(),
            }
        }
        fn next_u64(&mut self) -> u64 {
            match self.inner {
                OsGetrandomRng => getrandom_next_u64(),
                OsReaderRng(ref mut rng) => rng.next_u64(),
            }
        }
        fn fill_bytes(&mut self, v: &mut [u8]) {
            match self.inner {
                OsGetrandomRng => getrandom_fill_bytes(v),
                OsReaderRng(ref mut rng) => rng.fill_bytes(v)
            }
        }
    }
}
// iOS implementation: /dev/urandom is sandboxed, so use the Security
// framework's SecRandomCopyBytes instead.
#[cfg(target_os = "ios")]
mod imp {
    use prelude::v1::*;
    use io;
    use mem;
    use rand::Rng;
    use libc::{c_int, size_t};
    /// A random number generator that retrieves randomness straight from
    /// the operating system. Platform sources:
    ///
    /// - Unix-like systems (Linux, Android, Mac OSX): read directly from
    ///   `/dev/urandom`, or from `getrandom(2)` system call if available.
    /// - Windows: calls `CryptGenRandom`, using the default cryptographic
    ///   service provider with the `PROV_RSA_FULL` type.
    /// - iOS: calls SecRandomCopyBytes as /dev/(u)random is sandboxed.
    ///
    /// This does not block.
    pub struct OsRng {
        // dummy field to ensure that this struct cannot be constructed outside
        // of this module
        _dummy: (),
    }
    // NOTE(review): a zero-sized #[repr(C)] struct used behind an FFI
    // pointer trips the improper_ctypes lint; an opaque c_void alias is the
    // conventional representation for this handle.
    #[repr(C)]
    struct SecRandom;
    // Null means "use the default random number generator" per the
    // Security framework API.
    #[allow(non_upper_case_globals)]
    const kSecRandomDefault: *const SecRandom = 0 as *const SecRandom;
    #[link(name = "Security", kind = "framework")]
    extern "C" {
        fn SecRandomCopyBytes(rnd: *const SecRandom,
                              count: size_t, bytes: *mut u8) -> c_int;
    }
    impl OsRng {
        /// Create a new `OsRng`.
        pub fn new() -> io::Result<OsRng> {
            Ok(OsRng { _dummy: () })
        }
    }
    impl Rng for OsRng {
        fn next_u32(&mut self) -> u32 {
            let mut v = [0; 4];
            self.fill_bytes(&mut v);
            // Native-endian reinterpretation of random bytes.
            unsafe { mem::transmute(v) }
        }
        fn next_u64(&mut self) -> u64 {
            let mut v = [0; 8];
            self.fill_bytes(&mut v);
            unsafe { mem::transmute(v) }
        }
        fn fill_bytes(&mut self, v: &mut [u8]) {
            let ret = unsafe {
                SecRandomCopyBytes(kSecRandomDefault, v.len() as size_t,
                                   v.as_mut_ptr())
            };
            // SecRandomCopyBytes returns -1 on failure.
            if ret == -1 {
                panic!("couldn't generate random bytes: {}",
                       io::Error::last_os_error());
            }
        }
    }
}
// Windows implementation: acquire a CryptoAPI context once and draw bytes
// from CryptGenRandom.
#[cfg(windows)]
mod imp {
    use prelude::v1::*;
    use io;
    use mem;
    use rand::Rng;
    use libc::types::os::arch::extra::{LONG_PTR};
    use libc::{DWORD, BYTE, LPCSTR, BOOL};
    type HCRYPTPROV = LONG_PTR;
    /// A random number generator that retrieves randomness straight from
    /// the operating system. Platform sources:
    ///
    /// - Unix-like systems (Linux, Android, Mac OSX): read directly from
    ///   `/dev/urandom`, or from `getrandom(2)` system call if available.
    /// - Windows: calls `CryptGenRandom`, using the default cryptographic
    ///   service provider with the `PROV_RSA_FULL` type.
    /// - iOS: calls SecRandomCopyBytes as /dev/(u)random is sandboxed.
    ///
    /// This does not block.
    pub struct OsRng {
        hcryptprov: HCRYPTPROV
    }
    const PROV_RSA_FULL: DWORD = 1;
    const CRYPT_SILENT: DWORD = 64;
    const CRYPT_VERIFYCONTEXT: DWORD = 0xF0000000;
    #[allow(non_snake_case)]
    #[link(name = "advapi32")]
    extern "system" {
        fn CryptAcquireContextA(phProv: *mut HCRYPTPROV,
                                pszContainer: LPCSTR,
                                pszProvider: LPCSTR,
                                dwProvType: DWORD,
                                dwFlags: DWORD) -> BOOL;
        fn CryptGenRandom(hProv: HCRYPTPROV,
                          dwLen: DWORD,
                          pbBuffer: *mut BYTE) -> BOOL;
        fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) -> BOOL;
    }
    impl OsRng {
        /// Create a new `OsRng`.
        pub fn new() -> io::Result<OsRng> {
            let mut hcp = 0;
            // VERIFYCONTEXT: no persistent key container needed;
            // SILENT: never pop up UI.
            let ret = unsafe {
                CryptAcquireContextA(&mut hcp, 0 as LPCSTR, 0 as LPCSTR,
                                     PROV_RSA_FULL,
                                     CRYPT_VERIFYCONTEXT | CRYPT_SILENT)
            };
            if ret == 0 {
                Err(io::Error::last_os_error())
            } else {
                Ok(OsRng { hcryptprov: hcp })
            }
        }
    }
    impl Rng for OsRng {
        fn next_u32(&mut self) -> u32 {
            let mut v = [0; 4];
            self.fill_bytes(&mut v);
            unsafe { mem::transmute(v) }
        }
        fn next_u64(&mut self) -> u64 {
            let mut v = [0; 8];
            self.fill_bytes(&mut v);
            unsafe { mem::transmute(v) }
        }
        fn fill_bytes(&mut self, v: &mut [u8]) {
            let ret = unsafe {
                CryptGenRandom(self.hcryptprov, v.len() as DWORD,
                               v.as_mut_ptr())
            };
            if ret == 0 {
                panic!("couldn't generate random bytes: {}",
                       io::Error::last_os_error());
            }
        }
    }
    impl Drop for OsRng {
        fn drop(&mut self) {
            let ret = unsafe {
                CryptReleaseContext(self.hcryptprov, 0)
            };
            // NOTE(review): panicking in Drop aborts the process if we are
            // already unwinding from another panic.
            if ret == 0 {
                panic!("couldn't release context: {}",
                       io::Error::last_os_error());
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use prelude::v1::*;
    use sync::mpsc::channel;
    use rand::Rng;
    use super::OsRng;
    use thread;
    // Smoke test: every Rng entry point runs without panicking.
    #[test]
    fn test_os_rng() {
        let mut r = OsRng::new().unwrap();
        r.next_u32();
        r.next_u64();
        let mut v = [0; 1000];
        r.fill_bytes(&mut v);
    }
    // Exercise OsRng from 20 threads simultaneously, each gated on a channel
    // so they start together and interleave via yield_now().
    #[test]
    fn test_os_rng_tasks() {
        let mut txs = vec!();
        for _ in 0..20 {
            let (tx, rx) = channel();
            txs.push(tx);
            thread::spawn(move|| {
                // wait until all the threads are ready to go.
                rx.recv().unwrap();
                // deschedule to attempt to interleave things as much
                // as possible (XXX: is this a good test?)
                let mut r = OsRng::new().unwrap();
                thread::yield_now();
                let mut v = [0; 1000];
                for _ in 0..100 {
                    r.next_u32();
                    thread::yield_now();
                    r.next_u64();
                    thread::yield_now();
                    r.fill_bytes(&mut v);
                    thread::yield_now();
                }
            });
        }
        // start all the threads
        for tx in &txs {
            tx.send(()).unwrap();
        }
    }
}
Auto merge of #27267 - tamird:fix-ios-improper-ctypes, r=alexcrichton
Fixes #27263.
// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Interfaces to the operating system provided random number
//! generators.
pub use self::imp::OsRng;
// Unix (non-iOS) implementation: prefer the getrandom(2) syscall when the
// kernel supports it, otherwise fall back to reading /dev/urandom.
#[cfg(all(unix, not(target_os = "ios")))]
mod imp {
    use prelude::v1::*;
    use self::OsRngInner::*;
    use fs::File;
    use io;
    use libc;
    use mem;
    use rand::Rng;
    use rand::reader::ReaderRng;
    use sys::os::errno;
    // Raw wrapper over the getrandom(2) syscall; only compiled on Linux
    // targets whose syscall number is hard-coded below. Returns the raw
    // syscall result (-1 on error, otherwise bytes written).
    #[cfg(all(target_os = "linux",
              any(target_arch = "x86_64",
                  target_arch = "x86",
                  target_arch = "arm",
                  target_arch = "aarch64",
                  target_arch = "powerpc")))]
    fn getrandom(buf: &mut [u8]) -> libc::c_long {
        extern "C" {
            fn syscall(number: libc::c_long, ...) -> libc::c_long;
        }
        #[cfg(target_arch = "x86_64")]
        const NR_GETRANDOM: libc::c_long = 318;
        #[cfg(target_arch = "x86")]
        const NR_GETRANDOM: libc::c_long = 355;
        #[cfg(any(target_arch = "arm", target_arch = "aarch64", target_arch = "powerpc"))]
        const NR_GETRANDOM: libc::c_long = 384;
        unsafe {
            syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), 0)
        }
    }
    // Stub for platforms without the syscall: always report failure.
    #[cfg(not(all(target_os = "linux",
                  any(target_arch = "x86_64",
                      target_arch = "x86",
                      target_arch = "arm",
                      target_arch = "aarch64",
                      target_arch = "powerpc"))))]
    fn getrandom(_buf: &mut [u8]) -> libc::c_long { -1 }
    // Fill `v` completely, retrying on EINTR and on short reads; panics on
    // any other getrandom error.
    fn getrandom_fill_bytes(v: &mut [u8]) {
        let mut read = 0;
        let len = v.len();
        while read < len {
            let result = getrandom(&mut v[read..]);
            if result == -1 {
                let err = errno() as libc::c_int;
                if err == libc::EINTR {
                    continue;
                } else {
                    panic!("unexpected getrandom error: {}", err);
                }
            } else {
                read += result as usize;
            }
        }
    }
    fn getrandom_next_u32() -> u32 {
        let mut buf: [u8; 4] = [0; 4];
        getrandom_fill_bytes(&mut buf);
        // Native-endian reinterpretation of the 4 random bytes; byte order
        // is irrelevant for randomness.
        unsafe { mem::transmute::<[u8; 4], u32>(buf) }
    }
    fn getrandom_next_u64() -> u64 {
        let mut buf: [u8; 8] = [0; 8];
        getrandom_fill_bytes(&mut buf);
        unsafe { mem::transmute::<[u8; 8], u64>(buf) }
    }
    // Probe once (cached via Once + AtomicBool) whether the running kernel
    // implements getrandom(2): a zero-length call fails with ENOSYS iff not.
    #[cfg(all(target_os = "linux",
              any(target_arch = "x86_64",
                  target_arch = "x86",
                  target_arch = "arm",
                  target_arch = "aarch64",
                  target_arch = "powerpc")))]
    fn is_getrandom_available() -> bool {
        use sync::atomic::{AtomicBool, Ordering};
        use sync::Once;
        static CHECKER: Once = Once::new();
        static AVAILABLE: AtomicBool = AtomicBool::new(false);
        CHECKER.call_once(|| {
            let mut buf: [u8; 0] = [];
            let result = getrandom(&mut buf);
            let available = if result == -1 {
                let err = io::Error::last_os_error().raw_os_error();
                err != Some(libc::ENOSYS)
            } else {
                true
            };
            AVAILABLE.store(available, Ordering::Relaxed);
        });
        AVAILABLE.load(Ordering::Relaxed)
    }
    #[cfg(not(all(target_os = "linux",
                  any(target_arch = "x86_64",
                      target_arch = "x86",
                      target_arch = "arm",
                      target_arch = "aarch64",
                      target_arch = "powerpc"))))]
    fn is_getrandom_available() -> bool { false }
    /// A random number generator that retrieves randomness straight from
    /// the operating system. Platform sources:
    ///
    /// - Unix-like systems (Linux, Android, Mac OSX): read directly from
    ///   `/dev/urandom`, or from `getrandom(2)` system call if available.
    /// - Windows: calls `CryptGenRandom`, using the default cryptographic
    ///   service provider with the `PROV_RSA_FULL` type.
    /// - iOS: calls SecRandomCopyBytes as /dev/(u)random is sandboxed.
    ///
    /// This does not block.
    pub struct OsRng {
        inner: OsRngInner,
    }
    // Which backend this instance uses, decided once at construction.
    enum OsRngInner {
        OsGetrandomRng,
        OsReaderRng(ReaderRng<File>),
    }
    impl OsRng {
        /// Create a new `OsRng`.
        pub fn new() -> io::Result<OsRng> {
            if is_getrandom_available() {
                return Ok(OsRng { inner: OsGetrandomRng });
            }
            // Fallback: keep /dev/urandom open for the lifetime of the RNG.
            let reader = try!(File::open("/dev/urandom"));
            let reader_rng = ReaderRng::new(reader);
            Ok(OsRng { inner: OsReaderRng(reader_rng) })
        }
    }
    impl Rng for OsRng {
        fn next_u32(&mut self) -> u32 {
            match self.inner {
                OsGetrandomRng => getrandom_next_u32(),
                OsReaderRng(ref mut rng) => rng.next_u32(),
            }
        }
        fn next_u64(&mut self) -> u64 {
            match self.inner {
                OsGetrandomRng => getrandom_next_u64(),
                OsReaderRng(ref mut rng) => rng.next_u64(),
            }
        }
        fn fill_bytes(&mut self, v: &mut [u8]) {
            match self.inner {
                OsGetrandomRng => getrandom_fill_bytes(v),
                OsReaderRng(ref mut rng) => rng.fill_bytes(v)
            }
        }
    }
}
// iOS implementation: /dev/urandom is sandboxed, so use the Security
// framework's SecRandomCopyBytes instead. The SecRandom handle is modeled
// as an opaque c_void alias (fixes the improper_ctypes warning from the
// earlier zero-sized-struct representation).
#[cfg(target_os = "ios")]
mod imp {
    use prelude::v1::*;
    use io;
    use mem;
    use rand::Rng;
    use libc::{c_int, c_void, size_t};
    /// A random number generator that retrieves randomness straight from
    /// the operating system. Platform sources:
    ///
    /// - Unix-like systems (Linux, Android, Mac OSX): read directly from
    ///   `/dev/urandom`, or from `getrandom(2)` system call if available.
    /// - Windows: calls `CryptGenRandom`, using the default cryptographic
    ///   service provider with the `PROV_RSA_FULL` type.
    /// - iOS: calls SecRandomCopyBytes as /dev/(u)random is sandboxed.
    ///
    /// This does not block.
    pub struct OsRng {
        // dummy field to ensure that this struct cannot be constructed outside
        // of this module
        _dummy: (),
    }
    // Fake definition; this is actually a struct, but we don't use the
    // contents here.
    type SecRandom = c_void;
    // Null means "use the default random number generator" per the
    // Security framework API.
    #[allow(non_upper_case_globals)]
    const kSecRandomDefault: *const SecRandom = 0 as *const SecRandom;
    #[link(name = "Security", kind = "framework")]
    extern "C" {
        fn SecRandomCopyBytes(rnd: *const SecRandom,
                              count: size_t, bytes: *mut u8) -> c_int;
    }
    impl OsRng {
        /// Create a new `OsRng`.
        pub fn new() -> io::Result<OsRng> {
            Ok(OsRng { _dummy: () })
        }
    }
    impl Rng for OsRng {
        fn next_u32(&mut self) -> u32 {
            let mut v = [0; 4];
            self.fill_bytes(&mut v);
            // Native-endian reinterpretation of random bytes.
            unsafe { mem::transmute(v) }
        }
        fn next_u64(&mut self) -> u64 {
            let mut v = [0; 8];
            self.fill_bytes(&mut v);
            unsafe { mem::transmute(v) }
        }
        fn fill_bytes(&mut self, v: &mut [u8]) {
            let ret = unsafe {
                SecRandomCopyBytes(kSecRandomDefault, v.len() as size_t,
                                   v.as_mut_ptr())
            };
            // SecRandomCopyBytes returns -1 on failure.
            if ret == -1 {
                panic!("couldn't generate random bytes: {}",
                       io::Error::last_os_error());
            }
        }
    }
}
// Windows implementation: acquire a CryptoAPI context once and draw bytes
// from CryptGenRandom.
#[cfg(windows)]
mod imp {
    use prelude::v1::*;
    use io;
    use mem;
    use rand::Rng;
    use libc::types::os::arch::extra::{LONG_PTR};
    use libc::{DWORD, BYTE, LPCSTR, BOOL};
    type HCRYPTPROV = LONG_PTR;
    /// A random number generator that retrieves randomness straight from
    /// the operating system. Platform sources:
    ///
    /// - Unix-like systems (Linux, Android, Mac OSX): read directly from
    ///   `/dev/urandom`, or from `getrandom(2)` system call if available.
    /// - Windows: calls `CryptGenRandom`, using the default cryptographic
    ///   service provider with the `PROV_RSA_FULL` type.
    /// - iOS: calls SecRandomCopyBytes as /dev/(u)random is sandboxed.
    ///
    /// This does not block.
    pub struct OsRng {
        hcryptprov: HCRYPTPROV
    }
    const PROV_RSA_FULL: DWORD = 1;
    const CRYPT_SILENT: DWORD = 64;
    const CRYPT_VERIFYCONTEXT: DWORD = 0xF0000000;
    #[allow(non_snake_case)]
    #[link(name = "advapi32")]
    extern "system" {
        fn CryptAcquireContextA(phProv: *mut HCRYPTPROV,
                                pszContainer: LPCSTR,
                                pszProvider: LPCSTR,
                                dwProvType: DWORD,
                                dwFlags: DWORD) -> BOOL;
        fn CryptGenRandom(hProv: HCRYPTPROV,
                          dwLen: DWORD,
                          pbBuffer: *mut BYTE) -> BOOL;
        fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) -> BOOL;
    }
    impl OsRng {
        /// Create a new `OsRng`.
        pub fn new() -> io::Result<OsRng> {
            let mut hcp = 0;
            // VERIFYCONTEXT: no persistent key container needed;
            // SILENT: never pop up UI.
            let ret = unsafe {
                CryptAcquireContextA(&mut hcp, 0 as LPCSTR, 0 as LPCSTR,
                                     PROV_RSA_FULL,
                                     CRYPT_VERIFYCONTEXT | CRYPT_SILENT)
            };
            if ret == 0 {
                Err(io::Error::last_os_error())
            } else {
                Ok(OsRng { hcryptprov: hcp })
            }
        }
    }
    impl Rng for OsRng {
        fn next_u32(&mut self) -> u32 {
            let mut v = [0; 4];
            self.fill_bytes(&mut v);
            unsafe { mem::transmute(v) }
        }
        fn next_u64(&mut self) -> u64 {
            let mut v = [0; 8];
            self.fill_bytes(&mut v);
            unsafe { mem::transmute(v) }
        }
        fn fill_bytes(&mut self, v: &mut [u8]) {
            let ret = unsafe {
                CryptGenRandom(self.hcryptprov, v.len() as DWORD,
                               v.as_mut_ptr())
            };
            if ret == 0 {
                panic!("couldn't generate random bytes: {}",
                       io::Error::last_os_error());
            }
        }
    }
    impl Drop for OsRng {
        fn drop(&mut self) {
            let ret = unsafe {
                CryptReleaseContext(self.hcryptprov, 0)
            };
            // NOTE(review): panicking in Drop aborts the process if we are
            // already unwinding from another panic.
            if ret == 0 {
                panic!("couldn't release context: {}",
                       io::Error::last_os_error());
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use prelude::v1::*;
    use sync::mpsc::channel;
    use rand::Rng;
    use super::OsRng;
    use thread;
    // Smoke test: every Rng entry point runs without panicking.
    #[test]
    fn test_os_rng() {
        let mut r = OsRng::new().unwrap();
        r.next_u32();
        r.next_u64();
        let mut v = [0; 1000];
        r.fill_bytes(&mut v);
    }
    // Exercise OsRng from 20 threads simultaneously, each gated on a channel
    // so they start together and interleave via yield_now().
    #[test]
    fn test_os_rng_tasks() {
        let mut txs = vec!();
        for _ in 0..20 {
            let (tx, rx) = channel();
            txs.push(tx);
            thread::spawn(move|| {
                // wait until all the threads are ready to go.
                rx.recv().unwrap();
                // deschedule to attempt to interleave things as much
                // as possible (XXX: is this a good test?)
                let mut r = OsRng::new().unwrap();
                thread::yield_now();
                let mut v = [0; 1000];
                for _ in 0..100 {
                    r.next_u32();
                    thread::yield_now();
                    r.next_u64();
                    thread::yield_now();
                    r.fill_bytes(&mut v);
                    thread::yield_now();
                }
            });
        }
        // start all the threads
        for tx in &txs {
            tx.send(()).unwrap();
        }
    }
}
|
use crate::commit_line::format_commit_line;
use crate::io::ErrorKind;
use std::io::{self, BufWriter, Write};
use std::process::exit;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::thread::{self, JoinHandle};
use crate::{constants::*, refiner};
use regex::Regex;
use threadpool::ThreadPool;
// ANSI escape applied to hunk header lines ("@@ ... @@").
const HUNK_HEADER: &str = "\x1b[36m"; // Cyan
lazy_static! {
    // Diff metadata lines whose highlighting is fixed by their prefix:
    // each entry maps a line prefix to the ANSI style for the whole line.
    static ref STATIC_HEADER_PREFIXES: Vec<(&'static str, &'static str)> = vec![
        ("diff ", FAINT),
        ("index ", FAINT),
        ("Binary files ", BOLD),
        ("copy from ", FAINT),
        ("copy to ", BOLD),
        ("rename from ", FAINT),
        ("rename to ", BOLD),
        ("similarity index ", FAINT),
        ("new file mode ", FAINT),
        ("deleted file mode ", FAINT),
        ("--- /dev/null", FAINT),
        ("+++ /dev/null", FAINT),
    ];
    // Matches any ANSI escape sequence ending in 'm' (SGR sequences).
    static ref ANSI_COLOR_REGEX: Regex = Regex::new("\x1b[^m]*m").unwrap();
}
/// If `line` starts with one of the static diff-header prefixes, return the
/// fixed ANSI highlight for that whole line; otherwise `None`.
///
/// Improvements over the previous version: the manual indexed loop with an
/// explicit `return None;` is replaced by an idiomatic iterator
/// `find(..).map(..)` chain (clippy: `needless_return`, manual find).
#[must_use]
fn get_fixed_highlight(line: &str) -> Option<&str> {
    STATIC_HEADER_PREFIXES
        .iter()
        .find(|(prefix, _)| line.starts_with(prefix))
        .map(|&(_, highlight)| highlight)
}
/// Write `text` to the buffered output stream.
///
/// A `BrokenPipe` error means the pager quit before consuming all of our
/// output — that is a normal way to exit, so we terminate successfully.
/// Any other write error is a bug and aborts with a panic.
fn print<W: io::Write + Send>(stream: &mut BufWriter<W>, text: &str) {
    match stream.write_all(text.as_bytes()) {
        Ok(()) => {}
        Err(ref error) if error.kind() == ErrorKind::BrokenPipe => {
            // Somebody probably just quit their pager before it was done
            // reading our output — exit quietly.
            exit(0);
        }
        Err(error) => panic!("Error writing diff to pager: {:?}", error),
    }
}
/**
A StringFuture can perform diffing in a background thread.

Doing get() on a future that isn't done yet will block until the result is
available.
*/
struct StringFuture {
// This field is only valid if we're done with the result_receiver (next
// field)
result: String,
// If available, get() will await a result on this receiver, then populate
// the result field and return it; set to None once resolved
result_receiver: Option<Receiver<String>>,
}
impl StringFuture {
    /// Wrap an already-computed string in a finished future.
    pub fn from_string(result: String) -> StringFuture {
        StringFuture {
            result,
            result_receiver: None,
        }
    }

    /// Kick off a background diff of `old_text` vs `new_text` on the given
    /// thread pool; call get() to (blockingly) retrieve the result.
    pub fn from_oldnew(
        old_text: String,
        new_text: String,
        thread_pool: &ThreadPool,
    ) -> StringFuture {
        // One-slot channel carrying the finished diff back to the future
        let (sender, receiver): (SyncSender<String>, Receiver<String>) = sync_channel(1);

        thread_pool.execute(move || {
            let mut formatted = String::new();
            for line in refiner::format(&old_text, &new_text) {
                formatted.push_str(&line);
                formatted.push('\n');
            }

            // Done, channel the result!
            sender.send(formatted).unwrap();
        });

        StringFuture {
            result: "".to_string(),
            result_receiver: Some(receiver),
        }
    }

    /// True if the (possibly still pending) result is the empty string.
    #[allow(clippy::wrong_self_convention)]
    pub fn is_empty(&mut self) -> bool {
        self.get().is_empty()
    }

    /// Block until the result is available, then return it.
    pub fn get(&mut self) -> &str {
        // Resolve the pending computation, if any; take() leaves None behind
        // so subsequent calls return the cached result immediately.
        if let Some(receiver) = self.result_receiver.take() {
            self.result = receiver.recv().unwrap();
        }

        &self.result
    }
}
/**
The way this thing works from the outside is that you initialize it with an
output stream, you pass it one line of input at a time, and it writes
formatted lines to the output stream.

From the inside, it will collect blocks of either diff lines or not-diff-lines.
The not-diff-lines blocks will be enqueued for printing by the printing thread.
The diff lines blocks will also be enqueued for printing, but the actual diffing
will happen in background threads.
*/
pub struct LineCollector {
// Accumulated "-" lines (marker stripped), flushed by drain_oldnew()
old_text: String,
// Accumulated "+" lines (marker stripped), flushed by drain_oldnew()
new_text: String,
// Accumulated non-diff lines, flushed by drain_plain()
plain_text: String,
// True once any line starting with "diff" has been seen
diff_seen: bool,
// Printer thread handle; taken and joined in drop()
consumer_thread: Option<JoinHandle<()>>,
diffing_threads: ThreadPool,
// FIXME: I'd rather have had a SyncSender of some trait here. That would
// enable us to have two separate result implementations, one which just
// returns a string and another that does a background computation first.
// But I failed to figure out how when I tried, more Googling needed!
queue_putter: SyncSender<StringFuture>,
}
impl Drop for LineCollector {
    fn drop(&mut self) {
        // Flush any outstanding lines. This can be done in any order, at most
        // one of them is going to do anything anyway.
        self.drain_oldnew();
        self.drain_plain();

        // Tell the consumer thread to drain and quit. Sending an empty string
        // like this is the secret handshake for requesting a shutdown.
        self.queue_putter
            .send(StringFuture::from_string(String::new()))
            .unwrap();

        // Wait for the consumer thread to finish
        // https://stackoverflow.com/q/57670145/473672
        //
        // `if let` instead of the previous `.take().map(JoinHandle::join)`:
        // using map() for a side effect obscures intent (clippy
        // option_map_unit_fn). The join result is deliberately ignored;
        // panicking inside drop() would abort the process.
        if let Some(consumer) = self.consumer_thread.take() {
            let _ = consumer.join();
        }
    }
}
impl LineCollector {
    /// Create a collector that writes highlighted diff output to `output`.
    ///
    /// Spawns the "Output Printer Thread" and a pool of diffing threads;
    /// drop() flushes all pending output and shuts them down.
    pub fn new<W: io::Write + Send + 'static>(output: W) -> LineCollector {
        // This is how many entries we can look ahead. An "entry" in this case
        // being either a plain text section or an oldnew section.
        //
        // Benchmark timings with different multipliers on an 8 logical cores
        // machine with a 6.6M lines / 208MB diff:
        //
        // 500x => 5.68s <-- Not much better than 100x
        // 100x => 5.71s <-- Somewhat better than 50x
        // 50x => 5.98s
        // 10x >= 7.41s <-- Much worse than 50x
        let queue_size = num_cpus::get() * 100;

        // Allocate a queue where we can push our futures to the consumer thread
        let (queue_putter, queue_getter): (SyncSender<StringFuture>, Receiver<StringFuture>) =
            sync_channel(queue_size);

        // This thread takes futures and prints their results
        let thread_builder = thread::Builder::new().name("Output Printer Thread".to_string());
        let consumer = thread_builder
            .spawn(move || {
                let mut output = BufWriter::new(output);
                loop {
                    if let Ok(mut print_me) = queue_getter.recv() {
                        if print_me.is_empty() {
                            // Secret handshake received, done!
                            break;
                        }
                        print(&mut output, print_me.get());
                    }
                }
            })
            .unwrap();

        LineCollector {
            // String::new() instead of String::from(""): same value, clearer
            old_text: String::new(),
            new_text: String::new(),
            plain_text: String::new(),
            diff_seen: false,
            consumer_thread: Some(consumer),
            diffing_threads: ThreadPool::new(num_cpus::get()),
            queue_putter,
        }
    }

    /// Enqueue the accumulated old/new texts (if any) for background diffing.
    fn drain_oldnew(&mut self) {
        if self.old_text.is_empty() && self.new_text.is_empty() {
            return;
        }

        // mem::take() hands the buffers to the diffing job and leaves empty
        // strings behind -- exactly the reset we want, without the full
        // clone() + clear() copies the previous code made on every drain.
        self.queue_putter
            .send(StringFuture::from_oldnew(
                std::mem::take(&mut self.old_text),
                std::mem::take(&mut self.new_text),
                &self.diffing_threads,
            ))
            .unwrap();
    }

    /// Enqueue the accumulated plain text (if any) for printing.
    fn drain_plain(&mut self) {
        if self.plain_text.is_empty() {
            return;
        }

        // Enqueue an already-resolved future; mem::take() hands over the
        // buffer without copying it and leaves it empty for the next section.
        self.queue_putter
            .send(StringFuture::from_string(std::mem::take(&mut self.plain_text)))
            .unwrap();
    }

    /// Buffer one non-diff line (plus a trailing linefeed) for printing.
    fn consume_plain_line(&mut self, line: &str) {
        self.drain_oldnew();
        self.plain_text.push_str(line);
        self.plain_text.push('\n');
    }

    /// Like consume_plain_line(), but without outputting any trailing linefeed.
    fn consume_plain_linepart(&mut self, linepart: &str) {
        self.drain_oldnew();
        self.plain_text.push_str(linepart);
    }

    /// Buffer one "-" line; the leading marker character is stripped.
    fn consume_old_line(&mut self, line: &str) {
        self.drain_plain();
        self.old_text.push_str(&line[1..]);
        self.old_text.push('\n');
    }

    /// Buffer one "+" line; the leading marker character is stripped.
    fn consume_new_line(&mut self, line: &str) {
        self.drain_plain();
        self.new_text.push_str(&line[1..]);
        self.new_text.push('\n');
    }

    /// Handle a "no newline at end of file" marker line.
    fn consume_no_eof_newline_marker(&mut self) {
        if !self.new_text.is_empty() {
            // New section comes after old, so if we get in here it's a new
            // section that doesn't end in a newline. Remove its trailing
            // newline.
            assert!(self.new_text.pop().unwrap() == '\n');
            return;
        }

        if !self.old_text.is_empty() {
            // Old text doesn't end in a newline, remove its trailing newline
            assert!(self.old_text.pop().unwrap() == '\n');
            return;
        }

        // It's a piece of unchanged text that doesn't end in a newline, just
        // consume the colorized marker as plain text
        self.consume_plain_line(&format!(
            "{}{}{}",
            NO_EOF_NEWLINE_COLOR, &NO_EOF_NEWLINE_MARKER, NORMAL
        ))
    }

    /// Highlight a "---" / "+++" file header line; a trailing tab-separated
    /// part (plain diff's timestamps) is rendered faint.
    pub fn consume_plusminus_header(&mut self, line: &str) {
        self.consume_plain_linepart(BOLD);

        if let Some(last_tab_index) = line.rfind('\t') {
            self.consume_plain_linepart(&line[..last_tab_index]);

            // When I ran plain "diff" (no git involved), this trailing part
            // contained very precise file timestamps. I don't think those
            // provide much value, so let's faint them out.
            self.consume_plain_linepart(FAINT);
            self.consume_plain_linepart(&line[last_tab_index..]);
        } else {
            self.consume_plain_linepart(line);
        }

        self.consume_plain_line(NORMAL);
    }

    /// Highlight an "@@ ... @@" hunk header; any trailing text after the
    /// second "@@" (usually the function name) is emphasized.
    fn consume_hunk_header(&mut self, line: &str) {
        self.consume_plain_linepart(HUNK_HEADER);

        if let Some(second_atat_index) = line.find(" @@ ") {
            // Highlight the function name
            self.consume_plain_linepart(FAINT);
            self.consume_plain_linepart(&line[..(second_atat_index + 4)]);
            self.consume_plain_linepart(BOLD);
            self.consume_plain_linepart(&line[(second_atat_index + 4)..]);
        } else {
            self.consume_plain_linepart(line);
        }

        self.consume_plain_line(NORMAL);
    }

    /// Remove everything matched by ANSI_COLOR_REGEX from `input`.
    fn without_ansi_escape_codes(input: &'_ str) -> std::borrow::Cow<'_, str> {
        ANSI_COLOR_REGEX.replace_all(input, "")
    }

    /// Classify one line of input and route it to the right consumer.
    pub fn consume_line(&mut self, line: String) {
        // Strip out incoming ANSI formatting. This enables us to highlight
        // already-colored input.
        let line = LineCollector::without_ansi_escape_codes(&line);

        if line.starts_with("diff") {
            self.diff_seen = true;
        }

        if let Some(fixed_highlight) = get_fixed_highlight(&line) {
            self.consume_plain_linepart(fixed_highlight);
            self.consume_plain_linepart(&line);
            self.consume_plain_line(NORMAL); // consume_plain_line() will add a linefeed to the output
            return;
        }

        if line.starts_with("commit") {
            self.consume_plain_line(&format_commit_line(&line, self.diff_seen));
            return;
        }

        if line.starts_with("---") || line.starts_with("+++") {
            self.consume_plusminus_header(&line);
            return;
        }

        if line.starts_with("@@ ") {
            self.consume_hunk_header(&line);
            return;
        }

        if line.is_empty() {
            self.consume_plain_line("");
            return;
        }

        if line.starts_with('-') {
            self.consume_old_line(&line);
            return;
        }

        if line.starts_with('+') {
            self.consume_new_line(&line);
            return;
        }

        if line == NO_EOF_NEWLINE_MARKER {
            self.consume_no_eof_newline_marker();
            return;
        }

        self.consume_plain_line(&line);
    }
}
Add a (failing) test
use crate::commit_line::format_commit_line;
use crate::io::ErrorKind;
use std::io::{self, BufWriter, Write};
use std::process::exit;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::thread::{self, JoinHandle};
use crate::{constants::*, refiner};
use regex::Regex;
use threadpool::ThreadPool;
// ANSI style applied to "@@ ... @@" hunk header lines
const HUNK_HEADER: &str = "\x1b[36m"; // Cyan

lazy_static! {
    // Lines starting with one of these prefixes get the paired style applied
    // to the whole line; get_fixed_highlight() returns the first match.
    static ref STATIC_HEADER_PREFIXES: Vec<(&'static str, &'static str)> = vec![
        ("diff ", FAINT),
        ("index ", FAINT),
        ("Binary files ", BOLD),
        ("copy from ", FAINT),
        ("copy to ", BOLD),
        ("rename from ", FAINT),
        ("rename to ", BOLD),
        ("similarity index ", FAINT),
        ("new file mode ", FAINT),
        ("deleted file mode ", FAINT),
        ("--- /dev/null", FAINT),
        ("+++ /dev/null", FAINT),
    ];

    // Match any CSI escape sequence: ESC '[' parameters final-letter. The old
    // pattern "\x1b[^m]*m" only handled sequences ending in "m" (SGR colors);
    // it missed sequences like "\x1b[0K" (erase-in-line) -- see test_non_sgr
    // below -- and could even swallow visible text up to an unrelated "m".
    static ref ANSI_COLOR_REGEX: Regex = Regex::new("\x1b\\[[0-9;]*[A-Za-z]").unwrap();
}
/// If `line` is one of the known fixed diff header lines, return the ANSI
/// style the whole line should be rendered in, otherwise `None`.
#[must_use]
fn get_fixed_highlight(line: &str) -> Option<&str> {
    // Iterator find/map instead of a manual loop with `return`; the prefix
    // table is tiny so a linear scan is fine. First match wins, as before.
    STATIC_HEADER_PREFIXES
        .iter()
        .find(|(prefix, _)| line.starts_with(prefix))
        .map(|(_, style)| *style)
}
/// Write `text` to the buffered output stream.
///
/// A broken pipe is treated as a normal exit (the pager quit before reading
/// all of our output); any other write error is fatal.
fn print<W: io::Write + Send>(stream: &mut BufWriter<W>, text: &str) {
    match stream.write_all(text.as_bytes()) {
        Ok(()) => {}
        Err(ref error) if error.kind() == ErrorKind::BrokenPipe => {
            // This is fine, somebody probably just quit their pager before it
            // was done reading our output.
            exit(0);
        }
        Err(error) => panic!("Error writing diff to pager: {:?}", error),
    }
}
/**
A StringFuture can perform diffing in a background thread.

Doing get() on a future that isn't done yet will block until the result is
available.
*/
struct StringFuture {
// This field is only valid if we're done with the result_receiver (next
// field)
result: String,
// If available, get() will await a result on this receiver, then populate
// the result field and return it; set to None once resolved
result_receiver: Option<Receiver<String>>,
}
impl StringFuture {
    /// Wrap an already-computed string in a finished future.
    pub fn from_string(result: String) -> StringFuture {
        StringFuture {
            result,
            result_receiver: None,
        }
    }

    /// Kick off a background diff of `old_text` vs `new_text` on the given
    /// thread pool; call get() to (blockingly) retrieve the result.
    pub fn from_oldnew(
        old_text: String,
        new_text: String,
        thread_pool: &ThreadPool,
    ) -> StringFuture {
        // One-slot channel carrying the finished diff back to the future
        let (sender, receiver): (SyncSender<String>, Receiver<String>) = sync_channel(1);

        thread_pool.execute(move || {
            let mut formatted = String::new();
            for line in refiner::format(&old_text, &new_text) {
                formatted.push_str(&line);
                formatted.push('\n');
            }

            // Done, channel the result!
            sender.send(formatted).unwrap();
        });

        StringFuture {
            result: "".to_string(),
            result_receiver: Some(receiver),
        }
    }

    /// True if the (possibly still pending) result is the empty string.
    #[allow(clippy::wrong_self_convention)]
    pub fn is_empty(&mut self) -> bool {
        self.get().is_empty()
    }

    /// Block until the result is available, then return it.
    pub fn get(&mut self) -> &str {
        // Resolve the pending computation, if any; take() leaves None behind
        // so subsequent calls return the cached result immediately.
        if let Some(receiver) = self.result_receiver.take() {
            self.result = receiver.recv().unwrap();
        }

        &self.result
    }
}
/**
The way this thing works from the outside is that you initialize it with an
output stream, you pass it one line of input at a time, and it writes
formatted lines to the output stream.

From the inside, it will collect blocks of either diff lines or not-diff-lines.
The not-diff-lines blocks will be enqueued for printing by the printing thread.
The diff lines blocks will also be enqueued for printing, but the actual diffing
will happen in background threads.
*/
pub struct LineCollector {
// Accumulated "-" lines (marker stripped), flushed by drain_oldnew()
old_text: String,
// Accumulated "+" lines (marker stripped), flushed by drain_oldnew()
new_text: String,
// Accumulated non-diff lines, flushed by drain_plain()
plain_text: String,
// True once any line starting with "diff" has been seen
diff_seen: bool,
// Printer thread handle; taken and joined in drop()
consumer_thread: Option<JoinHandle<()>>,
diffing_threads: ThreadPool,
// FIXME: I'd rather have had a SyncSender of some trait here. That would
// enable us to have two separate result implementations, one which just
// returns a string and another that does a background computation first.
// But I failed to figure out how when I tried, more Googling needed!
queue_putter: SyncSender<StringFuture>,
}
impl Drop for LineCollector {
    fn drop(&mut self) {
        // Flush any outstanding lines. This can be done in any order, at most
        // one of them is going to do anything anyway.
        self.drain_oldnew();
        self.drain_plain();

        // Tell the consumer thread to drain and quit. Sending an empty string
        // like this is the secret handshake for requesting a shutdown.
        self.queue_putter
            .send(StringFuture::from_string(String::new()))
            .unwrap();

        // Wait for the consumer thread to finish
        // https://stackoverflow.com/q/57670145/473672
        //
        // `if let` instead of the previous `.take().map(JoinHandle::join)`:
        // using map() for a side effect obscures intent (clippy
        // option_map_unit_fn). The join result is deliberately ignored;
        // panicking inside drop() would abort the process.
        if let Some(consumer) = self.consumer_thread.take() {
            let _ = consumer.join();
        }
    }
}
impl LineCollector {
    /// Create a collector that writes highlighted diff output to `output`.
    ///
    /// Spawns the "Output Printer Thread" and a pool of diffing threads;
    /// drop() flushes all pending output and shuts them down.
    pub fn new<W: io::Write + Send + 'static>(output: W) -> LineCollector {
        // This is how many entries we can look ahead. An "entry" in this case
        // being either a plain text section or an oldnew section.
        //
        // Benchmark timings with different multipliers on an 8 logical cores
        // machine with a 6.6M lines / 208MB diff:
        //
        // 500x => 5.68s <-- Not much better than 100x
        // 100x => 5.71s <-- Somewhat better than 50x
        // 50x => 5.98s
        // 10x >= 7.41s <-- Much worse than 50x
        let queue_size = num_cpus::get() * 100;

        // Allocate a queue where we can push our futures to the consumer thread
        let (queue_putter, queue_getter): (SyncSender<StringFuture>, Receiver<StringFuture>) =
            sync_channel(queue_size);

        // This thread takes futures and prints their results
        let thread_builder = thread::Builder::new().name("Output Printer Thread".to_string());
        let consumer = thread_builder
            .spawn(move || {
                let mut output = BufWriter::new(output);
                loop {
                    if let Ok(mut print_me) = queue_getter.recv() {
                        if print_me.is_empty() {
                            // Secret handshake received, done!
                            break;
                        }
                        print(&mut output, print_me.get());
                    }
                }
            })
            .unwrap();

        LineCollector {
            // String::new() instead of String::from(""): same value, clearer
            old_text: String::new(),
            new_text: String::new(),
            plain_text: String::new(),
            diff_seen: false,
            consumer_thread: Some(consumer),
            diffing_threads: ThreadPool::new(num_cpus::get()),
            queue_putter,
        }
    }

    /// Enqueue the accumulated old/new texts (if any) for background diffing.
    fn drain_oldnew(&mut self) {
        if self.old_text.is_empty() && self.new_text.is_empty() {
            return;
        }

        // mem::take() hands the buffers to the diffing job and leaves empty
        // strings behind -- exactly the reset we want, without the full
        // clone() + clear() copies the previous code made on every drain.
        self.queue_putter
            .send(StringFuture::from_oldnew(
                std::mem::take(&mut self.old_text),
                std::mem::take(&mut self.new_text),
                &self.diffing_threads,
            ))
            .unwrap();
    }

    /// Enqueue the accumulated plain text (if any) for printing.
    fn drain_plain(&mut self) {
        if self.plain_text.is_empty() {
            return;
        }

        // Enqueue an already-resolved future; mem::take() hands over the
        // buffer without copying it and leaves it empty for the next section.
        self.queue_putter
            .send(StringFuture::from_string(std::mem::take(&mut self.plain_text)))
            .unwrap();
    }

    /// Buffer one non-diff line (plus a trailing linefeed) for printing.
    fn consume_plain_line(&mut self, line: &str) {
        self.drain_oldnew();
        self.plain_text.push_str(line);
        self.plain_text.push('\n');
    }

    /// Like consume_plain_line(), but without outputting any trailing linefeed.
    fn consume_plain_linepart(&mut self, linepart: &str) {
        self.drain_oldnew();
        self.plain_text.push_str(linepart);
    }

    /// Buffer one "-" line; the leading marker character is stripped.
    fn consume_old_line(&mut self, line: &str) {
        self.drain_plain();
        self.old_text.push_str(&line[1..]);
        self.old_text.push('\n');
    }

    /// Buffer one "+" line; the leading marker character is stripped.
    fn consume_new_line(&mut self, line: &str) {
        self.drain_plain();
        self.new_text.push_str(&line[1..]);
        self.new_text.push('\n');
    }

    /// Handle a "no newline at end of file" marker line.
    fn consume_no_eof_newline_marker(&mut self) {
        if !self.new_text.is_empty() {
            // New section comes after old, so if we get in here it's a new
            // section that doesn't end in a newline. Remove its trailing
            // newline.
            assert!(self.new_text.pop().unwrap() == '\n');
            return;
        }

        if !self.old_text.is_empty() {
            // Old text doesn't end in a newline, remove its trailing newline
            assert!(self.old_text.pop().unwrap() == '\n');
            return;
        }

        // It's a piece of unchanged text that doesn't end in a newline, just
        // consume the colorized marker as plain text
        self.consume_plain_line(&format!(
            "{}{}{}",
            NO_EOF_NEWLINE_COLOR, &NO_EOF_NEWLINE_MARKER, NORMAL
        ))
    }

    /// Highlight a "---" / "+++" file header line; a trailing tab-separated
    /// part (plain diff's timestamps) is rendered faint.
    pub fn consume_plusminus_header(&mut self, line: &str) {
        self.consume_plain_linepart(BOLD);

        if let Some(last_tab_index) = line.rfind('\t') {
            self.consume_plain_linepart(&line[..last_tab_index]);

            // When I ran plain "diff" (no git involved), this trailing part
            // contained very precise file timestamps. I don't think those
            // provide much value, so let's faint them out.
            self.consume_plain_linepart(FAINT);
            self.consume_plain_linepart(&line[last_tab_index..]);
        } else {
            self.consume_plain_linepart(line);
        }

        self.consume_plain_line(NORMAL);
    }

    /// Highlight an "@@ ... @@" hunk header; any trailing text after the
    /// second "@@" (usually the function name) is emphasized.
    fn consume_hunk_header(&mut self, line: &str) {
        self.consume_plain_linepart(HUNK_HEADER);

        if let Some(second_atat_index) = line.find(" @@ ") {
            // Highlight the function name
            self.consume_plain_linepart(FAINT);
            self.consume_plain_linepart(&line[..(second_atat_index + 4)]);
            self.consume_plain_linepart(BOLD);
            self.consume_plain_linepart(&line[(second_atat_index + 4)..]);
        } else {
            self.consume_plain_linepart(line);
        }

        self.consume_plain_line(NORMAL);
    }

    /// Remove everything matched by ANSI_COLOR_REGEX from `input`.
    fn without_ansi_escape_codes(input: &'_ str) -> std::borrow::Cow<'_, str> {
        ANSI_COLOR_REGEX.replace_all(input, "")
    }

    /// Classify one line of input and route it to the right consumer.
    pub fn consume_line(&mut self, line: String) {
        // Strip out incoming ANSI formatting. This enables us to highlight
        // already-colored input.
        let line = LineCollector::without_ansi_escape_codes(&line);

        if line.starts_with("diff") {
            self.diff_seen = true;
        }

        if let Some(fixed_highlight) = get_fixed_highlight(&line) {
            self.consume_plain_linepart(fixed_highlight);
            self.consume_plain_linepart(&line);
            self.consume_plain_line(NORMAL); // consume_plain_line() will add a linefeed to the output
            return;
        }

        if line.starts_with("commit") {
            self.consume_plain_line(&format_commit_line(&line, self.diff_seen));
            return;
        }

        if line.starts_with("---") || line.starts_with("+++") {
            self.consume_plusminus_header(&line);
            return;
        }

        if line.starts_with("@@ ") {
            self.consume_hunk_header(&line);
            return;
        }

        if line.is_empty() {
            self.consume_plain_line("");
            return;
        }

        if line.starts_with('-') {
            self.consume_old_line(&line);
            return;
        }

        if line.starts_with('+') {
            self.consume_new_line(&line);
            return;
        }

        if line == NO_EOF_NEWLINE_MARKER {
            self.consume_no_eof_newline_marker();
            return;
        }

        self.consume_plain_line(&line);
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[cfg(test)]
use pretty_assertions::assert_eq;
// Stripping must also remove non-SGR CSI sequences such as "\x1b[0K"
// (erase-in-line). This only passes if ANSI_COLOR_REGEX matches general
// CSI sequences rather than just color sequences ending in "m".
#[test]
fn test_non_sgr() {
assert_eq!(
LineCollector::without_ansi_escape_codes("hel\x1b[0Klo"),
"hello"
);
}
// Stripping a standard SGR color sequence ("\x1b[33m" = yellow).
#[test]
fn test_sgr() {
assert_eq!(
LineCollector::without_ansi_escape_codes("hel\x1b[33mlo"),
"hello"
);
}
}
|
use std::collections::HashMap;
use std::fmt;
/// Data associated with a single metadata block.
#[derive(Debug)]
pub struct Metadata {
    /// Marks whether the current metadata block is the last.
    is_last: bool,
    /// The length, in bytes, of the block being parsed. This does not include
    /// the metadata block header.
    length: u32,
    /// Block data containing one of the eight different types of metadata.
    pub data: Data,
}

impl Metadata {
    /// Construct a new `Metadata` block.
    pub fn new(is_last: bool, length: u32, data: Data) -> Self {
        // Field-init shorthand instead of the redundant `field: field` form.
        Metadata {
            is_last,
            length,
            data,
        }
    }

    /// Return whether the current metadata block is the last. Without this
    /// accessor the private `is_last` flag is unreadable from outside.
    #[inline]
    pub fn is_last(&self) -> bool {
        self.is_last
    }
}
/// General enum that holds all the different metadata block data.
#[derive(Debug, PartialEq, Eq)]
pub enum Data {
/// Information regarding the entire audio stream.
StreamInfo(StreamInfo),
/// Block that represents a number of padded bytes.
Padding(u32),
/// Data used by third-party applications.
Application(Application),
/// Table of multiple points to seek, or skip, to within the FLAC file.
SeekTable(Vec<SeekPoint>),
/// Stores human-readable name/value pairs.
VorbisComment(VorbisComment),
/// Stores cue information.
CueSheet(CueSheet),
/// Stores pictures associated with the FLAC file.
Picture(Picture),
/// A type of block data that isn't known, or doesn't match the types above.
Unknown(Vec<u8>),
}
/// Information regarding the entire audio stream.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct StreamInfo {
/// Minimum block size, in samples, used in the stream.
pub min_block_size: u16,
/// Maximum block size, in samples, used in the stream.
pub max_block_size: u16,
/// Minimum frame size, in bytes, used in the stream. May be zero to imply
/// the value isn't known.
pub min_frame_size: u32,
/// Maximum frame size, in bytes, used in the stream. May be zero to imply
/// the value isn't known.
pub max_frame_size: u32,
/// Sample rate in hertz (Hz).
pub sample_rate: u32,
/// Number of channels. FLAC supports one to eight channels.
pub channels: u8,
/// Bits per sample. FLAC supports four to thirty-two bits per sample.
pub bits_per_sample: u8,
/// Total samples in the stream. A value of zero means the number is
/// unknown.
pub total_samples: u64,
/// MD5 signature of the unencoded audio data.
pub md5_sum: [u8; 16],
}
impl StreamInfo {
pub fn new() -> StreamInfo {
StreamInfo {
min_block_size: 0,
max_block_size: 0,
min_frame_size: 0,
max_frame_size: 0,
sample_rate: 0,
channels: 0,
bits_per_sample: 0,
total_samples: 0,
md5_sum: [0; 16],
}
}
#[inline]
pub fn is_varied_block_size(&self) -> bool {
self.min_block_size != self.max_block_size
}
#[inline]
pub fn is_fixed_block_size(&self) -> bool {
self.min_block_size == self.max_block_size
}
}
/// Data used by third-party applications.
#[derive(Debug, PartialEq, Eq)]
pub struct Application {
/// Registered application ID.
pub id: String,
/// Data used by the third-party application.
pub data: Vec<u8>,
}
/// Seek, or skip, to a point within the FLAC file.
#[derive(Debug, PartialEq, Eq)]
pub struct SeekPoint {
/// Sample number of the first sample in the target frame.
pub sample_number: u64,
/// Byte offset of the target frame's header.
pub stream_offset: u64,
/// Number of samples in the target frame.
pub frame_samples: u16,
}
/// Stores human-readable name/value pairs.
#[derive(Debug, PartialEq, Eq)]
pub struct VorbisComment {
/// Vendor name.
pub vendor_string: String,
/// Comments associated with a name, or category, followed by its
/// contents.
pub comments: HashMap<String, String>,
}
/// Stores cue information.
///
/// Generally for storing information from Compact Disk Digital Audio, but
/// can be used as a cueing mechanism for playback.
#[derive(Debug, PartialEq, Eq)]
pub struct CueSheet {
/// Media catalog number.
pub media_catalog_number: String,
/// Number of lead-in samples.
pub lead_in: u64,
/// Whether or not this `CueSheet` corresponds to a Compact Disc.
pub is_cd: bool,
/// One or more tracks.
pub tracks: Vec<CueSheetTrack>,
}
/// Track information inside a cue sheet.
#[derive(Debug, PartialEq, Eq)]
pub struct CueSheetTrack {
/// Track offset, in samples, relative to the beginning of the FLAC audio
/// stream.
pub offset: u64,
/// Track number.
pub number: u8,
/// Twelve digit alphanumeric code (ISRC).
pub isrc: String,
/// Whether the cue sheet track is audio.
pub is_audio: bool,
/// Whether the cue sheet track is pre-emphasis.
pub is_pre_emphasis: bool,
/// Index points, for all tracks except the lead-out track.
pub indices: Vec<CueSheetTrackIndex>,
}
/// An index point within a track, inside of a cue sheet.
#[derive(Debug, PartialEq, Eq)]
pub struct CueSheetTrackIndex {
/// Offset, in samples, relative to the track offset of the index point.
pub offset: u64,
/// Index point number.
pub number: u8,
}
/// Stores pictures associated with the FLAC file.
///
/// More than likely these pictures will be cover art, but you can have more
/// than one within a file, which are distinguished by `PictureType` and its
/// MIME type string.
#[derive(Debug, PartialEq, Eq)]
pub struct Picture {
/// Picture type, based on the ID3v2 APIC frame.
pub picture_type: PictureType,
/// Multipurpose Internet Mail Extensions (MIME) type.
pub mime_type: String,
/// A string describing the picture.
pub description: String,
/// Width of the picture in pixels.
pub width: u32,
/// Height of the picture in pixels.
pub height: u32,
/// Color depth of the picture in bits-per-pixel.
pub depth: u32,
/// Number of colors used.
pub colors: u32,
/// Binary picture data.
pub data: Vec<u8>,
}
/// The picture type according to the ID3v2 attached picture frame.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum PictureType {
/// Other picture type not categorized in this enum.
Other,
/// 32x32 pixels 'file icon'.
FileIconStandard,
/// Other, or non-standard, file icon.
FileIcon,
/// Cover (front).
FrontCover,
/// Cover (back).
BackCover,
/// Leaflet page.
LeafletPage,
/// Media, like label side of a CD.
Media,
/// Lead artist, lead performer, or soloist.
LeadArtist,
/// Artist or performer.
Artist,
/// Conductor.
Conductor,
/// Band or orchestra.
Band,
/// Composer.
Composer,
/// Lyricist or text writer.
Lyricist,
/// Recording location.
RecordingLocation,
/// During recording.
DuringRecording,
/// During performance.
// NOTE(review): variant name is misspelled ("Performace"); renaming it
// would break the public API, so the typo is documented here instead.
DuringPerformace,
/// Movie, or video, screen capture.
VideoScreenCapture,
/// A bright colored fish.
Fish,
/// Illustration.
Illustration,
/// Band, or artist, logotype.
BandLogo,
/// Publisher, or studio, logotype.
PublisherLogo,
}
impl fmt::Display for PictureType {
    /// Write the human-readable name of the picture type.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(formatter, "{}", match *self {
            PictureType::Other => "Other",
            PictureType::FileIconStandard => "File Icon (standard)",
            PictureType::FileIcon => "File Icon",
            PictureType::FrontCover => "Cover (front)",
            PictureType::BackCover => "Cover (back)",
            PictureType::LeafletPage => "Leaflet Page",
            PictureType::Media => "Media",
            PictureType::LeadArtist => "Lead Artist",
            // Fixed typo: was "Arist"
            PictureType::Artist => "Artist",
            PictureType::Conductor => "Conductor",
            PictureType::Band => "Band",
            PictureType::Composer => "Composer",
            PictureType::Lyricist => "Lyricist",
            // Fixed typo: was "Recoding Location"
            PictureType::RecordingLocation => "Recording Location",
            PictureType::DuringRecording => "During Recording",
            // The variant name keeps its historical misspelling (public API),
            // but the displayed text is now spelled correctly.
            PictureType::DuringPerformace => "During Performance",
            PictureType::VideoScreenCapture => "Video Screen Capture",
            PictureType::Fish => "Fish",
            PictureType::Illustration => "Illustration",
            PictureType::BandLogo => "Band Logo",
            PictureType::PublisherLogo => "Publisher Logo",
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Differing min/max block sizes mean a varied block size.
    #[test]
    fn test_is_varied_block_size() {
        let mut stream_info = StreamInfo::new();

        stream_info.min_block_size = 512;
        stream_info.max_block_size = 1024;
        assert!(stream_info.is_varied_block_size());

        stream_info.min_block_size = 2048;
        stream_info.max_block_size = 2048;
        assert!(!stream_info.is_varied_block_size());
    }

    /// Equal min/max block sizes mean a fixed block size.
    #[test]
    fn test_is_fixed_block_size() {
        let mut stream_info = StreamInfo::new();

        stream_info.min_block_size = 512;
        stream_info.max_block_size = 512;
        assert!(stream_info.is_fixed_block_size());

        stream_info.min_block_size = 1024;
        stream_info.max_block_size = 2048;
        assert!(!stream_info.is_fixed_block_size());
    }
}
Add `Metadata::is_last` method
use std::collections::HashMap;
use std::fmt;
/// Data associated with a single metadata block.
#[derive(Debug)]
pub struct Metadata {
    /// Marks whether the current metadata block is the last.
    is_last: bool,
    /// The length, in bytes, of the block being parsed. This does not include
    /// the metadata block header.
    length: u32,
    /// Block data containing one of the eight different types of metadata.
    pub data: Data,
}

impl Metadata {
    /// Construct a new `Metadata` block.
    pub fn new(is_last: bool, length: u32, data: Data) -> Self {
        // Field-init shorthand instead of the redundant `field: field` form.
        Metadata {
            is_last,
            length,
            data,
        }
    }

    /// Return whether the current metadata block is the last.
    #[inline]
    pub fn is_last(&self) -> bool {
        self.is_last
    }
}
/// General enum that holds all the different metadata block data.
#[derive(Debug, PartialEq, Eq)]
pub enum Data {
/// Information regarding the entire audio stream.
StreamInfo(StreamInfo),
/// Block that represents a number of padded bytes.
Padding(u32),
/// Data used by third-party applications.
Application(Application),
/// Table of multiple points to seek, or skip, to within the FLAC file.
SeekTable(Vec<SeekPoint>),
/// Stores human-readable name/value pairs.
VorbisComment(VorbisComment),
/// Stores cue information.
CueSheet(CueSheet),
/// Stores pictures associated with the FLAC file.
Picture(Picture),
/// A type of block data that isn't known, or doesn't match the types above.
Unknown(Vec<u8>),
}
/// Information regarding the entire audio stream.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct StreamInfo {
/// Minimum block size, in samples, used in the stream.
pub min_block_size: u16,
/// Maximum block size, in samples, used in the stream.
pub max_block_size: u16,
/// Minimum frame size, in bytes, used in the stream. May be zero to imply
/// the value isn't known.
pub min_frame_size: u32,
/// Maximum frame size, in bytes, used in the stream. May be zero to imply
/// the value isn't known.
pub max_frame_size: u32,
/// Sample rate in hertz (Hz).
pub sample_rate: u32,
/// Number of channels. FLAC supports one to eight channels.
pub channels: u8,
/// Bits per sample. FLAC supports four to thirty-two bits per sample.
pub bits_per_sample: u8,
/// Total samples in the stream. A value of zero means the number is
/// unknown.
pub total_samples: u64,
/// MD5 signature of the unencoded audio data.
pub md5_sum: [u8; 16],
}
impl StreamInfo {
pub fn new() -> StreamInfo {
StreamInfo {
min_block_size: 0,
max_block_size: 0,
min_frame_size: 0,
max_frame_size: 0,
sample_rate: 0,
channels: 0,
bits_per_sample: 0,
total_samples: 0,
md5_sum: [0; 16],
}
}
#[inline]
pub fn is_varied_block_size(&self) -> bool {
self.min_block_size != self.max_block_size
}
#[inline]
pub fn is_fixed_block_size(&self) -> bool {
self.min_block_size == self.max_block_size
}
}
/// Data used by third-party applications.
#[derive(Debug, PartialEq, Eq)]
pub struct Application {
/// Registered application ID.
pub id: String,
/// Data used by the third-party application.
pub data: Vec<u8>,
}
/// Seek, or skip, to a point within the FLAC file.
#[derive(Debug, PartialEq, Eq)]
pub struct SeekPoint {
/// Sample number of the first sample in the target frame.
pub sample_number: u64,
/// Byte offset of the target frame's header.
pub stream_offset: u64,
/// Number of samples in the target frame.
pub frame_samples: u16,
}
/// Stores human-readable name/value pairs.
#[derive(Debug, PartialEq, Eq)]
pub struct VorbisComment {
/// Vendor name.
pub vendor_string: String,
/// Comments associated with a name, or category, followed by its
/// contents.
pub comments: HashMap<String, String>,
}
/// Stores cue information.
///
/// Generally for storing information from Compact Disk Digital Audio, but
/// can be used as a cueing mechanism for playback.
#[derive(Debug, PartialEq, Eq)]
pub struct CueSheet {
/// Media catalog number.
pub media_catalog_number: String,
/// Number of lead-in samples.
pub lead_in: u64,
/// Whether or not this `CueSheet` corresponds to a Compact Disc.
pub is_cd: bool,
/// One or more tracks.
pub tracks: Vec<CueSheetTrack>,
}
/// Track information inside a cue sheet.
#[derive(Debug, PartialEq, Eq)]
pub struct CueSheetTrack {
/// Track offset, in samples, relative to the beginning of the FLAC audio
/// stream.
pub offset: u64,
/// Track number.
pub number: u8,
/// Twelve digit alphanumeric code (ISRC).
pub isrc: String,
/// Whether the cue sheet track is audio.
pub is_audio: bool,
/// Whether the cue sheet track is pre-emphasis.
pub is_pre_emphasis: bool,
/// Index points, for all tracks except the lead-out track.
pub indices: Vec<CueSheetTrackIndex>,
}
/// An index point within a track, inside of a cue sheet.
#[derive(Debug, PartialEq, Eq)]
pub struct CueSheetTrackIndex {
/// Offset, in samples, relative to the track offset of the index point.
pub offset: u64,
/// Index point number.
pub number: u8,
}
/// Stores pictures associated with the FLAC file.
///
/// More than likely these pictures will be cover art, but you can have more
/// than one within a file, which are distinguished by `PictureType` and its
/// MIME type string.
#[derive(Debug, PartialEq, Eq)]
pub struct Picture {
/// Picture type, based on the ID3v2 APIC frame.
pub picture_type: PictureType,
/// Multipurpose Internet Mail Extensions (MIME) type.
pub mime_type: String,
/// A string describing the picture.
pub description: String,
/// Width of the picture in pixels.
pub width: u32,
/// Height of the picture in pixels.
pub height: u32,
/// Color depth of the picture in bits-per-pixel.
pub depth: u32,
/// Number of colors used.
pub colors: u32,
/// Binary picture data.
pub data: Vec<u8>,
}
/// The picture type according to the ID3v2 attached picture frame.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum PictureType {
    /// Other picture type not categorized in this enum.
    Other,
    /// 32x32 pixels 'file icon'.
    FileIconStandard,
    /// Other, or non-standard, file icon.
    FileIcon,
    /// Cover (front).
    FrontCover,
    /// Cover (back).
    BackCover,
    /// Leaflet page.
    LeafletPage,
    /// Media, like label side of a CD.
    Media,
    /// Lead artist, lead performer, or soloist.
    LeadArtist,
    /// Artist or performer.
    Artist,
    /// Conductor.
    Conductor,
    /// Band or orchestra.
    Band,
    /// Composer.
    Composer,
    /// Lyricist or text writer.
    Lyricist,
    /// Recording location.
    RecordingLocation,
    /// During recording.
    DuringRecording,
    /// During performance.
    // NOTE(review): variant name is misspelled ("Performace"), but it is
    // public API; renaming it would break downstream callers.
    DuringPerformace,
    /// Movie, or video, screen capture.
    VideoScreenCapture,
    /// A bright colored fish.
    Fish,
    /// Illustration.
    Illustration,
    /// Band, or artist, logotype.
    BandLogo,
    /// Publisher, or studio, logotype.
    PublisherLogo,
}
impl fmt::Display for PictureType {
    /// Formats the picture type as a human-readable English name.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(formatter, "{}", match *self {
            PictureType::Other => "Other",
            PictureType::FileIconStandard => "File Icon (standard)",
            PictureType::FileIcon => "File Icon",
            PictureType::FrontCover => "Cover (front)",
            PictureType::BackCover => "Cover (back)",
            PictureType::LeafletPage => "Leaflet Page",
            PictureType::Media => "Media",
            PictureType::LeadArtist => "Lead Artist",
            // Fixed typo: was "Arist".
            PictureType::Artist => "Artist",
            PictureType::Conductor => "Conductor",
            PictureType::Band => "Band",
            PictureType::Composer => "Composer",
            PictureType::Lyricist => "Lyricist",
            // Fixed typo: was "Recoding Location".
            PictureType::RecordingLocation => "Recording Location",
            PictureType::DuringRecording => "During Recording",
            // The variant keeps its historical (misspelled) name for API
            // compatibility, but the displayed text is corrected
            // (was "During Performace").
            PictureType::DuringPerformace => "During Performance",
            PictureType::VideoScreenCapture => "Video Screen Capture",
            PictureType::Fish => "Fish",
            PictureType::Illustration => "Illustration",
            PictureType::BandLogo => "Band Logo",
            PictureType::PublisherLogo => "Publisher Logo",
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): `StreamInfo` is defined elsewhere in this module; these
    // tests rely on its `new`, `is_varied_block_size`, and
    // `is_fixed_block_size` items.
    #[test]
    fn test_is_varied_block_size() {
        // Differing min/max block sizes => varied.
        let mut info = StreamInfo::new();
        info.min_block_size = 512;
        info.max_block_size = 1024;
        assert!(info.is_varied_block_size());
        // Equal min/max block sizes => not varied.
        info.min_block_size = 2048;
        info.max_block_size = 2048;
        assert!(!info.is_varied_block_size());
    }

    #[test]
    fn test_is_fixed_block_size() {
        // Equal min/max block sizes => fixed.
        let mut info = StreamInfo::new();
        info.min_block_size = 512;
        info.max_block_size = 512;
        assert!(info.is_fixed_block_size());
        // Differing min/max block sizes => not fixed.
        info.min_block_size = 1024;
        info.max_block_size = 2048;
        assert!(!info.is_fixed_block_size());
    }
}
|
extern crate libc;
use self::libc::{c_double, c_int, size_t};
use super::{Function, Parameterised, Linear, VFunction, QFunction};
use ndarray::{Axis, Array1};
use geometry::{Space, RegularSpace};
use geometry::dimensions::Continuous;
// FFI binding to the C tile-coding routine (presumably Sutton's `tiles.c`,
// linked as a static library — TODO confirm which implementation is vendored).
#[link(name="tiles", kind="static")]
extern {
    // Writes `nt` tile indices into `tile_indices` for the given float and
    // int inputs; `memory` bounds the produced index range. The caller must
    // supply a buffer of at least `nt` writable `size_t`s, and `floats`/`ints`
    // must be valid for `nf`/`ni` reads respectively.
    fn tiles(tile_indices: *mut size_t, nt: c_int, memory: c_int,
             floats: *const c_double, nf: c_int,
             ints: *const c_int, ni: c_int);
}
/// Tile-coding function approximator backed by the C `tiles` routine.
pub struct SuttonTiles {
    // One weight per memory cell; active tiles index into this table.
    weights: Array1<c_double>,
    // Number of output values produced by vector evaluation.
    n_outputs: usize,
    // Number of tilings; also the number of indices returned per query.
    n_tilings: i32,
    // Size of the weight table, passed to the C routine as `memory`.
    memory_size: i32,
    // Scratch buffer for the single int input (output/action index).
    int_array: [c_int; 1]
}
impl SuttonTiles {
    /// Creates a new approximator with `n_tilings` tilings over a weight
    /// table of `memory_size` entries, producing `n_outputs` values.
    pub fn new(n_tilings: i32, memory_size: i32, n_outputs: usize) -> Self {
        SuttonTiles {
            weights: Array1::<c_double>::zeros(memory_size as usize),
            n_outputs: n_outputs,
            n_tilings: n_tilings,
            memory_size: memory_size,
            int_array: [0],
        }
    }

    /// Computes the active tile indices for the given inputs via FFI.
    fn load_tiles(&self, floats: &[c_double], ints: &[c_int]) -> Vec<size_t> {
        let mut ti = vec![0; self.n_tilings as usize];
        // SAFETY: `ti` holds exactly `n_tilings` elements, matching the `nt`
        // argument, and both input pointers are paired with their lengths.
        unsafe {
            tiles(ti.as_mut_ptr(), self.n_tilings, self.memory_size,
                floats.as_ptr(), floats.len() as c_int,
                ints.as_ptr(), ints.len() as c_int);
        }
        ti
    }

    /// Mean weight over the tiles active for `input` and output `index`.
    fn evaluate_index(&self, input: &Vec<f64>, index: c_int) -> f64 {
        let tiles = self.load_tiles(input, &[index]);
        self.weights.select(Axis(0), tiles.as_slice()).scalar_sum() / self.n_tilings as f64
    }
}
// Scalar evaluation: single-output view, using output index 0.
impl Function<Vec<f64>, f64> for SuttonTiles {
    fn evaluate(&self, input: &Vec<f64>) -> f64 {
        self.evaluate_index(input, 0)
    }
}

// Vector evaluation: one value per output index.
impl Function<Vec<f64>, Vec<f64>> for SuttonTiles {
    fn evaluate(&self, input: &Vec<f64>) -> Vec<f64> {
        (0..self.n_outputs).map(|c| self.evaluate_index(input, c as c_int)).collect()
    }
}

// Scalar update: applies `error` to every tile active for output 0.
impl Parameterised<Vec<f64>, f64> for SuttonTiles {
    fn update(&mut self, input: &Vec<f64>, error: f64) {
        self.int_array[0] = 0;
        for t in self.load_tiles(input, &self.int_array) {
            self.weights[t] += error;
        }
    }
}

// Vector update: routes each per-output error through `update_action`.
// NOTE(review): panics if `errors.len() < n_outputs`.
impl Parameterised<Vec<f64>, Vec<f64>> for SuttonTiles {
    fn update(&mut self, input: &Vec<f64>, errors: Vec<f64>) {
        for c in 0..self.n_outputs {
            <Self as QFunction<RegularSpace<Continuous>>>::update_action(self, input, c, errors[c]);
        }
    }
}
// TODO: Implement Linear - problem is that phi will be a function of the state
// and action for this implementation of tile coding.

// State-value function; the empty impl relies on the trait's defaults.
impl VFunction<RegularSpace<Continuous>> for SuttonTiles {}

// Action-value function: the action index is passed to the tiling code as an
// extra integer input, so each action hashes to its own set of tiles.
impl QFunction<RegularSpace<Continuous>> for SuttonTiles
{
    fn evaluate_action(&self, input: &Vec<f64>, action: usize) -> f64 {
        self.evaluate_index(input, action as c_int)
    }

    fn update_action(&mut self, input: &Vec<f64>, action: usize, error: f64) {
        self.int_array[0] = action as c_int;
        for t in self.load_tiles(input, &self.int_array) {
            self.weights[t] += error;
        }
    }
}
#[cfg(test)]
mod tests {
    use super::SuttonTiles;
    use fa::{Function, Parameterised};
    use geometry::RegularSpace;
    use geometry::dimensions::Partition;

    #[test]
    fn test_simple() {
        let mut t = SuttonTiles::new(1, 1000, 1);

        // With a single tiling, one update writes the full error into the
        // active tile, so evaluation reproduces it exactly.
        t.update(&vec![1.5], 25.5);
        let out: f64 = t.evaluate(&vec![1.5]);
        assert_eq!(out, 25.5);

        // Updates accumulate: 25.5 - 12.75 = 12.75.
        t.update(&vec![1.5], -12.75);
        let out: f64 = t.evaluate(&vec![1.5]);
        assert_eq!(out, 12.75);
    }
}
Added a test for unit generalisation in SuttonTiles.
extern crate libc;
use self::libc::{c_double, c_int, size_t};
use super::{Function, Parameterised, Linear, VFunction, QFunction};
use ndarray::{Axis, Array1};
use geometry::{Space, RegularSpace};
use geometry::dimensions::Continuous;
// FFI binding to the C tile-coding routine (presumably Sutton's `tiles.c`,
// linked as a static library — TODO confirm which implementation is vendored).
#[link(name="tiles", kind="static")]
extern {
    // Writes `nt` tile indices into `tile_indices` for the given float and
    // int inputs; `memory` bounds the produced index range. The caller must
    // supply a buffer of at least `nt` writable `size_t`s, and `floats`/`ints`
    // must be valid for `nf`/`ni` reads respectively.
    fn tiles(tile_indices: *mut size_t, nt: c_int, memory: c_int,
             floats: *const c_double, nf: c_int,
             ints: *const c_int, ni: c_int);
}
/// Tile-coding function approximator backed by the C `tiles` routine.
pub struct SuttonTiles {
    // One weight per memory cell; active tiles index into this table.
    weights: Array1<c_double>,
    // Number of output values produced by vector evaluation.
    n_outputs: usize,
    // Number of tilings; also the number of indices returned per query.
    n_tilings: i32,
    // Size of the weight table, passed to the C routine as `memory`.
    memory_size: i32,
    // Scratch buffer for the single int input (output/action index).
    int_array: [c_int; 1]
}
impl SuttonTiles {
    /// Creates a new approximator with `n_tilings` tilings over a weight
    /// table of `memory_size` entries, producing `n_outputs` values.
    pub fn new(n_tilings: i32, memory_size: i32, n_outputs: usize) -> Self {
        SuttonTiles {
            weights: Array1::<c_double>::zeros(memory_size as usize),
            n_outputs: n_outputs,
            n_tilings: n_tilings,
            memory_size: memory_size,
            int_array: [0],
        }
    }

    /// Computes the active tile indices for the given inputs via FFI.
    fn load_tiles(&self, floats: &[c_double], ints: &[c_int]) -> Vec<size_t> {
        let mut ti = vec![0; self.n_tilings as usize];
        // SAFETY: `ti` holds exactly `n_tilings` elements, matching the `nt`
        // argument, and both input pointers are paired with their lengths.
        unsafe {
            tiles(ti.as_mut_ptr(), self.n_tilings, self.memory_size,
                floats.as_ptr(), floats.len() as c_int,
                ints.as_ptr(), ints.len() as c_int);
        }
        ti
    }

    /// Mean weight over the tiles active for `input` and output `index`.
    fn evaluate_index(&self, input: &Vec<f64>, index: c_int) -> f64 {
        let tiles = self.load_tiles(input, &[index]);
        self.weights.select(Axis(0), tiles.as_slice()).scalar_sum() / self.n_tilings as f64
    }
}
// Scalar evaluation: single-output view, using output index 0.
impl Function<Vec<f64>, f64> for SuttonTiles {
    fn evaluate(&self, input: &Vec<f64>) -> f64 {
        self.evaluate_index(input, 0)
    }
}

// Vector evaluation: one value per output index.
impl Function<Vec<f64>, Vec<f64>> for SuttonTiles {
    fn evaluate(&self, input: &Vec<f64>) -> Vec<f64> {
        (0..self.n_outputs).map(|c| self.evaluate_index(input, c as c_int)).collect()
    }
}

// Scalar update: applies `error` to every tile active for output 0.
impl Parameterised<Vec<f64>, f64> for SuttonTiles {
    fn update(&mut self, input: &Vec<f64>, error: f64) {
        self.int_array[0] = 0;
        for t in self.load_tiles(input, &self.int_array) {
            self.weights[t] += error;
        }
    }
}

// Vector update: routes each per-output error through `update_action`.
// NOTE(review): panics if `errors.len() < n_outputs`.
impl Parameterised<Vec<f64>, Vec<f64>> for SuttonTiles {
    fn update(&mut self, input: &Vec<f64>, errors: Vec<f64>) {
        for c in 0..self.n_outputs {
            <Self as QFunction<RegularSpace<Continuous>>>::update_action(self, input, c, errors[c]);
        }
    }
}
// TODO: Implement Linear - problem is that phi will be a function of the state
// and action for this implementation of tile coding.

// State-value function; the empty impl relies on the trait's defaults.
impl VFunction<RegularSpace<Continuous>> for SuttonTiles {}

// Action-value function: the action index is passed to the tiling code as an
// extra integer input, so each action hashes to its own set of tiles.
impl QFunction<RegularSpace<Continuous>> for SuttonTiles
{
    fn evaluate_action(&self, input: &Vec<f64>, action: usize) -> f64 {
        self.evaluate_index(input, action as c_int)
    }

    fn update_action(&mut self, input: &Vec<f64>, action: usize, error: f64) {
        self.int_array[0] = action as c_int;
        for t in self.load_tiles(input, &self.int_array) {
            self.weights[t] += error;
        }
    }
}
#[cfg(test)]
mod tests {
    use super::SuttonTiles;
    use fa::{Function, Parameterised};
    use geometry::RegularSpace;
    use geometry::dimensions::Partition;

    #[test]
    fn test_simple() {
        let mut t = SuttonTiles::new(1, 1000, 1);

        // With a single tiling, one update writes the full error into the
        // active tile, so evaluation reproduces it exactly.
        t.update(&vec![1.5], 25.5);
        let out: f64 = t.evaluate(&vec![1.5]);
        assert_eq!(out, 25.5);

        // Updates accumulate: 25.5 - 12.75 = 12.75.
        t.update(&vec![1.5], -12.75);
        let out: f64 = t.evaluate(&vec![1.5]);
        assert_eq!(out, 12.75);
    }

    #[test]
    fn test_generalisation() {
        let mut t = SuttonTiles::new(1, 1000, 1);
        t.update(&vec![0.5], 1.0);

        // The test expects every input in (0, 1) to activate the same tile
        // as 0.5, and so see the trained value...
        for i in 1..10 {
            let out: f64 = t.evaluate(&vec![i as f64 / 10.0]);
            assert_eq!(out, 1.0);
        }

        // ...while inputs just outside the unit interval fall into
        // different, untrained tiles.
        let out: f64 = t.evaluate(&vec![-0.000001]);
        assert_eq!(out, 0.0);
        let out: f64 = t.evaluate(&vec![1.000001]);
        assert_eq!(out, 0.0);
    }
}
|
//! Match pairs of records based on a key.
//!
//! The various `join` implementations require that the units of each collection can be multiplied, and that
//! the multiplication distributes over addition. That is, we will repeatedly evaluate (a + b) * c as (a * c)
//! + (b * c), and if this is not equal to the former term, little is known about the actual output.
use std::fmt::Debug;
use std::ops::Mul;
use timely::progress::Timestamp;
use timely::dataflow::Scope;
use timely::dataflow::operators::Binary;
use timely::dataflow::channels::pact::Pipeline;
use timely::dataflow::operators::Capability;
use timely_sort::Unsigned;
use timely::dataflow::operators::OutputHandle;
use timely::dataflow::channels::pushers::tee::Tee;
use hashable::{Hashable, UnsignedWrapper};
use ::{Data, Diff, Collection, AsCollection};
use lattice::Lattice;
use operators::arrange::{Arrange, Arranged, ArrangeByKey, ArrangeBySelf};
use trace::{BatchReader, Cursor, Trace, consolidate};
use operators::ValueHistory2;
// use trace::implementations::hash::HashValSpine as DefaultValTrace;
// use trace::implementations::hash::HashKeySpine as DefaultKeyTrace;
use trace::implementations::ord::OrdValSpine as DefaultValTrace;
use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace;
use trace::TraceReader;
/// Join implementations for `(key,val)` data.
pub trait Join<G: Scope, K: Data, V: Data, R: Diff> {
    /// Matches pairs `(key,val1)` and `(key,val2)` based on `key` and then applies a function.
    ///
    /// # Examples
    /// ```ignore
    /// extern crate timely;
    /// use timely::dataflow::operators::{ToStream, Capture};
    /// use timely::dataflow::operators::capture::Extract;
    /// use differential_dataflow::operators::Join;
    ///
    /// let data = timely::example(|scope| {
    ///     let col1 = vec![((0,0),1),((1,2),1)].into_iter().to_stream(scope);
    ///     let col2 = vec![((0,'a'),1),((1,'B'),1)].into_iter().to_stream(scope);
    ///
    ///     // should produce records `(0 + 0,'a')` and `(1 + 2,'B')`.
    ///     col1.join_map(&col2, |k,v1,v2| (*k + *v1, *v2)).capture();
    /// });
    ///
    /// let extracted = data.extract();
    /// assert_eq!(extracted.len(), 1);
    /// assert_eq!(extracted[0].1, vec![((0,'a'),1), ((3,'B'),1)]);
    /// ```
    fn join<V2: Data, R2: Diff>(&self, other: &Collection<G, (K,V2), R2>) -> Collection<G, (K,V,V2), <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff
    {
        // Default implementation: pair up the key and both values.
        self.join_map(other, |k,v,v2| (k.clone(),v.clone(),v2.clone()))
    }
    /// Like `join`, but with a randomly distributed unsigned key.
    fn join_u<V2: Data, R2: Diff>(&self, other: &Collection<G, (K,V2), R2>) -> Collection<G, (K,V,V2), <R as Mul<R2>>::Output>
    where K: Unsigned+Copy, R: Mul<R2>, <R as Mul<R2>>::Output: Diff {
        self.join_map_u(other, |k,v,v2| (k.clone(),v.clone(),v2.clone()))
    }
    /// Matches pairs `(key,val1)` and `(key,val2)` based on `key` and then applies a function.
    ///
    /// # Examples
    /// ```ignore
    /// extern crate timely;
    /// use timely::dataflow::operators::{ToStream, Capture};
    /// use timely::dataflow::operators::capture::Extract;
    /// use differential_dataflow::operators::Join;
    ///
    /// let data = timely::example(|scope| {
    ///     let col1 = vec![((0,0),1),((1,2),1)].into_iter().to_stream(scope);
    ///     let col2 = vec![((0,'a'),1),((1,'B'),1)].into_iter().to_stream(scope);
    ///
    ///     // should produce records `(0 + 0,'a')` and `(1 + 2,'B')`.
    ///     col1.join_map(&col2, |k,v1,v2| (*k + *v1, *v2)).capture();
    /// });
    ///
    /// let extracted = data.extract();
    /// assert_eq!(extracted.len(), 1);
    /// assert_eq!(extracted[0].1, vec![((0,'a'),1), ((3,'B'),1)]);
    /// ```
    fn join_map<V2, R2: Diff, D, L>(&self, other: &Collection<G, (K,V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where V2: Data, R: Mul<R2>, <R as Mul<R2>>::Output: Diff, D: Data, L: Fn(&K, &V, &V2)->D+'static;
    /// Like `join_map`, but with a randomly distributed unsigned key.
    fn join_map_u<V2, R2: Diff, D, L>(&self, other: &Collection<G, (K,V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where K: Unsigned+Copy, R: Mul<R2>, <R as Mul<R2>>::Output: Diff, V2: Data, D: Data, L: Fn(&K, &V, &V2)->D+'static;
    /// Matches pairs `(key,val1)` and `key` based on `key`, filtering the first collection by values present in the second.
    ///
    /// # Examples
    /// ```ignore
    /// extern crate timely;
    /// use timely::dataflow::operators::{ToStream, Capture};
    /// use timely::dataflow::operators::capture::Extract;
    /// use differential_dataflow::operators::Join;
    ///
    /// let data = timely::example(|scope| {
    ///     let col1 = vec![((0,0),1),((1,2),1)].into_iter().to_stream(scope);
    ///     let col2 = vec![(0,1)].into_iter().to_stream(scope);
    ///
    ///     // should retain record `(0,0)` and discard `(1,2)`.
    ///     col1.semijoin(&col2).capture();
    /// });
    ///
    /// let extracted = data.extract();
    /// assert_eq!(extracted.len(), 1);
    /// assert_eq!(extracted[0].1, vec![((0,0),1)]);
    /// ```
    fn semijoin<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where R2: Diff, R: Mul<R2>, <R as Mul<R2>>::Output: Diff;
    /// Like `semijoin`, but with a randomly distributed unsigned key.
    fn semijoin_u<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where K: Unsigned+Copy, R2: Diff, R: Mul<R2>, <R as Mul<R2>>::Output: Diff;
    /// Matches pairs `(key,val1)` and `key` based on `key`, discarding values
    /// in the first collection if their key is present in the second.
    ///
    /// # Examples
    /// ```ignore
    /// extern crate timely;
    /// use timely::dataflow::operators::{ToStream, Capture};
    /// use timely::dataflow::operators::capture::Extract;
    /// use differential_dataflow::operators::Join;
    ///
    /// let data = timely::example(|scope| {
    ///     let col1 = vec![((0,0),1),((1,2),1)].into_iter().to_stream(scope);
    ///     let col2 = vec![(0,1)].into_iter().to_stream(scope);
    ///
    ///     // should retain record `(1,2)` and discard `(0,0)`.
    ///     col1.antijoin(&col2).consolidate().capture();
    /// });
    ///
    /// let extracted = data.extract();
    /// assert_eq!(extracted.len(), 1);
    /// assert_eq!(extracted[0].1, vec![((1,2),1)]);
    /// ```
    fn antijoin<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where R2: Diff, R: Mul<R2, Output = R>;
    /// Like `antijoin`, but with a randomly distributed unsigned key.
    fn antijoin_u<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where K: Unsigned+Copy, R2: Diff, R: Mul<R2, Output=R>;
}
impl<G, K, V, R> Join<G, K, V, R> for Collection<G, (K, V), R>
where
    G: Scope,
    K: Data+Default+Hashable,
    V: Data,
    R: Diff,
    G::Timestamp: Lattice+Ord,
{
    fn join_map<V2: Data, R2: Diff, D: Data, L>(&self, other: &Collection<G, (K, V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff, L: Fn(&K, &V, &V2)->D+'static {
        // Arrange both inputs by hashed key, then defer to the
        // arrangement-based join.
        let arranged1 = self.arrange_by_key_hashed();
        let arranged2 = other.arrange_by_key_hashed();
        arranged1.join_arranged(&arranged2, move |k,v1,v2| logic(&k.item,v1,v2))
    }

    fn semijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff {
        // The second input carries only keys, arranged "by self".
        let arranged1 = self.arrange_by_key_hashed();
        let arranged2 = other.arrange_by_self();
        arranged1.join_arranged(&arranged2, |k,v,_| (k.item.clone(), v.clone()))
    }

    fn antijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where R: Mul<R2, Output=R> {
        // antijoin = input minus the semijoined (matching) records.
        self.concat(&self.semijoin(other).negate())
    }

    fn join_map_u<V2, R2, D, L>(&self, other: &Collection<G, (K, V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where K: Unsigned+Copy, V2: Data, R2: Diff, R: Mul<R2>, <R as Mul<R2>>::Output: Diff, D: Data, L: Fn(&K, &V, &V2)->D+'static {
        // Unsigned keys skip hashing: wrap them and arrange directly.
        let arranged1 = self.map(|(k,v)| (UnsignedWrapper::from(k), v))
                            .arrange(DefaultValTrace::new());
        let arranged2 = other.map(|(k,v)| (UnsignedWrapper::from(k), v))
                             .arrange(DefaultValTrace::new());
        arranged1.join_arranged(&arranged2, move |k,v1,v2| logic(&k.item,v1,v2))
    }

    fn semijoin_u<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where K: Unsigned+Copy, R2: Diff, R: Mul<R2>, <R as Mul<R2>>::Output: Diff {
        let arranged1 = self.map(|(k,v)| (UnsignedWrapper::from(k), v))
                            .arrange(DefaultValTrace::new());
        let arranged2 = other.map(|k| (UnsignedWrapper::from(k), ()))
                             .arrange(DefaultKeyTrace::new());
        arranged1.join_arranged(&arranged2, |k,v,_| (k.item.clone(), v.clone()))
    }

    fn antijoin_u<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where K: Unsigned+Copy, R2: Diff, R: Mul<R2, Output=R> {
        // Fixed: previously called `semijoin`, which silently fell back to
        // the hashed-key arrangement; use the unsigned-key specialization,
        // consistent with the other `_u` methods.
        self.concat(&self.semijoin_u(other).negate())
    }
}
/// Matches the elements of two arranged traces.
///
/// This method is used by the various `join` implementations, but it can also be used
/// directly in the event that one has a handle to an `Arranged<G,T>`, perhaps because
/// the arrangement is available for re-use, or from the output of a `group` operator.
pub trait JoinArranged<G: Scope, K: 'static, V: 'static, R: Diff> where G::Timestamp: Lattice+Ord {
    /// Joins two arranged collections with the same key type.
    ///
    /// Each matching pair of records `(key, val1)` and `(key, val2)` are subjected to the `result` function,
    /// producing a corresponding output record.
    ///
    /// This trait is implemented for arrangements (`Arranged<G, T>`) rather than collections. The `Join` trait
    /// contains the implementations for collections.
    ///
    /// The output diff is the product of the input diffs, `R * R2`.
    fn join_arranged<V2,T2,R2,D,L> (&self, stream2: &Arranged<G,K,V2,R2,T2>, result: L) -> Collection<G,D,<R as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<K, V2, G::Timestamp, R2>+'static,
        T2::Batch: BatchReader<K, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R: Mul<R2>,
        <R as Mul<R2>>::Output: Diff,
        D: Data,
        L: Fn(&K,&V,&V2)->D+'static;
}
impl<G, K, V, R1, T1> JoinArranged<G, K, V, R1> for Arranged<G,K,V,R1,T1>
where
    G: Scope,
    G::Timestamp: Lattice+Ord+Debug,
    K: Debug+Eq+'static,
    V: Ord+Clone+Debug+'static,
    R1: Diff,
    T1: TraceReader<K,V,G::Timestamp, R1>+'static,
    T1::Batch: BatchReader<K,V,G::Timestamp,R1>+'static+Debug {
    fn join_arranged<V2,T2,R2,D,L>(&self, other: &Arranged<G,K,V2,R2,T2>, result: L) -> Collection<G,D,<R1 as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<K,V2,G::Timestamp,R2>+'static,
        T2::Batch: BatchReader<K, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R1: Mul<R2>,
        <R1 as Mul<R2>>::Output: Diff,
        D: Data,
        L: Fn(&K,&V,&V2)->D+'static {

        // handles to shared trace data structures.
        let mut trace1 = Some(self.trace.clone());
        let mut trace2 = Some(other.trace.clone());

        // acknowledged frontier for each input: the upper bound of batches
        // accepted so far, used to bound how much of the other trace a new
        // batch may be joined against (avoiding double-counting).
        let mut acknowledged1 = vec![G::Timestamp::min()];
        let mut acknowledged2 = vec![G::Timestamp::min()];

        // deferred work of batches from each input.
        let mut todo1 = Vec::new();
        let mut todo2 = Vec::new();

        self.stream.binary_notify(&other.stream, Pipeline, Pipeline, "Join", vec![], move |input1, input2, output, notificator| {

            // The join computation repeatedly accepts batches of updates from each of its inputs.
            //
            // For each accepted batch, it prepares a work-item to join the batch against previously "accepted"
            // updates from its other input. It is important to track which updates have been accepted, through
            // a combination of the input's frontier and the most recently received batch's upper bound, because
            // we use a shared trace and there may be updates present that are in advance of this accepted bound.

            // drain input 1, prepare work.
            input1.for_each(|capability, data| {
                if let Some(ref mut trace2) = trace2 {
                    for batch1 in data.drain(..) {
                        // Only read trace2 up to what has been acknowledged.
                        let trace2_cursor = trace2.cursor_through(&acknowledged2[..]).unwrap();
                        let batch1_cursor = batch1.item.cursor();
                        // The closure's arguments are (trace diff, batch diff);
                        // they are swapped so the product stays `R1 * R2`.
                        todo1.push(Deferred::new(trace2_cursor, batch1_cursor, capability.clone(), |r2,r1| *r1 * *r2));
                        // Batches must arrive contiguously: each lower bound
                        // equals the previously acknowledged upper bound.
                        debug_assert!(batch1.item.description().lower() == &acknowledged1[..]);
                        acknowledged1 = batch1.item.description().upper().to_vec();
                    }
                }
            });

            // drain input 2, prepare work.
            input2.for_each(|capability, data| {
                if let Some(ref mut trace1) = trace1 {
                    for batch2 in data.drain(..) {
                        let trace1_cursor = trace1.cursor_through(&acknowledged1[..]).unwrap();
                        let batch2_cursor = batch2.item.cursor();
                        todo2.push(Deferred::new(trace1_cursor, batch2_cursor, capability.clone(), |r1,r2| *r1 * *r2));
                        debug_assert!(batch2.item.description().lower() == &acknowledged2[..]);
                        acknowledged2 = batch2.item.description().upper().to_vec();
                    }
                }
            });

            // shut down or advance trace2. if the frontier is empty we can shut it down,
            // and otherwise we can advance the trace by the acknowledged elements of the other input,
            // as we may still use them as thresholds (ie we must preserve `le` wrt `acknowledged`).
            if trace2.is_some() && notificator.frontier(0).len() == 0 { trace2 = None; }
            if let Some(ref mut trace2) = trace2 {
                trace2.advance_by(notificator.frontier(0));
                trace2.distinguish_since(&acknowledged2[..]);
            }

            // shut down or advance trace1.
            if trace1.is_some() && notificator.frontier(1).len() == 0 { trace1 = None; }
            if let Some(ref mut trace1) = trace1 {
                trace1.advance_by(notificator.frontier(1));
                trace1.distinguish_since(&acknowledged1[..]);
            }

            // Bound the output produced per invocation so the rest of the
            // dataflow gets a chance to consume it.
            let mut fuel = 1_000_000;

            // perform some amount of outstanding work.
            // `todo1` pairs a trace2 cursor with a batch1 cursor, so the
            // value arguments arrive as (v2, v1) and are swapped back here.
            while todo1.len() > 0 && fuel > 0 {
                todo1[0].work(output, &|k,v2,v1| result(k,v1,v2), &mut fuel);
                if !todo1[0].work_remains() { todo1.remove(0); }
            }

            // perform some amount of outstanding work.
            while todo2.len() > 0 && fuel > 0 {
                todo2[0].work(output, &|k,v1,v2| result(k,v1,v2), &mut fuel);
                if !todo2[0].work_remains() { todo2.remove(0); }
            }
        })
        .as_collection()
    }
}
/// Deferred join computation.
///
/// The structure wraps cursors which allow us to play out join computation at whatever rate we like.
/// This allows us to avoid producing and buffering massive amounts of data, without giving the timely
/// dataflow system a chance to run operators that can consume and aggregate the data.
struct Deferred<K, V1, V2, T, R1, R2, R3, C1, C2, M>
where
    V1: Ord+Clone,
    V2: Ord+Clone,
    T: Timestamp+Lattice+Ord+Debug,
    C1: Cursor<K, V1, T, R1>,
    C2: Cursor<K, V2, T, R2>,
    M: Fn(&R1,&R2)->R3,
{
    phant: ::std::marker::PhantomData<(K, V1, V2, R1, R2)>,
    // Cursor over previously accepted updates of the *other* input.
    trace: C1,
    // Cursor over the newly received batch driving this work item.
    batch: C2,
    // Capability at which produced output may be sent.
    capability: Capability<T>,
    // Multiplies a pair of input diffs into an output diff.
    mult: M,
}
impl<K, V1, V2, T, R1, R2, R3, C1, C2, M> Deferred<K, V1, V2, T, R1, R2, R3, C1, C2, M>
where
    K: Debug+Eq,
    V1: Ord+Clone+Debug,
    V2: Ord+Clone+Debug,
    T: Timestamp+Lattice+Ord+Debug,
    R1: Diff,
    R2: Diff,
    R3: Diff,
    C1: Cursor<K, V1, T, R1>,
    C2: Cursor<K, V2, T, R2>,
    M: Fn(&R1,&R2)->R3,
{
    fn new(trace: C1, batch: C2, capability: Capability<T>, mult: M) -> Self {
        Deferred {
            phant: ::std::marker::PhantomData,
            trace: trace,
            batch: batch,
            capability: capability,
            mult: mult,
        }
    }

    /// True while the batch cursor still has unprocessed keys.
    fn work_remains(&self) -> bool {
        self.batch.key_valid()
    }

    /// Process keys until at least `fuel` output tuples produced, or the work is exhausted.
    #[inline(never)]
    fn work<D, L>(&mut self, output: &mut OutputHandle<T, (D, T, R3), Tee<T, (D, T, R3)>>, logic: &L, fuel: &mut usize)
    where D: Ord+Clone+Data, L: Fn(&K, &V1, &V2)->D {
        // Trace times may be advanced to the capability's time.
        let meet = self.capability.time();

        let mut effort = 0;
        let mut session = output.session(&self.capability);

        let trace = &mut self.trace;
        let batch = &mut self.batch;
        let mult = &self.mult;

        let mut temp = Vec::new();
        let mut thinker = JoinThinker::<V1, V2, T, R1, R2>::new();

        // Walk the batch key by key, matching each key against the trace.
        while batch.key_valid() && effort < *fuel {
            trace.seek_key(batch.key());
            if trace.key_valid() && trace.key() == batch.key() {
                // Load edit histories for this key; trace times are joined
                // with `meet`, batch times are used as-is.
                thinker.history1.edits.load(trace, |time| time.join(&meet));
                thinker.history2.edits.load(batch, |time| time.clone());

                // populate `temp` with the results in the best way we know how.
                thinker.think(|v1,v2,t,r1,r2| temp.push(((logic(batch.key(), v1, v2), t), mult(r1,r2))));

                // Combine duplicate (data, time) updates before sending.
                consolidate(&mut temp, 0);
                effort += temp.len();
                for ((d, t), r) in temp.drain(..) {
                    session.give((d, t, r));
                }
            }
            batch.step_key();
        }

        // Deduct the work performed from the caller's fuel budget.
        if effort > *fuel { *fuel = 0; }
        else { *fuel -= effort; }
    }
}
// Pairs the per-key edit histories of the two inputs, and cross-multiplies
// them to produce joined updates.
struct JoinThinker<V1: Ord+Clone, V2: Ord+Clone, T: Lattice+Ord+Clone, R1: Diff, R2: Diff> {
    pub history1: ValueHistory2<V1, T, R1>,
    pub history2: ValueHistory2<V2, T, R2>,
}
impl<V1: Ord+Clone, V2: Ord+Clone, T: Lattice+Ord+Clone, R1: Diff, R2: Diff> JoinThinker<V1, V2, T, R1, R2>
where V1: Debug, V2: Debug, T: Debug
{
    fn new() -> Self {
        JoinThinker {
            history1: ValueHistory2::new(),
            history2: ValueHistory2::new(),
        }
    }

    /// Cross-multiplies the two loaded histories, handing each produced
    /// `(val1, val2, joined-time, diff1, diff2)` tuple to `results`.
    fn think<F: FnMut(&V1,&V2,T,&R1,&R2)>(&mut self, mut results: F) {

        // for reasonably sized edits, do the dead-simple thing.
        if self.history1.edits.len() < 10 || self.history2.edits.len() < 10 {
            self.history1.edits.map(|v1, t1, d1| {
                self.history2.edits.map(|v2, t2, d2| {
                    results(v1, v2, t1.join(t2), &d1, &d2);
                })
            })
        }
        else {
            // For larger histories, replay both in time order, repeatedly
            // matching the earlier side's current edit against the other
            // side's buffered (advanced) edits.
            self.history1.order();
            self.history2.order();

            while !self.history1.is_done() && !self.history2.is_done() {

                if self.history1.time().unwrap().cmp(&self.history2.time().unwrap()) == ::std::cmp::Ordering::Less {
                    self.history2.advance_buffer_by(self.history1.meet().unwrap());
                    for &((ref val2, ref time2), ref diff2) in &self.history2.buffer {
                        let (val1, time1, ref diff1) = self.history1.edit().unwrap();
                        results(val1, val2, time1.join(time2), diff1, diff2);
                    }
                    self.history1.step();
                }
                else {
                    self.history1.advance_buffer_by(self.history2.meet().unwrap());
                    for &((ref val1, ref time1), ref diff1) in &self.history1.buffer {
                        let (val2, time2, ref diff2) = self.history2.edit().unwrap();
                        results(val1, val2, time1.join(time2), diff1, diff2);
                    }
                    self.history2.step();
                }
            }

            // Drain whichever history remains against the other's buffer.
            while !self.history1.is_done() {
                self.history2.advance_buffer_by(self.history1.meet().unwrap());
                for &((ref val2, ref time2), ref diff2) in &self.history2.buffer {
                    let (val1, time1, ref diff1) = self.history1.edit().unwrap();
                    results(val1, val2, time1.join(time2), diff1, diff2);
                }
                self.history1.step();
            }
            while !self.history2.is_done() {
                self.history1.advance_buffer_by(self.history2.meet().unwrap());
                for &((ref val1, ref time1), ref diff1) in &self.history1.buffer {
                    let (val2, time2, ref diff2) = self.history2.edit().unwrap();
                    results(val1, val2, time1.join(time2), diff1, diff2);
                }
                self.history2.step();
            }
        }
    }
}
Batch and trace cursors are now seeked symmetrically.
//! Match pairs of records based on a key.
//!
//! The various `join` implementations require that the units of each collection can be multiplied, and that
//! the multiplication distributes over addition. That is, we will repeatedly evaluate (a + b) * c as (a * c)
//! + (b * c), and if this is not equal to the former term, little is known about the actual output.
use std::fmt::Debug;
use std::ops::Mul;
use std::cmp::Ordering;
use timely::progress::Timestamp;
use timely::dataflow::Scope;
use timely::dataflow::operators::Binary;
use timely::dataflow::channels::pact::Pipeline;
use timely::dataflow::operators::Capability;
use timely_sort::Unsigned;
use timely::dataflow::operators::OutputHandle;
use timely::dataflow::channels::pushers::tee::Tee;
use hashable::{Hashable, UnsignedWrapper, OrdWrapper};
use ::{Data, Diff, Collection, AsCollection};
use lattice::Lattice;
use operators::arrange::{Arrange, Arranged, ArrangeByKey, ArrangeBySelf};
use trace::{BatchReader, Cursor, Trace, consolidate};
use operators::ValueHistory2;
// use trace::implementations::hash::HashValSpine as DefaultValTrace;
// use trace::implementations::hash::HashKeySpine as DefaultKeyTrace;
use trace::implementations::ord::OrdValSpine as DefaultValTrace;
use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace;
use trace::TraceReader;
/// Join implementations for `(key,val)` data.
pub trait Join<G: Scope, K: Data, V: Data, R: Diff> {
/// Matches pairs `(key,val1)` and `(key,val2)` based on `key` and then applies a function.
///
/// #Examples
/// ```ignore
/// extern crate timely;
/// use timely::dataflow::operators::{ToStream, Capture};
/// use timely::dataflow::operators::capture::Extract;
/// use differential_dataflow::operators::Join;
///
/// let data = timely::example(|scope| {
/// let col1 = vec![((0,0),1),((1,2),1)].into_iter().to_stream(scope);
/// let col2 = vec![((0,'a'),1),((1,'B'),1)].into_iter().to_stream(scope);
///
/// // should produce records `(0 + 0,'a')` and `(1 + 2,'B')`.
/// col1.join_map(&col2, |k,v1,v2| (*k + *v1, *v2)).capture();
/// });
///
/// let extracted = data.extract();
/// assert_eq!(extracted.len(), 1);
/// assert_eq!(extracted[0].1, vec![((0,'a'),1), ((3,'B'),1)]);
/// ```
fn join<V2: Data, R2: Diff>(&self, other: &Collection<G, (K,V2), R2>) -> Collection<G, (K,V,V2), <R as Mul<R2>>::Output>
where R: Mul<R2>, <R as Mul<R2>>::Output: Diff
{
self.join_map(other, |k,v,v2| (k.clone(),v.clone(),v2.clone()))
}
/// Like `join`, but with an randomly distributed unsigned key.
fn join_u<V2: Data, R2: Diff>(&self, other: &Collection<G, (K,V2), R2>) -> Collection<G, (K,V,V2), <R as Mul<R2>>::Output>
where K: Unsigned+Copy, R: Mul<R2>, <R as Mul<R2>>::Output: Diff {
self.join_map_u(other, |k,v,v2| (k.clone(),v.clone(),v2.clone()))
}
/// Matches pairs `(key,val1)` and `(key,val2)` based on `key` and then applies a function.
///
/// #Examples
/// ```ignore
/// extern crate timely;
/// use timely::dataflow::operators::{ToStream, Capture};
/// use timely::dataflow::operators::capture::Extract;
/// use differential_dataflow::operators::Join;
///
/// let data = timely::example(|scope| {
/// let col1 = vec![((0,0),1),((1,2),1)].into_iter().to_stream(scope);
/// let col2 = vec![((0,'a'),1),((1,'B'),1)].into_iter().to_stream(scope);
///
/// // should produce records `(0 + 0,'a')` and `(1 + 2,'B')`.
/// col1.join_map(&col2, |k,v1,v2| (*k + *v1, *v2)).capture();
/// });
///
/// let extracted = data.extract();
/// assert_eq!(extracted.len(), 1);
/// assert_eq!(extracted[0].1, vec![((0,'a'),1), ((3,'B'),1)]);
/// ```
fn join_map<V2, R2: Diff, D, L>(&self, other: &Collection<G, (K,V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
where V2: Data, R: Mul<R2>, <R as Mul<R2>>::Output: Diff, D: Data, L: Fn(&K, &V, &V2)->D+'static;
/// Like `join_map`, but with a randomly distributed unsigned key.
fn join_map_u<V2, R2: Diff, D, L>(&self, other: &Collection<G, (K,V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
where K: Unsigned+Copy, R: Mul<R2>, <R as Mul<R2>>::Output: Diff, V2: Data, D: Data, L: Fn(&K, &V, &V2)->D+'static;
/// Matches pairs `(key,val1)` and `key` based on `key`, filtering the first collection by values present in the second.
///
/// # Examples
/// ```ignore
/// extern crate timely;
/// use timely::dataflow::operators::{ToStream, Capture};
/// use timely::dataflow::operators::capture::Extract;
/// use differential_dataflow::operators::Join;
///
/// let data = timely::example(|scope| {
///     let col1 = vec![((0,0),1),((1,2),1)].into_iter().to_stream(scope);
///     let col2 = vec![(0,1)].into_iter().to_stream(scope);
///
///     // should retain record `(0,0)` and discard `(1,2)`.
///     col1.semijoin(&col2).capture();
/// });
///
/// let extracted = data.extract();
/// assert_eq!(extracted.len(), 1);
/// assert_eq!(extracted[0].1, vec![((0,0),1)]);
/// ```
fn semijoin<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
where R2: Diff, R: Mul<R2>, <R as Mul<R2>>::Output: Diff;
/// Like `semijoin`, but with a randomly distributed unsigned key.
fn semijoin_u<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
where K: Unsigned+Copy, R2: Diff, R: Mul<R2>, <R as Mul<R2>>::Output: Diff;
/// Matches pairs `(key,val1)` and `key` based on `key`, discarding values
/// in the first collection if their key is present in the second.
///
/// # Examples
/// ```ignore
/// extern crate timely;
/// use timely::dataflow::operators::{ToStream, Capture};
/// use timely::dataflow::operators::capture::Extract;
/// use differential_dataflow::operators::Join;
///
/// let data = timely::example(|scope| {
///     let col1 = vec![((0,0),1),((1,2),1)].into_iter().to_stream(scope);
///     let col2 = vec![(0,1)].into_iter().to_stream(scope);
///
///     // should retain record `(1,2)` and discard `(0,0)`.
///     col1.antijoin(&col2).consolidate().capture();
/// });
///
/// let extracted = data.extract();
/// assert_eq!(extracted.len(), 1);
/// assert_eq!(extracted[0].1, vec![((1,2),1)]);
/// ```
fn antijoin<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
where R2: Diff, R: Mul<R2, Output = R>;
/// Like `antijoin`, but with a randomly distributed unsigned key.
fn antijoin_u<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
where K: Unsigned+Copy, R2: Diff, R: Mul<R2, Output=R>;
}
/// `Join` for collections of `(key, val)` pairs: each method arranges its inputs
/// (by hashed key, or by an unsigned-key wrapper for the `_u` variants) and then
/// defers to the arrangement-based `join_arranged`.
impl<G, K, V, R> Join<G, K, V, R> for Collection<G, (K, V), R>
where
    G: Scope,
    K: Data+Default+Hashable,
    V: Data,
    R: Diff,
    G::Timestamp: Lattice+Ord,
{
    fn join_map<V2: Data, R2: Diff, D: Data, L>(&self, other: &Collection<G, (K, V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff, L: Fn(&K, &V, &V2)->D+'static {
        let arranged1 = self.arrange_by_key_hashed();
        let arranged2 = other.arrange_by_key_hashed();
        // The arrangement wraps keys; unwrap with `.item` before handing them to `logic`.
        arranged1.join_arranged(&arranged2, move |k,v1,v2| logic(&k.item,v1,v2))
    }
    fn semijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff {
        let arranged1 = self.arrange_by_key_hashed();
        let arranged2 = other.arrange_by_self();
        arranged1.join_arranged(&arranged2, |k,v,_| (k.item.clone(), v.clone()))
    }
    fn antijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where R: Mul<R2, Output=R> {
        // antijoin(x, y) = x - semijoin(x, y): subtract the matched records.
        self.concat(&self.semijoin(other).negate())
    }
    fn join_map_u<V2, R2, D, L>(&self, other: &Collection<G, (K, V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where K: Unsigned+Copy, V2: Data, R2: Diff, R: Mul<R2>, <R as Mul<R2>>::Output: Diff, D: Data, L: Fn(&K, &V, &V2)->D+'static {
        let arranged1 = self.map(|(k,v)| (UnsignedWrapper::from(k), v))
            .arrange(DefaultValTrace::new());
        let arranged2 = other.map(|(k,v)| (UnsignedWrapper::from(k), v))
            .arrange(DefaultValTrace::new());
        arranged1.join_arranged(&arranged2, move |k,v1,v2| logic(&k.item,v1,v2))
    }
    fn semijoin_u<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where K: Unsigned+Copy, R2: Diff, R: Mul<R2>, <R as Mul<R2>>::Output: Diff {
        let arranged1 = self.map(|(k,v)| (UnsignedWrapper::from(k), v))
            .arrange(DefaultValTrace::new());
        let arranged2 = other.map(|k| (UnsignedWrapper::from(k), ()))
            .arrange(DefaultKeyTrace::new());
        arranged1.join_arranged(&arranged2, |k,v,_| (k.item.clone(), v.clone()))
    }
    fn antijoin_u<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where K: Unsigned+Copy, R2: Diff, R: Mul<R2, Output=R> {
        // Fixed: use the unsigned-key specialization `semijoin_u` rather than the
        // generic `semijoin`. The original called `semijoin`, which silently discarded
        // the `_u` arrangement optimization; the produced collection is identical.
        self.concat(&self.semijoin_u(other).negate())
    }
}
/// Matches the elements of two arranged traces.
///
/// This method is used by the various `join` implementations, but it can also be used
/// directly in the event that one has a handle to an `Arranged<G,T>`, perhaps because
/// the arrangement is available for re-use, or from the output of a `group` operator.
pub trait JoinArranged<G: Scope, K: 'static, V: 'static, R: Diff> where G::Timestamp: Lattice+Ord {
    /// Joins two arranged collections with the same key type.
    ///
    /// Each matching pair of records `(key, val1)` and `(key, val2)` are subjected to the `result` function,
    /// producing a corresponding output record.
    ///
    /// This trait is primarily implemented for arrangements (`Arranged<G, T>`); there is also a
    /// convenience impl for `Collection` that arranges its input first. The `Join` trait
    /// contains the implementations for collections.
    fn join_arranged<V2,T2,R2,D,L> (&self, stream2: &Arranged<G,K,V2,R2,T2>, result: L) -> Collection<G,D,<R as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<K, V2, G::Timestamp, R2>+'static,
        T2::Batch: BatchReader<K, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R: Mul<R2>,
        <R as Mul<R2>>::Output: Diff,
        D: Data,
        L: Fn(&K,&V,&V2)->D+'static;
}
/// Convenience impl: a collection of `(key, val)` pairs can be joined against an
/// existing arrangement by first arranging itself by hashed key.
impl<G, K, V, R> JoinArranged<G, OrdWrapper<K>, V, R> for Collection<G, (K, V), R>
where
    G: Scope,
    K: Data+Default+Hashable,
    V: Data,
    R: Diff,
    G::Timestamp: Lattice+Ord,
{
    fn join_arranged<V2,T2,R2,D,L> (&self, stream2: &Arranged<G,OrdWrapper<K>,V2,R2,T2>, result: L) -> Collection<G,D,<R as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<OrdWrapper<K>, V2, G::Timestamp, R2>+'static,
        T2::Batch: BatchReader<OrdWrapper<K>, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R: Mul<R2>,
        <R as Mul<R2>>::Output: Diff,
        D: Data,
        L: Fn(&OrdWrapper<K>,&V,&V2)->D+'static {
        // Arrange `self` once, then defer to the arrangement-vs-arrangement join.
        let arranged = self.arrange_by_key_hashed();
        arranged.join_arranged(stream2, result)
    }
}
/// The core join dataflow operator, matching two arrangements key by key.
impl<G, K, V, R1, T1> JoinArranged<G, K, V, R1> for Arranged<G,K,V,R1,T1>
where
    K: Ord,
    G: Scope,
    G::Timestamp: Lattice+Ord+Debug,
    K: Debug+Eq+'static,
    V: Ord+Clone+Debug+'static,
    R1: Diff,
    T1: TraceReader<K,V,G::Timestamp, R1>+'static,
    T1::Batch: BatchReader<K,V,G::Timestamp,R1>+'static+Debug {
    fn join_arranged<V2,T2,R2,D,L>(&self, other: &Arranged<G,K,V2,R2,T2>, result: L) -> Collection<G,D,<R1 as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<K,V2,G::Timestamp,R2>+'static,
        T2::Batch: BatchReader<K, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R1: Mul<R2>,
        <R1 as Mul<R2>>::Output: Diff,
        D: Data,
        L: Fn(&K,&V,&V2)->D+'static {
        // handles to shared trace data structures. Held as `Option` so each can be
        // released (dropped) once its input frontier empties.
        let mut trace1 = Some(self.trace.clone());
        let mut trace2 = Some(other.trace.clone());
        // acknowledged frontier for each input: upper bound of the batches accepted so far.
        let mut acknowledged1 = vec![G::Timestamp::min()];
        let mut acknowledged2 = vec![G::Timestamp::min()];
        // deferred work of batches from each input.
        let mut todo1 = Vec::new();
        let mut todo2 = Vec::new();
        self.stream.binary_notify(&other.stream, Pipeline, Pipeline, "Join", vec![], move |input1, input2, output, notificator| {
            // The join computation repeatedly accepts batches of updates from each of its inputs.
            //
            // For each accepted batch, it prepares a work-item to join the batch against previously "accepted"
            // updates from its other input. It is important to track which updates have been accepted, through
            // a combination of the input's frontier and the most recently received batch's upper bound, because
            // we use a shared trace and there may be updates present that are in advance of this accepted bound.
            // drain input 1, prepare work.
            input1.for_each(|capability, data| {
                if let Some(ref mut trace2) = trace2 {
                    for batch1 in data.drain(..) {
                        let trace2_cursor = trace2.cursor_through(&acknowledged2[..]).unwrap();
                        let batch1_cursor = batch1.item.cursor();
                        // NOTE: the multiplication closure swaps its arguments so that the
                        // product is formed as `R1 * R2` even though `trace2` is the first cursor.
                        todo1.push(Deferred::new(trace2_cursor, batch1_cursor, capability.clone(), |r2,r1| *r1 * *r2));
                        debug_assert!(batch1.item.description().lower() == &acknowledged1[..]);
                        acknowledged1 = batch1.item.description().upper().to_vec();
                    }
                }
            });
            // drain input 2, prepare work.
            input2.for_each(|capability, data| {
                if let Some(ref mut trace1) = trace1 {
                    for batch2 in data.drain(..) {
                        let trace1_cursor = trace1.cursor_through(&acknowledged1[..]).unwrap();
                        let batch2_cursor = batch2.item.cursor();
                        todo2.push(Deferred::new(trace1_cursor, batch2_cursor, capability.clone(), |r1,r2| *r1 * *r2));
                        debug_assert!(batch2.item.description().lower() == &acknowledged2[..]);
                        acknowledged2 = batch2.item.description().upper().to_vec();
                    }
                }
            });
            // shut down or advance trace2. if the frontier is empty we can shut it down,
            // and otherwise we can advance the trace by the acknowledged elements of the other input,
            // as we may still use them as thresholds (ie we must preserve `le` wrt `acknowledged`).
            if trace2.is_some() && notificator.frontier(0).len() == 0 { trace2 = None; }
            if let Some(ref mut trace2) = trace2 {
                trace2.advance_by(notificator.frontier(0));
                trace2.distinguish_since(&acknowledged2[..]);
            }
            // shut down or advance trace1.
            if trace1.is_some() && notificator.frontier(1).len() == 0 { trace1 = None; }
            if let Some(ref mut trace1) = trace1 {
                trace1.advance_by(notificator.frontier(1));
                trace1.distinguish_since(&acknowledged1[..]);
            }
            // Budget of output tuples per invocation, so the operator yields and lets
            // downstream operators consume the produced records.
            let mut fuel = 1_000_000;
            // perform some amount of outstanding work. The value closure re-swaps the
            // value arguments, since `todo1` pairs (trace2 values, batch1 values).
            while todo1.len() > 0 && fuel > 0 {
                todo1[0].work(output, &|k,v2,v1| result(k,v1,v2), &mut fuel);
                if !todo1[0].work_remains() { todo1.remove(0); }
            }
            // perform some amount of outstanding work.
            while todo2.len() > 0 && fuel > 0 {
                todo2[0].work(output, &|k,v1,v2| result(k,v1,v2), &mut fuel);
                if !todo2[0].work_remains() { todo2.remove(0); }
            }
        })
        .as_collection()
    }
}
/// Deferred join computation.
///
/// The structure wraps cursors which allow us to play out join computation at whatever rate we like.
/// This allows us to avoid producing and buffering massive amounts of data, without giving the timely
/// dataflow system a chance to run operators that can consume and aggregate the data.
struct Deferred<K, V1, V2, T, R1, R2, R3, C1, C2, M>
where
    V1: Ord+Clone,
    V2: Ord+Clone,
    T: Timestamp+Lattice+Ord+Debug,
    C1: Cursor<K, V1, T, R1>,
    C2: Cursor<K, V2, T, R2>,
    M: Fn(&R1,&R2)->R3,
{
    // Marks ownership of the type parameters that do not appear in field types directly.
    phant: ::std::marker::PhantomData<(K, V1, V2, R1, R2)>,
    // Cursor over previously accepted updates from the other input.
    trace: C1,
    // Cursor over the newly received batch being joined.
    batch: C2,
    // Capability at which produced output records are sent.
    capability: Capability<T>,
    // Combines a pair of input differences into an output difference.
    mult: M,
    // Set once either cursor is exhausted; no further work remains.
    done: bool,
}
impl<K, V1, V2, T, R1, R2, R3, C1, C2, M> Deferred<K, V1, V2, T, R1, R2, R3, C1, C2, M>
where
    K: Ord+Debug+Eq,
    V1: Ord+Clone+Debug,
    V2: Ord+Clone+Debug,
    T: Timestamp+Lattice+Ord+Debug,
    R1: Diff,
    R2: Diff,
    R3: Diff,
    C1: Cursor<K, V1, T, R1>,
    C2: Cursor<K, V2, T, R2>,
    M: Fn(&R1,&R2)->R3,
{
    /// Creates a new deferred work item from a trace cursor, a batch cursor,
    /// an output capability, and a difference-combining function.
    fn new(trace: C1, batch: C2, capability: Capability<T>, mult: M) -> Self {
        Deferred {
            phant: ::std::marker::PhantomData,
            trace: trace,
            batch: batch,
            capability: capability,
            mult: mult,
            done: false,
        }
    }
    /// True as long as neither cursor has been exhausted.
    fn work_remains(&self) -> bool {
        !self.done
    }
    /// Processes matching keys until roughly `fuel` output tuples are produced, or the work is exhausted.
    ///
    /// Decrements `*fuel` by the number of tuples produced (saturating at zero), so the
    /// caller can bound the total work performed across several deferred items.
    #[inline(never)]
    fn work<D, L>(&mut self, output: &mut OutputHandle<T, (D, T, R3), Tee<T, (D, T, R3)>>, logic: &L, fuel: &mut usize)
    where D: Ord+Clone+Data, L: Fn(&K, &V1, &V2)->D {
        // Times from the trace may be advanced to (joined with) this capability's time.
        let meet = self.capability.time();
        let mut effort = 0;
        let mut session = output.session(&self.capability);
        let trace = &mut self.trace;
        let batch = &mut self.batch;
        let mult = &self.mult;
        let mut temp = Vec::new();
        let mut thinker = JoinThinker::<V1, V2, T, R1, R2>::new();
        // Classic merge-join over the two key-sorted cursors.
        while batch.key_valid() && trace.key_valid() && effort < *fuel {
            match trace.key().cmp(batch.key()) {
                Ordering::Less => trace.seek_key(batch.key()),
                Ordering::Greater => batch.seek_key(trace.key()),
                Ordering::Equal => {
                    thinker.history1.edits.load(trace, |time| time.join(&meet));
                    thinker.history2.edits.load(batch, |time| time.clone());
                    // populate `temp` with the results in the best way we know how.
                    thinker.think(|v1,v2,t,r1,r2| temp.push(((logic(batch.key(), v1, v2), t), mult(r1,r2))));
                    consolidate(&mut temp, 0);
                    effort += temp.len();
                    for ((d, t), r) in temp.drain(..) {
                        session.give((d, t, r));
                    }
                    batch.step_key();
                    trace.step_key();
                }
            }
        }
        // If either cursor is exhausted no further matches can exist; otherwise we
        // merely ran out of fuel and will be called again.
        self.done = !batch.key_valid() || !trace.key_valid();
        if effort > *fuel { *fuel = 0; }
        else { *fuel -= effort; }
    }
}
/// Pairs up the edit histories of two values for a single key, producing the
/// cross-product of their `(value, time, diff)` updates with joined times.
struct JoinThinker<V1: Ord+Clone, V2: Ord+Clone, T: Lattice+Ord+Clone, R1: Diff, R2: Diff> {
    // Edit history of values from the first (trace) input.
    pub history1: ValueHistory2<V1, T, R1>,
    // Edit history of values from the second (batch) input.
    pub history2: ValueHistory2<V2, T, R2>,
}
impl<V1: Ord+Clone, V2: Ord+Clone, T: Lattice+Ord+Clone, R1: Diff, R2: Diff> JoinThinker<V1, V2, T, R1, R2>
where V1: Debug, V2: Debug, T: Debug
{
    /// Creates a thinker with two empty histories.
    fn new() -> Self {
        JoinThinker {
            history1: ValueHistory2::new(),
            history2: ValueHistory2::new(),
        }
    }
    /// Invokes `results` once per pair of edits, with the time of each pair joined.
    fn think<F: FnMut(&V1,&V2,T,&R1,&R2)>(&mut self, mut results: F) {
        // for reasonably sized edits, do the dead-simple thing: full nested cross-product.
        if self.history1.edits.len() < 10 || self.history2.edits.len() < 10 {
            self.history1.edits.map(|v1, t1, d1| {
                self.history2.edits.map(|v2, t2, d2| {
                    results(v1, v2, t1.join(t2), &d1, &d2);
                })
            })
        }
        else {
            // For larger histories, order both and walk them in time order, pairing
            // each edit against the other history's accumulated buffer.
            self.history1.order();
            self.history2.order();
            while !self.history1.is_done() && !self.history2.is_done() {
                if self.history1.time().unwrap().cmp(&self.history2.time().unwrap()) == ::std::cmp::Ordering::Less {
                    self.history2.advance_buffer_by(self.history1.meet().unwrap());
                    for &((ref val2, ref time2), ref diff2) in &self.history2.buffer {
                        let (val1, time1, ref diff1) = self.history1.edit().unwrap();
                        results(val1, val2, time1.join(time2), diff1, diff2);
                    }
                    self.history1.step();
                }
                else {
                    self.history1.advance_buffer_by(self.history2.meet().unwrap());
                    for &((ref val1, ref time1), ref diff1) in &self.history1.buffer {
                        let (val2, time2, ref diff2) = self.history2.edit().unwrap();
                        results(val1, val2, time1.join(time2), diff1, diff2);
                    }
                    self.history2.step();
                }
            }
            // Drain whichever history still has edits, against the other's buffer.
            while !self.history1.is_done() {
                self.history2.advance_buffer_by(self.history1.meet().unwrap());
                for &((ref val2, ref time2), ref diff2) in &self.history2.buffer {
                    let (val1, time1, ref diff1) = self.history1.edit().unwrap();
                    results(val1, val2, time1.join(time2), diff1, diff2);
                }
                self.history1.step();
            }
            while !self.history2.is_done() {
                self.history1.advance_buffer_by(self.history2.meet().unwrap());
                for &((ref val1, ref time1), ref diff1) in &self.history1.buffer {
                    let (val2, time2, ref diff2) = self.history2.edit().unwrap();
                    results(val1, val2, time1.join(time2), diff1, diff2);
                }
                self.history2.step();
            }
        }
    }
}
//! Match pairs of records based on a key.
//!
//! The various `join` implementations require that the units of each collection can be multiplied, and that
//! the multiplication distributes over addition. That is, we will repeatedly evaluate (a + b) * c as (a * c)
//! + (b * c), and if this is not equal to the former term, little is known about the actual output.
use std::fmt::Debug;
use std::ops::Mul;
use std::cmp::Ordering;
use timely::progress::Timestamp;
use timely::dataflow::Scope;
use timely::dataflow::operators::generic::{Binary, OutputHandle};
use timely::dataflow::channels::pact::Pipeline;
use timely::dataflow::operators::Capability;
use timely::dataflow::channels::pushers::tee::Tee;
use hashable::Hashable;
use ::{Data, Diff, Collection, AsCollection};
use lattice::Lattice;
use operators::arrange::{Arranged, ArrangeByKey, ArrangeBySelf};
use trace::{BatchReader, Cursor, consolidate};
use operators::ValueHistory2;
use trace::TraceReader;
/// Join implementations for `(key,val)` data.
pub trait Join<G: Scope, K: Data, V: Data, R: Diff> {
    /// Matches pairs `(key,val1)` and `(key,val2)` based on `key`, producing `(key, val1, val2)` records.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Join;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let x = scope.new_collection_from(vec![(0, 1), (1, 3)]).1;
    ///         let y = scope.new_collection_from(vec![(0, 'a'), (1, 'b')]).1;
    ///         let z = scope.new_collection_from(vec![(0, 1, 'a'), (1, 3, 'b')]).1;
    ///
    ///         x.join(&y)
    ///          .assert_eq(&z);
    ///     });
    /// }
    /// ```
    fn join<V2: Data, R2: Diff>(&self, other: &Collection<G, (K,V2), R2>) -> Collection<G, (K,V,V2), <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff
    {
        // Default implementation: `join` is `join_map` with a tuple-building function.
        self.join_map(other, |k,v,v2| (k.clone(),v.clone(),v2.clone()))
    }
    /// Matches pairs `(key,val1)` and `(key,val2)` based on `key` and then applies a function.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Join;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let x = scope.new_collection_from(vec![(0, 1), (1, 3)]).1;
    ///         let y = scope.new_collection_from(vec![(0, 'a'), (1, 'b')]).1;
    ///         let z = scope.new_collection_from(vec![(1, 'a'), (3, 'b')]).1;
    ///
    ///         x.join_map(&y, |_key, &a, &b| (a,b))
    ///          .assert_eq(&z);
    ///     });
    /// }
    /// ```
    fn join_map<V2, R2: Diff, D, L>(&self, other: &Collection<G, (K,V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where V2: Data, R: Mul<R2>, <R as Mul<R2>>::Output: Diff, D: Data, L: Fn(&K, &V, &V2)->D+'static;
    /// Matches pairs `(key, val)` and `key` based on `key`, producing the former with frequencies multiplied.
    ///
    /// When the second collection contains frequencies that are either zero or one this is the more traditional
    /// relational semijoin. When the second collection may contain multiplicities, this operation may scale up
    /// the counts of the records in the first input.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Join;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let x = scope.new_collection_from(vec![(0, 1), (1, 3)]).1;
    ///         let y = scope.new_collection_from(vec![0, 2]).1;
    ///         let z = scope.new_collection_from(vec![(0, 1)]).1;
    ///
    ///         x.semijoin(&y)
    ///          .assert_eq(&z);
    ///     });
    /// }
    /// ```
    fn semijoin<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where R2: Diff, R: Mul<R2>, <R as Mul<R2>>::Output: Diff;
    /// Matches pairs `(key, val)` and `key` based on `key`, discarding values
    /// in the first collection if their key is present in the second.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Join;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let x = scope.new_collection_from(vec![(0, 1), (1, 3)]).1;
    ///         let y = scope.new_collection_from(vec![0, 2]).1;
    ///         let z = scope.new_collection_from(vec![(1, 3)]).1;
    ///
    ///         x.antijoin(&y)
    ///          .assert_eq(&z);
    ///     });
    /// }
    /// ```
    fn antijoin<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where R2: Diff, R: Mul<R2, Output = R>;
}
/// `Join` for collections of `(key, val)` pairs: each method arranges its inputs
/// by key and then defers to the arrangement-based `join_core`.
impl<G, K, V, R> Join<G, K, V, R> for Collection<G, (K, V), R>
where
    G: Scope,
    K: Data+Hashable,
    V: Data,
    R: Diff,
    G::Timestamp: Lattice+Ord,
{
    fn join_map<V2: Data, R2: Diff, D: Data, L>(&self, other: &Collection<G, (K, V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff, L: Fn(&K, &V, &V2)->D+'static {
        // Arrange both sides by key; `join_core` expects an iterator, so wrap in `Some`.
        let left = self.arrange_by_key();
        let right = other.arrange_by_key();
        left.join_core(&right, move |key, val1, val2| Some(logic(key, val1, val2)))
    }
    fn semijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff {
        // The second input carries only keys, arranged "by self"; its value is `()`.
        let left = self.arrange_by_key();
        let right = other.arrange_by_self();
        left.join_core(&right, |key, val, &()| Some((key.clone(), val.clone())))
    }
    fn antijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where R: Mul<R2, Output=R> {
        // antijoin(x, y) = x - semijoin(x, y).
        let matched = self.semijoin(other);
        self.concat(&matched.negate())
    }
}
/// `Join` for already-arranged data: only the *other* input needs arranging.
impl<G, K, V, R, T> Join<G, K, V, R> for Arranged<G,K,V,R,T>
where
    G: Scope,
    G::Timestamp: Lattice+Ord+Debug,
    K: Data+Hashable,
    V: Data,
    R: Diff,
    T: TraceReader<K,V,G::Timestamp,R>+Clone+'static,
    T::Batch: BatchReader<K,V,G::Timestamp,R>+'static+Debug {
    fn join_map<V2: Data, R2: Diff, D: Data, L>(&self, other: &Collection<G, (K, V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff, L: Fn(&K, &V, &V2)->D+'static {
        // `self` is already arranged; arrange only the collection argument.
        let right = other.arrange_by_key();
        self.join_core(&right, move |key, val1, val2| Some(logic(key, val1, val2)))
    }
    fn semijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff {
        let right = other.arrange_by_self();
        self.join_core(&right, |key, val, &()| Some((key.clone(), val.clone())))
    }
    fn antijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where R: Mul<R2, Output=R> {
        // Flatten the arrangement back into a collection before subtracting the matches.
        let everything = self.as_collection(|k,v| (k.clone(), v.clone()));
        let matched = self.semijoin(other);
        everything.concat(&matched.negate())
    }
}
/// Matches the elements of two arranged traces.
///
/// This method is used by the various `join` implementations, but it can also be used
/// directly in the event that one has a handle to an `Arranged<G,T>`, perhaps because
/// the arrangement is available for re-use, or from the output of a `group` operator.
pub trait JoinCore<G: Scope, K: 'static, V: 'static, R: Diff> where G::Timestamp: Lattice+Ord {
    /// Joins two arranged collections with the same key type.
    ///
    /// Each matching pair of records `(key, val1)` and `(key, val2)` are subjected to the `result` function,
    /// which produces something implementing `IntoIterator`; the output collection contains each item the
    /// iterator yields, with multiplicity given by the product of the two input multiplicities.
    ///
    /// This trait is implemented for arrangements (`Arranged<G, T>`) rather than collections. The `Join` trait
    /// contains the implementations for collections.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::arrange::Arrange;
    /// use differential_dataflow::operators::join::JoinCore;
    /// use differential_dataflow::trace::Trace;
    /// use differential_dataflow::trace::implementations::ord::OrdValSpine;
    /// use differential_dataflow::hashable::OrdWrapper;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let x = scope.new_collection_from(vec![(0u32, 1), (1, 3)]).1
    ///                      .map(|(x,y)| (OrdWrapper { item: x }, y))
    ///                      .arrange(OrdValSpine::new());
    ///         let y = scope.new_collection_from(vec![(0, 'a'), (1, 'b')]).1
    ///                      .map(|(x,y)| (OrdWrapper { item: x }, y))
    ///                      .arrange(OrdValSpine::new());
    ///
    ///         let z = scope.new_collection_from(vec![(1, 'a'), (3, 'b')]).1;
    ///
    ///         x.join_core(&y, |_key, &a, &b| Some((a, b)))
    ///          .assert_eq(&z);
    ///     });
    /// }
    /// ```
    fn join_core<V2,T2,R2,I,L> (&self, stream2: &Arranged<G,K,V2,R2,T2>, result: L) -> Collection<G,I::Item,<R as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<K, V2, G::Timestamp, R2>+Clone+'static,
        T2::Batch: BatchReader<K, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R: Mul<R2>,
        <R as Mul<R2>>::Output: Diff,
        I: IntoIterator,
        I::Item: Data,
        L: Fn(&K,&V,&V2)->I+'static,
        ;
}
/// Convenience impl: a collection of `(key, val)` pairs can be joined against an
/// existing arrangement by first arranging itself by key.
impl<G, K, V, R> JoinCore<G, K, V, R> for Collection<G, (K, V), R>
where
    G: Scope,
    K: Data+Hashable,
    V: Data,
    R: Diff,
    G::Timestamp: Lattice+Ord,
{
    fn join_core<V2,T2,R2,I,L> (&self, stream2: &Arranged<G,K,V2,R2,T2>, result: L) -> Collection<G,I::Item,<R as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<K, V2, G::Timestamp, R2>+Clone+'static,
        T2::Batch: BatchReader<K, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R: Mul<R2>,
        <R as Mul<R2>>::Output: Diff,
        I: IntoIterator,
        I::Item: Data,
        L: Fn(&K,&V,&V2)->I+'static {
        // Arrange `self` once, then defer to the arrangement-vs-arrangement join.
        let arranged = self.arrange_by_key();
        arranged.join_core(stream2, result)
    }
}
/// The core join dataflow operator, matching two arrangements key by key.
impl<G, K, V, R1, T1> JoinCore<G, K, V, R1> for Arranged<G,K,V,R1,T1>
where
    K: Ord,
    G: Scope,
    G::Timestamp: Lattice+Ord+Debug,
    K: Debug+Eq+'static,
    V: Ord+Clone+Debug+'static,
    R1: Diff,
    T1: TraceReader<K,V,G::Timestamp, R1>+Clone+'static,
    T1::Batch: BatchReader<K,V,G::Timestamp,R1>+'static+Debug {
    fn join_core<V2,T2,R2,I,L>(&self, other: &Arranged<G,K,V2,R2,T2>, result: L) -> Collection<G,I::Item,<R1 as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<K,V2,G::Timestamp,R2>+Clone+'static,
        T2::Batch: BatchReader<K, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R1: Mul<R2>,
        <R1 as Mul<R2>>::Output: Diff,
        I: IntoIterator,
        I::Item: Data,
        L: Fn(&K,&V,&V2)->I+'static {
        // handles to shared trace data structures. Held as `Option` so each can be
        // released (dropped) once its input frontier empties.
        let mut trace1 = Some(self.trace.clone());
        let mut trace2 = Some(other.trace.clone());
        // acknowledged frontier for each input: upper bound of the batches accepted so far.
        let mut acknowledged1 = vec![G::Timestamp::minimum()];
        let mut acknowledged2 = vec![G::Timestamp::minimum()];
        // deferred work of batches from each input.
        let mut todo1 = Vec::new();
        let mut todo2 = Vec::new();
        self.stream.binary_notify(&other.stream, Pipeline, Pipeline, "Join", vec![], move |input1, input2, output, notificator| {
            // The join computation repeatedly accepts batches of updates from each of its inputs.
            //
            // For each accepted batch, it prepares a work-item to join the batch against previously "accepted"
            // updates from its other input. It is important to track which updates have been accepted, through
            // a combination of the input's frontier and the most recently received batch's upper bound, because
            // we use a shared trace and there may be updates present that are in advance of this accepted bound.
            // drain input 1, prepare work.
            input1.for_each(|capability, data| {
                if let Some(ref mut trace2) = trace2 {
                    for batch1 in data.drain(..) {
                        let (trace2_cursor, trace2_storage) = trace2.cursor_through(&acknowledged2[..]).unwrap();
                        let (batch1_cursor, batch1_storage) = batch1.item.cursor();
                        // NOTE: the multiplication closure swaps its arguments so that the
                        // product is formed as `R1 * R2` even though `trace2` is the first cursor.
                        todo1.push(Deferred::new(trace2_cursor, trace2_storage, batch1_cursor, batch1_storage, capability.clone(), |r2,r1| *r1 * *r2));
                        debug_assert!(batch1.item.description().lower() == &acknowledged1[..]);
                        acknowledged1 = batch1.item.description().upper().to_vec();
                    }
                }
            });
            // drain input 2, prepare work.
            input2.for_each(|capability, data| {
                if let Some(ref mut trace1) = trace1 {
                    for batch2 in data.drain(..) {
                        let (trace1_cursor, trace1_storage) = trace1.cursor_through(&acknowledged1[..]).unwrap();
                        let (batch2_cursor, batch2_storage) = batch2.item.cursor();
                        todo2.push(Deferred::new(trace1_cursor, trace1_storage, batch2_cursor, batch2_storage, capability.clone(), |r1,r2| *r1 * *r2));
                        debug_assert!(batch2.item.description().lower() == &acknowledged2[..]);
                        acknowledged2 = batch2.item.description().upper().to_vec();
                    }
                }
            });
            // An arbitrary number, whose value guides the "responsiveness" of `join`; the operator
            // yields after producing this many records, to allow downstream operators to work and
            // move the produced records around.
            let mut fuel = 1_000_000;
            // perform some amount of outstanding work. The value closure re-swaps the
            // value arguments, since `todo1` pairs (trace2 values, batch1 values).
            while todo1.len() > 0 && fuel > 0 {
                todo1[0].work(output, &|k,v2,v1| result(k,v1,v2), &mut fuel);
                if !todo1[0].work_remains() { todo1.remove(0); }
            }
            // perform some amount of outstanding work.
            while todo2.len() > 0 && fuel > 0 {
                todo2[0].work(output, &|k,v1,v2| result(k,v1,v2), &mut fuel);
                if !todo2[0].work_remains() { todo2.remove(0); }
            }
            // shut down or advance trace2. if the frontier is empty we can shut it down,
            // and otherwise we can advance the trace by the acknowledged elements of the other input,
            // as we may still use them as thresholds (ie we must preserve `le` wrt `acknowledged`).
            // NOTE: We release capabilities here to allow light work to complete, which may result in
            // unique ownership which would enable `advance_mut`.
            if trace2.is_some() && notificator.frontier(0).len() == 0 { trace2 = None; }
            if let Some(ref mut trace2) = trace2 {
                trace2.advance_by(notificator.frontier(0));
                trace2.distinguish_since(&acknowledged2[..]);
            }
            // shut down or advance trace1.
            if trace1.is_some() && notificator.frontier(1).len() == 0 { trace1 = None; }
            if let Some(ref mut trace1) = trace1 {
                trace1.advance_by(notificator.frontier(1));
                trace1.distinguish_since(&acknowledged1[..]);
            }
        })
        .as_collection()
    }
}
/// Deferred join computation.
///
/// The structure wraps cursors which allow us to play out join computation at whatever rate we like.
/// This allows us to avoid producing and buffering massive amounts of data, without giving the timely
/// dataflow system a chance to run operators that can consume and aggregate the data.
struct Deferred<K, V1, V2, T, R1, R2, R3, C1, C2, M, D>
where
    V1: Ord+Clone,
    V2: Ord+Clone,
    T: Timestamp+Lattice+Ord+Debug,
    R1: Diff,
    R2: Diff,
    C1: Cursor<K, V1, T, R1>,
    C2: Cursor<K, V2, T, R2>,
    M: Fn(&R1,&R2)->R3,
    D: Ord+Clone+Data,
{
    // Marks ownership of the type parameters that do not appear in field types directly.
    phant: ::std::marker::PhantomData<(K, V1, V2, R1, R2)>,
    // Cursor over previously accepted updates from the other input, plus its backing storage.
    trace: C1,
    trace_storage: C1::Storage,
    // Cursor over the newly received batch being joined, plus its backing storage.
    batch: C2,
    batch_storage: C2::Storage,
    // Capability at which produced output records are sent.
    capability: Capability<T>,
    // Combines a pair of input differences into an output difference.
    mult: M,
    // Set once either cursor is exhausted; no further work remains.
    done: bool,
    // Scratch buffer for per-key results, retained across calls to reuse its allocation.
    temp: Vec<((D, T), R3)>,
}
impl<K, V1, V2, T, R1, R2, R3, C1, C2, M, D> Deferred<K, V1, V2, T, R1, R2, R3, C1, C2, M, D>
where
    K: Ord+Debug+Eq,
    V1: Ord+Clone+Debug,
    V2: Ord+Clone+Debug,
    T: Timestamp+Lattice+Ord+Debug,
    R1: Diff,
    R2: Diff,
    R3: Diff,
    C1: Cursor<K, V1, T, R1>,
    C2: Cursor<K, V2, T, R2>,
    M: Fn(&R1,&R2)->R3,
    D: Ord+Clone+Data,
{
    /// Creates a new deferred work item from trace and batch cursors (with their storage),
    /// an output capability, and a difference-combining function.
    fn new(trace: C1, trace_storage: C1::Storage, batch: C2, batch_storage: C2::Storage, capability: Capability<T>, mult: M) -> Self {
        Deferred {
            phant: ::std::marker::PhantomData,
            trace: trace,
            trace_storage: trace_storage,
            batch: batch,
            batch_storage: batch_storage,
            capability: capability,
            mult: mult,
            done: false,
            temp: Vec::new(),
        }
    }
    /// True as long as neither cursor has been exhausted.
    fn work_remains(&self) -> bool {
        !self.done
    }
    /// Processes matching keys until roughly `fuel` output tuples are produced, or the work is exhausted.
    ///
    /// Decrements `*fuel` by the number of tuples produced (saturating at zero), so the
    /// caller can bound the total work performed across several deferred items.
    #[inline(never)]
    fn work<L, I>(&mut self, output: &mut OutputHandle<T, (D, T, R3), Tee<T, (D, T, R3)>>, logic: &L, fuel: &mut usize)
    where I: IntoIterator<Item=D>, L: Fn(&K, &V1, &V2)->I {
        // Times from the trace may be advanced to (joined with) this capability's time.
        let meet = self.capability.time();
        let mut effort = 0;
        let mut session = output.session(&self.capability);
        let trace_storage = &self.trace_storage;
        let batch_storage = &self.batch_storage;
        let trace = &mut self.trace;
        let batch = &mut self.batch;
        let mult = &self.mult;
        let temp = &mut self.temp;
        let mut thinker = JoinThinker::new();
        // Classic merge-join over the two key-sorted cursors.
        while batch.key_valid(batch_storage) && trace.key_valid(trace_storage) && effort < *fuel {
            match trace.key(trace_storage).cmp(batch.key(batch_storage)) {
                Ordering::Less => trace.seek_key(trace_storage, batch.key(batch_storage)),
                Ordering::Greater => batch.seek_key(batch_storage, trace.key(trace_storage)),
                Ordering::Equal => {
                    thinker.history1.edits.load(trace, trace_storage, |time| time.join(&meet));
                    thinker.history2.edits.load(batch, batch_storage, |time| time.clone());
                    assert_eq!(temp.len(), 0);
                    // populate `temp` with the results in the best way we know how.
                    thinker.think(|v1,v2,t,r1,r2|
                        for result in logic(batch.key(batch_storage), v1, v2) {
                            temp.push(((result, t.clone()), mult(r1, r2)));
                        }
                    );
                    // TODO: This consolidation is optional, and it may not be very
                    //       helpful. We might try harder to understand whether we
                    //       should do this work here, or downstream at consumers.
                    // TODO: Perhaps `thinker` should have the buffer, do smarter
                    //       consolidation, and then deposit results in `session`.
                    consolidate(temp, 0);
                    effort += temp.len();
                    for ((d, t), r) in temp.drain(..) {
                        session.give((d, t, r));
                    }
                    batch.step_key(batch_storage);
                    trace.step_key(trace_storage);
                    thinker.history1.clear();
                    thinker.history2.clear();
                }
            }
        }
        // If either cursor is exhausted no further matches can exist; otherwise we
        // merely ran out of fuel and will be called again.
        self.done = !batch.key_valid(batch_storage) || !trace.key_valid(trace_storage);
        if effort > *fuel { *fuel = 0; }
        else { *fuel -= effort; }
    }
}
/// Pairs up the edit histories of two values for a single key, producing the
/// cross-product of their `(value, time, diff)` updates with joined times.
struct JoinThinker<'a, V1: Ord+Clone+'a, V2: Ord+Clone+'a, T: Lattice+Ord+Clone, R1: Diff, R2: Diff> {
    // Edit history of values from the first (trace) input.
    pub history1: ValueHistory2<'a, V1, T, R1>,
    // Edit history of values from the second (batch) input.
    pub history2: ValueHistory2<'a, V2, T, R2>,
}
impl<'a, V1: Ord+Clone, V2: Ord+Clone, T: Lattice+Ord+Clone, R1: Diff, R2: Diff> JoinThinker<'a, V1, V2, T, R1, R2>
where V1: Debug, V2: Debug, T: Debug
{
    /// Creates a thinker with two empty edit histories.
    fn new() -> Self {
        JoinThinker {
            history1: ValueHistory2::new(),
            history2: ValueHistory2::new(),
        }
    }

    /// Calls `results` once for every pair of edits, one from each history,
    /// with the pair's time being the join (least upper bound) of the two
    /// edit times.
    fn think<F: FnMut(&V1,&V2,T,&R1,&R2)>(&mut self, mut results: F) {

        // for reasonably sized edits, do the dead-simple thing.
        if self.history1.edits.len() < 10 || self.history2.edits.len() < 10 {
            self.history1.edits.map(|v1, t1, d1| {
                self.history2.edits.map(|v2, t2, d2| {
                    results(v1, v2, t1.join(t2), &d1, &d2);
                })
            })
        }
        else {

            // Larger inputs: sort each history so edits can be replayed in
            // time order, buffering what has been seen of the other side.
            self.history1.order();
            self.history2.order();

            // TODO: It seems like there is probably a good deal of redundant `advance_buffer_by`
            //       in here. If a time is ever repeated, for example, the call will be identical
            //       and accomplish nothing. If only a single record has been added, it may not
            //       be worth the time to collapse (advance, re-sort) the data when a linear scan
            //       is sufficient.

            // Replay the earlier of the two histories' next edits against the
            // buffered edits of the other side.
            while !self.history1.is_done() && !self.history2.is_done() {

                if self.history1.time().unwrap().cmp(&self.history2.time().unwrap()) == ::std::cmp::Ordering::Less {
                    self.history2.advance_buffer_by(self.history1.meet().unwrap());
                    for &((ref val2, ref time2), ref diff2) in &self.history2.buffer {
                        let (val1, time1, ref diff1) = self.history1.edit().unwrap();
                        results(val1, val2, time1.join(time2), diff1, diff2);
                    }
                    self.history1.step();
                }
                else {
                    self.history1.advance_buffer_by(self.history2.meet().unwrap());
                    for &((ref val1, ref time1), ref diff1) in &self.history1.buffer {
                        let (val2, time2, ref diff2) = self.history2.edit().unwrap();
                        results(val1, val2, time1.join(time2), diff1, diff2);
                    }
                    self.history2.step();
                }
            }

            // Drain any remaining `history1` edits against the full buffer of `history2`.
            while !self.history1.is_done() {
                self.history2.advance_buffer_by(self.history1.meet().unwrap());
                for &((ref val2, ref time2), ref diff2) in &self.history2.buffer {
                    let (val1, time1, ref diff1) = self.history1.edit().unwrap();
                    results(val1, val2, time1.join(time2), diff1, diff2);
                }
                self.history1.step();
            }

            // Symmetrically, drain any remaining `history2` edits.
            while !self.history2.is_done() {
                self.history1.advance_buffer_by(self.history2.meet().unwrap());
                for &((ref val1, ref time1), ref diff1) in &self.history1.buffer {
                    let (val2, time2, ref diff2) = self.history2.edit().unwrap();
                    results(val1, val2, time1.join(time2), diff1, diff2);
                }
                self.history2.step();
            }
        }
    }
}
Remove the `Debug` trait requirement.
//! Match pairs of records based on a key.
//!
//! The various `join` implementations require that the units of each collection can be multiplied, and that
//! the multiplication distributes over addition. That is, we will repeatedly evaluate (a + b) * c as (a * c)
//! + (b * c), and if this is not equal to the former term, little is known about the actual output.
use std::fmt::Debug;
use std::ops::Mul;
use std::cmp::Ordering;
use timely::progress::Timestamp;
use timely::dataflow::Scope;
use timely::dataflow::operators::generic::{Binary, OutputHandle};
use timely::dataflow::channels::pact::Pipeline;
use timely::dataflow::operators::Capability;
use timely::dataflow::channels::pushers::tee::Tee;
use hashable::Hashable;
use ::{Data, Diff, Collection, AsCollection};
use lattice::Lattice;
use operators::arrange::{Arranged, ArrangeByKey, ArrangeBySelf};
use trace::{BatchReader, Cursor, consolidate};
use operators::ValueHistory2;
use trace::TraceReader;
/// Join implementations for `(key,val)` data.
pub trait Join<G: Scope, K: Data, V: Data, R: Diff> {

    /// Matches pairs `(key,val1)` and `(key,val2)` based on `key` and then applies a function.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Join;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let x = scope.new_collection_from(vec![(0, 1), (1, 3)]).1;
    ///         let y = scope.new_collection_from(vec![(0, 'a'), (1, 'b')]).1;
    ///         let z = scope.new_collection_from(vec![(0, 1, 'a'), (1, 3, 'b')]).1;
    ///
    ///         x.join(&y)
    ///          .assert_eq(&z);
    ///     });
    /// }
    /// ```
    // Provided method: delegates to `join_map`, cloning the key and both values.
    fn join<V2: Data, R2: Diff>(&self, other: &Collection<G, (K,V2), R2>) -> Collection<G, (K,V,V2), <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff
    {
        self.join_map(other, |k,v,v2| (k.clone(),v.clone(),v2.clone()))
    }

    /// Matches pairs `(key,val1)` and `(key,val2)` based on `key` and then applies a function.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Join;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let x = scope.new_collection_from(vec![(0, 1), (1, 3)]).1;
    ///         let y = scope.new_collection_from(vec![(0, 'a'), (1, 'b')]).1;
    ///         let z = scope.new_collection_from(vec![(1, 'a'), (3, 'b')]).1;
    ///
    ///         x.join_map(&y, |_key, &a, &b| (a,b))
    ///          .assert_eq(&z);
    ///     });
    /// }
    /// ```
    fn join_map<V2, R2: Diff, D, L>(&self, other: &Collection<G, (K,V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where V2: Data, R: Mul<R2>, <R as Mul<R2>>::Output: Diff, D: Data, L: Fn(&K, &V, &V2)->D+'static;

    /// Matches pairs `(key, val)` and `key` based on `key`, producing the former with frequencies multiplied.
    ///
    /// When the second collection contains frequencies that are either zero or one this is the more traditional
    /// relational semijoin. When the second collection may contain multiplicities, this operation may scale up
    /// the counts of the records in the first input.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Join;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let x = scope.new_collection_from(vec![(0, 1), (1, 3)]).1;
    ///         let y = scope.new_collection_from(vec![0, 2]).1;
    ///         let z = scope.new_collection_from(vec![(0, 1)]).1;
    ///
    ///         x.semijoin(&y)
    ///          .assert_eq(&z);
    ///     });
    /// }
    /// ```
    fn semijoin<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where R2: Diff, R: Mul<R2>, <R as Mul<R2>>::Output: Diff;

    /// Matches pairs `(key, val)` and `key` based on `key`, discarding values
    /// in the first collection if their key is present in the second.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::Join;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let x = scope.new_collection_from(vec![(0, 1), (1, 3)]).1;
    ///         let y = scope.new_collection_from(vec![0, 2]).1;
    ///         let z = scope.new_collection_from(vec![(1, 3)]).1;
    ///
    ///         x.antijoin(&y)
    ///          .assert_eq(&z);
    ///     });
    /// }
    /// ```
    fn antijoin<R2>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where R2: Diff, R: Mul<R2, Output = R>;
}
impl<G, K, V, R> Join<G, K, V, R> for Collection<G, (K, V), R>
where
    G: Scope,
    K: Data+Hashable,
    V: Data,
    R: Diff,
    G::Timestamp: Lattice+Ord,
{
    /// Arranges both collections by key and defers to the arranged join.
    fn join_map<V2: Data, R2: Diff, D: Data, L>(&self, other: &Collection<G, (K, V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff, L: Fn(&K, &V, &V2)->D+'static {
        self.arrange_by_key()
            .join_core(&other.arrange_by_key(), move |key, val1, val2| Some(logic(key, val1, val2)))
    }

    /// Arranges `self` by key and `other` by its elements, retaining matched pairs.
    fn semijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff {
        self.arrange_by_key()
            .join_core(&other.arrange_by_self(), |key, val, _| Some((key.clone(), val.clone())))
    }

    /// Subtracts the semijoin with `other` from `self`.
    fn antijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where R: Mul<R2, Output=R> {
        let matching = self.semijoin(other);
        self.concat(&matching.negate())
    }
}
impl<G, K, V, R, T> Join<G, K, V, R> for Arranged<G,K,V,R,T>
where
    G: Scope,
    G::Timestamp: Lattice+Ord+Debug,
    K: Data+Hashable,
    V: Data,
    R: Diff,
    T: TraceReader<K,V,G::Timestamp,R>+Clone+'static,
    T::Batch: BatchReader<K,V,G::Timestamp,R>+'static+Debug {

    /// Only `other` needs arranging; `self` is already an arrangement.
    fn join_map<V2: Data, R2: Diff, D: Data, L>(&self, other: &Collection<G, (K, V2), R2>, logic: L) -> Collection<G, D, <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff, L: Fn(&K, &V, &V2)->D+'static {
        self.join_core(&other.arrange_by_key(), move |key, val1, val2| Some(logic(key, val1, val2)))
    }

    /// Matches arranged `(key, val)` records against keys present in `other`.
    fn semijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), <R as Mul<R2>>::Output>
    where R: Mul<R2>, <R as Mul<R2>>::Output: Diff {
        self.join_core(&other.arrange_by_self(), |key, val, _| Some((key.clone(), val.clone())))
    }

    /// Flattens the arrangement back to a collection and removes the semijoin matches.
    fn antijoin<R2: Diff>(&self, other: &Collection<G, K, R2>) -> Collection<G, (K, V), R>
    where R: Mul<R2, Output=R> {
        let matching = self.semijoin(other);
        self.as_collection(|key, val| (key.clone(), val.clone()))
            .concat(&matching.negate())
    }
}
/// Matches the elements of two arranged traces.
///
/// This method is used by the various `join` implementations, but it can also be used
/// directly in the event that one has a handle to an `Arranged<G,T>`, perhaps because
/// the arrangement is available for re-use, or from the output of a `group` operator.
pub trait JoinCore<G: Scope, K: 'static, V: 'static, R: Diff> where G::Timestamp: Lattice+Ord {

    /// Joins two arranged collections with the same key type.
    ///
    /// Each matching pair of records `(key, val1)` and `(key, val2)` are subjected to the `result` function,
    /// which produces something implementing `IntoIterator`, whose items become the records of the output
    /// collection (with frequencies multiplied).
    ///
    /// This trait is implemented for arrangements (`Arranged<G, T>`) rather than collections. The `Join` trait
    /// contains the implementations for collections.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate timely;
    /// extern crate differential_dataflow;
    ///
    /// use differential_dataflow::input::Input;
    /// use differential_dataflow::operators::arrange::Arrange;
    /// use differential_dataflow::operators::join::JoinCore;
    /// use differential_dataflow::trace::Trace;
    /// use differential_dataflow::trace::implementations::ord::OrdValSpine;
    /// use differential_dataflow::hashable::OrdWrapper;
    ///
    /// fn main() {
    ///     ::timely::example(|scope| {
    ///
    ///         let x = scope.new_collection_from(vec![(0u32, 1), (1, 3)]).1
    ///                      .map(|(x,y)| (OrdWrapper { item: x }, y))
    ///                      .arrange(OrdValSpine::new());
    ///         let y = scope.new_collection_from(vec![(0, 'a'), (1, 'b')]).1
    ///                      .map(|(x,y)| (OrdWrapper { item: x }, y))
    ///                      .arrange(OrdValSpine::new());
    ///
    ///         let z = scope.new_collection_from(vec![(1, 'a'), (3, 'b')]).1;
    ///
    ///         x.join_core(&y, |_key, &a, &b| Some((a, b)))
    ///          .assert_eq(&z);
    ///     });
    /// }
    /// ```
    fn join_core<V2,T2,R2,I,L> (&self, stream2: &Arranged<G,K,V2,R2,T2>, result: L) -> Collection<G,I::Item,<R as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<K, V2, G::Timestamp, R2>+Clone+'static,
        T2::Batch: BatchReader<K, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R: Mul<R2>,
        <R as Mul<R2>>::Output: Diff,
        I: IntoIterator,
        I::Item: Data,
        L: Fn(&K,&V,&V2)->I+'static,
    ;
}
impl<G, K, V, R> JoinCore<G, K, V, R> for Collection<G, (K, V), R>
where
    G: Scope,
    K: Data+Hashable,
    V: Data,
    R: Diff,
    G::Timestamp: Lattice+Ord,
{
    /// Arranges this collection by key, then joins the arrangement against `stream2`.
    fn join_core<V2,T2,R2,I,L> (&self, stream2: &Arranged<G,K,V2,R2,T2>, result: L) -> Collection<G,I::Item,<R as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<K, V2, G::Timestamp, R2>+Clone+'static,
        T2::Batch: BatchReader<K, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R: Mul<R2>,
        <R as Mul<R2>>::Output: Diff,
        I: IntoIterator,
        I::Item: Data,
        L: Fn(&K,&V,&V2)->I+'static {
        let arranged = self.arrange_by_key();
        arranged.join_core(stream2, result)
    }
}
impl<G, K, V, R1, T1> JoinCore<G, K, V, R1> for Arranged<G,K,V,R1,T1>
where
    K: Ord,
    G: Scope,
    G::Timestamp: Lattice+Ord+Debug,
    K: Debug+Eq+'static,
    V: Ord+Clone+Debug+'static,
    R1: Diff,
    T1: TraceReader<K,V,G::Timestamp, R1>+Clone+'static,
    T1::Batch: BatchReader<K,V,G::Timestamp,R1>+'static,
{
    /// Builds a binary dataflow operator that joins each newly arrived batch
    /// on either input against the other input's accepted history, yielding
    /// output incrementally under a per-invocation fuel budget.
    fn join_core<V2,T2,R2,I,L>(&self, other: &Arranged<G,K,V2,R2,T2>, result: L) -> Collection<G,I::Item,<R1 as Mul<R2>>::Output>
    where
        V2: Ord+Clone+Debug+'static,
        T2: TraceReader<K,V2,G::Timestamp,R2>+Clone+'static,
        T2::Batch: BatchReader<K, V2, G::Timestamp, R2>+'static,
        R2: Diff,
        R1: Mul<R2>,
        <R1 as Mul<R2>>::Output: Diff,
        I: IntoIterator,
        I::Item: Data,
        L: Fn(&K,&V,&V2)->I+'static {

        // handles to shared trace data structures.
        // Wrapped in `Option` so each trace handle can be dropped once the
        // corresponding input's frontier has emptied.
        let mut trace1 = Some(self.trace.clone());
        let mut trace2 = Some(other.trace.clone());

        // acknowledged frontier for each input.
        let mut acknowledged1 = vec![G::Timestamp::minimum()];
        let mut acknowledged2 = vec![G::Timestamp::minimum()];

        // deferred work of batches from each input.
        let mut todo1 = Vec::new();
        let mut todo2 = Vec::new();

        self.stream.binary_notify(&other.stream, Pipeline, Pipeline, "Join", vec![], move |input1, input2, output, notificator| {

            // The join computation repeatedly accepts batches of updates from each of its inputs.
            //
            // For each accepted batch, it prepares a work-item to join the batch against previously "accepted"
            // updates from its other input. It is important to track which updates have been accepted, through
            // a combination of the input's frontier and the most recently received batch's upper bound, because
            // we use a shared trace and there may be updates present that are in advance of this accepted bound.

            // drain input 1, prepare work.
            // NOTE: the multiplication closure takes (trace2 diff, batch1 diff)
            // but multiplies `r1 * r2`, keeping the output diff type `R1 * R2`.
            input1.for_each(|capability, data| {
                if let Some(ref mut trace2) = trace2 {
                    for batch1 in data.drain(..) {
                        let (trace2_cursor, trace2_storage) = trace2.cursor_through(&acknowledged2[..]).unwrap();
                        let (batch1_cursor, batch1_storage) = batch1.item.cursor();
                        todo1.push(Deferred::new(trace2_cursor, trace2_storage, batch1_cursor, batch1_storage, capability.clone(), |r2,r1| *r1 * *r2));
                        debug_assert!(batch1.item.description().lower() == &acknowledged1[..]);
                        acknowledged1 = batch1.item.description().upper().to_vec();
                    }
                }
            });

            // drain input 2, prepare work.
            input2.for_each(|capability, data| {
                if let Some(ref mut trace1) = trace1 {
                    for batch2 in data.drain(..) {
                        let (trace1_cursor, trace1_storage) = trace1.cursor_through(&acknowledged1[..]).unwrap();
                        let (batch2_cursor, batch2_storage) = batch2.item.cursor();
                        todo2.push(Deferred::new(trace1_cursor, trace1_storage, batch2_cursor, batch2_storage, capability.clone(), |r1,r2| *r1 * *r2));
                        debug_assert!(batch2.item.description().lower() == &acknowledged2[..]);
                        acknowledged2 = batch2.item.description().upper().to_vec();
                    }
                }
            });

            // An arbitrary number, whose value guides the "responsiveness" of `join`; the operator
            // yields after producing this many records, to allow downstream operators to work and
            // move the produced records around.
            let mut fuel = 1_000_000;

            // perform some amount of outstanding work.
            // NOTE: for `todo1` the trace side holds `V2`, so the value
            // arguments are swapped back before calling `result`.
            while todo1.len() > 0 && fuel > 0 {
                todo1[0].work(output, &|k,v2,v1| result(k,v1,v2), &mut fuel);
                if !todo1[0].work_remains() { todo1.remove(0); }
            }

            // perform some amount of outstanding work.
            while todo2.len() > 0 && fuel > 0 {
                todo2[0].work(output, &|k,v1,v2| result(k,v1,v2), &mut fuel);
                if !todo2[0].work_remains() { todo2.remove(0); }
            }

            // shut down or advance trace2. if the frontier is empty we can shut it down,
            // and otherwise we can advance the trace by the acknowledged elements of the other input,
            // as we may still use them as thresholds (ie we must preserve `le` wrt `acknowledged`).
            // NOTE: We release capabilities here to allow light work to complete, which may result in
            //       unique ownership which would enable `advance_mut`.
            if trace2.is_some() && notificator.frontier(0).len() == 0 { trace2 = None; }
            if let Some(ref mut trace2) = trace2 {
                trace2.advance_by(notificator.frontier(0));
                trace2.distinguish_since(&acknowledged2[..]);
            }

            // shut down or advance trace1.
            if trace1.is_some() && notificator.frontier(1).len() == 0 { trace1 = None; }
            if let Some(ref mut trace1) = trace1 {
                trace1.advance_by(notificator.frontier(1));
                trace1.distinguish_since(&acknowledged1[..]);
            }
        })
        .as_collection()
    }
}
/// Deferred join computation.
///
/// The structure wraps cursors which allow us to play out join computation at whatever rate we like.
/// This allows us to avoid producing and buffering massive amounts of data, without giving the timely
/// dataflow system a chance to run operators that can consume and aggregate the data.
struct Deferred<K, V1, V2, T, R1, R2, R3, C1, C2, M, D>
where
    V1: Ord+Clone,
    V2: Ord+Clone,
    T: Timestamp+Lattice+Ord+Debug,
    R1: Diff,
    R2: Diff,
    C1: Cursor<K, V1, T, R1>,
    C2: Cursor<K, V2, T, R2>,
    M: Fn(&R1,&R2)->R3,
    D: Ord+Clone+Data,
{
    // Marks ownership of otherwise-unused type parameters.
    phant: ::std::marker::PhantomData<(K, V1, V2, R1, R2)>,
    // Cursor (and its storage) over the previously accepted history.
    trace: C1,
    trace_storage: C1::Storage,
    // Cursor (and its storage) over the newly received batch.
    batch: C2,
    batch_storage: C2::Storage,
    // Capability at which all produced output is sent.
    capability: Capability<T>,
    // Multiplies a trace diff and a batch diff into an output diff.
    mult: M,
    // Set once either cursor is exhausted; no further work remains.
    done: bool,
    // Reusable buffer of pending ((data, time), diff) outputs.
    temp: Vec<((D, T), R3)>,

    // thinker: JoinThinker<V1, V2, T, R1, R2>,
}
impl<K, V1, V2, T, R1, R2, R3, C1, C2, M, D> Deferred<K, V1, V2, T, R1, R2, R3, C1, C2, M, D>
where
    K: Ord+Debug+Eq,
    V1: Ord+Clone+Debug,
    V2: Ord+Clone+Debug,
    T: Timestamp+Lattice+Ord+Debug,
    R1: Diff,
    R2: Diff,
    R3: Diff,
    C1: Cursor<K, V1, T, R1>,
    C2: Cursor<K, V2, T, R2>,
    M: Fn(&R1,&R2)->R3,
    D: Ord+Clone+Data,
{
    /// Wraps the two cursors and their storage into a not-yet-started work item.
    fn new(trace: C1, trace_storage: C1::Storage, batch: C2, batch_storage: C2::Storage, capability: Capability<T>, mult: M) -> Self {
        Deferred {
            phant: ::std::marker::PhantomData,
            trace: trace,
            trace_storage: trace_storage,
            batch: batch,
            batch_storage: batch_storage,
            capability: capability,
            mult: mult,
            done: false,
            temp: Vec::new(),
            // thinker: JoinThinker::new(),
        }
    }

    /// True while some keys remain unprocessed.
    fn work_remains(&self) -> bool {
        !self.done
    }

    /// Process keys until at least `fuel` output tuples produced, or the work is exhausted.
    #[inline(never)]
    fn work<L, I>(&mut self, output: &mut OutputHandle<T, (D, T, R3), Tee<T, (D, T, R3)>>, logic: &L, fuel: &mut usize)
    where I: IntoIterator<Item=D>, L: Fn(&K, &V1, &V2)->I {

        // Trace edit times are joined with the capability's time when loaded,
        // as finer distinctions are not observable downstream.
        let meet = self.capability.time();

        let mut effort = 0;
        let mut session = output.session(&self.capability);

        // Local bindings split the borrow of `self` across its fields.
        let trace_storage = &self.trace_storage;
        let batch_storage = &self.batch_storage;

        let trace = &mut self.trace;
        let batch = &mut self.batch;

        let mult = &self.mult;
        let temp = &mut self.temp;

        // let thinker = &mut self.thinker;
        let mut thinker = JoinThinker::new();

        // March both cursors forward in key order, joining edit histories at
        // keys both sides share; stop once the fuel budget is consumed.
        while batch.key_valid(batch_storage) && trace.key_valid(trace_storage) && effort < *fuel {
            match trace.key(trace_storage).cmp(batch.key(batch_storage)) {
                Ordering::Less => trace.seek_key(trace_storage, batch.key(batch_storage)),
                Ordering::Greater => batch.seek_key(batch_storage, trace.key(trace_storage)),
                Ordering::Equal => {

                    thinker.history1.edits.load(trace, trace_storage, |time| time.join(&meet));
                    thinker.history2.edits.load(batch, batch_storage, |time| time.clone());

                    assert_eq!(temp.len(), 0);

                    // populate `temp` with the results in the best way we know how.
                    thinker.think(|v1,v2,t,r1,r2|
                        for result in logic(batch.key(batch_storage), v1, v2) {
                            temp.push(((result, t.clone()), mult(r1, r2)));
                        }
                    );

                    // TODO: This consolidation is optional, and it may not be very
                    //       helpful. We might try harder to understand whether we
                    //       should do this work here, or downstream at consumers.
                    // TODO: Perhaps `thinker` should have the buffer, do smarter
                    //       consolidation, and then deposit results in `session`.
                    consolidate(temp, 0);

                    effort += temp.len();
                    for ((d, t), r) in temp.drain(..) {
                        session.give((d, t, r));
                    }

                    batch.step_key(batch_storage);
                    trace.step_key(trace_storage);

                    thinker.history1.clear();
                    thinker.history2.clear();
                }
            }
        }

        // Finished once either cursor is exhausted: remaining keys cannot match.
        self.done = !batch.key_valid(batch_storage) || !trace.key_valid(trace_storage);

        // Charge the produced records against the caller's fuel budget.
        if effort > *fuel { *fuel = 0; }
        else { *fuel -= effort; }
    }
}
/// Scratch state for cross-multiplying the edit histories of two cursors
/// positioned at the same key.
///
/// `history1` receives `(value, time, diff)` edits loaded from the trace side
/// and `history2` those loaded from the batch side; `think` pairs them up.
struct JoinThinker<'a, V1: Ord+Clone+'a, V2: Ord+Clone+'a, T: Lattice+Ord+Clone, R1: Diff, R2: Diff> {
    pub history1: ValueHistory2<'a, V1, T, R1>,
    pub history2: ValueHistory2<'a, V2, T, R2>,
}
impl<'a, V1: Ord+Clone, V2: Ord+Clone, T: Lattice+Ord+Clone, R1: Diff, R2: Diff> JoinThinker<'a, V1, V2, T, R1, R2>
where V1: Debug, V2: Debug, T: Debug
{
    /// Creates a thinker with two empty edit histories.
    fn new() -> Self {
        JoinThinker {
            history1: ValueHistory2::new(),
            history2: ValueHistory2::new(),
        }
    }

    /// Calls `results` once for every pair of edits, one from each history,
    /// with the pair's time being the join (least upper bound) of the two
    /// edit times.
    fn think<F: FnMut(&V1,&V2,T,&R1,&R2)>(&mut self, mut results: F) {

        // for reasonably sized edits, do the dead-simple thing.
        if self.history1.edits.len() < 10 || self.history2.edits.len() < 10 {
            self.history1.edits.map(|v1, t1, d1| {
                self.history2.edits.map(|v2, t2, d2| {
                    results(v1, v2, t1.join(t2), &d1, &d2);
                })
            })
        }
        else {

            // Larger inputs: sort each history so edits can be replayed in
            // time order, buffering what has been seen of the other side.
            self.history1.order();
            self.history2.order();

            // TODO: It seems like there is probably a good deal of redundant `advance_buffer_by`
            //       in here. If a time is ever repeated, for example, the call will be identical
            //       and accomplish nothing. If only a single record has been added, it may not
            //       be worth the time to collapse (advance, re-sort) the data when a linear scan
            //       is sufficient.

            // Replay the earlier of the two histories' next edits against the
            // buffered edits of the other side.
            while !self.history1.is_done() && !self.history2.is_done() {

                if self.history1.time().unwrap().cmp(&self.history2.time().unwrap()) == ::std::cmp::Ordering::Less {
                    self.history2.advance_buffer_by(self.history1.meet().unwrap());
                    for &((ref val2, ref time2), ref diff2) in &self.history2.buffer {
                        let (val1, time1, ref diff1) = self.history1.edit().unwrap();
                        results(val1, val2, time1.join(time2), diff1, diff2);
                    }
                    self.history1.step();
                }
                else {
                    self.history1.advance_buffer_by(self.history2.meet().unwrap());
                    for &((ref val1, ref time1), ref diff1) in &self.history1.buffer {
                        let (val2, time2, ref diff2) = self.history2.edit().unwrap();
                        results(val1, val2, time1.join(time2), diff1, diff2);
                    }
                    self.history2.step();
                }
            }

            // Drain any remaining `history1` edits against the full buffer of `history2`.
            while !self.history1.is_done() {
                self.history2.advance_buffer_by(self.history1.meet().unwrap());
                for &((ref val2, ref time2), ref diff2) in &self.history2.buffer {
                    let (val1, time1, ref diff1) = self.history1.edit().unwrap();
                    results(val1, val2, time1.join(time2), diff1, diff2);
                }
                self.history1.step();
            }

            // Symmetrically, drain any remaining `history2` edits.
            while !self.history2.is_done() {
                self.history1.advance_buffer_by(self.history2.meet().unwrap());
                for &((ref val1, ref time1), ref diff1) in &self.history1.buffer {
                    let (val2, time2, ref diff2) = self.history2.edit().unwrap();
                    results(val1, val2, time1.join(time2), diff1, diff2);
                }
                self.history2.step();
            }
        }
    }
}
use std::cmp;
use decoders::RawImage;
use imageops::{OpBuffer,ImageOp,Pipeline};
/// Image operation that applies a base tone curve to each pixel's first channel.
///
/// `xs`/`ys` are matching knot coordinates fed to the monotone cubic spline.
#[derive(Clone, Debug)]
pub struct OpBaseCurve {
    xs: Vec<f32>, // spline knot inputs
    ys: Vec<f32>, // spline knot outputs, same length as `xs`
}
impl OpBaseCurve {
    /// Builds the default base curve; the raw image argument is currently unused.
    pub fn new(_img: &RawImage) -> OpBaseCurve {
        let knots_in = vec![0.0, 0.30, 0.5, 0.70, 1.0];
        let knots_out = vec![0.0, 0.25, 0.5, 0.75, 1.0];
        OpBaseCurve { xs: knots_in, ys: knots_out }
    }
}
impl ImageOp for OpBaseCurve {
    fn name(&self) -> &str {"basecurve"}
    /// Applies the spline to channel 0 of every 3-channel pixel; the other
    /// two channels are left untouched.
    fn run(&self, _pipeline: &Pipeline, buf: &OpBuffer) -> OpBuffer {
        let mut buf = buf.clone();
        let func = SplineFunc::new(&self.xs, &self.ys);
        buf.mutate_lines(&(|line: &mut [f32], _| {
            for pix in line.chunks_mut(3) {
                // Only the first channel is curved. The previous no-op
                // self-assignments of pix[1] and pix[2] have been removed.
                pix[0] = func.interpolate(pix[0]);
            }
        }));
        buf
    }
}
/// Monotone cubic interpolator over knot points (Fritsch–Carlson style).
///
/// Stores borrowed knot slices plus per-interval polynomial coefficients.
struct SplineFunc<'a> {
    xs: &'a [f32],  // knot inputs, ascending
    ys: &'a [f32],  // knot outputs, same length as `xs`
    c1s: Vec<f32>,  // degree-1 coefficients (tangents), one per knot
    c2s: Vec<f32>,  // degree-2 coefficients, one per interval
    c3s: Vec<f32>,  // degree-3 coefficients, one per interval
}

impl<'a> SplineFunc<'a> {
    // Monotone cubic interpolation code adapted from the Javascript example in Wikipedia
    /// Builds the spline coefficients for the given knots.
    ///
    /// # Panics
    /// Panics if `xs` and `ys` differ in length or fewer than 2 points are given.
    fn new(xs: &'a[f32], ys: &'a[f32]) -> SplineFunc<'a> {
        if xs.len() != ys.len() { panic!("Different number of Xs and Ys for Spline"); }
        if xs.len() < 2 { panic!("Need at least 2 points for Spline"); }

        // Get consecutive differences and secant slopes.
        // (The previously computed `dys` vector was never read and has been removed.)
        let mut dxs = Vec::new();
        let mut slopes = Vec::new();
        for i in 0..(xs.len()-1) {
            let dx = xs[i+1] - xs[i];
            let dy = ys[i+1] - ys[i];
            dxs.push(dx);
            slopes.push(dy/dx);
        }

        // Get degree-1 coefficients (tangents at each knot).
        let mut c1s = vec![slopes[0]];
        for i in 0..(dxs.len()-1) {
            let m = slopes[i];
            let next = slopes[i+1];
            if m*next <= 0.0 {
                // Secant slope changes sign: flatten to keep the curve monotone.
                c1s.push(0.0);
            } else {
                let dx = dxs[i];
                let dxnext = dxs[i+1];
                let common = dx + dxnext;
                c1s.push(3.0*common/((common+dxnext)/m + (common + dx)/next));
            }
        }
        c1s.push(slopes[slopes.len()-1]);

        // Get degree-2 and degree-3 coefficients, one per interval.
        let mut c2s = Vec::new();
        let mut c3s = Vec::new();
        for i in 0..(c1s.len()-1) {
            let c1 = c1s[i];
            let slope = slopes[i];
            let invdx = 1.0 / dxs[i];
            let common = c1 + c1s[i+1] - slope - slope;
            c2s.push((slope - c1 - common)*invdx);
            c3s.push(common*invdx*invdx);
        }

        SplineFunc {
            xs: xs,
            ys: ys,
            c1s: c1s,
            c2s: c2s,
            c3s: c3s,
        }
    }

    /// Evaluates the spline at `val`.
    ///
    /// Inputs at or beyond the last knot clamp to the last output; inputs
    /// below the first knot extrapolate the first interval's cubic.
    fn interpolate(&self, val: f32) -> f32 {
        // Anything at or beyond the last value returns the last value
        let end = self.xs[self.xs.len()-1];
        if val >= end {
            return self.ys[self.ys.len()-1];
        }

        // Binary search for the interval containing `val`, returning the knot
        // output exactly when `val` equals one of the original xs.
        let mut low: isize = 0;
        let mut mid: isize;
        let mut high: isize = (self.c3s.len() - 1) as isize;
        while low <= high {
            mid = (low+high)/2;
            let xhere = self.xs[mid as usize];
            if xhere < val { low = mid + 1; }
            else if xhere > val { high = mid - 1; }
            else { return self.ys[mid as usize] }
        }
        // `high` names the interval left of `val`; clamp at 0 for below-range input.
        let i = cmp::max(0, high) as usize;

        // Interpolate the cubic for that interval.
        let diff = val - self.xs[i];
        self.ys[i] + self.c1s[i]*diff + self.c2s[i]*diff*diff + self.c3s[i]*diff*diff*diff
    }
}
curves: get rid of tabs
use std::cmp;
use decoders::RawImage;
use imageops::{OpBuffer,ImageOp,Pipeline};
/// Image operation that applies a base tone curve to each pixel's first channel.
///
/// `xs`/`ys` are matching knot coordinates fed to the monotone cubic spline.
#[derive(Clone, Debug)]
pub struct OpBaseCurve {
    xs: Vec<f32>, // spline knot inputs
    ys: Vec<f32>, // spline knot outputs, same length as `xs`
}
impl OpBaseCurve {
    /// Builds the default base curve; the raw image argument is currently unused.
    pub fn new(_img: &RawImage) -> OpBaseCurve {
        let knots_in = vec![0.0, 0.30, 0.5, 0.70, 1.0];
        let knots_out = vec![0.0, 0.25, 0.5, 0.75, 1.0];
        OpBaseCurve { xs: knots_in, ys: knots_out }
    }
}
impl ImageOp for OpBaseCurve {
    fn name(&self) -> &str {"basecurve"}
    /// Applies the spline to channel 0 of every 3-channel pixel; the other
    /// two channels are left untouched.
    fn run(&self, _pipeline: &Pipeline, buf: &OpBuffer) -> OpBuffer {
        let mut buf = buf.clone();
        let func = SplineFunc::new(&self.xs, &self.ys);
        buf.mutate_lines(&(|line: &mut [f32], _| {
            for pix in line.chunks_mut(3) {
                // Only the first channel is curved. The previous no-op
                // self-assignments of pix[1] and pix[2] have been removed.
                pix[0] = func.interpolate(pix[0]);
            }
        }));
        buf
    }
}
/// Monotone cubic spline evaluator over borrowed knot slices.
struct SplineFunc<'a> {
    xs: &'a [f32],
    ys: &'a [f32],
    c1s: Vec<f32>,
    c2s: Vec<f32>,
    c3s: Vec<f32>,
}

impl<'a> SplineFunc<'a> {
    // Monotone cubic interpolation adapted from the Javascript example on Wikipedia.
    fn new(xs: &'a [f32], ys: &'a [f32]) -> SplineFunc<'a> {
        if xs.len() != ys.len() { panic!("Different number of Xs and Ys for Spline"); }
        if xs.len() < 2 { panic!("Need at least 2 points for Spline"); }

        let segments = xs.len() - 1;

        // Per-segment differences and secant slopes.
        let mut dxs = Vec::with_capacity(segments);
        let mut dys = Vec::with_capacity(segments); // kept for parity with the original; unused
        let mut slopes = Vec::with_capacity(segments);
        for (win_x, win_y) in xs.windows(2).zip(ys.windows(2)) {
            let run = win_x[1] - win_x[0];
            let rise = win_y[1] - win_y[0];
            dxs.push(run);
            dys.push(rise);
            slopes.push(rise / run);
        }

        // Degree-1 coefficients: a tangent per knot, flattened where the
        // secant slope changes sign so the curve stays monotone.
        let mut c1s = Vec::with_capacity(segments + 1);
        c1s.push(slopes[0]);
        for i in 1..segments {
            let prev = slopes[i - 1];
            let cur = slopes[i];
            let tangent = if prev * cur <= 0.0 {
                0.0
            } else {
                let span = dxs[i - 1] + dxs[i];
                3.0 * span / ((span + dxs[i]) / prev + (span + dxs[i - 1]) / cur)
            };
            c1s.push(tangent);
        }
        c1s.push(slopes[segments - 1]);

        // Degree-2 and degree-3 coefficients, one pair per segment.
        let mut c2s = Vec::with_capacity(segments);
        let mut c3s = Vec::with_capacity(segments);
        for i in 0..segments {
            let tangent = c1s[i];
            let slope = slopes[i];
            let invdx = 1.0 / dxs[i];
            let excess = tangent + c1s[i + 1] - slope - slope;
            c2s.push((slope - tangent - excess) * invdx);
            c3s.push(excess * invdx * invdx);
        }

        SplineFunc { xs: xs, ys: ys, c1s: c1s, c2s: c2s, c3s: c3s }
    }

    fn interpolate(&self, val: f32) -> f32 {
        // At or beyond the final knot the curve clamps to the final output.
        if val >= self.xs[self.xs.len() - 1] {
            return self.ys[self.ys.len() - 1];
        }

        // Binary search for the segment containing `val`; exact knot hits
        // return the stored output directly.
        let mut low: isize = 0;
        let mut high: isize = (self.c3s.len() - 1) as isize;
        while low <= high {
            let mid = (low + high) / 2;
            let x_mid = self.xs[mid as usize];
            if x_mid < val {
                low = mid + 1;
            } else if x_mid > val {
                high = mid - 1;
            } else {
                return self.ys[mid as usize];
            }
        }

        // `high` names the segment left of `val` (clamped at zero so inputs
        // below the first knot extrapolate the first segment).
        let seg = cmp::max(0, high) as usize;
        let d = val - self.xs[seg];
        self.ys[seg] + self.c1s[seg] * d + self.c2s[seg] * d * d + self.c3s[seg] * d * d * d
    }
}
|
//! WebAssembly interpreter module.
/// Custom user error.
///
/// Implement this for error types surfaced through `Error::User`; the bounds
/// match what the interpreter needs to store, clone, compare, and display errors.
pub trait UserError: 'static + ::std::fmt::Display + ::std::fmt::Debug + Clone + PartialEq {
}
/// Internal interpreter error.
///
/// Every variant except `User` carries a human-readable message describing the
/// failure; `User` wraps a caller-supplied error type.
#[derive(Debug, Clone, PartialEq)]
pub enum Error<E> where E: UserError {
    /// Program-level error.
    Program(String),
    /// Validation error.
    Validation(String),
    /// Initialization error.
    Initialization(String),
    /// Function-level error.
    Function(String),
    /// Table-level error.
    Table(String),
    /// Memory-level error.
    Memory(String),
    /// Variable-level error.
    Variable(String),
    /// Global-level error.
    Global(String),
    /// Local-level error.
    Local(String),
    /// Stack-level error.
    Stack(String),
    /// Value-level error.
    Value(String),
    /// Interpreter (code) error.
    Interpreter(String),
    /// Env module error.
    Env(String),
    /// Native module error.
    Native(String),
    /// Trap.
    Trap(String),
    /// Custom user error.
    User(E),
}
impl<E> Into<String> for Error<E> where E: UserError {
fn into(self) -> String {
match self {
Error::Program(s) => s,
Error::Validation(s) => s,
Error::Initialization(s) => s,
Error::Function(s) => s,
Error::Table(s) => s,
Error::Memory(s) => s,
Error::Variable(s) => s,
Error::Global(s) => s,
Error::Local(s) => s,
Error::Stack(s) => s,
Error::Interpreter(s) => s,
Error::Value(s) => s,
Error::Env(s) => s,
Error::Native(s) => s,
Error::Trap(s) => format!("trap: {}", s),
Error::User(e) => format!("user: {}", e),
}
}
}
/// Dummy user error.
///
/// Placeholder for programs that never raise `Error::User`.
#[derive(Debug, Clone, PartialEq)]
pub struct DummyUserError;
impl UserError for DummyUserError {}
impl ::std::fmt::Display for DummyUserError {
    // Intentionally writes nothing: there is no useful message for the dummy error.
    fn fmt(&self, _f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { Ok(()) }
}
mod env;
mod env_native;
mod imports;
mod memory;
mod module;
mod program;
mod runner;
mod stack;
mod table;
mod validator;
mod value;
mod variable;
#[cfg(test)]
mod tests;
pub use self::memory::MemoryInstance;
pub use self::module::{ModuleInstance, ModuleInstanceInterface,
ItemIndex, ExportEntryType, CallerContext, ExecutionParams, FunctionSignature};
pub use self::table::TableInstance;
pub use self::program::ProgramInstance;
pub use self::value::RuntimeValue;
pub use self::variable::{VariableInstance, VariableType, ExternalVariableValue};
pub use self::env_native::{env_native_module, UserDefinedElements, UserFunctionExecutor, UserFunctionDescriptor};
pub use self::env::EnvParams;
/// Default type of Error if you do not need any custom user errors.
pub type DummyError = Error<DummyUserError>;

/// Default type of ProgramInstance if you do not need any custom user errors.
/// To work with custom user errors or interpreter internals, use CustomProgramInstance.
// NOTE(review): no `CustomProgramInstance` is visible in this module; the intent
// appears to be "parameterize `program::ProgramInstance` with your own
// `UserError` type" — confirm the name.
pub type DefaultProgramInstance = self::program::ProgramInstance<DummyUserError>;
/// Default type of ModuleInstance if you do not need any custom user errors.
/// To work with custom user errors or interpreter internals, use CustomModuleInstance.
pub type DefaultModuleInstance = self::module::ModuleInstance<DummyUserError>;
/// Default type of ModuleInstanceInterface if you do not need any custom user errors.
/// To work with custom user errors or interpreter internals, use CustomModuleInstanceInterface.
pub type DefaultModuleInstanceInterface = self::module::ModuleInstanceInterface<DummyUserError>;
Implement `From<UserError>` for `Error`.
//! WebAssembly interpreter module.
/// Custom user error.
///
/// Implement this for error types surfaced through `Error::User`; the bounds
/// match what the interpreter needs to store, clone, compare, and display errors.
pub trait UserError: 'static + ::std::fmt::Display + ::std::fmt::Debug + Clone + PartialEq {
}
/// Internal interpreter error.
///
/// Every variant except `User` carries a human-readable message describing the
/// failure; `User` wraps a caller-supplied error type.
#[derive(Debug, Clone, PartialEq)]
pub enum Error<E> where E: UserError {
    /// Program-level error.
    Program(String),
    /// Validation error.
    Validation(String),
    /// Initialization error.
    Initialization(String),
    /// Function-level error.
    Function(String),
    /// Table-level error.
    Table(String),
    /// Memory-level error.
    Memory(String),
    /// Variable-level error.
    Variable(String),
    /// Global-level error.
    Global(String),
    /// Local-level error.
    Local(String),
    /// Stack-level error.
    Stack(String),
    /// Value-level error.
    Value(String),
    /// Interpreter (code) error.
    Interpreter(String),
    /// Env module error.
    Env(String),
    /// Native module error.
    Native(String),
    /// Trap.
    Trap(String),
    /// Custom user error.
    User(E),
}
impl<E> Into<String> for Error<E> where E: UserError {
fn into(self) -> String {
match self {
Error::Program(s) => s,
Error::Validation(s) => s,
Error::Initialization(s) => s,
Error::Function(s) => s,
Error::Table(s) => s,
Error::Memory(s) => s,
Error::Variable(s) => s,
Error::Global(s) => s,
Error::Local(s) => s,
Error::Stack(s) => s,
Error::Interpreter(s) => s,
Error::Value(s) => s,
Error::Env(s) => s,
Error::Native(s) => s,
Error::Trap(s) => format!("trap: {}", s),
Error::User(e) => format!("user: {}", e),
}
}
}
/// Placeholder user-error type for programs that never raise custom errors.
#[derive(Debug, Clone, PartialEq)]
pub struct DummyUserError;

impl UserError for DummyUserError {}

impl ::std::fmt::Display for DummyUserError {
    /// A dummy error carries no information, so formatting writes nothing.
    fn fmt(&self, _fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        Ok(())
    }
}
/// Automatic wrapping of a user error in `Error::User`, which enables `?`
/// on user-error results inside functions returning `Result<_, Error<U>>`.
impl<U> From<U> for Error<U> where U: UserError + Sized {
    fn from(e: U) -> Self {
        Error::User(e)
    }
}
mod env;
mod env_native;
mod imports;
mod memory;
mod module;
mod program;
mod runner;
mod stack;
mod table;
mod validator;
mod value;
mod variable;
#[cfg(test)]
mod tests;
pub use self::memory::MemoryInstance;
pub use self::module::{ModuleInstance, ModuleInstanceInterface,
ItemIndex, ExportEntryType, CallerContext, ExecutionParams, FunctionSignature};
pub use self::table::TableInstance;
pub use self::program::ProgramInstance;
pub use self::value::RuntimeValue;
pub use self::variable::{VariableInstance, VariableType, ExternalVariableValue};
pub use self::env_native::{env_native_module, UserDefinedElements, UserFunctionExecutor, UserFunctionDescriptor};
pub use self::env::EnvParams;
/// Default type of Error if you do not need any custom user errors.
pub type DummyError = Error<DummyUserError>;
/// Default type of ProgramInstance if you do not need any custom user errors.
/// To work with custom user errors or interpreter internals, use CustomProgramInstance.
pub type DefaultProgramInstance = self::program::ProgramInstance<DummyUserError>;
/// Default type of ModuleInstance if you do not need any custom user errors.
/// To work with custom user errors or interpreter internals, use CustomModuleInstance.
pub type DefaultModuleInstance = self::module::ModuleInstance<DummyUserError>;
/// Default type of ModuleInstanceInterface if you do not need any custom user errors.
/// To work with custom user errors or interpreter internals, use CustomModuleInstanceInterface.
pub type DefaultModuleInstanceInterface = self::module::ModuleInstanceInterface<DummyUserError>;
|
// TODO rm allow(unused)
#![allow(unsafe_code)]
use std::{
convert::{TryFrom, TryInto},
fmt::{self, Debug},
fs::File,
mem::{transmute, MaybeUninit},
path::Path,
sync::{
atomic::{AtomicU32, Ordering::Acquire},
Arc,
},
};
use crossbeam_epoch::pin;
use crate::{
pagecache::{pread_exact, pwrite_all, MessageKind},
stack::Stack,
Error, Lsn, Result,
};
// Minimum slab slot size: 64 KiB in production, shrunk to 32 bytes under
// the "testing" feature so tests can exercise many slabs cheaply.
#[cfg(not(feature = "testing"))]
const MIN_SZ: u64 = 64 * 1024;
#[cfg(feature = "testing")]
const MIN_SZ: u64 = 32;
// log2(MIN_SZ); used to map between slab ids and slot sizes.
const MIN_TRAILING_ZEROS: u64 = MIN_SZ.trailing_zeros() as u64;
// Identifies one of the 32 power-of-two-sized slabs.
pub type SlabId = u8;
// Index of a slot within one slab file.
pub type SlabIdx = u32;
/// A unique identifier for a particular slot in the heap
#[allow(clippy::module_name_repetitions)]
#[derive(Clone, Copy, PartialOrd, Ord, Eq, PartialEq, Hash)]
pub struct HeapId {
    // Packed slab id + slot index; see `HeapId::compose`/`decompose`.
    pub location: u64,
    // Lsn at which the value in this slot was written.
    pub original_lsn: Lsn,
}
impl Debug for HeapId {
    /// Debug-formats the id in decomposed form (slab, index, lsn) rather
    /// than as the packed `location` word, which is not human-readable.
    fn fmt(
        &self,
        formatter: &mut fmt::Formatter<'_>,
    ) -> std::result::Result<(), fmt::Error> {
        let (slab_id, slab_idx, lsn) = self.decompose();
        formatter
            .debug_struct("HeapId")
            .field("slab", &slab_id)
            .field("idx", &slab_idx)
            .field("original_lsn", &lsn)
            .finish()
    }
}
impl HeapId {
    /// Unpacks `location` into (slab id, slot index, original lsn).
    ///
    /// The high 32 bits hold a one-hot slab marker (bit `32 + slab_id`),
    /// the low 32 bits hold the slot index; see `compose`.
    pub fn decompose(&self) -> (SlabId, SlabIdx, Lsn) {
        const IDX_MASK: u64 = (1 << 32) - 1;
        // trailing_zeros of the shifted-down high half recovers the slab id
        let slab_id =
            u8::try_from((self.location >> 32).trailing_zeros()).unwrap();
        let slab_idx = u32::try_from(self.location & IDX_MASK).unwrap();
        (slab_id, slab_idx, self.original_lsn)
    }
    /// Packs a slab id and slot index into a single `location` word and
    /// pairs it with the lsn at which the value was written.
    pub fn compose(
        slab_id: SlabId,
        slab_idx: SlabIdx,
        original_lsn: Lsn,
    ) -> HeapId {
        let slab = 1 << (32 + u64::from(slab_id));
        let heap_id = slab | u64::from(slab_idx);
        HeapId { location: heap_id, original_lsn }
    }
    /// Byte offset of this slot within its slab file.
    fn offset(&self) -> u64 {
        let (slab_id, idx, _) = self.decompose();
        slab_id_to_size(slab_id) * u64::from(idx)
    }
    /// Size in bytes of the slots in this id's slab.
    fn slab_size(&self) -> u64 {
        let (slab_id, _idx, _lsn) = self.decompose();
        slab_id_to_size(slab_id)
    }
}
/// Rounds `size` up to the size of the slab slot that would store it.
pub(crate) fn slab_size(size: u64) -> u64 {
    slab_id_to_size(size_to_slab_id(size))
}
/// Slot size for a slab: `MIN_SZ` doubled `slab_id` times.
fn slab_id_to_size(slab_id: u8) -> u64 {
    1 << (MIN_TRAILING_ZEROS + u64::from(slab_id))
}
/// Chooses the smallest slab whose slot size can hold `size` bytes.
fn size_to_slab_id(size: u64) -> SlabId {
    // find the power of 2 that is at least MIN_SZ (64k in production)
    let normalized_size = std::cmp::max(MIN_SZ, size.next_power_of_two());
    // drop the lowest unused bits
    let rebased_size = normalized_size >> MIN_TRAILING_ZEROS;
    u8::try_from(rebased_size.trailing_zeros()).unwrap()
}
/// An in-flight claim on one heap slot. Call `complete` to persist data
/// into the slot; if the reservation is dropped uncompleted, the slot is
/// returned to its slab's free list.
pub(crate) struct Reservation {
    // free list of the slab this slot came from, used for rollback on drop
    slab_free: Arc<Stack<u32>>,
    completed: bool,
    file: File,
    pub heap_id: HeapId,
    // whether the slot was newly allocated from the slab tip (vs reused)
    from_tip: bool,
}
impl Drop for Reservation {
    fn drop(&mut self) {
        // an uncompleted reservation releases its slot back to the slab
        if !self.completed {
            let (_slab_id, idx, _) = self.heap_id.decompose();
            self.slab_free.push(idx, &pin());
        }
    }
}
impl Reservation {
    /// Writes `data` into the reserved slot and durably syncs it,
    /// consuming the reservation.
    ///
    /// `data` must be exactly the slab's slot size. On success the slot's
    /// `HeapId` is returned; on an I/O error the reservation is dropped
    /// and the slot index is recycled via `Drop`.
    pub fn complete(mut self, data: &[u8]) -> Result<HeapId> {
        log::trace!(
            "Heap::complete({:?}) to offset {} in file {:?}",
            self.heap_id,
            self.heap_id.offset(),
            self.file
        );
        assert_eq!(data.len() as u64, self.heap_id.slab_size());
        // write data
        pwrite_all(&self.file, data, self.heap_id.offset())?;
        // sync data
        if self.from_tip {
            // a fresh tip allocation may have grown the file, so flush
            // metadata (length) as well as data
            self.file.sync_all()?;
        } else if cfg!(not(target_os = "linux")) {
            self.file.sync_data()?;
        } else {
            #[allow(clippy::assertions_on_constants)]
            {
                assert!(cfg!(target_os = "linux"));
            }
            #[cfg(target_os = "linux")]
            {
                use std::os::unix::io::AsRawFd;
                // sync only the written byte range instead of the whole
                // file; wait flags make this a durable, blocking flush
                let ret = unsafe {
                    libc::sync_file_range(
                        self.file.as_raw_fd(),
                        i64::try_from(self.heap_id.offset()).unwrap(),
                        i64::try_from(data.len()).unwrap(),
                        libc::SYNC_FILE_RANGE_WAIT_BEFORE
                            | libc::SYNC_FILE_RANGE_WRITE
                            | libc::SYNC_FILE_RANGE_WAIT_AFTER,
                    )
                };
                if ret < 0 {
                    let err = std::io::Error::last_os_error();
                    if let Some(libc::ENOSYS) = err.raw_os_error() {
                        // kernel without sync_file_range: fall back to a
                        // full sync rather than failing the write
                        self.file.sync_all()?;
                    } else {
                        return Err(err.into());
                    }
                }
            }
        }
        // if this is not reached due to an IO error,
        // the offset will be returned to the Slab in Drop
        self.completed = true;
        Ok(self.heap_id)
    }
}
#[derive(Debug)]
pub(crate) struct Heap {
    // One slab per size class. Each slab stores items double the size of
    // the previous, starting at MIN_SZ (64k in production) for slab 0; with
    // 32 slabs the largest class is MIN_SZ << 31.
    slabs: [Slab; 32],
}
impl Heap {
    /// Opens (creating if necessary) the 32 slab files under directory `p`.
    pub fn start<P: AsRef<Path>>(p: P) -> Result<Heap> {
        // MaybeUninit<Slab> makes no validity claims about its contents,
        // so zeroing the array is sound; elements are overwritten below.
        let mut slabs: [MaybeUninit<Slab>; 32] = unsafe { std::mem::zeroed() };
        for slab_id in 0..32 {
            // NOTE(review): an early `?` return here leaks the file handles
            // of already-opened slabs (MaybeUninit never drops) — confirm
            // this is acceptable for a startup-failure path.
            let slab = Slab::start(&p, slab_id)?;
            slabs[slab_id as usize] = MaybeUninit::new(slab);
        }
        // SAFETY: every element 0..32 was initialized in the loop above,
        // and [MaybeUninit<Slab>; 32] has the same layout as [Slab; 32].
        Ok(Heap { slabs: unsafe { transmute(slabs) } })
    }
    /// Frees every slot below each slab's tip that is not referenced by
    /// the recovered snapshot (i.e. garbage left by a previous run).
    pub fn gc_unknown_items(&self, snapshot: &crate::pagecache::Snapshot) {
        // one bitmap per slab, one bit per slot up to the slab tip
        let mut bitmaps = vec![];
        for slab in &self.slabs {
            let tip = slab.tip.load(Acquire) as usize;
            bitmaps.push(vec![0_u64; 1 + (tip / 64)]);
        }
        // mark every slot that the snapshot still references as live
        for page_state in &snapshot.pt {
            for heap_id in page_state.heap_ids() {
                let (slab_id, idx, _lsn) = heap_id.decompose();
                // set the bit for this slot
                let block = idx / 64;
                let bit = idx % 64;
                let bitmask = 1 << bit;
                bitmaps[slab_id as usize][block as usize] |= bitmask;
            }
        }
        // free every allocated-but-unmarked slot
        let iter = self.slabs.iter().zip(bitmaps.into_iter());
        for (slab, bitmap) in iter {
            let tip = slab.tip.load(Acquire);
            for idx in 0..tip {
                let block = idx / 64;
                let bit = idx % 64;
                let bitmask = 1 << bit;
                let free = bitmap[block as usize] & bitmask == 0;
                if free {
                    slab.free(idx);
                }
            }
        }
    }
    /// Reads and validates the slot identified by `heap_id`, returning its
    /// message kind and payload.
    pub fn read(
        &self,
        heap_id: HeapId,
        use_compression: bool,
    ) -> Result<(MessageKind, Vec<u8>)> {
        log::trace!("Heap::read({:?})", heap_id);
        let (slab_id, slab_idx, original_lsn) = heap_id.decompose();
        self.slabs[slab_id as usize].read(
            slab_idx,
            original_lsn,
            use_compression,
        )
    }
    /// Releases the slot identified by `heap_id` for reuse.
    pub fn free(&self, heap_id: HeapId) {
        log::trace!("Heap::free({:?})", heap_id);
        let (slab_id, slab_idx, _) = heap_id.decompose();
        self.slabs[slab_id as usize].free(slab_idx)
    }
    /// Claims a slot large enough for `size` bytes in the appropriate
    /// size-class slab. `size` must be below 2^48.
    pub fn reserve(&self, size: u64, original_lsn: Lsn) -> Reservation {
        assert!(size < 1 << 48);
        let slab_id = size_to_slab_id(size);
        let ret = self.slabs[slab_id as usize].reserve(original_lsn);
        log::trace!("Heap::reserve({}) -> {:?}", size, ret.heap_id);
        ret
    }
}
/// One size class of the heap, backed by a single file of equally-sized
/// slots.
#[derive(Debug)]
struct Slab {
    file: File,
    slab_id: u8,
    // index one past the highest slot ever allocated in this slab
    tip: AtomicU32,
    // lock-free stack of reusable slot indices
    free: Arc<Stack<u32>>,
}
impl Slab {
    /// Opens (creating if necessary) the slab file named by the zero-padded
    /// slab id inside `directory`, deriving the allocation tip from the
    /// file's current length.
    pub fn start<P: AsRef<Path>>(directory: P, slab_id: u8) -> Result<Slab> {
        let bs = slab_id_to_size(slab_id);
        let free = Arc::new(Stack::default());
        let mut options = std::fs::OpenOptions::new();
        options.create(true);
        options.read(true);
        options.write(true);
        let file =
            options.open(directory.as_ref().join(format!("{:02}", slab_id)))?;
        let len = file.metadata()?.len();
        // every slot before the end of the file may already hold data
        let max_idx = len / bs;
        log::trace!(
            "starting heap slab for sizes of {}. tip: {} max idx: {}",
            bs,
            len,
            max_idx
        );
        let tip = AtomicU32::new(u32::try_from(max_idx).unwrap());
        Ok(Slab { file, slab_id, tip, free })
    }
    /// Reads and validates one slot.
    ///
    /// Slot layout as indexed here: byte 0 is the message-kind tag,
    /// bytes 1..5 a little-endian crc32 computed over byte 0 plus bytes
    /// 5.., bytes 5..13 the little-endian original lsn, and 13.. the
    /// (possibly compressed) payload. A crc or lsn mismatch is reported
    /// as corruption.
    fn read(
        &self,
        slab_idx: SlabIdx,
        original_lsn: Lsn,
        use_compression: bool,
    ) -> Result<(MessageKind, Vec<u8>)> {
        let bs = slab_id_to_size(self.slab_id);
        let offset = u64::from(slab_idx) * bs;
        log::trace!("reading heap slab slot {} at offset {}", slab_idx, offset);
        let mut heap_buf = vec![0; usize::try_from(bs).unwrap()];
        pread_exact(&self.file, &mut heap_buf, offset)?;
        let stored_crc =
            u32::from_le_bytes(heap_buf[1..5].as_ref().try_into().unwrap());
        // the crc covers everything except its own 4 bytes
        let mut hasher = crc32fast::Hasher::new();
        hasher.update(&heap_buf[0..1]);
        hasher.update(&heap_buf[5..]);
        let actual_crc = hasher.finalize();
        if actual_crc == stored_crc {
            let actual_lsn = Lsn::from_le_bytes(
                heap_buf[5..13].as_ref().try_into().unwrap(),
            );
            // guard against reading a slot that was recycled and
            // rewritten since this HeapId was recorded
            if actual_lsn != original_lsn {
                log::error!(
                    "heap slot lsn {} does not match expected original lsn {}",
                    actual_lsn,
                    original_lsn
                );
                return Err(Error::corruption(None));
            }
            let buf = heap_buf[13..].to_vec();
            let buf = if use_compression {
                crate::pagecache::decompress(buf)
            } else {
                buf
            };
            Ok((MessageKind::from(heap_buf[0]), buf))
        } else {
            log::error!(
                "heap message CRC does not match contents. stored: {} actual: {}",
                stored_crc,
                actual_crc
            );
            Err(Error::corruption(None))
        }
    }
    /// Claims a slot, preferring a recycled index from the free list and
    /// falling back to bumping the tip (growing the file).
    fn reserve(&self, original_lsn: Lsn) -> Reservation {
        let (idx, from_tip) = if let Some(idx) = self.free.pop(&pin()) {
            log::trace!(
                "reusing heap index {} in slab for sizes of {}",
                idx,
                slab_id_to_size(self.slab_id),
            );
            (idx, false)
        } else {
            log::trace!(
                "no free heap slots in slab for sizes of {}",
                slab_id_to_size(self.slab_id),
            );
            (self.tip.fetch_add(1, Acquire), true)
        };
        log::trace!(
            "heap reservation for slot {} in the slab for sizes of {}",
            idx,
            slab_id_to_size(self.slab_id),
        );
        let heap_id = HeapId::compose(self.slab_id, idx, original_lsn);
        Reservation {
            slab_free: self.free.clone(),
            completed: false,
            file: self.file.try_clone().unwrap(),
            from_tip,
            heap_id,
        }
    }
    /// Reclaims the slot's disk space and recycles its index.
    fn free(&self, idx: u32) {
        self.punch_hole(idx);
        self.free.push(idx, &pin());
    }
    /// Returns the slot's disk space to the filesystem via
    /// `fallocate(FALLOC_FL_PUNCH_HOLE)` on Linux; a no-op elsewhere.
    /// Hole punching is disabled globally after the first failure.
    fn punch_hole(&self, #[allow(unused)] idx: u32) {
        #[cfg(all(target_os = "linux", not(miri)))]
        {
            use std::{
                os::unix::io::AsRawFd,
                sync::atomic::{AtomicBool, Ordering::Relaxed},
            };
            use libc::{fallocate, FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE};
            static HOLE_PUNCHING_ENABLED: AtomicBool = AtomicBool::new(true);
            // KEEP_SIZE so the file length (and thus the tip) is unchanged
            const MODE: i32 = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
            if HOLE_PUNCHING_ENABLED.load(Relaxed) {
                let bs = i64::try_from(slab_id_to_size(self.slab_id)).unwrap();
                let offset = i64::from(idx) * bs;
                let fd = self.file.as_raw_fd();
                let ret = unsafe {
                    fallocate(
                        fd,
                        MODE,
                        #[allow(clippy::useless_conversion)]
                        offset.try_into().unwrap(),
                        #[allow(clippy::useless_conversion)]
                        bs.try_into().unwrap(),
                    )
                };
                if ret != 0 {
                    let err = std::io::Error::last_os_error();
                    log::error!(
                        "failed to punch hole in heap file: {:?}. disabling hole punching",
                        err
                    );
                    HOLE_PUNCHING_ENABLED.store(false, Relaxed);
                }
            }
        }
    }
}
Reduce log severity for heap read corruption messages
// TODO rm allow(unused)
#![allow(unsafe_code)]
use std::{
convert::{TryFrom, TryInto},
fmt::{self, Debug},
fs::File,
mem::{transmute, MaybeUninit},
path::Path,
sync::{
atomic::{AtomicU32, Ordering::Acquire},
Arc,
},
};
use crossbeam_epoch::pin;
use crate::{
pagecache::{pread_exact, pwrite_all, MessageKind},
stack::Stack,
Error, Lsn, Result,
};
#[cfg(not(feature = "testing"))]
const MIN_SZ: u64 = 64 * 1024;
#[cfg(feature = "testing")]
const MIN_SZ: u64 = 32;
const MIN_TRAILING_ZEROS: u64 = MIN_SZ.trailing_zeros() as u64;
pub type SlabId = u8;
pub type SlabIdx = u32;
/// A unique identifier for a particular slot in the heap
#[allow(clippy::module_name_repetitions)]
#[derive(Clone, Copy, PartialOrd, Ord, Eq, PartialEq, Hash)]
pub struct HeapId {
pub location: u64,
pub original_lsn: Lsn,
}
impl Debug for HeapId {
fn fmt(
&self,
f: &mut fmt::Formatter<'_>,
) -> std::result::Result<(), fmt::Error> {
let (slab, idx, original_lsn) = self.decompose();
f.debug_struct("HeapId")
.field("slab", &slab)
.field("idx", &idx)
.field("original_lsn", &original_lsn)
.finish()
}
}
impl HeapId {
pub fn decompose(&self) -> (SlabId, SlabIdx, Lsn) {
const IDX_MASK: u64 = (1 << 32) - 1;
let slab_id =
u8::try_from((self.location >> 32).trailing_zeros()).unwrap();
let slab_idx = u32::try_from(self.location & IDX_MASK).unwrap();
(slab_id, slab_idx, self.original_lsn)
}
pub fn compose(
slab_id: SlabId,
slab_idx: SlabIdx,
original_lsn: Lsn,
) -> HeapId {
let slab = 1 << (32 + u64::from(slab_id));
let heap_id = slab | u64::from(slab_idx);
HeapId { location: heap_id, original_lsn }
}
fn offset(&self) -> u64 {
let (slab_id, idx, _) = self.decompose();
slab_id_to_size(slab_id) * u64::from(idx)
}
fn slab_size(&self) -> u64 {
let (slab_id, _idx, _lsn) = self.decompose();
slab_id_to_size(slab_id)
}
}
pub(crate) fn slab_size(size: u64) -> u64 {
slab_id_to_size(size_to_slab_id(size))
}
fn slab_id_to_size(slab_id: u8) -> u64 {
1 << (MIN_TRAILING_ZEROS + u64::from(slab_id))
}
fn size_to_slab_id(size: u64) -> SlabId {
// find the power of 2 that is at least 64k
let normalized_size = std::cmp::max(MIN_SZ, size.next_power_of_two());
// drop the lowest unused bits
let rebased_size = normalized_size >> MIN_TRAILING_ZEROS;
u8::try_from(rebased_size.trailing_zeros()).unwrap()
}
pub(crate) struct Reservation {
slab_free: Arc<Stack<u32>>,
completed: bool,
file: File,
pub heap_id: HeapId,
from_tip: bool,
}
impl Drop for Reservation {
fn drop(&mut self) {
if !self.completed {
let (_slab_id, idx, _) = self.heap_id.decompose();
self.slab_free.push(idx, &pin());
}
}
}
impl Reservation {
pub fn complete(mut self, data: &[u8]) -> Result<HeapId> {
log::trace!(
"Heap::complete({:?}) to offset {} in file {:?}",
self.heap_id,
self.heap_id.offset(),
self.file
);
assert_eq!(data.len() as u64, self.heap_id.slab_size());
// write data
pwrite_all(&self.file, data, self.heap_id.offset())?;
// sync data
if self.from_tip {
self.file.sync_all()?;
} else if cfg!(not(target_os = "linux")) {
self.file.sync_data()?;
} else {
#[allow(clippy::assertions_on_constants)]
{
assert!(cfg!(target_os = "linux"));
}
#[cfg(target_os = "linux")]
{
use std::os::unix::io::AsRawFd;
let ret = unsafe {
libc::sync_file_range(
self.file.as_raw_fd(),
i64::try_from(self.heap_id.offset()).unwrap(),
i64::try_from(data.len()).unwrap(),
libc::SYNC_FILE_RANGE_WAIT_BEFORE
| libc::SYNC_FILE_RANGE_WRITE
| libc::SYNC_FILE_RANGE_WAIT_AFTER,
)
};
if ret < 0 {
let err = std::io::Error::last_os_error();
if let Some(libc::ENOSYS) = err.raw_os_error() {
self.file.sync_all()?;
} else {
return Err(err.into());
}
}
}
}
// if this is not reached due to an IO error,
// the offset will be returned to the Slab in Drop
self.completed = true;
Ok(self.heap_id)
}
}
#[derive(Debug)]
pub(crate) struct Heap {
// each slab stores
// items that are double
// the size of the previous,
// ranging from 64k in the
// smallest slab to 2^48 in
// the last.
slabs: [Slab; 32],
}
impl Heap {
pub fn start<P: AsRef<Path>>(p: P) -> Result<Heap> {
let mut slabs: [MaybeUninit<Slab>; 32] = unsafe { std::mem::zeroed() };
for slab_id in 0..32 {
let slab = Slab::start(&p, slab_id)?;
slabs[slab_id as usize] = MaybeUninit::new(slab);
}
Ok(Heap { slabs: unsafe { transmute(slabs) } })
}
pub fn gc_unknown_items(&self, snapshot: &crate::pagecache::Snapshot) {
let mut bitmaps = vec![];
for slab in &self.slabs {
let tip = slab.tip.load(Acquire) as usize;
bitmaps.push(vec![0_u64; 1 + (tip / 64)]);
}
for page_state in &snapshot.pt {
for heap_id in page_state.heap_ids() {
let (slab_id, idx, _lsn) = heap_id.decompose();
// set the bit for this slot
let block = idx / 64;
let bit = idx % 64;
let bitmask = 1 << bit;
bitmaps[slab_id as usize][block as usize] |= bitmask;
}
}
let iter = self.slabs.iter().zip(bitmaps.into_iter());
for (slab, bitmap) in iter {
let tip = slab.tip.load(Acquire);
for idx in 0..tip {
let block = idx / 64;
let bit = idx % 64;
let bitmask = 1 << bit;
let free = bitmap[block as usize] & bitmask == 0;
if free {
slab.free(idx);
}
}
}
}
pub fn read(
&self,
heap_id: HeapId,
use_compression: bool,
) -> Result<(MessageKind, Vec<u8>)> {
log::trace!("Heap::read({:?})", heap_id);
let (slab_id, slab_idx, original_lsn) = heap_id.decompose();
self.slabs[slab_id as usize].read(
slab_idx,
original_lsn,
use_compression,
)
}
pub fn free(&self, heap_id: HeapId) {
log::trace!("Heap::free({:?})", heap_id);
let (slab_id, slab_idx, _) = heap_id.decompose();
self.slabs[slab_id as usize].free(slab_idx)
}
pub fn reserve(&self, size: u64, original_lsn: Lsn) -> Reservation {
assert!(size < 1 << 48);
let slab_id = size_to_slab_id(size);
let ret = self.slabs[slab_id as usize].reserve(original_lsn);
log::trace!("Heap::reserve({}) -> {:?}", size, ret.heap_id);
ret
}
}
#[derive(Debug)]
struct Slab {
file: File,
slab_id: u8,
tip: AtomicU32,
free: Arc<Stack<u32>>,
}
impl Slab {
pub fn start<P: AsRef<Path>>(directory: P, slab_id: u8) -> Result<Slab> {
let bs = slab_id_to_size(slab_id);
let free = Arc::new(Stack::default());
let mut options = std::fs::OpenOptions::new();
options.create(true);
options.read(true);
options.write(true);
let file =
options.open(directory.as_ref().join(format!("{:02}", slab_id)))?;
let len = file.metadata()?.len();
let max_idx = len / bs;
log::trace!(
"starting heap slab for sizes of {}. tip: {} max idx: {}",
bs,
len,
max_idx
);
let tip = AtomicU32::new(u32::try_from(max_idx).unwrap());
Ok(Slab { file, slab_id, tip, free })
}
    /// Reads and validates one slot.
    ///
    /// Slot layout as indexed here: byte 0 is the message-kind tag,
    /// bytes 1..5 a little-endian crc32 computed over byte 0 plus bytes
    /// 5.., bytes 5..13 the little-endian original lsn, and 13.. the
    /// (possibly compressed) payload. Mismatches are reported as
    /// corruption; they are logged at debug level because callers may
    /// retry or handle them as expected conditions.
    fn read(
        &self,
        slab_idx: SlabIdx,
        original_lsn: Lsn,
        use_compression: bool,
    ) -> Result<(MessageKind, Vec<u8>)> {
        let bs = slab_id_to_size(self.slab_id);
        let offset = u64::from(slab_idx) * bs;
        log::trace!("reading heap slab slot {} at offset {}", slab_idx, offset);
        let mut heap_buf = vec![0; usize::try_from(bs).unwrap()];
        pread_exact(&self.file, &mut heap_buf, offset)?;
        let stored_crc =
            u32::from_le_bytes(heap_buf[1..5].as_ref().try_into().unwrap());
        // the crc covers everything except its own 4 bytes
        let mut hasher = crc32fast::Hasher::new();
        hasher.update(&heap_buf[0..1]);
        hasher.update(&heap_buf[5..]);
        let actual_crc = hasher.finalize();
        if actual_crc == stored_crc {
            let actual_lsn = Lsn::from_le_bytes(
                heap_buf[5..13].as_ref().try_into().unwrap(),
            );
            // guard against reading a slot that was recycled and
            // rewritten since this HeapId was recorded
            if actual_lsn != original_lsn {
                log::debug!(
                    "heap slot lsn {} does not match expected original lsn {}",
                    actual_lsn,
                    original_lsn
                );
                return Err(Error::corruption(None));
            }
            let buf = heap_buf[13..].to_vec();
            let buf = if use_compression {
                crate::pagecache::decompress(buf)
            } else {
                buf
            };
            Ok((MessageKind::from(heap_buf[0]), buf))
        } else {
            log::debug!(
                "heap message CRC does not match contents. stored: {} actual: {}",
                stored_crc,
                actual_crc
            );
            Err(Error::corruption(None))
        }
    }
fn reserve(&self, original_lsn: Lsn) -> Reservation {
let (idx, from_tip) = if let Some(idx) = self.free.pop(&pin()) {
log::trace!(
"reusing heap index {} in slab for sizes of {}",
idx,
slab_id_to_size(self.slab_id),
);
(idx, false)
} else {
log::trace!(
"no free heap slots in slab for sizes of {}",
slab_id_to_size(self.slab_id),
);
(self.tip.fetch_add(1, Acquire), true)
};
log::trace!(
"heap reservation for slot {} in the slab for sizes of {}",
idx,
slab_id_to_size(self.slab_id),
);
let heap_id = HeapId::compose(self.slab_id, idx, original_lsn);
Reservation {
slab_free: self.free.clone(),
completed: false,
file: self.file.try_clone().unwrap(),
from_tip,
heap_id,
}
}
fn free(&self, idx: u32) {
self.punch_hole(idx);
self.free.push(idx, &pin());
}
fn punch_hole(&self, #[allow(unused)] idx: u32) {
#[cfg(all(target_os = "linux", not(miri)))]
{
use std::{
os::unix::io::AsRawFd,
sync::atomic::{AtomicBool, Ordering::Relaxed},
};
use libc::{fallocate, FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE};
static HOLE_PUNCHING_ENABLED: AtomicBool = AtomicBool::new(true);
const MODE: i32 = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
if HOLE_PUNCHING_ENABLED.load(Relaxed) {
let bs = i64::try_from(slab_id_to_size(self.slab_id)).unwrap();
let offset = i64::from(idx) * bs;
let fd = self.file.as_raw_fd();
let ret = unsafe {
fallocate(
fd,
MODE,
#[allow(clippy::useless_conversion)]
offset.try_into().unwrap(),
#[allow(clippy::useless_conversion)]
bs.try_into().unwrap(),
)
};
if ret != 0 {
let err = std::io::Error::last_os_error();
log::error!(
"failed to punch hole in heap file: {:?}. disabling hole punching",
err
);
HOLE_PUNCHING_ENABLED.store(false, Relaxed);
}
}
}
}
}
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::rc::Rc;
use super::constants::*;
use super::nodes::*;
/// Store a command tree while populating it. This is used
/// to construct a [`RootNode`] to be used with the [`Parser`].
///
/// The lifetime parameter `'a` refers to the lifetime
/// of the strings used for [command] and [parameter] names and
/// help text.
///
/// [command]: struct.Command.html
/// [parameter]: struct.Parameter.html
/// [`Parser`]: struct.Parser.html
/// [`RootNode`]: struct.RootNode.html
pub struct CommandTree<'a> {
commands: Vec<Command<'a>>,
}
impl<'a> Default for CommandTree<'a> {
fn default() -> Self {
CommandTree { commands: vec![] }
}
}
impl<'a> CommandTree<'a> {
    /// Create a new `CommandTree`.
    pub fn new() -> Self {
        Default::default()
    }
    /// Add a `Command` to the `CommandTree`.
    pub fn command(&mut self, command: Command<'a>) {
        self.commands.push(command);
    }
    /// Construct the `CommandTree` and produce a `RootNode`.
    pub fn finalize(&self) -> Rc<Node> {
        let mut successors: Vec<Rc<Node>> = vec![];
        for c in &self.commands {
            successors.push(Rc::new(Node::Command(self.build_command(c))));
        }
        Rc::new(Node::Root(RootNode::new(successors)))
    }
    /// Build the node for a single command, collecting its parameter
    /// nodes and the successor nodes reachable after the command name.
    fn build_command(&self, command: &Command) -> CommandNode {
        let mut parameters: Vec<Rc<Node>> = vec![];
        let mut successors: Vec<Rc<Node>> = vec![];
        for parameter in &command.parameters {
            match parameter.kind {
                ParameterKind::Flag => {
                    self.build_flag_parameter(parameter, &mut parameters, &mut successors);
                }
                ParameterKind::Named => {
                    self.build_named_parameter(parameter, &mut parameters, &mut successors);
                }
                ParameterKind::Simple => {
                    self.build_simple_parameter(parameter, &mut parameters, &mut successors);
                }
            };
        }
        // We'll want to find the right node for the wrapped_root
        // and pass it along here.
        CommandNode::new(command.name,
                         command.help_text,
                         command.hidden,
                         command.priority,
                         successors,
                         None,
                         parameters)
    }
    /// A flag parameter matches by its own name, so one node serves as
    /// both the parameter node and a successor.
    fn build_flag_parameter(&self,
                            parameter: &Parameter,
                            parameters: &mut Vec<Rc<Node>>,
                            successors: &mut Vec<Rc<Node>>) {
        let p = ParameterNode::new(parameter.name,
                                   parameter.help_text,
                                   parameter.hidden,
                                   parameter.priority.unwrap_or(PRIORITY_DEFAULT),
                                   vec![],
                                   parameter.repeatable,
                                   None,
                                   parameter.kind,
                                   parameter.required);
        let p = Rc::new(Node::Parameter(p));
        parameters.push(p.clone());
        successors.push(p);
    }
    /// A named parameter is reached through a name node (plus one node
    /// per alias); each name node leads to the shared value node `p`.
    fn build_named_parameter(&self,
                             parameter: &Parameter,
                             parameters: &mut Vec<Rc<Node>>,
                             successors: &mut Vec<Rc<Node>>) {
        let p = ParameterNode::new(parameter.name,
                                   parameter.help_text,
                                   parameter.hidden,
                                   parameter.priority
                                       .unwrap_or(PRIORITY_PARAMETER),
                                   vec![],
                                   parameter.repeatable,
                                   None,
                                   parameter.kind,
                                   parameter.required);
        let p = Rc::new(Node::Parameter(p));
        parameters.push(p.clone());
        let n = ParameterNameNode::new(parameter.name,
                                       parameter.hidden,
                                       PRIORITY_DEFAULT,
                                       vec![p.clone()],
                                       parameter.repeatable,
                                       Some(p.clone()),
                                       p.clone());
        successors.push(Rc::new(Node::ParameterName(n)));
        for alias in &parameter.aliases {
            let a = ParameterNameNode::new(alias,
                                           parameter.hidden,
                                           PRIORITY_DEFAULT,
                                           vec![p.clone()],
                                           parameter.repeatable,
                                           Some(p.clone()),
                                           p.clone());
            successors.push(Rc::new(Node::ParameterName(a)));
        }
    }
    /// A simple (positional) parameter is its own successor.
    fn build_simple_parameter(&self,
                              parameter: &Parameter,
                              parameters: &mut Vec<Rc<Node>>,
                              successors: &mut Vec<Rc<Node>>) {
        let p = ParameterNode::new(parameter.name,
                                   parameter.help_text,
                                   parameter.hidden,
                                   parameter.priority.unwrap_or(PRIORITY_PARAMETER),
                                   vec![],
                                   parameter.repeatable,
                                   None,
                                   parameter.kind,
                                   parameter.required);
        let p = Rc::new(Node::Parameter(p));
        parameters.push(p.clone());
        // `p` is not used again, so move it rather than cloning the Rc.
        successors.push(p);
    }
}
/// Description of a command to be added to the [`CommandTree`].
///
/// The lifetime parameter `'a` refers to the lifetime
/// of the strings used for command names and help text.
///
/// [`CommandTree`]: struct.CommandTree.html
pub struct Command<'a> {
    // excluded from completion listings, but still matchable
    hidden: bool,
    // used to break ties during matching and completion
    priority: i32,
    name: &'a str,
    help_text: Option<&'a str>,
    parameters: Vec<Parameter<'a>>,
    // path to the command this command wraps, if any
    wrapped_root: Option<String>,
}
impl<'a> Command<'a> {
    /// Construct a default (blank) command with the given `name`.
    pub fn new(name: &'a str) -> Self {
        Command {
            hidden: false,
            priority: PRIORITY_DEFAULT,
            // field-init shorthand; `name: name` was redundant
            name,
            help_text: None,
            parameters: vec![],
            wrapped_root: None,
        }
    }
    /// Mark the command as hidden. Hidden commands will match
    /// within the parser, but are not listed during completion.
    pub fn hidden(mut self, hidden: bool) -> Self {
        self.hidden = hidden;
        self
    }
    /// Give the command a priority. This is used when sorting
    /// out conflicts during matching and completion.
    pub fn priority(mut self, priority: i32) -> Self {
        self.priority = priority;
        self
    }
    /// Supply help text for the command.
    pub fn help(mut self, help_text: &'a str) -> Self {
        self.help_text = Some(help_text);
        self
    }
    /// Add a [`Parameter`] to the command.
    ///
    /// [`Parameter`]: struct.Parameter.html
    pub fn parameter(mut self, parameter: Parameter<'a>) -> Self {
        self.parameters.push(parameter);
        self
    }
    /// The `wrapped_root` signifies the path to the command that should
    /// be wrapped by this command.
    pub fn wraps(mut self, wrapped_root: String) -> Self {
        self.wrapped_root = Some(wrapped_root);
        self
    }
}
/// Description of a parameter to be added to the [`Command`].
///
/// The lifetime parameter `'a` refers to the lifetime
/// of the strings used for parameter names, aliases and
/// help text.
///
/// [`Command`]: struct.Command.html
pub struct Parameter<'a> {
    // excluded from completion listings, but still matchable
    hidden: bool,
    // None means the default for the parameter's kind is used
    priority: Option<i32>,
    name: &'a str,
    // repeatable parameters may appear multiple times per invocation
    repeatable: bool,
    aliases: Vec<&'a str>,
    help_text: Option<&'a str>,
    kind: ParameterKind,
    required: bool,
}
impl<'a> Parameter<'a> {
    /// Construct a default (blank) parameter with the given `name`.
    pub fn new(name: &'a str) -> Self {
        Parameter {
            hidden: false,
            priority: None,
            // field-init shorthand; `name: name` was redundant
            name,
            repeatable: false,
            aliases: vec![],
            help_text: None,
            kind: ParameterKind::Simple,
            required: false,
        }
    }
    /// Mark the parameter as hidden. Hidden parameters will match
    /// within the parser, but are not listed during completion.
    pub fn hidden(mut self, hidden: bool) -> Self {
        self.hidden = hidden;
        self
    }
    /// Give the parameter a priority. This is used when sorting
    /// out conflicts during matching and completion.
    ///
    /// The `priority` of a `Parameter` defaults to `PRIORITY_PARAMETER`
    /// except for when the `kind` is `ParameterKind::Flag` in which
    /// case, the default will be `PRIORITY_DEFAULT`.
    pub fn priority(mut self, priority: i32) -> Self {
        self.priority = Some(priority);
        self
    }
    /// Establish whether or not this parameter is repeatable.
    /// Repeated parameters produce a vector of values and can
    /// be given multiple times within a single command invocation.
    pub fn repeatable(mut self, repeatable: bool) -> Self {
        self.repeatable = repeatable;
        self
    }
    /// Add an alias that this parameter can use.
    ///
    /// Aliases are currently only valid for parameters of `kind`
    /// `ParameterKind::Named`.
    pub fn alias(mut self, alias: &'a str) -> Self {
        self.aliases.push(alias);
        self
    }
    /// Supply the help text for the parameter.
    pub fn help(mut self, help_text: &'a str) -> Self {
        self.help_text = Some(help_text);
        self
    }
    /// Establish whether or not this parameter is required.
    pub fn required(mut self, required: bool) -> Self {
        self.required = required;
        self
    }
    /// Set which type of [`ParameterNode`] is supposed to be created
    /// to represent this parameter.
    ///
    /// [`ParameterNode`]: trait.ParameterNode.html
    pub fn kind(mut self, kind: ParameterKind) -> Self {
        self.kind = kind;
        self
    }
}
Remove reference to the defunct WrapperNode.
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::rc::Rc;
use super::constants::*;
use super::nodes::*;
/// Store a command tree while populating it. This is used
/// to construct a [`RootNode`] to be used with the [`Parser`].
///
/// The lifetime parameter `'a` refers to the lifetime
/// of the strings used for [command] and [parameter] names and
/// help text.
///
/// [command]: struct.Command.html
/// [parameter]: struct.Parameter.html
/// [`Parser`]: struct.Parser.html
/// [`RootNode`]: struct.RootNode.html
pub struct CommandTree<'a> {
commands: Vec<Command<'a>>,
}
impl<'a> Default for CommandTree<'a> {
fn default() -> Self {
CommandTree { commands: vec![] }
}
}
impl<'a> CommandTree<'a> {
/// Create a new `CommandTree`.
pub fn new() -> Self {
Default::default()
}
/// Add a `Command` to the `CommandTree`.
pub fn command(&mut self, command: Command<'a>) {
self.commands.push(command);
}
/// Construct the `CommandTree` and produce a `RootNode`.
pub fn finalize(&self) -> Rc<Node> {
let mut successors: Vec<Rc<Node>> = vec![];
for c in &self.commands {
successors.push(Rc::new(Node::Command(self.build_command(c))));
}
Rc::new(Node::Root(RootNode::new(successors)))
}
fn build_command(&self, command: &Command) -> CommandNode {
let mut parameters: Vec<Rc<Node>> = vec![];
let mut successors: Vec<Rc<Node>> = vec![];
for parameter in &command.parameters {
match parameter.kind {
ParameterKind::Flag => {
self.build_flag_parameter(parameter, &mut parameters, &mut successors);
}
ParameterKind::Named => {
self.build_named_parameter(parameter, &mut parameters, &mut successors);
}
ParameterKind::Simple => {
self.build_simple_parameter(parameter, &mut parameters, &mut successors);
}
};
}
// We'll want to find the right node for the wrapped_root
// and pass it along here.
CommandNode::new(command.name,
command.help_text,
command.hidden,
command.priority,
successors,
None,
parameters)
}
fn build_flag_parameter(&self,
parameter: &Parameter,
parameters: &mut Vec<Rc<Node>>,
successors: &mut Vec<Rc<Node>>) {
let p = ParameterNode::new(parameter.name,
parameter.help_text,
parameter.hidden,
parameter.priority.unwrap_or(PRIORITY_DEFAULT),
vec![],
parameter.repeatable,
None,
parameter.kind,
parameter.required);
let p = Rc::new(Node::Parameter(p));
parameters.push(p.clone());
successors.push(p);
}
fn build_named_parameter(&self,
parameter: &Parameter,
parameters: &mut Vec<Rc<Node>>,
successors: &mut Vec<Rc<Node>>) {
let p = ParameterNode::new(parameter.name,
parameter.help_text,
parameter.hidden,
parameter.priority
.unwrap_or(PRIORITY_PARAMETER),
vec![],
parameter.repeatable,
None,
parameter.kind,
parameter.required);
let p = Rc::new(Node::Parameter(p));
parameters.push(p.clone());
let n = ParameterNameNode::new(parameter.name,
parameter.hidden,
PRIORITY_DEFAULT,
vec![p.clone()],
parameter.repeatable,
Some(p.clone()),
p.clone());
successors.push(Rc::new(Node::ParameterName(n)));
for alias in ¶meter.aliases {
let a = ParameterNameNode::new(alias,
parameter.hidden,
PRIORITY_DEFAULT,
vec![p.clone()],
parameter.repeatable,
Some(p.clone()),
p.clone());
successors.push(Rc::new(Node::ParameterName(a)));
}
}
/// Build the parser node for a simple (positional) parameter.
///
/// Like a flag, the one `ParameterNode` serves as both the command's
/// parameter and a direct successor.
fn build_simple_parameter(&self,
                          parameter: &Parameter,
                          parameters: &mut Vec<Rc<Node>>,
                          successors: &mut Vec<Rc<Node>>) {
    let node = ParameterNode::new(parameter.name,
                                  parameter.help_text,
                                  parameter.hidden,
                                  parameter.priority.unwrap_or(PRIORITY_PARAMETER),
                                  vec![],
                                  parameter.repeatable,
                                  None,
                                  parameter.kind,
                                  parameter.required);
    let shared = Rc::new(Node::Parameter(node));
    parameters.push(shared.clone());
    successors.push(shared);
}
}
/// Description of a command to be added to the [`CommandTree`].
///
/// The lifetime parameter `'a` refers to the lifetime
/// of the strings used for command names and help text.
///
/// [`CommandTree`]: struct.CommandTree.html
pub struct Command<'a> {
// Hidden commands match in the parser but are omitted from completion.
hidden: bool,
// Used to break ties during matching and completion.
priority: i32,
// Name used to invoke the command.
name: &'a str,
// Optional help text shown to the user.
help_text: Option<&'a str>,
// Parameters accepted by this command.
parameters: Vec<Parameter<'a>>,
// Path to the command wrapped by this one (used by the `help` command).
wrapped_root: Option<String>,
}
impl<'a> Command<'a> {
    /// Construct a default (blank) command with the given `name`.
    pub fn new(name: &'a str) -> Self {
        Command {
            hidden: false,
            priority: PRIORITY_DEFAULT,
            name: name,
            help_text: None,
            parameters: vec![],
            wrapped_root: None,
        }
    }
    /// Mark the command as hidden. Hidden commands will match
    /// within the parser, but are not listed during completion.
    pub fn hidden(self, hidden: bool) -> Self {
        Command { hidden: hidden, ..self }
    }
    /// Give the command a priority. This is used when sorting
    /// out conflicts during matching and completion.
    pub fn priority(self, priority: i32) -> Self {
        Command { priority: priority, ..self }
    }
    /// Supply help text for the command.
    pub fn help(self, help_text: &'a str) -> Self {
        Command { help_text: Some(help_text), ..self }
    }
    /// Add a [`Parameter`] to the command.
    ///
    /// [`Parameter`]: struct.Parameter.html
    pub fn parameter(self, parameter: Parameter<'a>) -> Self {
        let mut command = self;
        command.parameters.push(parameter);
        command
    }
    /// The `wrapped_root` signifies the path to the command that should
    /// be wrapped by this command. This is used for the `help` command.
    ///
    /// [`CommandNode`]: struct.CommandNode.html
    pub fn wraps(self, wrapped_root: String) -> Self {
        Command { wrapped_root: Some(wrapped_root), ..self }
    }
}
/// Description of a parameter to be added to the [`Command`].
///
/// The lifetime parameter `'a` refers to the lifetime
/// of the strings used for parameter names, aliases and
/// help text.
///
/// [`Command`]: struct.Command.html
pub struct Parameter<'a> {
// Hidden parameters match in the parser but are omitted from completion.
hidden: bool,
// None means "use the default priority for this parameter kind"
// (PRIORITY_DEFAULT for flags, PRIORITY_PARAMETER otherwise).
priority: Option<i32>,
// Name used to refer to the parameter.
name: &'a str,
// Repeatable parameters may be given multiple times per invocation.
repeatable: bool,
// Alternate names; only meaningful for ParameterKind::Named.
aliases: Vec<&'a str>,
// Optional help text shown to the user.
help_text: Option<&'a str>,
// Which ParameterNode variant will represent this parameter.
kind: ParameterKind,
// Whether the parameter must be supplied.
required: bool,
}
impl<'a> Parameter<'a> {
    /// Construct a default (blank) parameter with the given `name`.
    pub fn new(name: &'a str) -> Self {
        Parameter {
            hidden: false,
            priority: None,
            name: name,
            repeatable: false,
            aliases: vec![],
            help_text: None,
            kind: ParameterKind::Simple,
            required: false,
        }
    }
    /// Mark the parameter as hidden. Hidden parameters will match
    /// within the parser, but are not listed during completion.
    pub fn hidden(self, hidden: bool) -> Self {
        Parameter { hidden: hidden, ..self }
    }
    /// Give the parameter a priority. This is used when sorting
    /// out conflicts during matching and completion.
    ///
    /// The `priority` of a `Parameter` defaults to `PRIORITY_PARAMETER`
    /// except for when the `kind` is `ParameterKind::Flag` in which
    /// case, the default will be `PRIORITY_DEFAULT`.
    pub fn priority(self, priority: i32) -> Self {
        Parameter { priority: Some(priority), ..self }
    }
    /// Establish whether or not this parameter is repeatable.
    /// Repeated parameters produce a vector of values and can
    /// be given multiple times within a single command invocation.
    pub fn repeatable(self, repeatable: bool) -> Self {
        Parameter { repeatable: repeatable, ..self }
    }
    /// Add an alias that this parameter can use.
    ///
    /// Aliases are currently only valid for parameters of `kind`
    /// `ParameterKind::Named`.
    pub fn alias(self, alias: &'a str) -> Self {
        let mut parameter = self;
        parameter.aliases.push(alias);
        parameter
    }
    /// Supply the help text for the parameter.
    pub fn help(self, help_text: &'a str) -> Self {
        Parameter { help_text: Some(help_text), ..self }
    }
    /// Establish whether or not this parameter is required.
    pub fn required(self, required: bool) -> Self {
        Parameter { required: required, ..self }
    }
    /// Set which type of [`ParameterNode`] is supposed to be created
    /// to represent this parameter.
    ///
    /// [`ParameterNode`]: trait.ParameterNode.html
    pub fn kind(self, kind: ParameterKind) -> Self {
        Parameter { kind: kind, ..self }
    }
}
|
use memory::MemSegment;
use cart::{Cart, ScreenMode};
use std::rc::Rc;
use std::cell::RefCell;
use super::Color;
use super::PaletteIndex;
use super::TilePattern;
/// Represents the PPU's memory map.
pub struct PPUMemory {
// Cartridge providing CHR memory and the current mirroring mode.
cart: Rc<RefCell<Cart>>,
// Nametable/attribute RAM backing store.
// NOTE(review): sized 0x0F00 rather than the usual 0x0800; the modulo in
// `translate_vram_address` depends on this length — confirm intent.
vram: [u8; 0x0F00],
// Palette RAM: 32 entries; $3F00-$3FFF mirroring handled at read/write time.
palette: [Color; 0x20],
}
/// Return the base VRAM address of each of the four logical nametables
/// under the given mirroring mode.
fn get_nametable_addrs(mode: ScreenMode) -> [u16; 4] {
    const NT_A: u16 = 0x2000;
    const NT_B: u16 = 0x2400;
    const NT_C: u16 = 0x2800;
    const NT_D: u16 = 0x2C00;
    match mode {
        ScreenMode::Vertical => [NT_A, NT_B, NT_A, NT_B],
        ScreenMode::Horizontal => [NT_A, NT_A, NT_B, NT_B],
        ScreenMode::OneScreenLow => [NT_A; 4],
        ScreenMode::OneScreenHigh => [NT_B; 4],
        ScreenMode::FourScreen => [NT_A, NT_B, NT_C, NT_D],
    }
}
impl PPUMemory {
    /// Create a PPU memory map backed by `cart`, with VRAM and palette
    /// RAM cleared to zero.
    pub fn new(cart: Rc<RefCell<Cart>>) -> PPUMemory {
        let blank = Color::from_bits_truncate(0);
        PPUMemory {
            cart: cart,
            vram: [0; 0x0F00],
            palette: [blank; 0x20],
        }
    }
}
/// Compose a pattern-table address from its bit fields:
/// `tile_table | tile_id << 4 | plane | fine_y_scroll`.
///
/// `plane` must be 0 (low bitplane) or 8 (high bitplane); `tile_table`
/// must be 0x0000 or 0x1000.
fn get_tile_addr(tile_id: u8, plane: u8, fine_y_scroll: u16, tile_table: u16) -> u16 {
    tile_table | ((tile_id as u16) << 4) | (plane as u16) | fine_y_scroll
}
impl PPUMemory {
/// Read a byte of nametable VRAM, bypassing the palette handling that
/// `MemSegment::read` applies to $3F00-$3FFF.
pub fn read_bypass_palette(&mut self, idx: u16) -> u8 {
let idx = self.translate_vram_address(idx);
self.vram[idx]
}
/// Map a nametable address onto an index into `vram`, applying the
/// cartridge's current mirroring mode.
fn translate_vram_address(&self, idx: u16) -> usize {
// Keep only the offset within the 4 KiB nametable window.
let idx = idx & 0x0FFF;
let nametable_num = (idx / 0x0400) as usize;
let idx_in_nametable = idx % 0x400;
let mode = self.cart.borrow().get_mirroring_mode();
let translated = get_nametable_addrs(mode)[nametable_num] + idx_in_nametable;
// NOTE(review): the final modulo folds the $2000-based address into the
// 0x0F00-byte backing array; relies on vram.len() == 0x0F00.
translated as usize % self.vram.len()
}
/// Read a palette entry, truncating the stored byte to the bits a
/// `Color` can hold. Goes through the general-purpose `read`.
pub fn read_palette(&mut self, idx: PaletteIndex) -> Color {
let bits = self.read(idx.to_addr());
Color::from_bits_truncate(bits)
}
/// Fetch the low and high bitplane bytes for one row of a tile.
pub fn read_tile_pattern(&mut self,
tile_id: u8,
fine_y_scroll: u16,
tile_table: u16)
-> TilePattern {
// Plane is 0 for the low byte and 8 for the high byte.
let lo_addr = get_tile_addr(tile_id, 0, fine_y_scroll, tile_table);
let hi_addr = get_tile_addr(tile_id, 8, fine_y_scroll, tile_table);
TilePattern {
lo: self.read(lo_addr),
hi: self.read(hi_addr),
}
}
/// Debug helper: print the 0x3C0 tile bytes of nametable `idx`,
/// 32 per row (via `print_columns`, defined elsewhere in this module).
#[allow(dead_code)]
pub fn dump_nametable(&mut self, idx: u16) {
let start_idx = 0x2000 + (idx * 0x400);
println!("Nametable {}:", idx);
self.print_columns(start_idx..(start_idx + 0x3C0), 32)
}
/// Debug helper: print the 64 attribute bytes of nametable `idx`.
#[allow(dead_code)]
pub fn dump_attribute_table(&mut self, idx: u16) {
let start_idx = 0x2000 + (idx * 0x400);
println!("Attribute table {}:", idx);
self.print_columns((start_idx + 0x3C0)..(start_idx + 0x400), 32);
}
}
impl MemSegment for PPUMemory {
/// Read from PPU address space: $0000-$1FFF is cartridge CHR,
/// $2000-$3EFF is nametable VRAM, $3F00-$3FFF is palette RAM.
fn read(&mut self, idx: u16) -> u8 {
match idx {
0x0000...0x1FFF => {
let mut cart = self.cart.borrow_mut();
cart.chr_read(idx)
}
0x2000...0x3EFF => self.read_bypass_palette(idx),
0x3F00...0x3FFF => {
// Palette addressing repeats every 32 bytes; entries 0x10/0x14/
// 0x18/0x1C alias 0x00/0x04/0x08/0x0C.
match (idx & 0x001F) as usize {
0x10 => self.palette[0x00],
0x14 => self.palette[0x04],
0x18 => self.palette[0x08],
0x1C => self.palette[0x0C],
x => self.palette[x],
}
.bits()
}
x => invalid_address!(x),
}
}
/// Write to PPU address space; same mapping and palette aliasing as `read`.
fn write(&mut self, idx: u16, val: u8) {
match idx {
0x0000...0x1FFF => {
let mut cart = self.cart.borrow_mut();
cart.chr_write(idx, val)
}
0x2000...0x3EFF => {
let idx = self.translate_vram_address(idx);
self.vram[idx] = val;
}
0x3F00...0x3FFF => {
// Stored truncated to the bits Color can represent.
let val = Color::from_bits_truncate(val);
match (idx & 0x001F) as usize {
0x10 => self.palette[0x00] = val,
0x14 => self.palette[0x04] = val,
0x18 => self.palette[0x08] = val,
0x1C => self.palette[0x0C] = val,
x => self.palette[x] = val,
}
}
x => invalid_address!(x),
}
}
}
#[cfg(test)]
mod tests {
use memory::MemSegment;
use ppu::tests::*;
use ppu::{Color, PPU};
use cart::ScreenMode;
// Reads/writes go through the $2007 data port with the address in reg.v.
// NOTE(review): the final assert expects Color to truncate to 6 bits
// (212 & 0x3F) — tied to Color::from_bits_truncate's bit definition.
#[test]
fn ppu_can_read_write_palette() {
let mut ppu = create_test_ppu();
ppu.reg.v = 0x3F00;
ppu.write(0x2007, 12);
ppu.reg.v = 0x3F00;
assert_eq!(ppu.ppu_mem.palette[0], Color::from_bits_truncate(12));
ppu.reg.v = 0x3F01;
ppu.write(0x2007, 212);
ppu.reg.v = 0x3F01;
assert_eq!(ppu.read(0x2007), 212 & 0x3F);
}
// $3F10/$14/$18/$1C must alias $3F00/$04/$08/$0C in both directions.
#[test]
fn test_palette_mirroring() {
let mut ppu = create_test_ppu();
let mirrors = [0x3F10, 0x3F14, 0x3F18, 0x3F1C];
let targets = [0x3F00, 0x3F04, 0x3F08, 0x3F0C];
for x in 0..4 {
ppu.reg.v = targets[x];
ppu.write(0x2007, 12);
ppu.reg.v = mirrors[x];
assert_eq!(ppu.read(0x2007), 12);
ppu.reg.v = mirrors[x];
ppu.write(0x2007, 12);
ppu.reg.v = targets[x];
assert_eq!(ppu.read(0x2007), 12);
}
}
// Address of offset `idx` within logical nametable `tbl`.
fn to_nametable_idx(idx: u16, tbl: u16) -> u16 {
0x2000 + (0x0400 * tbl) + idx
}
// Assert that two logical nametables share backing storage.
fn assert_mirrored(ppu: &mut PPU, tbl1: u16, tbl2: u16) {
for idx in 0x0000..0x0400 {
let tbl1_idx = to_nametable_idx(idx, tbl1);
let tbl2_idx = to_nametable_idx(idx, tbl2);
println!("Translated: tbl1: {:04X}, tbl2: {:04X}",
ppu.ppu_mem.translate_vram_address(tbl1_idx),
ppu.ppu_mem.translate_vram_address(tbl2_idx),
);
ppu.ppu_mem.write(tbl1_idx, 0xFF);
assert_eq!(0xFF, ppu.ppu_mem.read(tbl2_idx));
ppu.ppu_mem.write(tbl2_idx, 0x61);
assert_eq!(0x61, ppu.ppu_mem.read(tbl1_idx));
}
}
// Assert that two logical nametables have distinct backing storage.
fn assert_not_mirrored(ppu: &mut PPU, tbl1: u16, tbl2: u16) {
for idx in 0x0000..0x0400 {
let tbl1_idx = to_nametable_idx(idx, tbl1);
let tbl2_idx = to_nametable_idx(idx, tbl2);
println!("Translated: tbl1: {:04X}, tbl2: {:04X}",
ppu.ppu_mem.translate_vram_address(tbl1_idx),
ppu.ppu_mem.translate_vram_address(tbl2_idx),
);
ppu.ppu_mem.write(tbl1_idx, 0x00);
ppu.ppu_mem.write(tbl2_idx, 0x00);
ppu.ppu_mem.write(tbl1_idx, 0xFF);
assert_eq!(0x00, ppu.ppu_mem.read(tbl2_idx));
ppu.ppu_mem.write(tbl2_idx, 0x61);
assert_eq!(0xFF, ppu.ppu_mem.read(tbl1_idx));
}
}
#[test]
fn single_screen_mirroring_mirrors_both_ways() {
let mut ppu = create_test_ppu_with_mirroring(ScreenMode::OneScreenLow);
assert_mirrored(&mut ppu, 0, 1);
assert_mirrored(&mut ppu, 1, 2);
assert_mirrored(&mut ppu, 2, 3);
}
#[test]
fn four_screen_mirroring_mirrors_both_ways() {
let mut ppu = create_test_ppu_with_mirroring(ScreenMode::FourScreen);
assert_not_mirrored(&mut ppu, 0, 1);
assert_not_mirrored(&mut ppu, 1, 2);
assert_not_mirrored(&mut ppu, 2, 3);
}
#[test]
fn horizontal_mirroring_mirrors_horizontally() {
let mut ppu = create_test_ppu_with_mirroring(ScreenMode::Horizontal);
assert_mirrored(&mut ppu, 0, 1);
assert_mirrored(&mut ppu, 2, 3);
assert_not_mirrored(&mut ppu, 0, 2);
assert_not_mirrored(&mut ppu, 1, 3);
}
#[test]
fn vertical_mirroring_mirrors_vertically() {
let mut ppu = create_test_ppu_with_mirroring(ScreenMode::Vertical);
assert_not_mirrored(&mut ppu, 0, 1);
assert_not_mirrored(&mut ppu, 2, 3);
assert_mirrored(&mut ppu, 0, 2);
assert_mirrored(&mut ppu, 1, 3);
}
}
Optimized the `read_palette` function: it can only ever read from palette memory, so there is no need to call the general-purpose `PPUMemory::read` method.
use memory::MemSegment;
use cart::{Cart, ScreenMode};
use std::rc::Rc;
use std::cell::RefCell;
use super::Color;
use super::PaletteIndex;
use super::TilePattern;
/// Represents the PPU's memory map.
pub struct PPUMemory {
// Cartridge providing CHR memory and the current mirroring mode.
cart: Rc<RefCell<Cart>>,
// Nametable/attribute RAM backing store.
// NOTE(review): sized 0x0F00 rather than the usual 0x0800; the modulo in
// `translate_vram_address` depends on this length — confirm intent.
vram: [u8; 0x0F00],
// Palette RAM: 32 entries; $3F00-$3FFF mirroring handled at read/write time.
palette: [Color; 0x20],
}
/// Return the base VRAM address of each of the four logical nametables
/// under the given mirroring mode.
fn get_nametable_addrs(mode: ScreenMode) -> [u16; 4] {
    const NT_A: u16 = 0x2000;
    const NT_B: u16 = 0x2400;
    const NT_C: u16 = 0x2800;
    const NT_D: u16 = 0x2C00;
    match mode {
        ScreenMode::Vertical => [NT_A, NT_B, NT_A, NT_B],
        ScreenMode::Horizontal => [NT_A, NT_A, NT_B, NT_B],
        ScreenMode::OneScreenLow => [NT_A; 4],
        ScreenMode::OneScreenHigh => [NT_B; 4],
        ScreenMode::FourScreen => [NT_A, NT_B, NT_C, NT_D],
    }
}
impl PPUMemory {
    /// Create a PPU memory map backed by `cart`, with VRAM and palette
    /// RAM cleared to zero.
    pub fn new(cart: Rc<RefCell<Cart>>) -> PPUMemory {
        let blank = Color::from_bits_truncate(0);
        PPUMemory {
            cart: cart,
            vram: [0; 0x0F00],
            palette: [blank; 0x20],
        }
    }
}
/// Compose a pattern-table address from its bit fields:
/// `tile_table | tile_id << 4 | plane | fine_y_scroll`.
///
/// `plane` must be 0 (low bitplane) or 8 (high bitplane); `tile_table`
/// must be 0x0000 or 0x1000.
fn get_tile_addr(tile_id: u8, plane: u8, fine_y_scroll: u16, tile_table: u16) -> u16 {
    tile_table | ((tile_id as u16) << 4) | (plane as u16) | fine_y_scroll
}
impl PPUMemory {
    /// Read a byte of nametable VRAM, bypassing the palette handling that
    /// `MemSegment::read` applies to $3F00-$3FFF.
    pub fn read_bypass_palette(&mut self, idx: u16) -> u8 {
        let idx = self.translate_vram_address(idx);
        self.vram[idx]
    }
    /// Map a nametable address onto an index into `vram`, applying the
    /// cartridge's current mirroring mode.
    fn translate_vram_address(&self, idx: u16) -> usize {
        // Keep only the offset within the 4 KiB nametable window.
        let idx = idx & 0x0FFF;
        let nametable_num = (idx / 0x0400) as usize;
        let idx_in_nametable = idx % 0x400;
        let mode = self.cart.borrow().get_mirroring_mode();
        let translated = get_nametable_addrs(mode)[nametable_num] + idx_in_nametable;
        // Folds the $2000-based address into the 0x0F00-byte backing array.
        translated as usize % self.vram.len()
    }
    /// Read a palette entry directly, without going through the
    /// general-purpose `MemSegment::read`.
    pub fn read_palette(&mut self, idx: PaletteIndex) -> Color {
        self.read_palette_mem(idx.to_addr() as usize)
    }
    /// Look up a palette entry for a $3F00-$3FFF address, aliasing
    /// entries 0x10/0x14/0x18/0x1C onto 0x00/0x04/0x08/0x0C.
    fn read_palette_mem(&self, idx: usize) -> Color {
        // BUG FIX: this used `idx % 0x1F` (modulo 31), which maps e.g.
        // 0x3F00 to entry 8 instead of entry 0 and breaks the mirroring
        // the write path implements with `& 0x001F`. Mask, don't modulo.
        match idx & 0x1F {
            0x10 => self.palette[0x00],
            0x14 => self.palette[0x04],
            0x18 => self.palette[0x08],
            0x1C => self.palette[0x0C],
            x => self.palette[x],
        }
    }
    /// Fetch the low and high bitplane bytes for one row of a tile.
    pub fn read_tile_pattern(&mut self,
                             tile_id: u8,
                             fine_y_scroll: u16,
                             tile_table: u16)
                             -> TilePattern {
        // Plane is 0 for the low byte and 8 for the high byte.
        let lo_addr = get_tile_addr(tile_id, 0, fine_y_scroll, tile_table);
        let hi_addr = get_tile_addr(tile_id, 8, fine_y_scroll, tile_table);
        TilePattern {
            lo: self.read(lo_addr),
            hi: self.read(hi_addr),
        }
    }
    /// Debug helper: print the 0x3C0 tile bytes of nametable `idx`,
    /// 32 per row (via `print_columns`, defined elsewhere in this module).
    #[allow(dead_code)]
    pub fn dump_nametable(&mut self, idx: u16) {
        let start_idx = 0x2000 + (idx * 0x400);
        println!("Nametable {}:", idx);
        self.print_columns(start_idx..(start_idx + 0x3C0), 32)
    }
    /// Debug helper: print the 64 attribute bytes of nametable `idx`.
    #[allow(dead_code)]
    pub fn dump_attribute_table(&mut self, idx: u16) {
        let start_idx = 0x2000 + (idx * 0x400);
        println!("Attribute table {}:", idx);
        self.print_columns((start_idx + 0x3C0)..(start_idx + 0x400), 32);
    }
}
impl MemSegment for PPUMemory {
/// Read from PPU address space: $0000-$1FFF is cartridge CHR,
/// $2000-$3EFF is nametable VRAM, $3F00-$3FFF is palette RAM.
fn read(&mut self, idx: u16) -> u8 {
match idx {
0x0000...0x1FFF => {
let mut cart = self.cart.borrow_mut();
cart.chr_read(idx)
}
0x2000...0x3EFF => self.read_bypass_palette(idx),
// NOTE(review): verify read_palette_mem applies the same `& 0x001F`
// aliasing as the write arm below.
0x3F00...0x3FFF => self.read_palette_mem(idx as usize).bits(),
x => invalid_address!(x),
}
}
/// Write to PPU address space; palette writes alias $3F10/$14/$18/$1C
/// onto $3F00/$04/$08/$0C.
fn write(&mut self, idx: u16, val: u8) {
match idx {
0x0000...0x1FFF => {
let mut cart = self.cart.borrow_mut();
cart.chr_write(idx, val)
}
0x2000...0x3EFF => {
let idx = self.translate_vram_address(idx);
self.vram[idx] = val;
}
0x3F00...0x3FFF => {
// Stored truncated to the bits Color can represent.
let val = Color::from_bits_truncate(val);
match (idx & 0x001F) as usize {
0x10 => self.palette[0x00] = val,
0x14 => self.palette[0x04] = val,
0x18 => self.palette[0x08] = val,
0x1C => self.palette[0x0C] = val,
x => self.palette[x] = val,
}
}
x => invalid_address!(x),
}
}
}
#[cfg(test)]
mod tests {
use memory::MemSegment;
use ppu::tests::*;
use ppu::{Color, PPU};
use cart::ScreenMode;
// Reads/writes go through the $2007 data port with the address in reg.v.
// NOTE(review): the final assert expects Color to truncate to 6 bits
// (212 & 0x3F) — tied to Color::from_bits_truncate's bit definition.
#[test]
fn ppu_can_read_write_palette() {
let mut ppu = create_test_ppu();
ppu.reg.v = 0x3F00;
ppu.write(0x2007, 12);
ppu.reg.v = 0x3F00;
assert_eq!(ppu.ppu_mem.palette[0], Color::from_bits_truncate(12));
ppu.reg.v = 0x3F01;
ppu.write(0x2007, 212);
ppu.reg.v = 0x3F01;
assert_eq!(ppu.read(0x2007), 212 & 0x3F);
}
// $3F10/$14/$18/$1C must alias $3F00/$04/$08/$0C in both directions.
#[test]
fn test_palette_mirroring() {
let mut ppu = create_test_ppu();
let mirrors = [0x3F10, 0x3F14, 0x3F18, 0x3F1C];
let targets = [0x3F00, 0x3F04, 0x3F08, 0x3F0C];
for x in 0..4 {
ppu.reg.v = targets[x];
ppu.write(0x2007, 12);
ppu.reg.v = mirrors[x];
assert_eq!(ppu.read(0x2007), 12);
ppu.reg.v = mirrors[x];
ppu.write(0x2007, 12);
ppu.reg.v = targets[x];
assert_eq!(ppu.read(0x2007), 12);
}
}
// Address of offset `idx` within logical nametable `tbl`.
fn to_nametable_idx(idx: u16, tbl: u16) -> u16 {
0x2000 + (0x0400 * tbl) + idx
}
// Assert that two logical nametables share backing storage.
fn assert_mirrored(ppu: &mut PPU, tbl1: u16, tbl2: u16) {
for idx in 0x0000..0x0400 {
let tbl1_idx = to_nametable_idx(idx, tbl1);
let tbl2_idx = to_nametable_idx(idx, tbl2);
println!("Translated: tbl1: {:04X}, tbl2: {:04X}",
ppu.ppu_mem.translate_vram_address(tbl1_idx),
ppu.ppu_mem.translate_vram_address(tbl2_idx),
);
ppu.ppu_mem.write(tbl1_idx, 0xFF);
assert_eq!(0xFF, ppu.ppu_mem.read(tbl2_idx));
ppu.ppu_mem.write(tbl2_idx, 0x61);
assert_eq!(0x61, ppu.ppu_mem.read(tbl1_idx));
}
}
// Assert that two logical nametables have distinct backing storage.
fn assert_not_mirrored(ppu: &mut PPU, tbl1: u16, tbl2: u16) {
for idx in 0x0000..0x0400 {
let tbl1_idx = to_nametable_idx(idx, tbl1);
let tbl2_idx = to_nametable_idx(idx, tbl2);
println!("Translated: tbl1: {:04X}, tbl2: {:04X}",
ppu.ppu_mem.translate_vram_address(tbl1_idx),
ppu.ppu_mem.translate_vram_address(tbl2_idx),
);
ppu.ppu_mem.write(tbl1_idx, 0x00);
ppu.ppu_mem.write(tbl2_idx, 0x00);
ppu.ppu_mem.write(tbl1_idx, 0xFF);
assert_eq!(0x00, ppu.ppu_mem.read(tbl2_idx));
ppu.ppu_mem.write(tbl2_idx, 0x61);
assert_eq!(0xFF, ppu.ppu_mem.read(tbl1_idx));
}
}
#[test]
fn single_screen_mirroring_mirrors_both_ways() {
let mut ppu = create_test_ppu_with_mirroring(ScreenMode::OneScreenLow);
assert_mirrored(&mut ppu, 0, 1);
assert_mirrored(&mut ppu, 1, 2);
assert_mirrored(&mut ppu, 2, 3);
}
#[test]
fn four_screen_mirroring_mirrors_both_ways() {
let mut ppu = create_test_ppu_with_mirroring(ScreenMode::FourScreen);
assert_not_mirrored(&mut ppu, 0, 1);
assert_not_mirrored(&mut ppu, 1, 2);
assert_not_mirrored(&mut ppu, 2, 3);
}
#[test]
fn horizontal_mirroring_mirrors_horizontally() {
let mut ppu = create_test_ppu_with_mirroring(ScreenMode::Horizontal);
assert_mirrored(&mut ppu, 0, 1);
assert_mirrored(&mut ppu, 2, 3);
assert_not_mirrored(&mut ppu, 0, 2);
assert_not_mirrored(&mut ppu, 1, 3);
}
#[test]
fn vertical_mirroring_mirrors_vertically() {
let mut ppu = create_test_ppu_with_mirroring(ScreenMode::Vertical);
assert_not_mirrored(&mut ppu, 0, 1);
assert_not_mirrored(&mut ppu, 2, 3);
assert_mirrored(&mut ppu, 0, 2);
assert_mirrored(&mut ppu, 1, 3);
}
}
|
use yaml_rust::Yaml;
use command::{Command, Session, SendKeys, Split, Layout, Window, Window2, Attach};
#[cfg(test)] use yaml_rust::{YamlLoader};
/// Translate the parsed YAML project documents into the ordered list of
/// tmux commands: a Session for the first window, a Window per remaining
/// entry, pane/layout commands where configured, and a final Attach.
pub fn main(yaml_string: &Vec<Yaml>, project_name: String) -> Vec<Command> {
    let mut commands: Vec<Command> = vec!();
    for doc in yaml_string {
        // Optional project-wide working directory.
        // Idiom: Option::map replaces the manual Some/None match.
        let root = doc["root"].as_str().map(|x| x.to_string());
        // The first window is created together with the session itself.
        let (first_window, windows) = doc["windows"].as_vec().expect("No Windows have been defined.").split_at(1);
        match &first_window[0] {
            &Yaml::Hash(ref h) => {
                for (k, v) in h {
                    if v.as_hash().is_some() {
                        // A hash value carries pane/layout config for the window.
                        commands.push(Command::Session(Session{name: project_name.clone(), window_name: k.as_str().unwrap().to_string(), root: root.clone()}));
                        commands.append(&mut pane_matcher(project_name.clone(), v, root.clone(), k.as_str().unwrap().to_string()));
                    } else {
                        commands.push(Command::Session(Session{name: project_name.clone(), window_name: k.as_str().unwrap().to_string(), root: root.clone()}));
                        commands.push(Command::SendKeys(SendKeys{target: format!("{}:{}", project_name, k.as_str().unwrap().to_string()).to_string(), exec: v.as_str().expect("Bad exec command").to_string()}));
                    }
                }
            },
            &Yaml::String(ref s) => {
                commands.push(Command::Session(Session{name: project_name.clone(), window_name: s.clone(), root: root.clone()}))
            },
            &Yaml::Integer(ref s) => {
                commands.push(Command::Session(Session{name: project_name.clone(), window_name: s.to_string(), root: root.clone()}))
            },
            _ => panic!("Muxed config file formatting isn't recognized.")
        };
        // FIX: the index from `enumerate()` was never used; iterate directly
        // to avoid the unused-variable warning.
        for window in windows.iter() {
            match window {
                &Yaml::Hash(ref h) => {
                    for (k, v) in h {
                        if v.as_hash().is_some() {
                            commands.push(Command::Window(Window{value: k.as_str().unwrap().to_string(), root: root.clone(), exec: "".to_string()}));
                            commands.append(&mut pane_matcher(project_name.clone(), v, root.clone(), k.as_str().unwrap().to_string()));
                        } else {
                            commands.push(Command::Window2(Window2{session_name: project_name.clone(), name: k.as_str().unwrap().to_string(), root: root.clone()}));
                            commands.push(Command::SendKeys(SendKeys{target: format!("{}:{}", project_name, k.as_str().unwrap().to_string()).to_string(), exec: v.as_str().expect("Bad exec command").to_string()}));
                        }
                    }
                },
                &Yaml::String(ref s) => {
                    commands.push(Command::Window2(Window2{session_name: project_name.clone(), name: s.clone(), root: root.clone()}))
                },
                &Yaml::Integer(ref s) => {
                    commands.push(Command::Window2(Window2{session_name: project_name.clone(), name: s.to_string(), root: root.clone()}))
                },
                _ => panic!("Muxed config file formatting isn't recognized.")
            };
        };
    };
    commands.push(Command::Attach(Attach{name: project_name.clone()}));
    commands
}
/// Build the Split/SendKeys/Layout commands for the panes of one window.
fn pane_matcher(session: String, panes: &Yaml, root: Option<String>, window: String) -> Vec<Command> {
    let mut commands = vec!();
    let panes2 = panes["panes"].as_vec().expect("Something is wrong with panes.");
    for (i, pane) in panes2.iter().enumerate() {
        // Every pane except the last needs a split to make room for the next.
        if i < (panes2.len()-1) {
            commands.push(Command::Split(Split{target: format!("{}:{}.{}", session, window, i).to_string(), root: root.clone()}));
        };
        commands.push(Command::SendKeys(SendKeys{target: format!("{}:{}.{}", session, window, i).to_string(), exec: pane.as_str().expect("Bad exec command").to_string()}));
    };
    // FIX: the bound `layout` was never used while the push re-read the
    // YAML; `if let` looks the value up once and uses it.
    if let Some(layout) = panes["layout"].as_str() {
        commands.push(Command::Layout(Layout{target: format!("{}:{}", session, window).to_string(), layout: layout.to_string()}));
    };
    commands
}
// 3 windows as a flat array: 1 session + 2 windows + 1 attach = 4 commands.
#[test]
pub fn windows_defined_as_array_has_4_commands() {
let s = "---
windows: ['cargo', 'vim', 'git']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
assert_eq!(main(&yaml, "muxed".to_string()).len(), 4)
}
// Only the first window becomes a Session command.
#[test]
pub fn windows_defined_as_array_has_1_session() {
let s = "---
windows: ['cargo', 'vim', 'git']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Session(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 1)
}
// The remaining two windows become Window2 commands.
#[test]
pub fn windows_defined_as_array_has_2_windows() {
let s = "---
windows: ['cargo', 'vim', 'git']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Window2(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 2)
}
// Exactly one Attach command is appended at the end.
#[test]
pub fn windows_defined_as_array_has_1_attach() {
let s = "---
windows: ['cargo', 'vim', 'git']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Attach(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 1)
}
// Integer window names are accepted (Yaml::Integer branch).
#[test]
pub fn windows_with_integer_names() {
let s = "---
windows: [1, 'vim', 3]
";
let yaml = YamlLoader::load_from_str(s).unwrap();
assert_eq!(main(&yaml, "muxed".to_string()).len(), 4)
}
// Hash-style windows with empty execs: session + sendkeys + 2x(window + sendkeys) + attach.
#[test]
pub fn windows_as_list() {
let s = "---
windows:
- cargo: ''
- vim: ''
- git: ''
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let commands = main(&yaml, "muxed".to_string());
assert_eq!(commands.len(), 7)
}
// A top-level `root` key is carried into the Session command.
#[test]
pub fn root_command() {
let s = "---
root: '~/.muxed'
windows:
- cargo: ''
- vim: ''
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let commands = main(&yaml, "muxed".to_string());
let first_window: Option<Session> = match commands[0].clone() {
Command::Session(w) => Some(w),
_ => None
};
assert_eq!(first_window.unwrap().root.unwrap(), "~/.muxed".to_string())
}
// NOTE(review): the name says 7 but the assertion expects 6
// (session + split + 2 sendkeys + layout + attach) — rename or fix?
#[test]
pub fn panes_array_has_7_commands() {
let s = "---
windows:
- editor:
layout: 'main-vertical'
panes: ['vim', 'guard']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let commands = main(&yaml, "muxed".to_string());
println!("{:?}", commands);
assert_eq!(commands.len(), 6)
}
// Two panes need exactly one Split between them.
#[test]
pub fn panes_array_has_1_split() {
let s = "---
windows:
- editor:
layout: 'main-vertical'
panes: ['vim', 'guard']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Split(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 1)
}
// A configured layout yields exactly one Layout command.
#[test]
pub fn panes_array_has_1_layout() {
let s = "---
windows:
- editor:
layout: 'main-vertical'
panes: ['vim', 'guard']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Layout(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 1)
}
// A single pane-bearing first window produces no Window2 command.
#[test]
pub fn panes_array_has_no_window() {
let s = "---
windows:
- editor:
layout: 'main-vertical'
panes: ['vim', 'guard']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Window2(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 0)
}
// The pane-bearing first window still creates exactly one Session.
#[test]
pub fn panes_array_has_1_session() {
let s = "---
windows:
- editor:
layout: 'main-vertical'
panes: ['vim', 'guard']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Session(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 1)
}
//#[test]
//pub fn panes_command_execs_array() {
// let s = "---
//windows:
// - editor:
// layout: 'main-vertical'
// panes: ['vim', 'guard']
//";
// let yaml = YamlLoader::load_from_str(s).unwrap();
// let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
// &Command::SendKeys(_) => true,
// _ => false
// }).collect();
//
// assert_eq!(remains.len(), 2);
// assert_eq!(remains[0].exec, "vim");
// assert_eq!(remains[1].exec, "guard")
//}
Minor refactoring: dropped the unused `enumerate` index when iterating over the remaining windows, and reused the already-looked-up layout value instead of reading it from the YAML a second time.
use yaml_rust::Yaml;
use command::{Command, Session, SendKeys, Split, Layout, Window, Window2, Attach};
#[cfg(test)] use yaml_rust::{YamlLoader};
/// Translate the parsed YAML project documents into the ordered list of
/// tmux commands: a Session for the first window, a Window per remaining
/// entry, pane/layout commands where configured, and a final Attach.
pub fn main(yaml_string: &Vec<Yaml>, project_name: String) -> Vec<Command> {
    let mut commands: Vec<Command> = vec!();
    for doc in yaml_string {
        // Optional project-wide working directory.
        // Idiom: Option::map replaces the manual Some/None match.
        let root = doc["root"].as_str().map(|x| x.to_string());
        // The first window is created together with the session itself.
        let (first_window, windows) = doc["windows"].as_vec().expect("No Windows have been defined.").split_at(1);
        match &first_window[0] {
            &Yaml::Hash(ref h) => {
                for (k, v) in h {
                    if v.as_hash().is_some() {
                        // A hash value carries pane/layout config for the window.
                        commands.push(Command::Session(Session{name: project_name.clone(), window_name: k.as_str().unwrap().to_string(), root: root.clone()}));
                        commands.append(&mut pane_matcher(project_name.clone(), v, root.clone(), k.as_str().unwrap().to_string()));
                    } else {
                        commands.push(Command::Session(Session{name: project_name.clone(), window_name: k.as_str().unwrap().to_string(), root: root.clone()}));
                        commands.push(Command::SendKeys(SendKeys{target: format!("{}:{}", project_name, k.as_str().unwrap().to_string()).to_string(), exec: v.as_str().expect("Bad exec command").to_string()}));
                    }
                }
            },
            &Yaml::String(ref s) => {
                commands.push(Command::Session(Session{name: project_name.clone(), window_name: s.clone(), root: root.clone()}))
            },
            &Yaml::Integer(ref s) => {
                commands.push(Command::Session(Session{name: project_name.clone(), window_name: s.to_string(), root: root.clone()}))
            },
            _ => panic!("Muxed config file formatting isn't recognized.")
        };
        // Remaining windows become Window/Window2 commands.
        for window in windows.iter() {
            match window {
                &Yaml::Hash(ref h) => {
                    for (k, v) in h {
                        if v.as_hash().is_some() {
                            commands.push(Command::Window(Window{value: k.as_str().unwrap().to_string(), root: root.clone(), exec: "".to_string()}));
                            commands.append(&mut pane_matcher(project_name.clone(), v, root.clone(), k.as_str().unwrap().to_string()));
                        } else {
                            commands.push(Command::Window2(Window2{session_name: project_name.clone(), name: k.as_str().unwrap().to_string(), root: root.clone()}));
                            commands.push(Command::SendKeys(SendKeys{target: format!("{}:{}", project_name, k.as_str().unwrap().to_string()).to_string(), exec: v.as_str().expect("Bad exec command").to_string()}));
                        }
                    }
                },
                &Yaml::String(ref s) => {
                    commands.push(Command::Window2(Window2{session_name: project_name.clone(), name: s.clone(), root: root.clone()}))
                },
                &Yaml::Integer(ref s) => {
                    commands.push(Command::Window2(Window2{session_name: project_name.clone(), name: s.to_string(), root: root.clone()}))
                },
                _ => panic!("Muxed config file formatting isn't recognized.")
            };
        };
    };
    commands.push(Command::Attach(Attach{name: project_name.clone()}));
    commands
}
/// Build the Split/SendKeys/Layout commands for the panes of one window.
fn pane_matcher(session: String, panes: &Yaml, root: Option<String>, window: String) -> Vec<Command> {
    let mut commands = vec!();
    let panes2 = panes["panes"].as_vec().expect("Something is wrong with panes.");
    for (i, pane) in panes2.iter().enumerate() {
        // Every pane except the last needs a split to make room for the next.
        if i < (panes2.len()-1) {
            commands.push(Command::Split(Split{target: format!("{}:{}.{}", session, window, i).to_string(), root: root.clone()}));
        };
        commands.push(Command::SendKeys(SendKeys{target: format!("{}:{}.{}", session, window, i).to_string(), exec: pane.as_str().expect("Bad exec command").to_string()}));
    };
    // FIX: `is_some()` followed by a second lookup + `expect` read the
    // layout twice; `if let` does one lookup with no panic path.
    if let Some(layout) = panes["layout"].as_str() {
        commands.push(Command::Layout(Layout{target: format!("{}:{}", session, window).to_string(), layout: layout.to_string()}));
    };
    commands
}
// 3 windows as a flat array: 1 session + 2 windows + 1 attach = 4 commands.
#[test]
pub fn windows_defined_as_array_has_4_commands() {
let s = "---
windows: ['cargo', 'vim', 'git']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
assert_eq!(main(&yaml, "muxed".to_string()).len(), 4)
}
// Only the first window becomes a Session command.
#[test]
pub fn windows_defined_as_array_has_1_session() {
let s = "---
windows: ['cargo', 'vim', 'git']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Session(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 1)
}
// The remaining two windows become Window2 commands.
#[test]
pub fn windows_defined_as_array_has_2_windows() {
let s = "---
windows: ['cargo', 'vim', 'git']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Window2(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 2)
}
// Exactly one Attach command is appended at the end.
#[test]
pub fn windows_defined_as_array_has_1_attach() {
let s = "---
windows: ['cargo', 'vim', 'git']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Attach(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 1)
}
// Integer window names are accepted (Yaml::Integer branch).
#[test]
pub fn windows_with_integer_names() {
let s = "---
windows: [1, 'vim', 3]
";
let yaml = YamlLoader::load_from_str(s).unwrap();
assert_eq!(main(&yaml, "muxed".to_string()).len(), 4)
}
// Hash-style windows with empty execs: session + sendkeys + 2x(window + sendkeys) + attach.
#[test]
pub fn windows_as_list() {
let s = "---
windows:
- cargo: ''
- vim: ''
- git: ''
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let commands = main(&yaml, "muxed".to_string());
assert_eq!(commands.len(), 7)
}
// A top-level `root` key is carried into the Session command.
#[test]
pub fn root_command() {
let s = "---
root: '~/.muxed'
windows:
- cargo: ''
- vim: ''
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let commands = main(&yaml, "muxed".to_string());
let first_window: Option<Session> = match commands[0].clone() {
Command::Session(w) => Some(w),
_ => None
};
assert_eq!(first_window.unwrap().root.unwrap(), "~/.muxed".to_string())
}
// NOTE(review): the name says 7 but the assertion expects 6
// (session + split + 2 sendkeys + layout + attach) — rename or fix?
#[test]
pub fn panes_array_has_7_commands() {
let s = "---
windows:
- editor:
layout: 'main-vertical'
panes: ['vim', 'guard']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let commands = main(&yaml, "muxed".to_string());
println!("{:?}", commands);
assert_eq!(commands.len(), 6)
}
// Two panes need exactly one Split between them.
#[test]
pub fn panes_array_has_1_split() {
let s = "---
windows:
- editor:
layout: 'main-vertical'
panes: ['vim', 'guard']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Split(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 1)
}
// A configured layout yields exactly one Layout command.
#[test]
pub fn panes_array_has_1_layout() {
let s = "---
windows:
- editor:
layout: 'main-vertical'
panes: ['vim', 'guard']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Layout(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 1)
}
// A single pane-bearing first window produces no Window2 command.
#[test]
pub fn panes_array_has_no_window() {
let s = "---
windows:
- editor:
layout: 'main-vertical'
panes: ['vim', 'guard']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Window2(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 0)
}
// The pane-bearing first window still creates exactly one Session.
#[test]
pub fn panes_array_has_1_session() {
let s = "---
windows:
- editor:
layout: 'main-vertical'
panes: ['vim', 'guard']
";
let yaml = YamlLoader::load_from_str(s).unwrap();
let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
&Command::Session(_) => true,
_ => false
}).collect();
assert_eq!(remains.len(), 1)
}
//#[test]
//pub fn panes_command_execs_array() {
// let s = "---
//windows:
// - editor:
// layout: 'main-vertical'
// panes: ['vim', 'guard']
//";
// let yaml = YamlLoader::load_from_str(s).unwrap();
// let remains: Vec<Command> = main(&yaml, "muxed".to_string()).into_iter().filter(|x| match x {
// &Command::SendKeys(_) => true,
// _ => false
// }).collect();
//
// assert_eq!(remains.len(), 2);
// assert_eq!(remains[0].exec, "vim");
// assert_eq!(remains[1].exec, "guard")
//}
|
use calx::color::*;
use calx::{Rgba, Kernel, KernelTerrain};
use calx::backend::{Image};
use content::{TerrainType, Brush};
/// Surface angle for a visible sprite, used for dynamic lighting.
///
/// ```notrust
///
/// # north #
/// n n
/// w e
///
/// # x_ _y #
/// s -x_ _y- s
/// w -*- e
/// y- -x
/// # south #
/// ```
#[derive(Copy, Eq, PartialEq, Clone, Debug)]
pub enum Angle {
    Up,
    North,
    Northeast,
    Southeast,
    YWall,
    South,
    XWall,
    Southwest,
    Northwest,
}

/// Role a sprite plays inside a terrain cell.
#[derive(Copy, Eq, PartialEq, Clone, Debug)]
pub enum Purpose {
    /// A regular terrain sprite.
    Element,
    /// A filler sprite (see `render`).
    Filler,
}

impl Angle {
    /// Return the angle of the vertical surface, if any, in degrees.
    pub fn degree(&self) -> Option<f32> {
        // `Up` is the only horizontal surface; every other variant maps to a
        // fixed heading measured in degrees.
        let deg = match *self {
            Angle::Up => return None,
            Angle::North => 0.0,
            Angle::Northeast => 60.0,
            Angle::Southeast => 120.0,
            Angle::YWall => 150.0,
            Angle::South => 180.0,
            Angle::XWall => 210.0,
            Angle::Southwest => 240.0,
            Angle::Northwest => 300.0,
        };
        Some(deg)
    }
}
/// Generate draw instructions for a terrain cell.
///
/// Params to the draw function: sprite image, surface angle for lighting,
/// sprite purpose, main color, border color.
pub fn render<F>(k: &Kernel<TerrainType>, mut draw: F)
    where F: FnMut(Image, Angle, Purpose, Rgba, Rgba)
{
    use content::Brush::*;
    use self::Angle::*;
    use self::Purpose::*;

    // Shorthand draw commands for one cell. The `2`-suffixed variants carry
    // an explicit border color; the plain ones default the border to BLACK.
    enum T {
        Floor(Brush, Rgba),
        Floor2(Brush, Rgba, Rgba),
        Prop(Brush, Rgba),
        Prop2(Brush, Rgba, Rgba),
        Wall(Brush, Rgba),
        Wall2(Brush, Rgba, Rgba),
        Block(Brush, Rgba),
        Block2(Brush, Rgba, Rgba),
        Filler(Brush),
    }

    // Expand one draw command into calls to `draw`. The short-form variants
    // recurse once to fill in the default BLACK border color.
    fn process<C: KernelTerrain, F>(
        k: &Kernel<C>, draw: &mut F, kind: T)
        where F: FnMut(Image, Angle, Purpose, Rgba, Rgba)
    {
        match kind {
            T::Floor(brush, color) => process(k, draw, T::Floor2(brush, color, BLACK)),
            T::Prop(brush, color) => process(k, draw, T::Prop2(brush, color, BLACK)),
            T::Wall(brush, color) => process(k, draw, T::Wall2(brush, color, BLACK)),
            T::Block(brush, color) => process(k, draw, T::Block2(brush, color, BLACK)),
            T::Floor2(brush, color, back) => {
                // Floors lie flat, hence the `Up` angle.
                draw(brush.get(0), Up, Element, color, back);
            }
            T::Prop2(brush, color, back) => {
                draw(brush.get(0), South, Element, color, back);
            }
            T::Wall2(brush, color, back) => {
                // Frame choice depends on `wall_extends()` — presumably
                // whether the wall continues into adjacent cells; confirm.
                let extends = k.wall_extends();
                if extends[0] {
                    draw(brush.get(2), XWall, Element, color, back);
                } else {
                    draw(brush.get(0), XWall, Element, color, back);
                }
                if extends[1] {
                    draw(brush.get(3), YWall, Element, color, back);
                } else {
                    draw(brush.get(1), YWall, Element, color, back);
                }
            }
            T::Block2(brush, color, back) => {
                // Face indices map to N, NE, SE, S, SW, NW as used below.
                // The rear (northern) faces always use a BLACK border.
                let faces = k.block_faces();
                if faces[5] { draw(BlockRear.get(0), Northwest, Element, color, BLACK); }
                if faces[0] {
                    draw(BlockRear.get(1), North, Element, color, BLACK);
                    draw(BlockRear.get(2), North, Element, color, BLACK);
                }
                if faces[1] { draw(BlockRear.get(3), Northeast, Element, color, BLACK); }
                if faces[4] { draw(brush.get(0), Southwest, Element, color, back); }
                if faces[3] {
                    draw(brush.get(1), South, Element, color, back);
                    draw(brush.get(2), South, Element, color, back);
                }
                if faces[2] { draw(brush.get(3), Southeast, Element, color, back); }
            }
            T::Filler(brush) => {
                draw(brush.get(0), Up, Filler, BLACK, BLACK);
            }
        }
    }

    // Map the center terrain type to its list of draw commands, then emit
    // them in order (later commands draw on top of earlier ones).
    for i in match k.center {
        TerrainType::Void => vec![T::Floor(BlankFloor, MAGENTA)],
        TerrainType::Floor => vec![T::Floor(Floor, SLATEGRAY)],
        TerrainType::Water => vec![T::Floor(Water, ROYALBLUE)],
        TerrainType::Shallows => vec![T::Floor(Shallows, CORNFLOWERBLUE)],
        TerrainType::Magma => vec![T::Floor2(Water, YELLOW, DARKRED)],
        TerrainType::Downstairs => vec![T::Floor(StairsDown, SLATEGRAY)],
        TerrainType::Wall => vec![
            T::Filler(BlankFloor),
            T::Wall(BrickWall, LIGHTSLATEGRAY),
        ],
        TerrainType::RockWall => vec![
            T::Filler(BlankFloor),
            T::Wall(RockWall, LIGHTSLATEGRAY),
        ],
        TerrainType::Rock => vec![
            T::Filler(BlankFloor),
            T::Block(BlockRock, DARKGOLDENROD),
        ],
        TerrainType::Tree => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(TreeTrunk, SADDLEBROWN),
            T::Prop(TreeFoliage, GREEN),
        ],
        TerrainType::Grass => vec![T::Floor(Floor, DARKGREEN)],
        TerrainType::Grass2 => vec![T::Floor(Grass, DARKGREEN)],
        TerrainType::Stalagmite => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Stalagmite, DARKGOLDENROD),
        ],
        TerrainType::Door => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Wall(BrickOpenWall, LIGHTSLATEGRAY),
            T::Wall(DoorWall, SADDLEBROWN),
        ],
        TerrainType::OpenDoor => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Wall(BrickOpenWall, LIGHTSLATEGRAY),
        ],
        TerrainType::Window => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Wall(BrickWindowWall, LIGHTSLATEGRAY),
        ],
        TerrainType::Table => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Table, DARKGOLDENROD),
        ],
        TerrainType::Fence => vec![
            // The floor type beneath the fence tile is visible, make it grass
            // if there's grass behind the fence. Otherwise make it regular
            // floor.
            if k.n == TerrainType::Grass || k.ne == TerrainType::Grass || k.nw == TerrainType::Grass {
                T::Floor(Grass, GREEN)
            } else {
                T::Floor(Floor, SLATEGRAY)
            },
            T::Wall(FenceWall, DARKGOLDENROD),
        ],
        TerrainType::Bars => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Wall(BarsWall, GAINSBORO),
        ],
        TerrainType::Fountain => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Table, DARKGOLDENROD),
        ],
        TerrainType::Altar => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Fountain, GAINSBORO),
        ],
        TerrainType::Barrel => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Barrell, DARKGOLDENROD),
        ],
        TerrainType::Grave => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Grave, SLATEGRAY),
        ],
        TerrainType::Stone => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Stone, SLATEGRAY),
        ],
        TerrainType::Menhir => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Menhir, SLATEGRAY),
        ],
        TerrainType::DeadTree => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(TreeTrunk, SADDLEBROWN),
        ],
    }.into_iter() {
        process(k, &mut draw, i);
    }
}
New block formatter
use calx::color::*;
use calx::{Rgba, Kernel, KernelTerrain};
use calx::backend::{Image};
use content::{TerrainType, Brush};
/// Surface angle for a visible sprite, used for dynamic lighting.
///
/// ```notrust
///
/// # north #
/// n n
/// w e
///
/// # x_ _y #
/// s -x_ _y- s
/// w -*- e
/// y- -x
/// # south #
/// ```
#[derive(Copy, Eq, PartialEq, Clone, Debug)]
pub enum Angle {
    Up,
    North,
    XWallBack,
    Northeast,
    East,
    Southeast,
    YWall,
    South,
    XWall,
    Southwest,
    West,
    Northwest,
    YWallBack,
}

/// Role a sprite plays inside a terrain cell.
#[derive(Copy, Eq, PartialEq, Clone, Debug)]
pub enum Purpose {
    /// A regular terrain sprite.
    Element,
    /// A filler sprite (see `render`).
    Filler,
}

impl Angle {
    /// Return the angle of the vertical surface, if any, in degrees.
    pub fn degree(&self) -> Option<f32> {
        // Headings advance in 30-degree steps; `Up` is the one
        // horizontal surface and has no heading at all.
        match self {
            Angle::Up => None,
            Angle::North => Some(0.0),
            Angle::XWallBack => Some(30.0),
            Angle::Northeast => Some(60.0),
            Angle::East => Some(90.0),
            Angle::Southeast => Some(120.0),
            Angle::YWall => Some(150.0),
            Angle::South => Some(180.0),
            Angle::XWall => Some(210.0),
            Angle::Southwest => Some(240.0),
            Angle::West => Some(270.0),
            Angle::Northwest => Some(300.0),
            Angle::YWallBack => Some(330.0),
        }
    }
}
/// Generate draw instructions for a terrain cell.
///
/// Params to the draw function: sprite image, surface angle for lighting,
/// sprite purpose, main color, border color.
pub fn render<F>(k: &Kernel<TerrainType>, mut draw: F)
    where F: FnMut(Image, Angle, Purpose, Rgba, Rgba)
{
    use content::Brush::*;
    use self::Angle::*;
    use self::Purpose::*;

    // Shorthand draw commands for one cell. The `2`-suffixed variants carry
    // an explicit border color; the plain ones default the border to BLACK.
    enum T {
        Floor(Brush, Rgba),
        Floor2(Brush, Rgba, Rgba),
        Prop(Brush, Rgba),
        Prop2(Brush, Rgba, Rgba),
        Wall(Brush, Rgba),
        Wall2(Brush, Rgba, Rgba),
        Block(Brush, Rgba),
        Block2(Brush, Rgba, Rgba),
        Filler(Brush),
    }

    // Expand one draw command into calls to `draw`. The short-form variants
    // recurse once to fill in the default BLACK border color.
    fn process<C: KernelTerrain, F>(
        k: &Kernel<C>, draw: &mut F, kind: T)
        where F: FnMut(Image, Angle, Purpose, Rgba, Rgba)
    {
        match kind {
            T::Floor(brush, color) => process(k, draw, T::Floor2(brush, color, BLACK)),
            T::Prop(brush, color) => process(k, draw, T::Prop2(brush, color, BLACK)),
            T::Wall(brush, color) => process(k, draw, T::Wall2(brush, color, BLACK)),
            T::Block(brush, color) => process(k, draw, T::Block2(brush, color, BLACK)),
            T::Floor2(brush, color, back) => {
                // Floors lie flat, hence the `Up` angle.
                draw(brush.get(0), Up, Element, color, back);
            }
            T::Prop2(brush, color, back) => {
                draw(brush.get(0), South, Element, color, back);
            }
            T::Wall2(brush, color, back) => {
                // Frame choice depends on `wall_extends()` — presumably
                // whether the wall continues into adjacent cells; confirm.
                let extends = k.wall_extends();
                if extends[0] {
                    draw(brush.get(2), XWall, Element, color, back);
                } else {
                    draw(brush.get(0), XWall, Element, color, back);
                }
                if extends[1] {
                    draw(brush.get(3), YWall, Element, color, back);
                } else {
                    draw(brush.get(1), YWall, Element, color, back);
                }
            }
            T::Block2(brush, color, back) => {
                // This part gets a little tricky. Basic idea is that
                // there's an inner pointy-top hex core and the block hull
                // will snap to that instead of the outer flat-top hex
                // edge if neither adjacent face to the outer hex vertex
                // is connected to another block.
                //
                // Based on how the sprites split up, the processing is
                // done in four vertical segments.

                // Face indices map to N, NE, SE, S, SW, NW as used below.
                let faces = k.block_faces();

                // Do we snap to the outer vertices?
                let ne_vertex = !faces[0] || !faces[1];
                let e_vertex = !faces[1] || !faces[2];
                let se_vertex = !faces[2] || !faces[3];
                let sw_vertex = !faces[3] || !faces[4];
                let w_vertex = !faces[4] || !faces[5];
                let nw_vertex = !faces[5] || !faces[0];

                // Segment 2, middle left
                {
                    if faces[0] {
                        if nw_vertex && ne_vertex {
                            draw(BlockRear.get(1), North, Element, color, BLACK);
                        } else if nw_vertex {
                            draw(BlockRear.get(9), XWallBack, Element, color, BLACK);
                        } else {
                            draw(BlockRear.get(5), YWallBack, Element, color, BLACK);
                        }
                    }
                    if faces[3] {
                        if sw_vertex && se_vertex {
                            draw(brush.get(1), South, Element, color, back);
                        } else if sw_vertex {
                            draw(brush.get(5), YWall, Element, color, back);
                        } else {
                            draw(brush.get(9), XWall, Element, color, back);
                        }
                    }
                }

                // Segment 3, middle right
                {
                    if faces[0] {
                        if ne_vertex && nw_vertex {
                            draw(BlockRear.get(2), North, Element, color, BLACK);
                        } else if ne_vertex {
                            draw(BlockRear.get(6), YWallBack, Element, color, BLACK);
                        } else {
                            draw(BlockRear.get(10), XWallBack, Element, color, BLACK);
                        }
                    }
                    if faces[3] {
                        if se_vertex && sw_vertex {
                            draw(brush.get(2), South, Element, color, back);
                        } else if se_vertex {
                            draw(brush.get(10), XWall, Element, color, back);
                        } else {
                            draw(brush.get(6), YWall, Element, color, back);
                        }
                    }
                }

                // The side segments need to come after the middle
                // segments so that the vertical edges can overwrite the
                // middle segment pixels.

                // Segment 1, left edge
                {
                    if w_vertex {
                        if faces[5] {
                            if nw_vertex {
                                draw(BlockRear.get(0), Northwest, Element, color, BLACK);
                            } else {
                                draw(BlockRear.get(4), YWallBack, Element, color, BLACK);
                            }
                        }
                        if faces[4] {
                            if sw_vertex {
                                draw(brush.get(0), Southwest, Element, color, back);
                            } else {
                                draw(brush.get(8), XWall, Element, color, back);
                            }
                        }
                    } else {
                        // Draw the left vertical line.
                        draw(BlockVertical.get(2), West, Element, color, BLACK);
                        if !faces[0] {
                            draw(BlockVertical.get(0), West, Element, color, BLACK);
                        }
                        if !faces[3] {
                            draw(BlockVertical.get(4), West, Element, color, BLACK);
                        }
                    }
                }

                // Segment 4, right edge
                {
                    if e_vertex {
                        if faces[1] {
                            if ne_vertex {
                                draw(BlockRear.get(3), Northeast, Element, color, BLACK);
                            } else {
                                draw(BlockRear.get(11), XWallBack, Element, color, BLACK);
                            }
                        }
                        if faces[2] {
                            if se_vertex {
                                draw(brush.get(3), Southeast, Element, color, back);
                            } else {
                                draw(brush.get(7), YWall, Element, color, back);
                            }
                        }
                    } else {
                        // Draw the right vertical line.
                        draw(BlockVertical.get(3), East, Element, color, BLACK);
                        if !faces[0] {
                            draw(BlockVertical.get(1), East, Element, color, BLACK);
                        }
                        if !faces[3] {
                            draw(BlockVertical.get(5), East, Element, color, BLACK);
                        }
                    }
                }
            }
            T::Filler(brush) => {
                draw(brush.get(0), Up, Filler, BLACK, BLACK);
            }
        }
    }

    // Map the center terrain type to its list of draw commands, then emit
    // them in order (later commands draw on top of earlier ones).
    for i in match k.center {
        TerrainType::Void => vec![T::Floor(BlankFloor, MAGENTA)],
        TerrainType::Floor => vec![T::Floor(Floor, SLATEGRAY)],
        TerrainType::Water => vec![T::Floor(Water, ROYALBLUE)],
        TerrainType::Shallows => vec![T::Floor(Shallows, CORNFLOWERBLUE)],
        TerrainType::Magma => vec![T::Floor2(Water, YELLOW, DARKRED)],
        TerrainType::Downstairs => vec![T::Floor(StairsDown, SLATEGRAY)],
        TerrainType::Wall => vec![
            T::Filler(BlankFloor),
            T::Wall(BrickWall, LIGHTSLATEGRAY),
        ],
        TerrainType::RockWall => vec![
            T::Filler(BlankFloor),
            T::Wall(RockWall, LIGHTSLATEGRAY),
        ],
        TerrainType::Rock => vec![
            T::Filler(BlankFloor),
            T::Block(BlockRock, DARKGOLDENROD),
        ],
        TerrainType::Tree => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(TreeTrunk, SADDLEBROWN),
            T::Prop(TreeFoliage, GREEN),
        ],
        TerrainType::Grass => vec![T::Floor(Floor, DARKGREEN)],
        TerrainType::Grass2 => vec![T::Floor(Grass, DARKGREEN)],
        TerrainType::Stalagmite => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Stalagmite, DARKGOLDENROD),
        ],
        TerrainType::Door => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Wall(BrickOpenWall, LIGHTSLATEGRAY),
            T::Wall(DoorWall, SADDLEBROWN),
        ],
        TerrainType::OpenDoor => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Wall(BrickOpenWall, LIGHTSLATEGRAY),
        ],
        TerrainType::Window => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Wall(BrickWindowWall, LIGHTSLATEGRAY),
        ],
        TerrainType::Table => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Table, DARKGOLDENROD),
        ],
        TerrainType::Fence => vec![
            // The floor type beneath the fence tile is visible, make it grass
            // if there's grass behind the fence. Otherwise make it regular
            // floor.
            if k.n == TerrainType::Grass || k.ne == TerrainType::Grass || k.nw == TerrainType::Grass {
                T::Floor(Grass, GREEN)
            } else {
                T::Floor(Floor, SLATEGRAY)
            },
            T::Wall(FenceWall, DARKGOLDENROD),
        ],
        TerrainType::Bars => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Wall(BarsWall, GAINSBORO),
        ],
        TerrainType::Fountain => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Table, DARKGOLDENROD),
        ],
        TerrainType::Altar => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Fountain, GAINSBORO),
        ],
        TerrainType::Barrel => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Barrell, DARKGOLDENROD),
        ],
        TerrainType::Grave => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Grave, SLATEGRAY),
        ],
        TerrainType::Stone => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Stone, SLATEGRAY),
        ],
        TerrainType::Menhir => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(Menhir, SLATEGRAY),
        ],
        TerrainType::DeadTree => vec![
            T::Floor(BlankFloor, SLATEGRAY),
            T::Prop(TreeTrunk, SADDLEBROWN),
        ],
    }.into_iter() {
        process(k, &mut draw, i);
    }
}
|
use token;
// Calculate accepts input tokens that are
// ordered according to Reverse Polish Notation
// and returns a result, or None when the tokens do not
// reduce to a single decimal number.
pub fn calculate(input: &Vec<token::Token>) -> Option<f64> {
    // Evaluation stack: operands are pushed, operators/functions pop them.
    let mut stack = Vec::new();

    // Iterate over a copy of the tokens so the caller's list is untouched.
    for tok in input.clone() {
        match tok {
            token::Token::DecimalNumber(n) => stack.push(token::Token::DecimalNumber(n)),
            token::Token::Operator(o, _, _) => {
                // Binary operator: the right operand is on top of the stack.
                let right = stack.pop();
                let left = stack.pop();
                match (left, right) {
                    (Some(token::Token::DecimalNumber(n1)), Some(token::Token::DecimalNumber(n2))) => stack.push(token::Token::DecimalNumber(operate(o, n1, n2))),
                    // Malformed expression: stop evaluating.
                    _ => break
                }
            },
            // Widen whole numbers so the stack only carries decimals.
            token::Token::WholeNumber(n) => stack.push(token::Token::DecimalNumber(n as f64)),
            token::Token::FunctionCall(function_name) => {
                match &function_name as &str {
                    // Unary functions: pop one argument, push the result.
                    "cos" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.cos()));
                        }
                    },
                    "sin" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.sin()));
                        }
                    },
                    "tan" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.tan()));
                        }
                    },
                    "floor" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.floor()));
                        }
                    },
                    "ceil" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.ceil()));
                        }
                    },
                    "round" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.round()));
                        }
                    },
                    "trunc" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.trunc()));
                        }
                    },
                    "fract" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.fract()));
                        }
                    },
                    // Binary functions: pop right then left operand.
                    "pow" => {
                        let right = stack.pop();
                        let left = stack.pop();
                        if let (Some(token::Token::DecimalNumber(n1)), Some(token::Token::DecimalNumber(n2))) = (left, right) {
                            stack.push(token::Token::DecimalNumber(n1.powf(n2)));
                        }
                    },
                    // sqrt accepts either numeric token kind.
                    "sqrt" => {
                        let arg = stack.pop();
                        match arg {
                            Some(token::Token::DecimalNumber(n)) => stack.push(token::Token::DecimalNumber((n as f64).sqrt())),
                            Some(token::Token::WholeNumber(n)) => stack.push(token::Token::DecimalNumber((n as f64).sqrt())),
                            _ => ()
                        }
                    },
                    "max" => {
                        let right = stack.pop();
                        let left = stack.pop();
                        if let (Some(token::Token::DecimalNumber(n1)), Some(token::Token::DecimalNumber(n2))) = (left, right) {
                            stack.push(token::Token::DecimalNumber(n1.max(n2)));
                        }
                    },
                    "min" => {
                        let right = stack.pop();
                        let left = stack.pop();
                        if let (Some(token::Token::DecimalNumber(n1)), Some(token::Token::DecimalNumber(n2))) = (left, right) {
                            stack.push(token::Token::DecimalNumber(n1.min(n2)));
                        }
                    },
                    "sum" => {
                        // Fixed: the previous version never popped the stack
                        // (an infinite loop), used a non-`mut` Vec, and called
                        // `fold`/`collect` directly on Vec. Pop numbers until a
                        // non-number (or empty stack) is hit, then push their sum.
                        let mut values = Vec::new();
                        while let Some(&token::Token::DecimalNumber(n)) = stack.last() {
                            values.push(n);
                            stack.pop();
                        }
                        stack.push(token::Token::DecimalNumber(values.iter().fold(0.0, |acc, val| acc + val)));
                    },
                    // Unknown function name: stop evaluating.
                    _ => break
                }
            },
            _ => ()
        }
    }

    // A well-formed expression leaves exactly the result on top.
    let result = stack.pop();
    match result {
        Some(token::Token::DecimalNumber(n)) => Some(n),
        _ => None
    }
}
/// Apply a binary arithmetic operator to two operands.
///
/// Unrecognized operators yield 0.
fn operate(operator: char, left: f64, right: f64) -> f64 {
    let result = match operator {
        '^' => left.powf(right),
        '*' => left * right,
        '/' => left / right,
        '+' => left + right,
        '-' => left - right,
        _ => 0.0,
    };
    result
}
Added a sum() function
use token;
// Calculate accepts input tokens that are
// ordered according to Reverse Polish Notation
// and returns a result, or None when the tokens do not
// reduce to a single decimal number.
pub fn calculate(input: &Vec<token::Token>) -> Option<f64> {
    // Work on a local copy so the caller's token list is untouched.
    let mut input = input.clone();
    // Evaluation stack: operands are pushed, operators/functions pop them.
    let mut stack = Vec::new();
    let mut len = input.len();

    // Iterate over the tokens and calculate a result
    while len > 0 {
        let tok = input.remove(0);
        match tok {
            token::Token::DecimalNumber(n) => stack.push(token::Token::DecimalNumber(n)),
            token::Token::Operator(o, _, _) => {
                // Binary operator: the right operand is on top of the stack.
                let right = stack.pop();
                let left = stack.pop();
                match (left, right) {
                    (Some(token::Token::DecimalNumber(n1)), Some(token::Token::DecimalNumber(n2))) => stack.push(token::Token::DecimalNumber(operate(o, n1, n2))),
                    // Malformed expression: stop evaluating.
                    _ => break
                }
            },
            // Widen whole numbers so the stack only carries decimals.
            token::Token::WholeNumber(n) => stack.push(token::Token::DecimalNumber(n as f64)),
            token::Token::FunctionCall(function_name) => {
                match &function_name as &str {
                    // Unary functions: pop one argument, push the result.
                    "cos" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.cos()));
                        }
                    },
                    "sin" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.sin()));
                        }
                    },
                    "tan" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.tan()));
                        }
                    },
                    "floor" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.floor()));
                        }
                    },
                    "ceil" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.ceil()));
                        }
                    },
                    "round" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.round()));
                        }
                    },
                    "trunc" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.trunc()));
                        }
                    },
                    "fract" => {
                        let arg = stack.pop();
                        if let Some(token::Token::DecimalNumber(n1)) = arg {
                            stack.push(token::Token::DecimalNumber(n1.fract()));
                        }
                    },
                    // Binary functions: pop right then left operand.
                    "pow" => {
                        let right = stack.pop();
                        let left = stack.pop();
                        if let (Some(token::Token::DecimalNumber(n1)), Some(token::Token::DecimalNumber(n2))) = (left, right) {
                            stack.push(token::Token::DecimalNumber(n1.powf(n2)));
                        }
                    },
                    // sqrt accepts either numeric token kind.
                    "sqrt" => {
                        let arg = stack.pop();
                        match arg {
                            Some(token::Token::DecimalNumber(n)) => stack.push(token::Token::DecimalNumber((n as f64).sqrt())),
                            Some(token::Token::WholeNumber(n)) => stack.push(token::Token::DecimalNumber((n as f64).sqrt())),
                            _ => ()
                        }
                    },
                    "max" => {
                        let right = stack.pop();
                        let left = stack.pop();
                        if let (Some(token::Token::DecimalNumber(n1)), Some(token::Token::DecimalNumber(n2))) = (left, right) {
                            stack.push(token::Token::DecimalNumber(n1.max(n2)));
                        }
                    },
                    "min" => {
                        let right = stack.pop();
                        let left = stack.pop();
                        if let (Some(token::Token::DecimalNumber(n1)), Some(token::Token::DecimalNumber(n2))) = (left, right) {
                            stack.push(token::Token::DecimalNumber(n1.min(n2)));
                        }
                    },
                    // sum pops every number on top of the stack (stopping at
                    // the first non-number token) and pushes their total.
                    "sum" => {
                        let mut values = Vec::new();
                        loop {
                            match stack.last() {
                                Some(&token::Token::DecimalNumber(n)) => {
                                    values.push(n);
                                    stack.pop();
                                },
                                _ => break
                            }
                        }
                        stack.push(token::Token::DecimalNumber(values.iter().fold(0.0, |acc, val| acc + val)));
                    },
                    // Unknown function name: stop evaluating.
                    _ => break
                }
            },
            _ => ()
        }
        len = input.len();
    }

    // A well-formed expression leaves exactly the result on top.
    let result = stack.pop();
    match result {
        Some(token::Token::DecimalNumber(n)) => Some(n),
        _ => None
    }
}
/// Apply a binary arithmetic operator to two operands.
///
/// Unrecognized operators yield 0.
fn operate(operator: char, left: f64, right: f64) -> f64 {
    if operator == '+' {
        left + right
    } else if operator == '-' {
        left - right
    } else if operator == '*' {
        left * right
    } else if operator == '/' {
        left / right
    } else if operator == '^' {
        left.powf(right)
    } else {
        0f64
    }
}
|
// Copyright 2018, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use futures_channel::{mpsc, oneshot};
use futures_core::future::Future;
use futures_core::stream::Stream;
use futures_core::task;
use futures_core::task::Poll;
use futures_util::future::FutureExt;
use futures_util::stream::StreamExt;
use std::marker::Unpin;
use std::pin;
use Continue;
use MainContext;
use Priority;
use Source;
/// Represents a `Future` around a `glib::Source`. The future will
/// be resolved once the source has provided a value
pub struct SourceFuture<F, T> {
    // Closure that creates the underlying source; consumed on first poll.
    create_source: Option<F>,
    // The attached source plus the receiving end it reports through;
    // populated on first poll, dropped once the value has arrived.
    source: Option<(Source, oneshot::Receiver<T>)>,
}

impl<F, T: 'static> SourceFuture<F, T>
where
    F: FnOnce(oneshot::Sender<T>) -> Source + Send + 'static,
{
    /// Create a new `SourceFuture`
    ///
    /// The provided closure should return a newly created `glib::Source` when called
    /// and pass the value provided by the source to the oneshot sender that is passed
    /// to the closure.
    pub fn new(create_source: F) -> SourceFuture<F, T> {
        SourceFuture {
            create_source: Some(create_source),
            source: None,
        }
    }
}

// The struct holds no self-references, so it is trivially `Unpin`.
impl<F, T> Unpin for SourceFuture<F, T> {}

impl<F, T> Future for SourceFuture<F, T>
where
    F: FnOnce(oneshot::Sender<T>) -> Source + Send + 'static,
{
    type Output = T;

    fn poll(mut self: pin::Pin<&mut Self>, ctx: &mut task::Context) -> Poll<T> {
        let SourceFuture {
            ref mut create_source,
            ref mut source,
            ..
        } = *self;

        // Lazily create and attach the source on the first poll.
        if let Some(create_source) = create_source.take() {
            let main_context = MainContext::ref_thread_default();
            assert!(
                main_context.is_owner(),
                "Spawning futures only allowed if the thread is owning the MainContext"
            );

            // Channel for sending back the Source result to our future here.
            //
            // In theory we could directly continue polling the
            // corresponding task from the Source callback,
            // however this would break at the very least
            // the g_main_current_source() API.
            let (send, recv) = oneshot::channel();

            let s = create_source(send);

            s.attach(Some(&main_context));
            *source = Some((s, recv));
        }

        // At this point we must have a receiver
        let res = {
            let &mut (_, ref mut receiver) = source.as_mut().unwrap();
            receiver.poll_unpin(ctx)
        };
        #[allow(clippy::match_wild_err_arm)]
        match res {
            // A sender dropped without sending a value is treated as a bug.
            Poll::Ready(Err(_)) => panic!("Source sender was unexpectedly closed"),
            Poll::Ready(Ok(v)) => {
                // Get rid of the reference to the source, it triggered
                let _ = source.take();
                Poll::Ready(v)
            }
            Poll::Pending => Poll::Pending,
        }
    }
}

impl<T, F> Drop for SourceFuture<T, F> {
    fn drop(&mut self) {
        // Get rid of the source, we don't care anymore if it still triggers
        if let Some((source, _)) = self.source.take() {
            source.destroy();
        }
    }
}
/// Create a `Future` that will resolve after the given number of milliseconds.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn timeout_future(value: u32) -> Box<dyn Future<Output = ()> + std::marker::Unpin + Send> {
    timeout_future_with_priority(::PRIORITY_DEFAULT, value)
}

/// Create a `Future` that will resolve after the given number of milliseconds.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn timeout_future_with_priority(
    priority: Priority,
    value: u32,
) -> Box<dyn Future<Output = ()> + std::marker::Unpin + Send> {
    Box::new(SourceFuture::new(move |send| {
        // The oneshot sender is consumed by send(); keep it in an Option so
        // the repeating source callback can move it out exactly once.
        let mut send = Some(send);
        ::timeout_source_new(value, None, priority, move || {
            let _ = send.take().unwrap().send(());
            Continue(false)
        })
    }))
}

/// Create a `Future` that will resolve after the given number of seconds.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn timeout_future_seconds(
    value: u32,
) -> Box<dyn Future<Output = ()> + std::marker::Unpin + Send> {
    timeout_future_seconds_with_priority(::PRIORITY_DEFAULT, value)
}

/// Create a `Future` that will resolve after the given number of seconds.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn timeout_future_seconds_with_priority(
    priority: Priority,
    value: u32,
) -> Box<dyn Future<Output = ()> + std::marker::Unpin + Send> {
    Box::new(SourceFuture::new(move |send| {
        // See timeout_future_with_priority for the Option-wrapped sender.
        let mut send = Some(send);
        ::timeout_source_new_seconds(value, None, priority, move || {
            let _ = send.take().unwrap().send(());
            Continue(false)
        })
    }))
}

/// Create a `Future` that will resolve once the child process with the given pid exits
///
/// The `Future` will resolve to the pid of the child process and the exit code.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn child_watch_future(
    pid: ::Pid,
) -> Box<dyn Future<Output = (::Pid, i32)> + std::marker::Unpin + Send> {
    child_watch_future_with_priority(::PRIORITY_DEFAULT, pid)
}

/// Create a `Future` that will resolve once the child process with the given pid exits
///
/// The `Future` will resolve to the pid of the child process and the exit code.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn child_watch_future_with_priority(
    priority: Priority,
    pid: ::Pid,
) -> Box<dyn Future<Output = (::Pid, i32)> + std::marker::Unpin + Send> {
    Box::new(SourceFuture::new(move |send| {
        let mut send = Some(send);
        // Note: the child-watch callback returns no Continue value here,
        // unlike the timeout/signal callbacks above.
        ::child_watch_source_new(pid, None, priority, move |pid, code| {
            let _ = send.take().unwrap().send((pid, code));
        })
    }))
}

#[cfg(any(unix, feature = "dox"))]
/// Create a `Future` that will resolve once the given UNIX signal is raised
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn unix_signal_future(signum: i32) -> Box<dyn Future<Output = ()> + std::marker::Unpin + Send> {
    unix_signal_future_with_priority(::PRIORITY_DEFAULT, signum)
}

#[cfg(any(unix, feature = "dox"))]
/// Create a `Future` that will resolve once the given UNIX signal is raised
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn unix_signal_future_with_priority(
    priority: Priority,
    signum: i32,
) -> Box<dyn Future<Output = ()> + std::marker::Unpin + Send> {
    Box::new(SourceFuture::new(move |send| {
        let mut send = Some(send);
        ::unix_signal_source_new(signum, None, priority, move || {
            let _ = send.take().unwrap().send(());
            Continue(false)
        })
    }))
}
/// Represents a `Stream` around a `glib::Source`. The stream will
/// provide all values that are provided by the source
pub struct SourceStream<F, T> {
    // Closure that creates the underlying source; consumed on first poll.
    create_source: Option<F>,
    // The attached source plus the receiving end it reports through;
    // populated on first poll, dropped once the stream ends.
    source: Option<(Source, mpsc::UnboundedReceiver<T>)>,
}

// The struct holds no self-references, so it is trivially `Unpin`.
impl<F, T> Unpin for SourceStream<F, T> {}

impl<F, T: 'static> SourceStream<F, T>
where
    F: FnOnce(mpsc::UnboundedSender<T>) -> Source + Send + 'static,
{
    /// Create a new `SourceStream`
    ///
    /// The provided closure should return a newly created `glib::Source` when called
    /// and pass the values provided by the source to the sender that is passed
    /// to the closure.
    pub fn new(create_source: F) -> SourceStream<F, T> {
        SourceStream {
            create_source: Some(create_source),
            source: None,
        }
    }
}

impl<F, T> Stream for SourceStream<F, T>
where
    F: FnOnce(mpsc::UnboundedSender<T>) -> Source + Send + 'static,
{
    type Item = T;

    fn poll_next(mut self: pin::Pin<&mut Self>, ctx: &mut task::Context) -> Poll<Option<T>> {
        let SourceStream {
            ref mut create_source,
            ref mut source,
            ..
        } = *self;

        // Lazily create and attach the source on the first poll.
        if let Some(create_source) = create_source.take() {
            let main_context = MainContext::ref_thread_default();
            assert!(
                main_context.is_owner(),
                "Spawning futures only allowed if the thread is owning the MainContext"
            );

            // Channel for sending back the Source result to our future here.
            //
            // In theory we could directly continue polling the
            // corresponding task from the Source callback,
            // however this would break at the very least
            // the g_main_current_source() API.
            let (send, recv) = mpsc::unbounded();

            let s = create_source(send);

            s.attach(Some(&main_context));
            *source = Some((s, recv));
        }

        // At this point we must have a receiver
        let res = {
            let &mut (_, ref mut receiver) = source.as_mut().unwrap();
            receiver.poll_next_unpin(ctx)
        };
        #[allow(clippy::match_wild_err_arm)]
        match res {
            Poll::Ready(v) => {
                if v.is_none() {
                    // Get rid of the reference to the source, it triggered
                    let _ = source.take();
                }
                Poll::Ready(v)
            }
            Poll::Pending => Poll::Pending,
        }
    }
}

impl<T, F> Drop for SourceStream<T, F> {
    fn drop(&mut self) {
        // Get rid of the source, we don't care anymore if it still triggers
        if let Some((source, _)) = self.source.take() {
            source.destroy();
        }
    }
}
/// Create a `Stream` that will provide a value every given number of milliseconds.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn interval_stream(value: u32) -> Box<dyn Stream<Item = ()> + std::marker::Unpin + Send> {
interval_stream_with_priority(::PRIORITY_DEFAULT, value)
}
/// Create a `Stream` that will provide a value every given number of milliseconds.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn interval_stream_with_priority(
priority: Priority,
value: u32,
) -> Box<dyn Stream<Item = ()> + std::marker::Unpin + Send> {
Box::new(SourceStream::new(move |send| {
::timeout_source_new(value, None, priority, move || {
if send.unbounded_send(()).is_err() {
Continue(false)
} else {
Continue(true)
}
})
}))
}
/// Create a `Stream` that will provide a value every given number of seconds.
///
/// The `Stream` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn interval_stream_seconds(
value: u32,
) -> Box<dyn Stream<Item = ()> + std::marker::Unpin + Send> {
interval_stream_seconds_with_priority(::PRIORITY_DEFAULT, value)
}
/// Create a `Stream` that will provide a value every given number of seconds.
///
/// The `Stream` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn interval_stream_seconds_with_priority(
priority: Priority,
value: u32,
) -> Box<dyn Stream<Item = ()> + std::marker::Unpin + Send> {
Box::new(SourceStream::new(move |send| {
::timeout_source_new_seconds(value, None, priority, move || {
if send.unbounded_send(()).is_err() {
Continue(false)
} else {
Continue(true)
}
})
}))
}
#[cfg(any(unix, feature = "dox"))]
/// Create a `Stream` that will provide a value whenever the given UNIX signal is raised
///
/// The `Stream` must be spawned on an `Executor` backed by a `glib::MainContext`.
///
/// Equivalent to [`unix_signal_stream_with_priority`] with `PRIORITY_DEFAULT`.
pub fn unix_signal_stream(signum: i32) -> Box<dyn Stream<Item = ()> + std::marker::Unpin + Send> {
    unix_signal_stream_with_priority(::PRIORITY_DEFAULT, signum)
}
#[cfg(any(unix, feature = "dox"))]
/// Create a `Stream` that will provide a value whenever the given UNIX signal is raised
///
/// The `Stream` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn unix_signal_stream_with_priority(
    priority: Priority,
    signum: i32,
) -> Box<dyn Stream<Item = ()> + std::marker::Unpin + Send> {
    Box::new(SourceStream::new(move |sender| {
        ::unix_signal_source_new(signum, None, priority, move || {
            // Keep watching for the signal while the receiving side is
            // alive; stop once the channel has been closed.
            Continue(sender.unbounded_send(()).is_ok())
        })
    }))
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    // A single timeout future resolves when driven via `block_on` on a
    // freshly created (and thus thread-owned) context.
    #[test]
    fn test_timeout() {
        let c = MainContext::new();
        let res = c.block_on(timeout_future(20));
        assert_eq!(res, ());
    }
    // The boxed future is `Send`, so it can be handed to `spawn`; the main
    // loop quits once the timeout fires.
    #[test]
    fn test_timeout_send() {
        let c = MainContext::new();
        let l = ::MainLoop::new(Some(&c), false);
        let l_clone = l.clone();
        c.spawn(timeout_future(20).then(move |()| {
            l_clone.quit();
            futures_util::future::ready(())
        }));
        l.run();
    }
    // An interval stream produces repeated values; `take(2)` bounds the
    // stream so `block_on` terminates after exactly two ticks.
    #[test]
    fn test_interval() {
        let c = MainContext::new();
        let mut count = 0;
        {
            let count = &mut count;
            let res = c.block_on(
                interval_stream(20)
                    .take(2)
                    .for_each(|()| {
                        *count = *count + 1;
                        futures_util::future::ready(())
                    })
                    .map(|_| ()),
            );
            assert_eq!(res, ());
        }
        assert_eq!(count, 2);
    }
    // Futures from other crates (a oneshot fed from a plain thread) can be
    // chained after a glib timeout on the default context.
    #[test]
    fn test_timeout_and_channel() {
        let c = MainContext::default();
        let res = c.block_on(timeout_future(20).then(|()| {
            let (sender, receiver) = oneshot::channel();
            thread::spawn(move || {
                sender.send(1).unwrap();
            });
            receiver.then(|i| futures_util::future::ready(i.unwrap()))
        }));
        assert_eq!(res, 1);
    }
}
Return Pin<Box<_>> instead of Box<_> for our futures/streams
// Copyright 2018, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use futures_channel::{mpsc, oneshot};
use futures_core::future::Future;
use futures_core::stream::Stream;
use futures_core::task;
use futures_core::task::Poll;
use futures_util::future::FutureExt;
use futures_util::stream::StreamExt;
use std::marker::Unpin;
use std::pin;
use std::pin::Pin;
use Continue;
use MainContext;
use Priority;
use Source;
/// Represents a `Future` around a `glib::Source`. The future will
/// be resolved once the source has provided a value.
pub struct SourceFuture<F, T> {
    // Factory closure; consumed on the first `poll` to create and attach
    // the source.
    create_source: Option<F>,
    // The attached source plus the receiver for its single result value;
    // populated on first poll, dropped again once the value has arrived.
    source: Option<(Source, oneshot::Receiver<T>)>,
}
impl<F, T: 'static> SourceFuture<F, T>
where
    F: FnOnce(oneshot::Sender<T>) -> Source + Send + 'static,
{
    /// Create a new `SourceFuture`
    ///
    /// The provided closure should return a newly created `glib::Source` when called
    /// and pass the value provided by the source to the oneshot sender that is passed
    /// to the closure.
    ///
    /// The closure is not invoked here: the source is created and attached
    /// lazily on the first `poll` of the future.
    pub fn new(create_source: F) -> SourceFuture<F, T> {
        SourceFuture {
            create_source: Some(create_source),
            source: None,
        }
    }
}
// `SourceFuture` stores no self-referential data, so it can implement
// `Unpin` unconditionally regardless of `F` and `T`.
impl<F, T> Unpin for SourceFuture<F, T> {}
impl<F, T> Future for SourceFuture<F, T>
where
    F: FnOnce(oneshot::Sender<T>) -> Source + Send + 'static,
{
    type Output = T;
    fn poll(mut self: pin::Pin<&mut Self>, ctx: &mut task::Context) -> Poll<T> {
        let SourceFuture {
            ref mut create_source,
            ref mut source,
            ..
        } = *self;
        // On the first poll, create the source and attach it to the
        // thread-default main context.
        if let Some(create_source) = create_source.take() {
            let main_context = MainContext::ref_thread_default();
            assert!(
                main_context.is_owner(),
                "Spawning futures only allowed if the thread is owning the MainContext"
            );
            // Channel for sending back the Source result to our future here.
            //
            // In theory we could directly continue polling the
            // corresponding task from the Source callback,
            // however this would break at the very least
            // the g_main_current_source() API.
            let (send, recv) = oneshot::channel();
            let s = create_source(send);
            s.attach(Some(&main_context));
            *source = Some((s, recv));
        }
        // At this point we must have a receiver
        let res = {
            let &mut (_, ref mut receiver) = source.as_mut().unwrap();
            receiver.poll_unpin(ctx)
        };
        #[allow(clippy::match_wild_err_arm)]
        match res {
            // An error means the sender was dropped without ever sending a
            // value — treated as a broken invariant of the source callback.
            Poll::Ready(Err(_)) => panic!("Source sender was unexpectedly closed"),
            Poll::Ready(Ok(v)) => {
                // Get rid of the reference to the source, it triggered
                let _ = source.take();
                Poll::Ready(v)
            }
            Poll::Pending => Poll::Pending,
        }
    }
}
// Generic parameters renamed to `<F, T>` to match the declaration order of
// `SourceFuture<F, T>` (`F` = source factory closure, `T` = output type).
// Purely a naming fix; semantics are unchanged.
impl<F, T> Drop for SourceFuture<F, T> {
    fn drop(&mut self) {
        // Get rid of the source, we don't care anymore if it still triggers
        if let Some((source, _)) = self.source.take() {
            source.destroy();
        }
    }
}
/// Create a `Future` that will resolve after the given number of milliseconds.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
///
/// Equivalent to [`timeout_future_with_priority`] with `PRIORITY_DEFAULT`.
pub fn timeout_future(value: u32) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>> {
    timeout_future_with_priority(::PRIORITY_DEFAULT, value)
}
/// Create a `Future` that will resolve after the given number of milliseconds.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn timeout_future_with_priority(
    priority: Priority,
    value: u32,
) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>> {
    Box::pin(SourceFuture::new(move |sender| {
        // The timeout callback runs exactly once (it returns
        // `Continue(false)`), consuming the oneshot sender on that single
        // invocation.
        let mut slot = Some(sender);
        ::timeout_source_new(value, None, priority, move || {
            let _ = slot.take().unwrap().send(());
            Continue(false)
        })
    }))
}
/// Create a `Future` that will resolve after the given number of seconds.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
///
/// Equivalent to [`timeout_future_seconds_with_priority`] with `PRIORITY_DEFAULT`.
pub fn timeout_future_seconds(value: u32) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>> {
    timeout_future_seconds_with_priority(::PRIORITY_DEFAULT, value)
}
/// Create a `Future` that will resolve after the given number of seconds.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn timeout_future_seconds_with_priority(
    priority: Priority,
    value: u32,
) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>> {
    Box::pin(SourceFuture::new(move |sender| {
        // The timeout callback runs exactly once (it returns
        // `Continue(false)`), consuming the oneshot sender on that single
        // invocation.
        let mut slot = Some(sender);
        ::timeout_source_new_seconds(value, None, priority, move || {
            let _ = slot.take().unwrap().send(());
            Continue(false)
        })
    }))
}
/// Create a `Future` that will resolve once the child process with the given pid exits
///
/// The `Future` will resolve to the pid of the child process and the exit code.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
///
/// Equivalent to [`child_watch_future_with_priority`] with `PRIORITY_DEFAULT`.
pub fn child_watch_future(
    pid: ::Pid,
) -> Pin<Box<dyn Future<Output = (::Pid, i32)> + Send + 'static>> {
    child_watch_future_with_priority(::PRIORITY_DEFAULT, pid)
}
/// Create a `Future` that will resolve once the child process with the given pid exits
///
/// The `Future` will resolve to the pid of the child process and the exit code.
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn child_watch_future_with_priority(
    priority: Priority,
    pid: ::Pid,
) -> Pin<Box<dyn Future<Output = (::Pid, i32)> + Send + 'static>> {
    Box::pin(SourceFuture::new(move |sender| {
        // The child-watch callback fires exactly once; the oneshot sender
        // stored in this slot is consumed on that single invocation.
        let mut slot = Some(sender);
        ::child_watch_source_new(pid, None, priority, move |pid, code| {
            let _ = slot.take().unwrap().send((pid, code));
        })
    }))
}
#[cfg(any(unix, feature = "dox"))]
/// Create a `Future` that will resolve once the given UNIX signal is raised
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
///
/// Equivalent to [`unix_signal_future_with_priority`] with `PRIORITY_DEFAULT`.
pub fn unix_signal_future(signum: i32) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>> {
    unix_signal_future_with_priority(::PRIORITY_DEFAULT, signum)
}
#[cfg(any(unix, feature = "dox"))]
/// Create a `Future` that will resolve once the given UNIX signal is raised
///
/// The `Future` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn unix_signal_future_with_priority(
    priority: Priority,
    signum: i32,
) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>> {
    Box::pin(SourceFuture::new(move |sender| {
        // The signal callback returns `Continue(false)` and therefore
        // fires exactly once, consuming the oneshot sender.
        let mut slot = Some(sender);
        ::unix_signal_source_new(signum, None, priority, move || {
            let _ = slot.take().unwrap().send(());
            Continue(false)
        })
    }))
}
/// Represents a `Stream` around a `glib::Source`. The stream will
/// provide all values that are provided by the source.
pub struct SourceStream<F, T> {
    // Factory closure; consumed on the first `poll_next` to create and
    // attach the source.
    create_source: Option<F>,
    // The attached source plus the receiver for its values; populated on
    // first poll, dropped again once the channel is exhausted.
    source: Option<(Source, mpsc::UnboundedReceiver<T>)>,
}
// `SourceStream` stores no self-referential data, so it can implement
// `Unpin` unconditionally regardless of `F` and `T`.
impl<F, T> Unpin for SourceStream<F, T> {}
impl<F, T: 'static> SourceStream<F, T>
where
    F: FnOnce(mpsc::UnboundedSender<T>) -> Source + Send + 'static,
{
    /// Create a new `SourceStream`
    ///
    /// The provided closure should return a newly created `glib::Source` when called
    /// and pass the values provided by the source to the sender that is passed
    /// to the closure.
    ///
    /// The closure is not invoked here: the source is created and attached
    /// lazily on the first `poll_next` of the stream.
    pub fn new(create_source: F) -> SourceStream<F, T> {
        SourceStream {
            create_source: Some(create_source),
            source: None,
        }
    }
}
impl<F, T> Stream for SourceStream<F, T>
where
    F: FnOnce(mpsc::UnboundedSender<T>) -> Source + Send + 'static,
{
    type Item = T;
    fn poll_next(mut self: pin::Pin<&mut Self>, ctx: &mut task::Context) -> Poll<Option<T>> {
        let SourceStream {
            ref mut create_source,
            ref mut source,
            ..
        } = *self;
        // On the first poll, create the source and attach it to the
        // thread-default main context.
        if let Some(create_source) = create_source.take() {
            let main_context = MainContext::ref_thread_default();
            assert!(
                main_context.is_owner(),
                "Spawning futures only allowed if the thread is owning the MainContext"
            );
            // Channel for sending back the Source result to our future here.
            //
            // In theory we could directly continue polling the
            // corresponding task from the Source callback,
            // however this would break at the very least
            // the g_main_current_source() API.
            let (send, recv) = mpsc::unbounded();
            let s = create_source(send);
            s.attach(Some(&main_context));
            *source = Some((s, recv));
        }
        // At this point we must have a receiver
        let res = {
            let &mut (_, ref mut receiver) = source.as_mut().unwrap();
            receiver.poll_next_unpin(ctx)
        };
        #[allow(clippy::match_wild_err_arm)]
        match res {
            Poll::Ready(v) => {
                // `None` means the sender side was dropped, i.e. the
                // stream is finished for good.
                if v.is_none() {
                    // Get rid of the reference to the source, it triggered
                    let _ = source.take();
                }
                Poll::Ready(v)
            }
            Poll::Pending => Poll::Pending,
        }
    }
}
// Generic parameters renamed to `<F, T>` to match the declaration order of
// `SourceStream<F, T>` (`F` = source factory closure, `T` = item type).
// Purely a naming fix; semantics are unchanged.
impl<F, T> Drop for SourceStream<F, T> {
    fn drop(&mut self) {
        // Get rid of the source, we don't care anymore if it still triggers
        if let Some((source, _)) = self.source.take() {
            source.destroy();
        }
    }
}
/// Create a `Stream` that will provide a value every given number of milliseconds.
///
/// The `Stream` must be spawned on an `Executor` backed by a `glib::MainContext`.
///
/// Equivalent to [`interval_stream_with_priority`] with `PRIORITY_DEFAULT`.
pub fn interval_stream(value: u32) -> Pin<Box<dyn Stream<Item = ()> + Send + 'static>> {
    interval_stream_with_priority(::PRIORITY_DEFAULT, value)
}
/// Create a `Stream` that will provide a value every given number of milliseconds.
///
/// The `Stream` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn interval_stream_with_priority(
    priority: Priority,
    value: u32,
) -> Pin<Box<dyn Stream<Item = ()> + Send + 'static>> {
    Box::pin(SourceStream::new(move |sender| {
        ::timeout_source_new(value, None, priority, move || {
            // Keep ticking while the receiving side is alive; tear the
            // source down as soon as the channel has been closed.
            Continue(sender.unbounded_send(()).is_ok())
        })
    }))
}
/// Create a `Stream` that will provide a value every given number of seconds.
///
/// The `Stream` must be spawned on an `Executor` backed by a `glib::MainContext`.
///
/// Equivalent to [`interval_stream_seconds_with_priority`] with `PRIORITY_DEFAULT`.
pub fn interval_stream_seconds(value: u32) -> Pin<Box<dyn Stream<Item = ()> + Send + 'static>> {
    interval_stream_seconds_with_priority(::PRIORITY_DEFAULT, value)
}
/// Create a `Stream` that will provide a value every given number of seconds.
///
/// The `Stream` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn interval_stream_seconds_with_priority(
    priority: Priority,
    value: u32,
) -> Pin<Box<dyn Stream<Item = ()> + Send + 'static>> {
    Box::pin(SourceStream::new(move |sender| {
        ::timeout_source_new_seconds(value, None, priority, move || {
            // Keep ticking while the receiving side is alive; tear the
            // source down as soon as the channel has been closed.
            Continue(sender.unbounded_send(()).is_ok())
        })
    }))
}
#[cfg(any(unix, feature = "dox"))]
/// Create a `Stream` that will provide a value whenever the given UNIX signal is raised
///
/// The `Stream` must be spawned on an `Executor` backed by a `glib::MainContext`.
///
/// Equivalent to [`unix_signal_stream_with_priority`] with `PRIORITY_DEFAULT`.
pub fn unix_signal_stream(signum: i32) -> Pin<Box<dyn Stream<Item = ()> + Send + 'static>> {
    unix_signal_stream_with_priority(::PRIORITY_DEFAULT, signum)
}
#[cfg(any(unix, feature = "dox"))]
/// Create a `Stream` that will provide a value whenever the given UNIX signal is raised
///
/// The `Stream` must be spawned on an `Executor` backed by a `glib::MainContext`.
pub fn unix_signal_stream_with_priority(
    priority: Priority,
    signum: i32,
) -> Pin<Box<dyn Stream<Item = ()> + Send + 'static>> {
    Box::pin(SourceStream::new(move |sender| {
        ::unix_signal_source_new(signum, None, priority, move || {
            // Keep watching for the signal while the receiving side is
            // alive; stop once the channel has been closed.
            Continue(sender.unbounded_send(()).is_ok())
        })
    }))
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    // A single timeout future resolves when driven via `block_on` on a
    // freshly created (and thus thread-owned) context.
    #[test]
    fn test_timeout() {
        let c = MainContext::new();
        let res = c.block_on(timeout_future(20));
        assert_eq!(res, ());
    }
    // The pinned, boxed future is `Send`, so it can be handed to `spawn`;
    // the main loop quits once the timeout fires.
    #[test]
    fn test_timeout_send() {
        let c = MainContext::new();
        let l = ::MainLoop::new(Some(&c), false);
        let l_clone = l.clone();
        c.spawn(timeout_future(20).then(move |()| {
            l_clone.quit();
            futures_util::future::ready(())
        }));
        l.run();
    }
    // An interval stream produces repeated values; `take(2)` bounds the
    // stream so `block_on` terminates after exactly two ticks.
    #[test]
    fn test_interval() {
        let c = MainContext::new();
        let mut count = 0;
        {
            let count = &mut count;
            let res = c.block_on(
                interval_stream(20)
                    .take(2)
                    .for_each(|()| {
                        *count = *count + 1;
                        futures_util::future::ready(())
                    })
                    .map(|_| ()),
            );
            assert_eq!(res, ());
        }
        assert_eq!(count, 2);
    }
    // Futures from other crates (a oneshot fed from a plain thread) can be
    // chained after a glib timeout on the default context.
    #[test]
    fn test_timeout_and_channel() {
        let c = MainContext::default();
        let res = c.block_on(timeout_future(20).then(|()| {
            let (sender, receiver) = oneshot::channel();
            thread::spawn(move || {
                sender.send(1).unwrap();
            });
            receiver.then(|i| futures_util::future::ready(i.unwrap()))
        }));
        assert_eq!(res, 1);
    }
}
|
#![allow(unused_variables)]
extern crate orbclient;
extern crate sinulation;
#[cfg(target_os = "redox")]
use sinulation::Trig;
use super::vid;
// Had to store this somewhere.
/*cargo build --example tetrahedrane_example --target i386-unknown-redox.json -- -C no-prepopulate-passes -C no-stack-check -C opt-level=2 -Z no-landing-pads -A dead_code
*/
/// Stores data about rendering, window and camera.
pub struct Window {
    // Framebuffer dimensions in pixels.
    pub screen_x: u32,
    pub screen_y: u32,
    // Camera translation; `render` adds these to every triangle's
    // position before projecting it.
    pub camera_x: f32,
    pub camera_y: f32,
    pub camera_z: f32,
    // Camera rotation per plane (x-y, x-z, y-z); `normalize_camera` wraps
    // them at 2*PI, so presumably radians — TODO confirm. Not read by
    // `render` in this file.
    pub camera_x_y: f32,
    pub camera_x_z: f32,
    pub camera_y_z: f32,
    // Underlying orbclient window used for line drawing.
    pub window: Box<orbclient::window::Window>,
    // Triangles queued for the next `render()` call; drained each frame.
    pub render_queue: Vec<vid::Triangle>,
}
impl Window {
/// Create a new window.
///
/// * `triangle_space` - how much space to preallocate for the triangles
pub fn new(screen_x: u32, screen_y: u32, window_name: &str, triangle_space: usize) -> Window {
let win = orbclient::window::Window::new_flags(10, 10, screen_x, screen_y, window_name, true).unwrap();
Window {
screen_x: screen_x,
screen_y: screen_y,
camera_x: 0.0,
camera_y: 0.0,
camera_z: 0.0,
camera_x_y: 0.0,
camera_x_z: 0.0,
camera_y_z: 0.0,
window: win,
render_queue: Vec::with_capacity(triangle_space),
}
}
/// Renders triangles onto the framebuffer.
pub fn render(&mut self) {
for mut triangle in &mut self.render_queue {
let flat_1 = triangle.p1.flat_point(self.screen_x, self.screen_y,
triangle.x + self.camera_x,
triangle.y + self.camera_y,
triangle.z + self.camera_z);
let flat_2 = triangle.p2.flat_point(self.screen_x, self.screen_y,
triangle.x + self.camera_x,
triangle.y + self.camera_y,
triangle.z + self.camera_z);
let flat_3 = triangle.p3.flat_point(self.screen_x, self.screen_y,
triangle.x + self.camera_x,
triangle.y + self.camera_y,
triangle.z + self.camera_z);
self.window.line(flat_1.x, flat_1.y, flat_2.x, flat_2.y, triangle.color.orb_color());
self.window.line(flat_3.x, flat_3.y, flat_2.x, flat_2.y, triangle.color.orb_color());
self.window.line(flat_1.x, flat_1.y, flat_3.x, flat_3.y, triangle.color.orb_color());
}
let used_space = self.render_queue.len();
self.render_queue = Vec::with_capacity(used_space);
}
/// Push a triangle onto the render queue.
pub fn push(&mut self, triangle: vid::Triangle) {
self.render_queue.push(triangle);
}
/// Push a group of triangles onto the render queue.
pub fn push_group(&mut self, group: &vid::TriangleGroup) {
for triangle in &group.triangles {
self.push(triangle.clone());
}
}
pub fn normalize_camera(&mut self) {
#[cfg(not(target_os = "redox"))]
use std::f32::consts::PI;
#[cfg(target_os = "redox")]
const PI: f32 = 3.141592653589793;
if self.camera_x_z > (PI * 2.0) {
self.camera_x_z -= (PI * 2.0);
}
if self.camera_x_y > (PI * 2.0) {
self.camera_x_y -= (PI * 2.0);
}
if self.camera_y_z > (PI * 2.0) {
self.camera_y_z -= (PI * 2.0);
}
}
}
Added some docstrings
#![allow(unused_variables)]
extern crate orbclient;
extern crate sinulation;
#[cfg(target_os = "redox")]
use sinulation::Trig;
use super::vid;
// Had to store this somewhere.
/*cargo build --example tetrahedrane_example --target i386-unknown-redox.json -- -C no-prepopulate-passes -C no-stack-check -C opt-level=2 -Z no-landing-pads -A dead_code
*/
/// Stores data about rendering, window and camera.
pub struct Window {
    // Framebuffer dimensions in pixels.
    pub screen_x: u32,
    pub screen_y: u32,
    // Camera translation; `render` adds these to every triangle's
    // position before projecting it.
    pub camera_x: f32,
    pub camera_y: f32,
    pub camera_z: f32,
    // Camera rotation per plane (x-y, x-z, y-z); `normalize_camera` wraps
    // them at 2*PI, so presumably radians — TODO confirm. Not read by
    // `render` in this file.
    pub camera_x_y: f32,
    pub camera_x_z: f32,
    pub camera_y_z: f32,
    // Underlying orbclient window used for line drawing.
    pub window: Box<orbclient::window::Window>,
    // Triangles queued for the next `render()` call; drained each frame.
    pub render_queue: Vec<vid::Triangle>,
}
impl Window {
/// Create a new window.
///
/// * `triangle_space` - how much space to preallocate for the triangles
pub fn new(screen_x: u32, screen_y: u32, window_name: &str, triangle_space: usize) -> Window {
let win = orbclient::window::Window::new_flags(10, 10, screen_x, screen_y, window_name, true).unwrap();
Window {
screen_x: screen_x,
screen_y: screen_y,
camera_x: 0.0,
camera_y: 0.0,
camera_z: 0.0,
camera_x_y: 0.0,
camera_x_z: 0.0,
camera_y_z: 0.0,
window: win,
render_queue: Vec::with_capacity(triangle_space),
}
}
/// Renders triangles onto the framebuffer.
pub fn render(&mut self) {
for mut triangle in &mut self.render_queue {
let flat_1 = triangle.p1.flat_point(self.screen_x, self.screen_y,
triangle.x + self.camera_x,
triangle.y + self.camera_y,
triangle.z + self.camera_z);
let flat_2 = triangle.p2.flat_point(self.screen_x, self.screen_y,
triangle.x + self.camera_x,
triangle.y + self.camera_y,
triangle.z + self.camera_z);
let flat_3 = triangle.p3.flat_point(self.screen_x, self.screen_y,
triangle.x + self.camera_x,
triangle.y + self.camera_y,
triangle.z + self.camera_z);
self.window.line(flat_1.x, flat_1.y, flat_2.x, flat_2.y, triangle.color.orb_color());
self.window.line(flat_3.x, flat_3.y, flat_2.x, flat_2.y, triangle.color.orb_color());
self.window.line(flat_1.x, flat_1.y, flat_3.x, flat_3.y, triangle.color.orb_color());
}
let used_space = self.render_queue.len();
self.render_queue = Vec::with_capacity(used_space);
}
/// Push a triangle onto the render queue.
pub fn push(&mut self, triangle: vid::Triangle) {
self.render_queue.push(triangle);
}
/// Push a group of triangles onto the render queue.
pub fn push_group(&mut self, group: &vid::TriangleGroup) {
for triangle in &group.triangles {
self.push(triangle.clone());
}
}
/// Normalize the camera rotations.
pub fn normalize_camera(&mut self) {
#[cfg(not(target_os = "redox"))]
use std::f32::consts::PI;
#[cfg(target_os = "redox")]
const PI: f32 = 3.141592653589793;
if self.camera_x_z > (PI * 2.0) {
self.camera_x_z -= (PI * 2.0);
}
if self.camera_x_y > (PI * 2.0) {
self.camera_x_y -= (PI * 2.0);
}
if self.camera_y_z > (PI * 2.0) {
self.camera_y_z -= (PI * 2.0);
}
}
}
|
//! Utility functions for use within wlroots-rs
use std::ffi::CString;
use std::process::exit;
/// Converts a Rust string into a C string without error handling.
/// If any error occurs, it is logged and then the program immediately
/// exits with a failure status.
pub fn safe_as_cstring<S>(string: S) -> CString
    where S: Into<Vec<u8>>
{
    CString::new(string).unwrap_or_else(|err| {
        wlr_log!(L_ERROR,
                 "Error occured while trying to convert a Rust string to a C string {:?}",
                 err);
        // `exit` never returns, so this arm diverges instead of
        // producing a CString.
        exit(1)
    })
}
Added C string to Rust string util function
//! Utility functions for use within wlroots-rs
use std::ffi::{CStr, CString};
use std::os::raw::c_char;
use std::process::exit;
/// Converts a Rust string into a C string without error handling.
/// If any error occurs, it is logged and then the program immediately
/// exits with a failure status.
pub fn safe_as_cstring<S>(string: S) -> CString
    where S: Into<Vec<u8>>
{
    CString::new(string).unwrap_or_else(|err| {
        wlr_log!(L_ERROR,
                 "Error occured while trying to convert a Rust string to a C string {:?}",
                 err);
        // `exit` never returns, so this arm diverges instead of
        // producing a CString.
        exit(1)
    })
}
/// Converts a C string into a Rust string without error handling.
///
/// Returns `None` for a null pointer; otherwise the bytes are decoded
/// lossily (invalid UTF-8 becomes U+FFFD).
///
/// # Safety
/// A non-null `c_str` must point to a valid NUL-terminated C string.
pub unsafe fn c_to_rust_string(c_str: *const c_char) -> Option<String> {
    if c_str.is_null() {
        return None;
    }
    Some(CStr::from_ptr(c_str).to_string_lossy().into_owned())
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.